YONG627 committed
Commit 10aad71 · 1 Parent(s): d2891ae

Upload 137 files

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. yolov5-code-main/CITATION.cff +14 -0
  2. yolov5-code-main/__pycache__/export.cpython-38.pyc +0 -0
  3. yolov5-code-main/__pycache__/hubconf.cpython-38.pyc +0 -0
  4. yolov5-code-main/__pycache__/ui_main_window.cpython-38.pyc +0 -0
  5. yolov5-code-main/__pycache__/val.cpython-38.pyc +0 -0
  6. yolov5-code-main/base_ui.py +72 -0
  7. yolov5-code-main/benchmarks.py +169 -0
  8. yolov5-code-main/demo.ipynb +653 -0
  9. yolov5-code-main/detect.py +261 -0
  10. yolov5-code-main/example_request.py +33 -0
  11. yolov5-code-main/export.py +668 -0
  12. yolov5-code-main/gradio_demo.py +26 -0
  13. yolov5-code-main/hub_detect.ipynb +0 -0
  14. yolov5-code-main/hubconf.py +169 -0
  15. yolov5-code-main/models/__init__.py +0 -0
  16. yolov5-code-main/models/__pycache__/__init__.cpython-38.pyc +0 -0
  17. yolov5-code-main/models/__pycache__/common.cpython-38.pyc +0 -0
  18. yolov5-code-main/models/__pycache__/experimental.cpython-38.pyc +0 -0
  19. yolov5-code-main/models/__pycache__/tf.cpython-38.pyc +0 -0
  20. yolov5-code-main/models/__pycache__/yolo.cpython-38.pyc +0 -0
  21. yolov5-code-main/models/common.py +956 -0
  22. yolov5-code-main/models/experimental.py +111 -0
  23. yolov5-code-main/models/hub/anchors.yaml +59 -0
  24. yolov5-code-main/models/hub/yolov3-spp.yaml +51 -0
  25. yolov5-code-main/models/hub/yolov3-tiny.yaml +41 -0
  26. yolov5-code-main/models/hub/yolov3.yaml +51 -0
  27. yolov5-code-main/models/hub/yolov5-bifpn.yaml +48 -0
  28. yolov5-code-main/models/hub/yolov5-fpn.yaml +42 -0
  29. yolov5-code-main/models/hub/yolov5-p2.yaml +54 -0
  30. yolov5-code-main/models/hub/yolov5-p34.yaml +41 -0
  31. yolov5-code-main/models/hub/yolov5-p6.yaml +56 -0
  32. yolov5-code-main/models/hub/yolov5-p7.yaml +67 -0
  33. yolov5-code-main/models/hub/yolov5-panet.yaml +48 -0
  34. yolov5-code-main/models/hub/yolov5l6.yaml +60 -0
  35. yolov5-code-main/models/hub/yolov5m6.yaml +60 -0
  36. yolov5-code-main/models/hub/yolov5n6.yaml +60 -0
  37. yolov5-code-main/models/hub/yolov5s-LeakyReLU.yaml +49 -0
  38. yolov5-code-main/models/hub/yolov5s-ghost.yaml +48 -0
  39. yolov5-code-main/models/hub/yolov5s-transformer.yaml +48 -0
  40. yolov5-code-main/models/hub/yolov5s6.yaml +60 -0
  41. yolov5-code-main/models/hub/yolov5x6.yaml +60 -0
  42. yolov5-code-main/models/segment/yolov5l-seg.yaml +48 -0
  43. yolov5-code-main/models/segment/yolov5m-seg.yaml +48 -0
  44. yolov5-code-main/models/segment/yolov5n-seg.yaml +48 -0
  45. yolov5-code-main/models/segment/yolov5s-seg.yaml +48 -0
  46. yolov5-code-main/models/segment/yolov5x-seg.yaml +48 -0
  47. yolov5-code-main/models/tf.py +608 -0
  48. yolov5-code-main/models/yolo.py +400 -0
  49. yolov5-code-main/models/yolov5l.yaml +48 -0
  50. yolov5-code-main/models/yolov5m.yaml +48 -0
yolov5-code-main/CITATION.cff ADDED
@@ -0,0 +1,14 @@
+ cff-version: 1.2.0
+ preferred-citation:
+   type: software
+   message: If you use YOLOv5, please cite it as below.
+   authors:
+     - family-names: Jocher
+       given-names: Glenn
+       orcid: "https://orcid.org/0000-0001-5950-6979"
+   title: "YOLOv5 by Ultralytics"
+   version: 7.0
+   doi: 10.5281/zenodo.3908559
+   date-released: 2020-05-29
+   license: GPL-3.0
+   url: "https://github.com/ultralytics/yolov5"
yolov5-code-main/__pycache__/export.cpython-38.pyc ADDED
Binary file (25.4 kB).

yolov5-code-main/__pycache__/hubconf.cpython-38.pyc ADDED
Binary file (5.52 kB).

yolov5-code-main/__pycache__/ui_main_window.cpython-38.pyc ADDED
Binary file (2.5 kB).

yolov5-code-main/__pycache__/val.cpython-38.pyc ADDED
Binary file (14 kB).
yolov5-code-main/base_ui.py ADDED
@@ -0,0 +1,72 @@
+ import cv2
+ import sys
+ import torch
+ from PySide6.QtWidgets import QMainWindow, QApplication, QFileDialog
+ from PySide6.QtGui import QPixmap, QImage
+ from PySide6.QtCore import QTimer
+
+ from ui_main_window import Ui_MainWindow
+
+
+ def convert2QImage(img):
+     # Wrap an RGB numpy array (H, W, C) as a QImage for display in a QLabel
+     height, width, channel = img.shape
+     return QImage(img, width, height, width * channel, QImage.Format_RGB888)
+
+
+ class MainWindow(QMainWindow, Ui_MainWindow):
+     def __init__(self):
+         super(MainWindow, self).__init__()
+         self.setupUi(self)
+         # Load a custom-trained YOLOv5 model from a local repository checkout
+         self.model = torch.hub.load("E:/BBX/Document/pytorch/食道胃病变检测/yolov5", "custom", path="E:/BBX/Document/pytorch/食道胃病变检测/YOLO5/yolov5-master/runs/exp45/weights/best.pt", source="local")
+         self.timer = QTimer()
+         self.timer.setInterval(1)  # ~1 ms tick: pull video frames as fast as they can be read
+         self.video = None
+         self.bind_slots()
+
+     def image_pred(self, file_path):
+         # Run inference on one image file and return the annotated result as a QImage
+         results = self.model(file_path)
+         image = results.render()[0]
+         return convert2QImage(image)
+
+     def open_image(self):
+         print("Detect-image button clicked!")
+         self.timer.stop()  # stop any running video prediction
+         file_path = QFileDialog.getOpenFileName(self, dir="E:/BBX/Document/pytorch/息肉病变检测/Kvasir-SEG/images", filter="*.jpg;*.png;*.jpeg")
+         if file_path[0]:
+             file_path = file_path[0]
+             qimage = self.image_pred(file_path)
+             self.input.setPixmap(QPixmap(file_path))
+             self.output.setPixmap(QPixmap.fromImage(qimage))
+
+     def video_pred(self):
+         # Called on every timer tick: read one frame, show it and its detections
+         ret, frame = self.video.read()
+         if not ret:
+             self.timer.stop()  # end of video
+         else:
+             frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+             self.input.setPixmap(QPixmap.fromImage(convert2QImage(frame)))
+             results = self.model(frame)
+             image = results.render()[0]
+             self.output.setPixmap(QPixmap.fromImage(convert2QImage(image)))
+
+     def open_video(self):
+         print("Detect-video button clicked!")
+         file_path = QFileDialog.getOpenFileName(self, dir="./datasets", filter="*.mp4")
+         if file_path[0]:
+             file_path = file_path[0]
+             self.video = cv2.VideoCapture(file_path)
+             self.timer.start()
+
+     def bind_slots(self):
+         self.det_image.clicked.connect(self.open_image)
+         self.det_video.clicked.connect(self.open_video)
+         self.timer.timeout.connect(self.video_pred)
+
+
+ if __name__ == "__main__":
+     app = QApplication(sys.argv)
+
+     window = MainWindow()
+     window.show()
+
+     app.exec()
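
The MainWindow flow above can be exercised without the GUI. A minimal sketch of the same torch.hub load-and-predict cycle (the repository and weights paths below are placeholders, not the paths from base_ui.py):

    import torch

    # Load a custom-trained model from a local YOLOv5 checkout, as MainWindow.__init__ does
    model = torch.hub.load("path/to/yolov5", "custom", path="path/to/best.pt", source="local")

    results = model("path/to/image.jpg")  # also accepts RGB numpy arrays and PIL images
    results.print()                       # per-class detection counts and inference speed
    annotated = results.render()[0]       # RGB numpy array with boxes drawn, as fed to convert2QImage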
yolov5-code-main/benchmarks.py ADDED
@@ -0,0 +1,169 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+ """
+ Run YOLOv5 benchmarks on all supported export formats
+
+ Format | `export.py --include` | Model
+ --- | --- | ---
+ PyTorch | - | yolov5s.pt
+ TorchScript | `torchscript` | yolov5s.torchscript
+ ONNX | `onnx` | yolov5s.onnx
+ OpenVINO | `openvino` | yolov5s_openvino_model/
+ TensorRT | `engine` | yolov5s.engine
+ CoreML | `coreml` | yolov5s.mlmodel
+ TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
+ TensorFlow GraphDef | `pb` | yolov5s.pb
+ TensorFlow Lite | `tflite` | yolov5s.tflite
+ TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
+ TensorFlow.js | `tfjs` | yolov5s_web_model/
+
+ Requirements:
+     $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu  # CPU
+     $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow  # GPU
+     $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com  # TensorRT
+
+ Usage:
+     $ python benchmarks.py --weights yolov5s.pt --img 640
+ """
+
+ import argparse
+ import platform
+ import sys
+ import time
+ from pathlib import Path
+
+ import pandas as pd
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[0]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ # ROOT = ROOT.relative_to(Path.cwd())  # relative
+
+ import export
+ from models.experimental import attempt_load
+ from models.yolo import SegmentationModel
+ from segment.val import run as val_seg
+ from utils import notebook_init
+ from utils.general import LOGGER, check_yaml, file_size, print_args
+ from utils.torch_utils import select_device
+ from val import run as val_det
+
+
+ def run(
+         weights=ROOT / 'yolov5s.pt',  # weights path
+         imgsz=640,  # inference size (pixels)
+         batch_size=1,  # batch size
+         data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+         device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+         half=False,  # use FP16 half-precision inference
+         test=False,  # test exports only
+         pt_only=False,  # test PyTorch only
+         hard_fail=False,  # throw error on benchmark failure
+ ):
+     y, t = [], time.time()
+     device = select_device(device)
+     model_type = type(attempt_load(weights, fuse=False))  # DetectionModel, SegmentationModel, etc.
+     for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():  # index, (name, file, suffix, CPU, GPU)
+         try:
+             assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
+             assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
+             if 'cpu' in device.type:
+                 assert cpu, 'inference not supported on CPU'
+             if 'cuda' in device.type:
+                 assert gpu, 'inference not supported on GPU'
+
+             # Export
+             if f == '-':
+                 w = weights  # PyTorch format
+             else:
+                 w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # all others
+             assert suffix in str(w), 'export failed'
+
+             # Validate
+             if model_type == SegmentationModel:
+                 result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
+                 metric = result[0][7]  # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls))
+             else:  # DetectionModel:
+                 result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half)
+                 metric = result[0][3]  # (p, r, map50, map, *loss(box, obj, cls))
+             speed = result[2][1]  # times (preprocess, inference, postprocess)
+             y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)])  # MB, mAP, t_inference
+         except Exception as e:
+             if hard_fail:
+                 assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}'
+             LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}')
+             y.append([name, None, None, None])  # mAP, t_inference
+         if pt_only and i == 0:
+             break  # break after PyTorch
+
+     # Print results
+     LOGGER.info('\n')
+     parse_opt()
+     notebook_init()  # print system info
+     # note: `map` below is the Python builtin (always truthy), so the metric columns are always selected
+     c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', '']
+     py = pd.DataFrame(y, columns=c)
+     LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)')
+     LOGGER.info(str(py if map else py.iloc[:, :2]))
+     if hard_fail and isinstance(hard_fail, str):
+         metrics = py['mAP50-95'].array  # values to compare to floor
+         floor = eval(hard_fail)  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
+         assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}'
+     return py
+
+
+ def test(
+         weights=ROOT / 'yolov5s.pt',  # weights path
+         imgsz=640,  # inference size (pixels)
+         batch_size=1,  # batch size
+         data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+         device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+         half=False,  # use FP16 half-precision inference
+         test=False,  # test exports only
+         pt_only=False,  # test PyTorch only
+         hard_fail=False,  # throw error on benchmark failure
+ ):
+     y, t = [], time.time()
+     device = select_device(device)
+     # export_formats() yields 5 columns (name, file, suffix, CPU, GPU), so unpack all 5
+     for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows():
+         try:
+             w = weights if f == '-' else \
+                 export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1]  # weights
+             assert suffix in str(w), 'export failed'
+             y.append([name, True])
+         except Exception:
+             y.append([name, False])  # mAP, t_inference
+
+     # Print results
+     LOGGER.info('\n')
+     parse_opt()
+     notebook_init()  # print system info
+     py = pd.DataFrame(y, columns=['Format', 'Export'])
+     LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)')
+     LOGGER.info(str(py))
+     return py
+
+
+ def parse_opt():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
+     parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
+     parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
+     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+     parser.add_argument('--test', action='store_true', help='test exports only')
+     parser.add_argument('--pt-only', action='store_true', help='test PyTorch only')
+     parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric')
+     opt = parser.parse_args()
+     opt.data = check_yaml(opt.data)  # check YAML
+     print_args(vars(opt))
+     return opt
+
+
+ def main(opt):
+     test(**vars(opt)) if opt.test else run(**vars(opt))
+
+
+ if __name__ == '__main__':
+     opt = parse_opt()
+     main(opt)
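
Besides the CLI shown in the docstring, run() can be called programmatically. A sketch assuming the repo root is on sys.path and a yolov5s.pt checkpoint plus coco128.yaml are available as described above:

    from benchmarks import run

    df = run(weights='yolov5s.pt', imgsz=640, pt_only=True)  # benchmark PyTorch only; returns a pandas DataFrame
    print(df[['Format', 'mAP50-95']])

    # CLI equivalent with a metric floor: fail if any benchmarked model scores below 0.29 mAP50-95
    # $ python benchmarks.py --weights yolov5s.pt --img 640 --hard-fail 0.29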
yolov5-code-main/demo.ipynb ADDED
@@ -0,0 +1,653 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import torchvision.models as models\n",
+ "from torchinfo import summary"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "model = models.mobilenet_v3_small(pretrained=True, progress=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "==========================================================================================\n",
+ "Layer (type:depth-idx) Output Shape Param #\n",
+ "==========================================================================================\n",
+ "Sequential [1, 576, 20, 20] --\n",
+ "├─ConvBNActivation: 1-1 [1, 16, 320, 320] --\n",
+ "│ └─Conv2d: 2-1 [1, 16, 320, 320] 432\n",
+ "│ └─BatchNorm2d: 2-2 [1, 16, 320, 320] 32\n",
+ "│ └─Hardswish: 2-3 [1, 16, 320, 320] --\n",
+ "├─InvertedResidual: 1-2 [1, 16, 160, 160] --\n",
+ "│ └─Sequential: 2-4 [1, 16, 160, 160] --\n",
+ "│ │ └─ConvBNActivation: 3-1 [1, 16, 160, 160] 176\n",
+ "│ │ └─SqueezeExcitation: 3-2 [1, 16, 160, 160] 280\n",
+ "│ │ └─ConvBNActivation: 3-3 [1, 16, 160, 160] 288\n",
+ "├─InvertedResidual: 1-3 [1, 24, 80, 80] --\n",
+ "│ └─Sequential: 2-5 [1, 24, 80, 80] --\n",
+ "│ │ └─ConvBNActivation: 3-4 [1, 72, 160, 160] 1,296\n",
+ "│ │ └─ConvBNActivation: 3-5 [1, 72, 80, 80] 792\n",
+ "│ │ └─ConvBNActivation: 3-6 [1, 24, 80, 80] 1,776\n",
+ "├─InvertedResidual: 1-4 [1, 24, 80, 80] --\n",
+ "│ └─Sequential: 2-6 [1, 24, 80, 80] --\n",
+ "│ │ └─ConvBNActivation: 3-7 [1, 88, 80, 80] 2,288\n",
+ "│ │ └─ConvBNActivation: 3-8 [1, 88, 80, 80] 968\n",
+ "│ │ └─ConvBNActivation: 3-9 [1, 24, 80, 80] 2,160\n",
+ "├─InvertedResidual: 1-5 [1, 40, 40, 40] --\n",
+ "│ └─Sequential: 2-7 [1, 40, 40, 40] --\n",
+ "│ │ └─ConvBNActivation: 3-10 [1, 96, 80, 80] 2,496\n",
+ "│ │ └─ConvBNActivation: 3-11 [1, 96, 40, 40] 2,592\n",
+ "│ │ └─SqueezeExcitation: 3-12 [1, 96, 40, 40] 4,728\n",
+ "│ │ └─ConvBNActivation: 3-13 [1, 40, 40, 40] 3,920\n",
+ "├─InvertedResidual: 1-6 [1, 40, 40, 40] --\n",
+ "│ └─Sequential: 2-8 [1, 40, 40, 40] --\n",
+ "│ │ └─ConvBNActivation: 3-14 [1, 240, 40, 40] 10,080\n",
+ "│ │ └─ConvBNActivation: 3-15 [1, 240, 40, 40] 6,480\n",
+ "│ │ └─SqueezeExcitation: 3-16 [1, 240, 40, 40] 31,024\n",
+ "│ │ └─ConvBNActivation: 3-17 [1, 40, 40, 40] 9,680\n",
+ "├─InvertedResidual: 1-7 [1, 40, 40, 40] --\n",
+ "│ └─Sequential: 2-9 [1, 40, 40, 40] --\n",
+ "│ │ └─ConvBNActivation: 3-18 [1, 240, 40, 40] 10,080\n",
+ "│ │ └─ConvBNActivation: 3-19 [1, 240, 40, 40] 6,480\n",
+ "│ │ └─SqueezeExcitation: 3-20 [1, 240, 40, 40] 31,024\n",
+ "│ │ └─ConvBNActivation: 3-21 [1, 40, 40, 40] 9,680\n",
+ "├─InvertedResidual: 1-8 [1, 48, 40, 40] --\n",
+ "│ └─Sequential: 2-10 [1, 48, 40, 40] --\n",
+ "│ │ └─ConvBNActivation: 3-22 [1, 120, 40, 40] 5,040\n",
+ "│ │ └─ConvBNActivation: 3-23 [1, 120, 40, 40] 3,240\n",
+ "│ │ └─SqueezeExcitation: 3-24 [1, 120, 40, 40] 7,832\n",
+ "│ │ └─ConvBNActivation: 3-25 [1, 48, 40, 40] 5,856\n",
+ "├─InvertedResidual: 1-9 [1, 48, 40, 40] --\n",
+ "│ └─Sequential: 2-11 [1, 48, 40, 40] --\n",
+ "│ │ └─ConvBNActivation: 3-26 [1, 144, 40, 40] 7,200\n",
+ "│ │ └─ConvBNActivation: 3-27 [1, 144, 40, 40] 3,888\n",
+ "│ │ └─SqueezeExcitation: 3-28 [1, 144, 40, 40] 11,704\n",
+ "│ │ └─ConvBNActivation: 3-29 [1, 48, 40, 40] 7,008\n",
+ "├─InvertedResidual: 1-10 [1, 96, 20, 20] --\n",
+ "│ └─Sequential: 2-12 [1, 96, 20, 20] --\n",
+ "│ │ └─ConvBNActivation: 3-30 [1, 288, 40, 40] 14,400\n",
+ "│ │ └─ConvBNActivation: 3-31 [1, 288, 20, 20] 7,776\n",
+ "│ │ └─SqueezeExcitation: 3-32 [1, 288, 20, 20] 41,832\n",
+ "│ │ └─ConvBNActivation: 3-33 [1, 96, 20, 20] 27,840\n",
+ "├─InvertedResidual: 1-11 [1, 96, 20, 20] --\n",
+ "│ └─Sequential: 2-13 [1, 96, 20, 20] --\n",
+ "│ │ └─ConvBNActivation: 3-34 [1, 576, 20, 20] 56,448\n",
+ "│ │ └─ConvBNActivation: 3-35 [1, 576, 20, 20] 15,552\n",
+ "│ │ └─SqueezeExcitation: 3-36 [1, 576, 20, 20] 166,608\n",
+ "│ │ └─ConvBNActivation: 3-37 [1, 96, 20, 20] 55,488\n",
+ "├─InvertedResidual: 1-12 [1, 96, 20, 20] --\n",
+ "│ └─Sequential: 2-14 [1, 96, 20, 20] --\n",
+ "│ │ └─ConvBNActivation: 3-38 [1, 576, 20, 20] 56,448\n",
+ "│ │ └─ConvBNActivation: 3-39 [1, 576, 20, 20] 15,552\n",
+ "│ │ └─SqueezeExcitation: 3-40 [1, 576, 20, 20] 166,608\n",
+ "│ │ └─ConvBNActivation: 3-41 [1, 96, 20, 20] 55,488\n",
+ "├─ConvBNActivation: 1-13 [1, 576, 20, 20] --\n",
+ "│ └─Conv2d: 2-15 [1, 576, 20, 20] 55,296\n",
+ "│ └─BatchNorm2d: 2-16 [1, 576, 20, 20] 1,152\n",
+ "│ └─Hardswish: 2-17 [1, 576, 20, 20] --\n",
+ "==========================================================================================\n",
+ "Total params: 927,008\n",
+ "Trainable params: 927,008\n",
+ "Non-trainable params: 0\n",
+ "Total mult-adds (M): 444.86\n",
+ "==========================================================================================\n",
+ "Input size (MB): 4.92\n",
+ "Forward/backward pass size (MB): 184.55\n",
+ "Params size (MB): 3.71\n",
+ "Estimated Total Size (MB): 193.17\n",
+ "=========================================================================================="
+ ]
+ },
+ "execution_count": 8,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "summary(model.features, input_size=(1, 3, 640, 640))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "MobileNetV3(\n",
+ " (features): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(3, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=16, bias=False)\n",
+ " (1): BatchNorm2d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (1): SqueezeExcitation(\n",
+ " (fc1): Conv2d(16, 8, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(8, 16, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (2): ConvBNActivation(\n",
+ " (0): Conv2d(16, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (2): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(16, 72, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(72, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(72, 72, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=72, bias=False)\n",
+ " (1): BatchNorm2d(72, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (2): ConvBNActivation(\n",
+ " (0): Conv2d(72, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(24, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (3): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(24, 88, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(88, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(88, 88, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=88, bias=False)\n",
+ " (1): BatchNorm2d(88, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (2): ConvBNActivation(\n",
+ " (0): Conv2d(88, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(24, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (4): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(24, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(96, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(96, 96, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), groups=96, bias=False)\n",
+ " (1): BatchNorm2d(96, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(96, 24, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(24, 96, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(96, 40, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(40, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (5): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(40, 240, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(240, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(240, 240, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=240, bias=False)\n",
+ " (1): BatchNorm2d(240, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(240, 64, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(64, 240, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(240, 40, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(40, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (6): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(40, 240, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(240, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(240, 240, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=240, bias=False)\n",
+ " (1): BatchNorm2d(240, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(240, 64, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(64, 240, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(240, 40, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(40, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (7): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(40, 120, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(120, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(120, 120, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=120, bias=False)\n",
+ " (1): BatchNorm2d(120, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(120, 32, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(32, 120, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(120, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(48, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (8): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(48, 144, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(144, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(144, 144, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=144, bias=False)\n",
+ " (1): BatchNorm2d(144, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(144, 40, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(40, 144, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(144, 48, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(48, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (9): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(48, 288, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(288, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(288, 288, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), groups=288, bias=False)\n",
+ " (1): BatchNorm2d(288, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(288, 72, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(72, 288, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(288, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(96, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (10): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(576, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(576, 576, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=576, bias=False)\n",
+ " (1): BatchNorm2d(576, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(576, 144, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(144, 576, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(96, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (11): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(576, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(576, 576, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=576, bias=False)\n",
+ " (1): BatchNorm2d(576, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (2): SqueezeExcitation(\n",
+ " (fc1): Conv2d(576, 144, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(144, 576, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (3): ConvBNActivation(\n",
+ " (0): Conv2d(576, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(96, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (12): ConvBNActivation(\n",
+ " (0): Conv2d(96, 576, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(576, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " )\n",
+ " (avgpool): AdaptiveAvgPool2d(output_size=1)\n",
+ " (classifier): Sequential(\n",
+ " (0): Linear(in_features=576, out_features=1024, bias=True)\n",
+ " (1): Hardswish()\n",
+ " (2): Dropout(p=0.2, inplace=True)\n",
+ " (3): Linear(in_features=1024, out_features=1000, bias=True)\n",
+ " )\n",
+ ")"
+ ]
+ },
+ "execution_count": 7,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "torch.nn.modules.container.Sequential"
+ ]
+ },
+ "execution_count": 9,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "type(model.features)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(3, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Hardswish()\n",
+ " )\n",
+ " (1): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(16, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=16, bias=False)\n",
+ " (1): BatchNorm2d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (1): SqueezeExcitation(\n",
+ " (fc1): Conv2d(16, 8, kernel_size=(1, 1), stride=(1, 1))\n",
+ " (relu): ReLU(inplace=True)\n",
+ " (fc2): Conv2d(8, 16, kernel_size=(1, 1), stride=(1, 1))\n",
+ " )\n",
+ " (2): ConvBNActivation(\n",
+ " (0): Conv2d(16, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(16, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (2): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(16, 72, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(72, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(72, 72, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=72, bias=False)\n",
+ " (1): BatchNorm2d(72, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (2): ConvBNActivation(\n",
+ " (0): Conv2d(72, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(24, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ " (3): InvertedResidual(\n",
+ " (block): Sequential(\n",
+ " (0): ConvBNActivation(\n",
+ " (0): Conv2d(24, 88, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(88, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (1): ConvBNActivation(\n",
+ " (0): Conv2d(88, 88, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=88, bias=False)\n",
+ " (1): BatchNorm2d(88, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): ReLU(inplace=True)\n",
+ " )\n",
+ " (2): ConvBNActivation(\n",
+ " (0): Conv2d(88, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
+ " (1): BatchNorm2d(24, eps=0.001, momentum=0.01, affine=True, track_running_stats=True)\n",
+ " (2): Identity()\n",
+ " )\n",
+ " )\n",
+ " )\n",
+ ")"
+ ]
+ },
+ "execution_count": 10,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "model.features[:4]\n",
+ "model.features[4:9]\n",
+ "model.features[9:]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Migrating database to v0.19.1\n"
+ ]
+ }
+ ],
+ "source": [
+ "import fiftyone as fo\n",
+ "import fiftyone.zoo as foz"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Downloading split 'validation' to 'C:\\Users\\13716\\fiftyone\\open-images-v7\\validation' if necessary\n",
+ "Downloading 'https://storage.googleapis.com/openimages/2018_04/validation/validation-images-with-rotation.csv' to 'C:\\Users\\13716\\fiftyone\\open-images-v7\\validation\\metadata\\image_ids.csv'\n",
+ "Downloading 'https://storage.googleapis.com/openimages/v5/class-descriptions-boxable.csv' to 'C:\\Users\\13716\\fiftyone\\open-images-v7\\validation\\metadata\\classes.csv'\n",
+ "Downloading 'https://storage.googleapis.com/openimages/v5/classes-segmentation.txt' to 'C:\\Users\\13716\\fiftyone\\open-images-v7\\validation\\metadata\\segmentation_classes.csv'\n",
+ "Downloading 'https://storage.googleapis.com/openimages/2018_04/bbox_labels_600_hierarchy.json' to 'C:\\Users\\13716\\AppData\\Local\\Temp\\tmpvqo81zzr\\metadata\\hierarchy.json'\n",
+ "Downloading 'https://storage.googleapis.com/openimages/v5/validation-annotations-object-segmentation.csv' to 'C:\\Users\\13716\\fiftyone\\open-images-v7\\validation\\labels\\segmentations.csv'\n",
+ "Only found 44 (<100) samples matching your requirements\n",
+ "Downloading 44 images\n",
+ " 100% |█████████████████████| 44/44 [6.7s elapsed, 0s remaining, 10.0 files/s] \n",
+ "Dataset info written to 'C:\\Users\\13716\\fiftyone\\open-images-v7\\info.json'\n",
+ "Loading 'open-images-v7' split 'validation'\n",
+ " 100% |███████████████████| 44/44 [6.0s elapsed, 0s remaining, 7.2 samples/s] \n",
+ "Dataset 'mouse' created\n"
+ ]
+ }
+ ],
+ "source": [
+ "dataset = foz.load_zoo_dataset(\n",
+ " \"open-images-v7\",\n",
+ " split=\"validation\",\n",
+ " label_types=[\"detections\"],\n",
+ " classes=[\"Mouse\"],\n",
+ " max_samples=100,\n",
+ " seed=51,\n",
+ " shuffle=True,\n",
+ " dataset_name=\"mouse\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ " <iframe\n",
+ " width=\"100%\"\n",
+ " height=\"800\"\n",
+ " src=\"http://localhost:5151/?notebook=True&subscription=cd5b5a45-6741-431b-91a8-baf242bcdbe4\"\n",
+ " frameborder=\"0\"\n",
+ " allowfullscreen\n",
+ " \n",
+ " ></iframe>\n",
+ " "
+ ],
+ "text/plain": [
+ "<IPython.lib.display.IFrame at 0x2abcb20cac0>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "data": {
+ "text/plain": [
+ "Dataset: mouse\n",
+ "Media type: image\n",
+ "Num samples: 44\n",
+ "Selected samples: 0\n",
+ "Selected labels: 0\n",
+ "Session URL: http://localhost:5151/"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "fo.launch_app(dataset)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "yolov5",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.16"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
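
The slices inspected in the notebook (model.features[:4], [4:9], [9:]) correspond to the stride-8/16/32 stages of MobileNetV3-Small, e.g. for reuse as multi-scale backbone stages in a detector. A sketch of that split (stage boundaries follow the notebook; pretrained=True matches its torchvision version):

    import torch
    import torchvision.models as models

    m = models.mobilenet_v3_small(pretrained=True).features
    stage1, stage2, stage3 = m[:4], m[4:9], m[9:]  # the three slices from the notebook

    x = torch.zeros(1, 3, 640, 640)
    p3 = stage1(x)   # [1, 24, 80, 80]   -> stride 8
    p4 = stage2(p3)  # [1, 48, 40, 40]   -> stride 16
    p5 = stage3(p4)  # [1, 576, 20, 20]  -> stride 32
    print(p3.shape, p4.shape, p5.shape)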
yolov5-code-main/detect.py ADDED
@@ -0,0 +1,261 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+ """
+ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+
+ Usage - sources:
+     $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                      img.jpg                         # image
+                                                      vid.mp4                         # video
+                                                      screen                          # screenshot
+                                                      path/                           # directory
+                                                      list.txt                        # list of images
+                                                      list.streams                    # list of streams
+                                                      'path/*.jpg'                    # glob
+                                                      'https://youtu.be/Zgi9g1ksQHc'  # YouTube
+                                                      'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+
+ Usage - formats:
+     $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                  yolov5s.torchscript        # TorchScript
+                                  yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                  yolov5s_openvino_model     # OpenVINO
+                                  yolov5s.engine             # TensorRT
+                                  yolov5s.mlmodel            # CoreML (macOS-only)
+                                  yolov5s_saved_model        # TensorFlow SavedModel
+                                  yolov5s.pb                 # TensorFlow GraphDef
+                                  yolov5s.tflite             # TensorFlow Lite
+                                  yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                  yolov5s_paddle_model       # PaddlePaddle
+ """
+
+ import argparse
+ import os
+ import platform
+ import sys
+ from pathlib import Path
+
+ import torch
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[0]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+ from models.common import DetectMultiBackend
+ from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+ from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                            increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
+ from utils.plots import Annotator, colors, save_one_box
+ from utils.torch_utils import select_device, smart_inference_mode
+
+
+ @smart_inference_mode()
+ def run(
+         weights=ROOT / 'yolov5s.pt',  # model path or triton URL
+         source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+         data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+         imgsz=(640, 640),  # inference size (height, width)
+         conf_thres=0.25,  # confidence threshold
+         iou_thres=0.45,  # NMS IOU threshold
+         max_det=1000,  # maximum detections per image
+         device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+         view_img=False,  # show results
+         save_txt=False,  # save results to *.txt
+         save_conf=False,  # save confidences in --save-txt labels
+         save_crop=False,  # save cropped prediction boxes
+         nosave=False,  # do not save images/videos
+         classes=None,  # filter by class: --class 0, or --class 0 2 3
+         agnostic_nms=False,  # class-agnostic NMS
+         augment=False,  # augmented inference
+         visualize=False,  # visualize features
+         update=False,  # update all models
+         project=ROOT / 'runs/detect',  # save results to project/name
+         name='exp',  # save results to project/name
+         exist_ok=False,  # existing project/name ok, do not increment
+         line_thickness=3,  # bounding box thickness (pixels)
+         hide_labels=False,  # hide labels
+         hide_conf=False,  # hide confidences
+         half=False,  # use FP16 half-precision inference
+         dnn=False,  # use OpenCV DNN for ONNX inference
+         vid_stride=1,  # video frame-rate stride
+ ):
+     source = str(source)
+     save_img = not nosave and not source.endswith('.txt')  # save inference images
+     is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+     is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+     webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+     screenshot = source.lower().startswith('screen')
+     if is_url and is_file:
+         source = check_file(source)  # download
+
+     # Directories
+     save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+
+     # Load model
+     device = select_device(device)
+     model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+     stride, names, pt = model.stride, model.names, model.pt
+     imgsz = check_img_size(imgsz, s=stride)  # check image size
+
+     # Dataloader
+     bs = 1  # batch_size
+     if webcam:
+         view_img = check_imshow(warn=True)
+         dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+         bs = len(dataset)
+     elif screenshot:
+         dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+     else:
+         dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+     vid_path, vid_writer = [None] * bs, [None] * bs
+
+     # Run inference
+     model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
+     seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+     for path, im, im0s, vid_cap, s in dataset:
+         with dt[0]:
+             im = torch.from_numpy(im).to(model.device)
+             im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+             im /= 255  # 0 - 255 to 0.0 - 1.0
+             if len(im.shape) == 3:
+                 im = im[None]  # expand for batch dim
+
+         # Inference
+         with dt[1]:
+             visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+             pred = model(im, augment=augment, visualize=visualize)
+
+         # NMS
+         with dt[2]:
+             pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+
+         # Second-stage classifier (optional)
+         # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+
+         # Process predictions
+         for i, det in enumerate(pred):  # per image
+             seen += 1
+             if webcam:  # batch_size >= 1
+                 p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                 s += f'{i}: '
+             else:
+                 p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+
+             p = Path(p)  # to Path
+             save_path = str(save_dir / p.name)  # im.jpg
+             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+             s += '%gx%g ' % im.shape[2:]  # print string
+             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+             imc = im0.copy() if save_crop else im0  # for save_crop
+             annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+             if len(det):
+                 # Rescale boxes from img_size to im0 size
+                 det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+
+                 # Print results
+                 for c in det[:, 5].unique():
+                     n = (det[:, 5] == c).sum()  # detections per class
+                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                 # Write results
+                 for *xyxy, conf, cls in reversed(det):
+                     if save_txt:  # Write to file
+                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                         line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+                         with open(f'{txt_path}.txt', 'a') as f:
+                             f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                     if save_img or save_crop or view_img:  # Add bbox to image
+                         c = int(cls)  # integer class
+                         label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+                         annotator.box_label(xyxy, label, color=colors(c, True))
+                     if save_crop:
+                         save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+
+             # Stream results
+             im0 = annotator.result()
+             if view_img:
+                 if platform.system() == 'Linux' and p not in windows:
+                     windows.append(p)
+                     cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                     cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                 cv2.imshow(str(p), im0)
+                 cv2.waitKey(1)  # 1 millisecond
+
+             # Save results (image with detections)
+             if save_img:
+                 if dataset.mode == 'image':
+                     cv2.imwrite(save_path, im0)
+                 else:  # 'video' or 'stream'
+                     if vid_path[i] != save_path:  # new video
+                         vid_path[i] = save_path
+                         if isinstance(vid_writer[i], cv2.VideoWriter):
+                             vid_writer[i].release()  # release previous video writer
+                         if vid_cap:  # video
+                             fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                         else:  # stream
+                             fps, w, h = 30, im0.shape[1], im0.shape[0]
+                         save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                         vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                     vid_writer[i].write(im0)
+
+         # Print time (inference-only)
+         LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+
+     # Print results
+     t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+     LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+     if save_txt or save_img:
+         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+         LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+     if update:
+         strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+
+
+ def parse_opt():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+     parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+     parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+     parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+     parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+     parser.add_argument('--view-img', action='store_true', help='show results')
+     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+     parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+     parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+     parser.add_argument('--augment', action='store_true', help='augmented inference')
+     parser.add_argument('--visualize', action='store_true', help='visualize features')
+     parser.add_argument('--update', action='store_true', help='update all models')
+     parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
+     parser.add_argument('--name', default='exp', help='save results to project/name')
+     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+     parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+     parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+     parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+     parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+     parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+     opt = parser.parse_args()
+     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+     print_args(vars(opt))
+     return opt
+
+
+ def main(opt):
+     check_requirements(exclude=('tensorboard', 'thop'))
+     run(**vars(opt))
+
+
+ if __name__ == '__main__':
+     opt = parse_opt()
+     main(opt)
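
run() above is also importable from Python; a sketch mirroring `python detect.py --weights yolov5s.pt --source data/images`:

    import detect

    detect.run(
        weights='yolov5s.pt',   # any format from the docstring table
        source='data/images',   # file/dir/URL/glob/screen/0(webcam)
        conf_thres=0.25,
        save_txt=True,          # also write normalized xywh label files per image
    )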
yolov5-code-main/example_request.py ADDED
@@ -0,0 +1,33 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+ """
+ Perform test request
+ """
+
+ import pprint
+ import cv2
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ import requests
+
+ DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s'
+ IMAGE = 'data/images/zidane.jpg'
+
+ # Read image
+ # with open(IMAGE, 'rb') as f:
+ #     image_data = f.read()
+
+ img = cv2.imread(IMAGE)
+
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV reads BGR; convert before encoding
+
+ img = cv2.imencode(".jpg", img)[1].tobytes()  # re-encode as JPEG bytes for the request body
+
+ response = requests.post(DETECTION_URL, data=img)
+
+ # The server is expected to reply with annotated JPEG bytes; decode them back to an array
+ img = cv2.imdecode(np.frombuffer(response.content, dtype=np.uint8), cv2.IMREAD_COLOR)
+
+ plt.imshow(img)
+ plt.show()
+
+ # pprint.pprint(response)
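
This client assumes a server at DETECTION_URL that accepts raw JPEG bytes and answers with an annotated JPEG. That server is not part of this commit; a minimal Flask sketch of one (the torch.hub line pulls yolov5s from GitHub as a stand-in for a local custom model):

    import cv2
    import numpy as np
    import torch
    from flask import Flask, Response, request

    app = Flask(__name__)
    model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # placeholder model

    @app.route('/v1/object-detection/yolov5s', methods=['POST'])
    def predict():
        # Decode the posted JPEG bytes, run inference, return the annotated image
        img = cv2.imdecode(np.frombuffer(request.data, np.uint8), cv2.IMREAD_COLOR)
        results = model(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))  # YOLOv5 expects RGB
        annotated = cv2.cvtColor(results.render()[0], cv2.COLOR_RGB2BGR)
        ok, buf = cv2.imencode('.jpg', annotated)
        return Response(buf.tobytes(), mimetype='image/jpeg')

    if __name__ == '__main__':
        app.run(host='0.0.0.0', port=5000)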
yolov5-code-main/export.py ADDED
@@ -0,0 +1,668 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
4
+
5
+ Format | `export.py --include` | Model
6
+ --- | --- | ---
7
+ PyTorch | - | yolov5s.pt
8
+ TorchScript | `torchscript` | yolov5s.torchscript
9
+ ONNX | `onnx` | yolov5s.onnx
10
+ OpenVINO | `openvino` | yolov5s_openvino_model/
11
+ TensorRT | `engine` | yolov5s.engine
12
+ CoreML | `coreml` | yolov5s.mlmodel
13
+ TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
14
+ TensorFlow GraphDef | `pb` | yolov5s.pb
15
+ TensorFlow Lite | `tflite` | yolov5s.tflite
16
+ TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
17
+ TensorFlow.js | `tfjs` | yolov5s_web_model/
18
+ PaddlePaddle | `paddle` | yolov5s_paddle_model/
19
+
20
+ Requirements:
21
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
22
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
23
+
24
+ Usage:
25
+ $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
26
+
27
+ Inference:
28
+ $ python detect.py --weights yolov5s.pt # PyTorch
29
+ yolov5s.torchscript # TorchScript
30
+ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
31
+ yolov5s_openvino_model # OpenVINO
32
+ yolov5s.engine # TensorRT
33
+ yolov5s.mlmodel # CoreML (macOS-only)
34
+ yolov5s_saved_model # TensorFlow SavedModel
35
+ yolov5s.pb # TensorFlow GraphDef
36
+ yolov5s.tflite # TensorFlow Lite
37
+ yolov5s_edgetpu.tflite # TensorFlow Edge TPU
38
+ yolov5s_paddle_model # PaddlePaddle
39
+
40
+ TensorFlow.js:
41
+ $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
42
+ $ npm install
43
+ $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
44
+ $ npm start
45
+ """
46
+
47
+ import argparse
48
+ import contextlib
49
+ import json
50
+ import os
51
+ import platform
52
+ import re
53
+ import subprocess
54
+ import sys
55
+ import time
56
+ import warnings
57
+ from pathlib import Path
58
+
59
+ import pandas as pd
60
+ import torch
61
+ from torch.utils.mobile_optimizer import optimize_for_mobile
62
+
63
+ FILE = Path(__file__).resolve()
64
+ ROOT = FILE.parents[0] # YOLOv5 root directory
65
+ if str(ROOT) not in sys.path:
66
+ sys.path.append(str(ROOT)) # add ROOT to PATH
67
+ if platform.system() != 'Windows':
68
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
69
+
70
+ from models.experimental import attempt_load
71
+ from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
72
+ from utils.dataloaders import LoadImages
73
+ from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version,
74
+ check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save)
75
+ from utils.torch_utils import select_device, smart_inference_mode
76
+
77
+ MACOS = platform.system() == 'Darwin' # macOS environment
78
+
79
+
80
+ def export_formats():
81
+ # YOLOv5 export formats
82
+ x = [
83
+ ['PyTorch', '-', '.pt', True, True],
84
+ ['TorchScript', 'torchscript', '.torchscript', True, True],
85
+ ['ONNX', 'onnx', '.onnx', True, True],
86
+ ['OpenVINO', 'openvino', '_openvino_model', True, False],
87
+ ['TensorRT', 'engine', '.engine', False, True],
88
+ ['CoreML', 'coreml', '.mlmodel', True, False],
89
+ ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True],
90
+ ['TensorFlow GraphDef', 'pb', '.pb', True, True],
91
+ ['TensorFlow Lite', 'tflite', '.tflite', True, False],
92
+ ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False],
93
+ ['TensorFlow.js', 'tfjs', '_web_model', False, False],
94
+ ['PaddlePaddle', 'paddle', '_paddle_model', True, True],]
95
+ return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU'])
96
+
97
+
98
+ def try_export(inner_func):
99
+ # YOLOv5 export decorator, i.e. @try_export
100
+ inner_args = get_default_args(inner_func)
101
+
102
+ def outer_func(*args, **kwargs):
103
+ prefix = inner_args['prefix']
104
+ try:
105
+ with Profile() as dt:
106
+ f, model = inner_func(*args, **kwargs)
107
+ LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)')
108
+ return f, model
109
+ except Exception as e:
110
+ LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}')
111
+ return None, None
112
+
113
+ return outer_func
114
+
115
+
116
+ @try_export
117
+ def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')):
118
+ # YOLOv5 TorchScript model export
119
+ LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...')
120
+ f = file.with_suffix('.torchscript')
121
+
122
+ ts = torch.jit.trace(model, im, strict=False)
123
+ d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names}
124
+ extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap()
125
+ if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
126
+ optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
127
+ else:
128
+ ts.save(str(f), _extra_files=extra_files)
129
+ return f, None
130
+
131
+
132
+ @try_export
133
+ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')):
134
+ # YOLOv5 ONNX export
135
+ check_requirements('onnx>=1.12.0')
136
+ import onnx
137
+
138
+ LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...')
139
+ f = file.with_suffix('.onnx')
140
+
141
+ output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0']
142
+ if dynamic:
143
+ dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640)
144
+ if isinstance(model, SegmentationModel):
145
+ dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
146
+ dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160)
147
+ elif isinstance(model, DetectionModel):
148
+ dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85)
149
+
150
+ torch.onnx.export(
151
+ model.cpu() if dynamic else model, # --dynamic only compatible with cpu
152
+ im.cpu() if dynamic else im,
153
+ f,
154
+ verbose=False,
155
+ opset_version=opset,
156
+ do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
157
+ input_names=['images'],
158
+ output_names=output_names,
159
+ dynamic_axes=dynamic or None)
160
+
161
+ # Checks
162
+ model_onnx = onnx.load(f) # load onnx model
163
+ onnx.checker.check_model(model_onnx) # check onnx model
164
+
165
+ # Metadata
166
+ d = {'stride': int(max(model.stride)), 'names': model.names}
167
+ for k, v in d.items():
168
+ meta = model_onnx.metadata_props.add()
169
+ meta.key, meta.value = k, str(v)
170
+ onnx.save(model_onnx, f)
171
+
172
+ # Simplify
173
+ if simplify:
174
+ try:
175
+ cuda = torch.cuda.is_available()
176
+ check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1'))
177
+ import onnxsim
178
+
179
+ LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...')
180
+ model_onnx, check = onnxsim.simplify(model_onnx)
181
+ assert check, 'assert check failed'
182
+ onnx.save(model_onnx, f)
183
+ except Exception as e:
184
+ LOGGER.info(f'{prefix} simplifier failure: {e}')
185
+ return f, model_onnx
186
+
187
+
188
+ @try_export
189
+ def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')):
190
+ # YOLOv5 OpenVINO export
191
+ check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/
192
+ import openvino.inference_engine as ie
193
+
194
+ LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...')
195
+ f = str(file).replace('.pt', f'_openvino_model{os.sep}')
196
+
197
+ args = [
198
+ 'mo',
199
+ '--input_model',
200
+ str(file.with_suffix('.onnx')),
201
+ '--output_dir',
202
+ f,
203
+ '--data_type',
204
+ ('FP16' if half else 'FP32'),]
205
+ subprocess.run(args, check=True, env=os.environ) # export
206
+ yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
207
+ return f, None
208
+
209
+
210
+ @try_export
211
+ def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')):
212
+ # YOLOv5 Paddle export
213
+ check_requirements(('paddlepaddle', 'x2paddle'))
214
+ import x2paddle
215
+ from x2paddle.convert import pytorch2paddle
216
+
217
+ LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...')
218
+ f = str(file).replace('.pt', f'_paddle_model{os.sep}')
219
+
220
+ pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export
221
+ yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml
222
+ return f, None
223
+
224
+
225
+ @try_export
226
+ def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')):
227
+ # YOLOv5 CoreML export
228
+ check_requirements('coremltools')
229
+ import coremltools as ct
230
+
231
+ LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...')
232
+ f = file.with_suffix('.mlmodel')
233
+
234
+ ts = torch.jit.trace(model, im, strict=False) # TorchScript model
235
+ ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])])
236
+ bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None)
237
+ if bits < 32:
238
+ if MACOS: # quantization only supported on macOS
239
+ with warnings.catch_warnings():
240
+ warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning
241
+ ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
242
+ else:
243
+ print(f'{prefix} quantization only supported on macOS, skipping...')
244
+ ct_model.save(f)
245
+ return f, ct_model
246
+
247
+
248
+ @try_export
249
+ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')):
250
+ # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
251
+ assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. `python export.py --device 0`'
252
+ try:
253
+ import tensorrt as trt
254
+ except Exception:
255
+ if platform.system() == 'Linux':
256
+ check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
257
+ import tensorrt as trt
258
+
259
+ if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
260
+ grid = model.model[-1].anchor_grid
261
+ model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
262
+ export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
263
+ model.model[-1].anchor_grid = grid
264
+ else: # TensorRT >= 8
265
+ check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0
266
+ export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
267
+ onnx = file.with_suffix('.onnx')
268
+
269
+ LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...')
270
+ assert onnx.exists(), f'failed to export ONNX file: {onnx}'
271
+ f = file.with_suffix('.engine') # TensorRT engine file
272
+ logger = trt.Logger(trt.Logger.INFO)
273
+ if verbose:
274
+ logger.min_severity = trt.Logger.Severity.VERBOSE
275
+
276
+ builder = trt.Builder(logger)
277
+ config = builder.create_builder_config()
278
+ config.max_workspace_size = workspace * 1 << 30
279
+ # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice
280
+
281
+ flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH))
282
+ network = builder.create_network(flag)
283
+ parser = trt.OnnxParser(network, logger)
284
+ if not parser.parse_from_file(str(onnx)):
285
+ raise RuntimeError(f'failed to load ONNX file: {onnx}')
286
+
287
+ inputs = [network.get_input(i) for i in range(network.num_inputs)]
288
+ outputs = [network.get_output(i) for i in range(network.num_outputs)]
289
+ for inp in inputs:
290
+ LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
291
+ for out in outputs:
292
+ LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
293
+
294
+ if dynamic:
295
+ if im.shape[0] <= 1:
296
+ LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument')
297
+ profile = builder.create_optimization_profile()
298
+ for inp in inputs:
299
+ profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
300
+ config.add_optimization_profile(profile)
301
+
302
+ LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}')
303
+ if builder.platform_has_fast_fp16 and half:
304
+ config.set_flag(trt.BuilderFlag.FP16)
305
+ with builder.build_engine(network, config) as engine, open(f, 'wb') as t:
306
+ t.write(engine.serialize())
307
+ return f, None
308
+
309
+
310
+ @try_export
311
+ def export_saved_model(model,
312
+ im,
313
+ file,
314
+ dynamic,
315
+ tf_nms=False,
316
+ agnostic_nms=False,
317
+ topk_per_class=100,
318
+ topk_all=100,
319
+ iou_thres=0.45,
320
+ conf_thres=0.25,
321
+ keras=False,
322
+ prefix=colorstr('TensorFlow SavedModel:')):
323
+ # YOLOv5 TensorFlow SavedModel export
324
+ try:
325
+ import tensorflow as tf
326
+ except Exception:
327
+ check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}")
328
+ import tensorflow as tf
329
+ from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
330
+
331
+ from models.tf import TFModel
332
+
333
+ LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
334
+ f = str(file).replace('.pt', '_saved_model')
335
+ batch_size, ch, *imgsz = list(im.shape) # BCHW
336
+
337
+ tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
338
+ im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow
339
+ _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
340
+ inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
341
+ outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
342
+ keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
343
+ keras_model.trainable = False
344
+ keras_model.summary()
345
+ if keras:
346
+ keras_model.save(f, save_format='tf')
347
+ else:
348
+ spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
349
+ m = tf.function(lambda x: keras_model(x)) # full model
350
+ m = m.get_concrete_function(spec)
351
+ frozen_func = convert_variables_to_constants_v2(m)
352
+ tfm = tf.Module()
353
+ tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
354
+ tfm.__call__(im)
355
+ tf.saved_model.save(tfm,
356
+ f,
357
+ options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version(
358
+ tf.__version__, '2.6') else tf.saved_model.SaveOptions())
359
+ return f, keras_model
360
+
361
+
362
+ @try_export
363
+ def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')):
364
+ # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
365
+ import tensorflow as tf
366
+ from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
367
+
368
+ LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
369
+ f = file.with_suffix('.pb')
370
+
371
+ m = tf.function(lambda x: keras_model(x)) # full model
372
+ m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
373
+ frozen_func = convert_variables_to_constants_v2(m)
374
+ frozen_func.graph.as_graph_def()
375
+ tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
376
+ return f, None
377
+
378
+
379
+ @try_export
380
+ def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')):
381
+ # YOLOv5 TensorFlow Lite export
382
+ import tensorflow as tf
383
+
384
+ LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
385
+ batch_size, ch, *imgsz = list(im.shape) # BCHW
386
+ f = str(file).replace('.pt', '-fp16.tflite')
387
+
388
+ converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
389
+ converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
390
+ converter.target_spec.supported_types = [tf.float16]
391
+ converter.optimizations = [tf.lite.Optimize.DEFAULT]
392
+ if int8:
393
+ from models.tf import representative_dataset_gen
394
+ dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False)
395
+ converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
396
+ converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
397
+ converter.target_spec.supported_types = []
398
+ converter.inference_input_type = tf.uint8 # or tf.int8
399
+ converter.inference_output_type = tf.uint8 # or tf.int8
400
+ converter.experimental_new_quantizer = True
401
+ f = str(file).replace('.pt', '-int8.tflite')
402
+ if nms or agnostic_nms:
403
+ converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
404
+
405
+ tflite_model = converter.convert()
406
+ open(f, 'wb').write(tflite_model)
407
+ return f, None
408
+
409
+
410
+ @try_export
411
+ def export_edgetpu(file, prefix=colorstr('Edge TPU:')):
412
+ # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
413
+ cmd = 'edgetpu_compiler --version'
414
+ help_url = 'https://coral.ai/docs/edgetpu/compiler/'
415
+ assert platform.system() == 'Linux', f'export only supported on Linux. See {help_url}'
416
+ if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0:
417
+ LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}')
418
+ sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system
419
+ for c in (
420
+ 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -',
421
+ 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
422
+ 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'):
423
+ subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True)
424
+ ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
425
+
426
+ LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...')
427
+ f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model
428
+ f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model
429
+
430
+ subprocess.run([
431
+ 'edgetpu_compiler',
432
+ '-s',
433
+ '-d',
434
+ '-k',
435
+ '10',
436
+ '--out_dir',
437
+ str(file.parent),
438
+ f_tfl,], check=True)
439
+ return f, None
440
+
441
+
442
+ @try_export
443
+ def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')):
444
+ # YOLOv5 TensorFlow.js export
445
+ check_requirements('tensorflowjs')
446
+ import tensorflowjs as tfjs
447
+
448
+ LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...')
449
+ f = str(file).replace('.pt', '_web_model') # js dir
450
+ f_pb = file.with_suffix('.pb') # *.pb path
451
+ f_json = f'{f}/model.json' # *.json path
452
+
453
+ args = [
454
+ 'tensorflowjs_converter',
455
+ '--input_format=tf_frozen_model',
456
+ '--quantize_uint8' if int8 else '',
457
+ '--output_node_names=Identity,Identity_1,Identity_2,Identity_3',
458
+ str(f_pb),
459
+ str(f),]
460
+ subprocess.run([arg for arg in args if arg], check=True)
461
+
462
+ json = Path(f_json).read_text()
463
+ with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order
464
+ subst = re.sub(
465
+ r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
466
+ r'"Identity.?.?": {"name": "Identity.?.?"}, '
467
+ r'"Identity.?.?": {"name": "Identity.?.?"}, '
468
+ r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, '
469
+ r'"Identity_1": {"name": "Identity_1"}, '
470
+ r'"Identity_2": {"name": "Identity_2"}, '
471
+ r'"Identity_3": {"name": "Identity_3"}}}', json)
472
+ j.write(subst)
473
+ return f, None
474
+
475
+
476
+ def add_tflite_metadata(file, metadata, num_outputs):
477
+ # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata
478
+ with contextlib.suppress(ImportError):
479
+ # check_requirements('tflite_support')
480
+ from tflite_support import flatbuffers
481
+ from tflite_support import metadata as _metadata
482
+ from tflite_support import metadata_schema_py_generated as _metadata_fb
483
+
484
+ tmp_file = Path('/tmp/meta.txt')
485
+ with open(tmp_file, 'w') as meta_f:
486
+ meta_f.write(str(metadata))
487
+
488
+ model_meta = _metadata_fb.ModelMetadataT()
489
+ label_file = _metadata_fb.AssociatedFileT()
490
+ label_file.name = tmp_file.name
491
+ model_meta.associatedFiles = [label_file]
492
+
493
+ subgraph = _metadata_fb.SubGraphMetadataT()
494
+ subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
495
+ subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
496
+ model_meta.subgraphMetadata = [subgraph]
497
+
498
+ b = flatbuffers.Builder(0)
499
+ b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
500
+ metadata_buf = b.Output()
501
+
502
+ populator = _metadata.MetadataPopulator.with_model_file(file)
503
+ populator.load_metadata_buffer(metadata_buf)
504
+ populator.load_associated_files([str(tmp_file)])
505
+ populator.populate()
506
+ tmp_file.unlink()
507
+
508
+
509
+ @smart_inference_mode()
510
+ def run(
511
+ data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path'
512
+ weights=ROOT / 'yolov5s.pt', # weights path
513
+ imgsz=(640, 640), # image (height, width)
514
+ batch_size=1, # batch size
515
+ device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu
516
+ include=('torchscript', 'onnx'), # include formats
517
+ half=False, # FP16 half-precision export
518
+ inplace=False, # set YOLOv5 Detect() inplace=True
519
+ keras=False, # use Keras
520
+ optimize=False, # TorchScript: optimize for mobile
521
+ int8=False, # CoreML/TF INT8 quantization
522
+ dynamic=False, # ONNX/TF/TensorRT: dynamic axes
523
+ simplify=False, # ONNX: simplify model
524
+ opset=12, # ONNX: opset version
525
+ verbose=False, # TensorRT: verbose log
526
+ workspace=4, # TensorRT: workspace size (GB)
527
+ nms=False, # TF: add NMS to model
528
+ agnostic_nms=False, # TF: add agnostic NMS to model
529
+ topk_per_class=100, # TF.js NMS: topk per class to keep
530
+ topk_all=100, # TF.js NMS: topk for all classes to keep
531
+ iou_thres=0.45, # TF.js NMS: IoU threshold
532
+ conf_thres=0.25, # TF.js NMS: confidence threshold
533
+ ):
534
+ t = time.time()
535
+ include = [x.lower() for x in include] # to lowercase
536
+ fmts = tuple(export_formats()['Argument'][1:]) # --include arguments
537
+ flags = [x in include for x in fmts]
538
+ assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}'
539
+ jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans
540
+ file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights
541
+
542
+ # Load PyTorch model
543
+ device = select_device(device)
544
+ if half:
545
+ assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0'
546
+ assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both'
547
+ model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model
548
+
549
+ # Checks
550
+ imgsz *= 2 if len(imgsz) == 1 else 1 # expand
551
+ if optimize:
552
+ assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu'
553
+
554
+ # Input
555
+ gs = int(max(model.stride)) # grid size (max stride)
556
+ imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
557
+ im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
558
+
559
+ # Update model
560
+ model.eval()
561
+ for k, m in model.named_modules():
562
+ if isinstance(m, Detect):
563
+ m.inplace = inplace
564
+ m.dynamic = dynamic
565
+ m.export = True
566
+
567
+ for _ in range(2):
568
+ y = model(im) # dry runs
569
+ if half and not coreml:
570
+ im, model = im.half(), model.half() # to FP16
571
+ shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape
572
+ metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata
573
+ LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
574
+
575
+ # Exports
576
+ f = [''] * len(fmts) # exported filenames
577
+ warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning
578
+ if jit: # TorchScript
579
+ f[0], _ = export_torchscript(model, im, file, optimize)
580
+ if engine: # TensorRT required before ONNX
581
+ f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
582
+ if onnx or xml: # OpenVINO requires ONNX
583
+ f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
584
+ if xml: # OpenVINO
585
+ f[3], _ = export_openvino(file, metadata, half)
586
+ if coreml: # CoreML
587
+ f[4], _ = export_coreml(model, im, file, int8, half)
588
+ if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats
589
+ assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.'
590
+ assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.'
591
+ f[5], s_model = export_saved_model(model.cpu(),
592
+ im,
593
+ file,
594
+ dynamic,
595
+ tf_nms=nms or agnostic_nms or tfjs,
596
+ agnostic_nms=agnostic_nms or tfjs,
597
+ topk_per_class=topk_per_class,
598
+ topk_all=topk_all,
599
+ iou_thres=iou_thres,
600
+ conf_thres=conf_thres,
601
+ keras=keras)
602
+ if pb or tfjs: # pb prerequisite to tfjs
603
+ f[6], _ = export_pb(s_model, file)
604
+ if tflite or edgetpu:
605
+ f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms)
606
+ if edgetpu:
607
+ f[8], _ = export_edgetpu(file)
608
+ add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
609
+ if tfjs:
610
+ f[9], _ = export_tfjs(file, int8)
611
+ if paddle: # PaddlePaddle
612
+ f[10], _ = export_paddle(model, im, file, metadata)
613
+
614
+ # Finish
615
+ f = [str(x) for x in f if x] # filter out '' and None
616
+ if any(f):
617
+ cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type
618
+ det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel)
619
+ dir = Path('segment' if seg else 'classify' if cls else '')
620
+ h = '--half' if half else '' # --half FP16 inference arg
621
+ s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \
622
+ '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else ''
623
+ LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)'
624
+ f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
625
+ f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
626
+ f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
627
+ f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
628
+ f'\nVisualize: https://netron.app')
629
+ return f # return list of exported files/dirs
630
+
631
+
632
+ def parse_opt(known=False):
633
+ parser = argparse.ArgumentParser()
634
+ parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
635
+ parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
636
+ parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)')
637
+ parser.add_argument('--batch-size', type=int, default=1, help='batch size')
638
+ parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
639
+ parser.add_argument('--half', action='store_true', help='FP16 half-precision export')
640
+ parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True')
641
+ parser.add_argument('--keras', action='store_true', help='TF: use Keras')
642
+ parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile')
643
+ parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization')
644
+ parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes')
645
+ parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model')
646
+ parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version')
647
+ parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log')
648
+ parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)')
649
+ parser.add_argument('--nms', action='store_true', help='TF: add NMS to model')
650
+ parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model')
651
+ parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep')
652
+ parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep')
653
+ parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold')
654
+ parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold')
655
+ parser.add_argument('--include', nargs='+', default=['torchscript'], help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle')
656
+ opt = parser.parse_known_args()[0] if known else parser.parse_args()
657
+ print_args(vars(opt))
658
+ return opt
659
+
660
+
661
+ def main(opt):
662
+ for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]):
663
+ run(**vars(opt))
664
+
665
+
666
+ if __name__ == '__main__':
667
+ opt = parse_opt()
668
+ main(opt)
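After an ONNX export, a quick shape check with onnxruntime confirms the graph loads and produces the expected output. A minimal sketch, assuming the default tensor names written by export_onnx() above ('images' in, 'output0' out) and a yolov5s.onnx exported at the default 640x640 size:

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('yolov5s.onnx', providers=['CPUExecutionProvider'])
im = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy BCHW input
(pred,) = session.run(['output0'], {'images': im})
print(pred.shape)  # (1, 25200, 85) for an 80-class detection model at 640x640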
yolov5-code-main/gradio_demo.py ADDED
@@ -0,0 +1,26 @@
1
+ import torch
2
+ import gradio as gr
3
+
4
+ model = torch.hub.load("./", "custom", path="yolov5s.pt", source="local")
5
+
6
+ title = "✨YOLOv5-based Intestinal Polyp Lesion Detection Platform✨"  # original title: 基于YOLOV5的肠道息肉病变检测平台
7
+
8
+ desc = "(●'◡'●)"
9
+ base_conf, base_iou = 0.15, 0.5
10
+
11
+
12
+ def det_image(img, conf_最小置信度, iou_最大置信度):  # slider args: minimum confidence threshold, IoU threshold
13
+ model.conf = conf_最小置信度
14
+ model.iou = iou_最大置信度
15
+
16
+ return model(img).render()[0]
17
+
18
+
19
+ gr.Interface(inputs=["image", gr.Slider(minimum=0, maximum=1, value=base_conf), gr.Slider(minimum=0, maximum=1, value=base_iou)],
20
+ outputs=["image"],
21
+ fn=det_image,
22
+ title=title,
23
+ description=desc,
24
+ article="(●ˇ∀ˇ●)",
25
+ live=True,
26
+ examples=[["E:/BBX/Document/pytorch/息肉病变检测/YOLO5/yolov5-master/data/images/301.jpg", base_conf, base_iou],
+ ["E:/BBX/Document/pytorch/息肉病变检测/YOLO5/yolov5-master/data/images/402.jpg", 0.3, base_iou],
+ ["E:/BBX/Document/pytorch/息肉病变检测/YOLO5/yolov5-master/data/images/503.jpg", base_conf, base_iou]]).launch(auth=("admin", "1234"), share=True)
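A note on the launch settings above: auth=("admin", "1234") gates the page behind a basic username/password prompt, share=True additionally requests a temporary public gradio.live URL, and live=True re-runs det_image whenever the image or either slider changes, so no submit button is needed.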
yolov5-code-main/hub_detect.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
yolov5-code-main/hubconf.py ADDED
@@ -0,0 +1,169 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
4
+
5
+ Usage:
6
+ import torch
7
+ model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model
8
+ model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch
9
+ model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model
10
+ model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo
11
+ """
12
+
13
+ import torch
14
+
15
+
16
+ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
17
+ """Creates or loads a YOLOv5 model
18
+
19
+ Arguments:
20
+ name (str): model name 'yolov5s' or path 'path/to/best.pt'
21
+ pretrained (bool): load pretrained weights into the model
22
+ channels (int): number of input channels
23
+ classes (int): number of model classes
24
+ autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
25
+ verbose (bool): print all information to screen
26
+ device (str, torch.device, None): device to use for model parameters
27
+
28
+ Returns:
29
+ YOLOv5 model
30
+ """
31
+ from pathlib import Path
32
+
33
+ from models.common import AutoShape, DetectMultiBackend
34
+ from models.experimental import attempt_load
35
+ from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
36
+ from utils.downloads import attempt_download
37
+ from utils.general import LOGGER, check_requirements, intersect_dicts, logging
38
+ from utils.torch_utils import select_device
39
+
40
+ if not verbose:
41
+ LOGGER.setLevel(logging.WARNING)
42
+ check_requirements(exclude=('opencv-python', 'tensorboard', 'thop'))
43
+ name = Path(name)
44
+ path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path
45
+ try:
46
+ device = select_device(device)
47
+ if pretrained and channels == 3 and classes == 80:
48
+ try:
49
+ model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model
50
+ if autoshape:
51
+ if model.pt and isinstance(model.model, ClassificationModel):
52
+ LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. '
53
+ 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).')
54
+ elif model.pt and isinstance(model.model, SegmentationModel):
55
+ LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. '
56
+ 'You will not be able to run inference with this model.')
57
+ else:
58
+ model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS
59
+ except Exception:
60
+ model = attempt_load(path, device=device, fuse=False) # arbitrary model
61
+ else:
62
+ cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path
63
+ model = DetectionModel(cfg, channels, classes) # create model
64
+ if pretrained:
65
+ ckpt = torch.load(attempt_download(path), map_location=device) # load
66
+ csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
67
+ csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect
68
+ model.load_state_dict(csd, strict=False) # load
69
+ if len(ckpt['model'].names) == classes:
70
+ model.names = ckpt['model'].names # set class names attribute
71
+ if not verbose:
72
+ LOGGER.setLevel(logging.INFO) # reset to default
73
+ return model.to(device)
74
+
75
+ except Exception as e:
76
+ help_url = 'https://github.com/ultralytics/yolov5/issues/36'
77
+ s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.'
78
+ raise Exception(s) from e
79
+
80
+
81
+ def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None):
82
+ # YOLOv5 custom or local model
83
+ return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
84
+
85
+
86
+ def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
87
+ # YOLOv5-nano model https://github.com/ultralytics/yolov5
88
+ return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device)
89
+
90
+
91
+ def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
92
+ # YOLOv5-small model https://github.com/ultralytics/yolov5
93
+ return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device)
94
+
95
+
96
+ def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
97
+ # YOLOv5-medium model https://github.com/ultralytics/yolov5
98
+ return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device)
99
+
100
+
101
+ def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
102
+ # YOLOv5-large model https://github.com/ultralytics/yolov5
103
+ return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device)
104
+
105
+
106
+ def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
107
+ # YOLOv5-xlarge model https://github.com/ultralytics/yolov5
108
+ return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device)
109
+
110
+
111
+ def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
112
+ # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5
113
+ return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device)
114
+
115
+
116
+ def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
117
+ # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
118
+ return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device)
119
+
120
+
121
+ def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
122
+ # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
123
+ return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device)
124
+
125
+
126
+ def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
127
+ # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
128
+ return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device)
129
+
130
+
131
+ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
132
+ # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
133
+ return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device)
134
+
135
+
136
+ if __name__ == '__main__':
137
+ import argparse
138
+ from pathlib import Path
139
+
140
+ import numpy as np
141
+ from PIL import Image
142
+
143
+ from utils.general import cv2, print_args
144
+
145
+ # Argparser
146
+ parser = argparse.ArgumentParser()
147
+ parser.add_argument('--model', type=str, default='yolov5s', help='model name')
148
+ opt = parser.parse_args()
149
+ print_args(vars(opt))
150
+
151
+ # Model
152
+ model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
153
+ # model = custom(path='path/to/model.pt') # custom
154
+
155
+ # Images
156
+ imgs = [
157
+ 'data/images/zidane.jpg', # filename
158
+ Path('data/images/zidane.jpg'), # Path
159
+ 'https://ultralytics.com/images/zidane.jpg', # URI
160
+ cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
161
+ Image.open('data/images/bus.jpg'), # PIL
162
+ np.zeros((320, 640, 3))] # numpy
163
+
164
+ # Inference
165
+ results = model(imgs, size=320) # batched inference
166
+
167
+ # Results
168
+ results.print()
169
+ results.save()
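Beyond print() and save(), the Detections object returned by an AutoShape model also exposes a tabular view of the boxes; a short illustrative extension of the demo above:

df = results.pandas().xyxy[0]  # one DataFrame per image: xmin, ymin, xmax, ymax, confidence, class, name
print(df.head())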
yolov5-code-main/models/__init__.py ADDED
File without changes
yolov5-code-main/models/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (150 Bytes). View file
 
yolov5-code-main/models/__pycache__/common.cpython-38.pyc ADDED
Binary file (40.8 kB). View file
 
yolov5-code-main/models/__pycache__/experimental.cpython-38.pyc ADDED
Binary file (4.86 kB). View file
 
yolov5-code-main/models/__pycache__/tf.cpython-38.pyc ADDED
Binary file (26.2 kB). View file
 
yolov5-code-main/models/__pycache__/yolo.cpython-38.pyc ADDED
Binary file (16.3 kB). View file
 
yolov5-code-main/models/common.py ADDED
@@ -0,0 +1,956 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Common modules
4
+ """
5
+
6
+ import ast
7
+ import contextlib
8
+ import json
9
+ import math
10
+ import platform
11
+ import warnings
12
+ import zipfile
13
+ from collections import OrderedDict, namedtuple
14
+ from copy import copy
15
+ from pathlib import Path
16
+ from urllib.parse import urlparse
17
+
18
+ import cv2
19
+ import numpy as np
20
+ import pandas as pd
21
+ import requests
22
+ import torch
23
+ import torch.nn as nn
24
+ import torch.nn.functional as F
25
+ import torchvision.models as models
26
+ from PIL import Image
27
+ from torch.cuda import amp
28
+
29
+ from utils import TryExcept
30
+ from utils.dataloaders import exif_transpose, letterbox
31
+ from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr,
32
+ increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy,
33
+ xyxy2xywh, yaml_load)
34
+ from utils.plots import Annotator, colors, save_one_box
35
+ from utils.torch_utils import copy_attr, smart_inference_mode
36
+
37
+
38
+ def autopad(k, p=None, d=1): # kernel, padding, dilation
39
+ # Pad to 'same' shape outputs
40
+ if d > 1:
41
+ k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size
42
+ if p is None:
43
+ p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad
44
+ return p
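+ # e.g. autopad(3) -> 1, autopad(5) -> 2, and with dilation autopad(3, d=2) -> 2, so a
+ # stride-1 conv keeps the input spatial size (illustrative note, not upstream code)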
45
+
46
+
47
+ class Conv(nn.Module):
48
+ # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation)
49
+ default_act = nn.SiLU() # default activation
50
+
51
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True):
52
+ super().__init__()
53
+ self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False)
54
+ self.bn = nn.BatchNorm2d(c2)
55
+ self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity()
56
+
57
+ def forward(self, x):
58
+ return self.act(self.bn(self.conv(x)))
59
+
60
+ def forward_fuse(self, x):
61
+ return self.act(self.conv(x))
62
+
63
+
64
+ class DWConv(Conv):
65
+ # Depth-wise convolution
66
+ def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation
67
+ super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act)
68
+
69
+
70
+ class DWConvTranspose2d(nn.ConvTranspose2d):
71
+ # Depth-wise transpose convolution
72
+ def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out
73
+ super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2))
74
+
75
+
76
+ class TransformerLayer(nn.Module):
77
+ # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)
78
+ def __init__(self, c, num_heads):
79
+ super().__init__()
80
+ self.q = nn.Linear(c, c, bias=False)
81
+ self.k = nn.Linear(c, c, bias=False)
82
+ self.v = nn.Linear(c, c, bias=False)
83
+ self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
84
+ self.fc1 = nn.Linear(c, c, bias=False)
85
+ self.fc2 = nn.Linear(c, c, bias=False)
86
+
87
+ def forward(self, x):
88
+ x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x
89
+ x = self.fc2(self.fc1(x)) + x
90
+ return x
91
+
92
+
93
+ class TransformerBlock(nn.Module):
94
+ # Vision Transformer https://arxiv.org/abs/2010.11929
95
+ def __init__(self, c1, c2, num_heads, num_layers):
96
+ super().__init__()
97
+ self.conv = None
98
+ if c1 != c2:
99
+ self.conv = Conv(c1, c2)
100
+ self.linear = nn.Linear(c2, c2) # learnable position embedding
101
+ self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
102
+ self.c2 = c2
103
+
104
+ def forward(self, x):
105
+ if self.conv is not None:
106
+ x = self.conv(x)
107
+ b, _, w, h = x.shape
108
+ p = x.flatten(2).permute(2, 0, 1)
109
+ return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h)
110
+
111
+
112
+ class Bottleneck(nn.Module):
113
+ # Standard bottleneck
114
+ def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion
115
+ super().__init__()
116
+ c_ = int(c2 * e) # hidden channels
117
+ self.cv1 = Conv(c1, c_, 1, 1)
118
+ self.cv2 = Conv(c_, c2, 3, 1, g=g)
119
+ self.add = shortcut and c1 == c2
120
+
121
+ def forward(self, x):
122
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
123
+
124
+
125
+ class BottleneckCSP(nn.Module):
126
+ # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
127
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
128
+ super().__init__()
129
+ c_ = int(c2 * e) # hidden channels
130
+ self.cv1 = Conv(c1, c_, 1, 1)
131
+ self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
132
+ self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
133
+ self.cv4 = Conv(2 * c_, c2, 1, 1)
134
+ self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3)
135
+ self.act = nn.SiLU()
136
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
137
+
138
+ def forward(self, x):
139
+ y1 = self.cv3(self.m(self.cv1(x)))
140
+ y2 = self.cv2(x)
141
+ return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1))))
142
+
143
+
144
+ class CrossConv(nn.Module):
145
+ # Cross Convolution Downsample
146
+ def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False):
147
+ # ch_in, ch_out, kernel, stride, groups, expansion, shortcut
148
+ super().__init__()
149
+ c_ = int(c2 * e) # hidden channels
150
+ self.cv1 = Conv(c1, c_, (1, k), (1, s))
151
+ self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g)
152
+ self.add = shortcut and c1 == c2
153
+
154
+ def forward(self, x):
155
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
156
+
157
+
158
+
159
+
160
+
176
+
177
+ class MobileNetV3(nn.Module):
178
+
179
+ def __init__(self, slice):
180
+ super(MobileNetV3, self).__init__()
181
+ self.model = None
182
+ if slice == 1:
183
+ self.model = models.mobilenet_v3_small(pretrained=True).features[:4]
184
+ elif slice == 2:
185
+ self.model = models.mobilenet_v3_small(pretrained=True).features[4:9]
186
+ else:
187
+ self.model = models.mobilenet_v3_small(pretrained=True).features[9:]
188
+
189
+ def forward(self, x):
190
+ return self.model(x)
191
+
192
+
193
+ class SE(nn.Module):
194
+
195
+ def __init__(self, in_chnls, ratio):
196
+ super(SE, self).__init__()
197
+ self.squeeze = nn.AdaptiveAvgPool2d((1, 1))
198
+ self.compress = nn.Conv2d(in_chnls, in_chnls // ratio, 1, 1, 0)
199
+ self.excitation = nn.Conv2d(in_chnls // ratio, in_chnls, 1, 1, 0)
200
+
201
+ def forward(self, x):
202
+ out = self.squeeze(x)
203
+ out = self.compress(out)
204
+ out = F.relu(out)
205
+ out = self.excitation(out)
206
+ return x * torch.sigmoid(out)  # sigmoid excitation; torch.sigmoid preferred as F.sigmoid is deprecated
207
+
208
+
209
+ class C2fBottleneck(nn.Module):
210
+ # Standard bottleneck
211
+ def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5): # ch_in, ch_out, shortcut, groups, kernels, expand
212
+ super().__init__()
213
+ c_ = int(c2 * e) # hidden channels
214
+ self.cv1 = Conv(c1, c_, k[0], 1)
215
+ self.cv2 = Conv(c_, c2, k[1], 1, g=g)
216
+ self.add = shortcut and c1 == c2
217
+
218
+ def forward(self, x):
219
+ return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
220
+
221
+
222
+ class C2f(nn.Module):
223
+ # CSP Bottleneck with 2 convolutions
224
+ def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
225
+ super().__init__()
226
+ self.c = int(c2 * e) # hidden channels
227
+ self.cv1 = Conv(c1, 2 * self.c, 1, 1)
228
+ self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2)
229
+ self.m = nn.ModuleList(C2fBottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))
230
+
231
+ def forward(self, x):
232
+ y = list(self.cv1(x).chunk(2, 1))
233
+ y.extend(m(y[-1]) for m in self.m)
234
+ return self.cv2(torch.cat(y, 1))
235
+
236
+ def forward_split(self, x):
237
+ y = list(self.cv1(x).split((self.c, self.c), 1))
238
+ y.extend(m(y[-1]) for m in self.m)
239
+ return self.cv2(torch.cat(y, 1))
240
+
241
+
242
+ class C3(nn.Module):
243
+ # CSP Bottleneck with 3 convolutions
244
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion
245
+ super().__init__()
246
+ c_ = int(c2 * e) # hidden channels
247
+ self.cv1 = Conv(c1, c_, 1, 1)
248
+ self.cv2 = Conv(c1, c_, 1, 1)
249
+ self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2)
250
+ self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)))
251
+
252
+ def forward(self, x):
253
+ return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1))
254
+
255
+
256
+ class C3x(C3):
257
+ # C3 module with cross-convolutions
258
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
259
+ super().__init__(c1, c2, n, shortcut, g, e)
260
+ c_ = int(c2 * e)
261
+ self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)))
262
+
263
+
264
+ class C3TR(C3):
265
+ # C3 module with TransformerBlock()
266
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
267
+ super().__init__(c1, c2, n, shortcut, g, e)
268
+ c_ = int(c2 * e)
269
+ self.m = TransformerBlock(c_, c_, 4, n)
270
+
271
+
272
+ class C3SPP(C3):
273
+ # C3 module with SPP()
274
+ def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5):
275
+ super().__init__(c1, c2, n, shortcut, g, e)
276
+ c_ = int(c2 * e)
277
+ self.m = SPP(c_, c_, k)
278
+
279
+
280
+ class C3Ghost(C3):
281
+ # C3 module with GhostBottleneck()
282
+ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
283
+ super().__init__(c1, c2, n, shortcut, g, e)
284
+ c_ = int(c2 * e) # hidden channels
285
+ self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n)))
286
+
287
+
288
+ class SPP(nn.Module):
289
+ # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729
290
+ def __init__(self, c1, c2, k=(5, 9, 13)):
291
+ super().__init__()
292
+ c_ = c1 // 2 # hidden channels
293
+ self.cv1 = Conv(c1, c_, 1, 1)
294
+ self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
295
+ self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])
296
+
297
+ def forward(self, x):
298
+ x = self.cv1(x)
299
+ with warnings.catch_warnings():
300
+ warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
301
+ return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))
302
+
303
+
304
+ class SPPF(nn.Module):
305
+ # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher
306
+ def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13))
307
+ super().__init__()
308
+ c_ = c1 // 2 # hidden channels
309
+ self.cv1 = Conv(c1, c_, 1, 1)
310
+ self.cv2 = Conv(c_ * 4, c2, 1, 1)
311
+ self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
312
+
313
+ def forward(self, x):
314
+ x = self.cv1(x)
315
+ with warnings.catch_warnings():
316
+ warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning
317
+ y1 = self.m(x)
318
+ y2 = self.m(y1)
319
+ return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1))
320
+
321
+
322
+ class Focus(nn.Module):
323
+ # Focus wh information into c-space
324
+ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups
325
+ super().__init__()
326
+ self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act)
327
+ # self.contract = Contract(gain=2)
328
+
329
+ def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2)
330
+ return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1))
331
+ # return self.conv(self.contract(x))
332
+
333
+
334
+ class GhostConv(nn.Module):
335
+ # Ghost Convolution https://github.com/huawei-noah/ghostnet
336
+ def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups
337
+ super().__init__()
338
+ c_ = c2 // 2 # hidden channels
339
+ self.cv1 = Conv(c1, c_, k, s, None, g, act=act)
340
+ self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act)
341
+
342
+ def forward(self, x):
343
+ y = self.cv1(x)
344
+ return torch.cat((y, self.cv2(y)), 1)
345
+
346
+
347
+ class GhostBottleneck(nn.Module):
348
+ # Ghost Bottleneck https://github.com/huawei-noah/ghostnet
349
+ def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride
350
+ super().__init__()
351
+ c_ = c2 // 2
352
+ self.conv = nn.Sequential(
353
+ GhostConv(c1, c_, 1, 1), # pw
354
+ DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw
355
+ GhostConv(c_, c2, 1, 1, act=False)) # pw-linear
356
+ self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1,
357
+ act=False)) if s == 2 else nn.Identity()
358
+
359
+ def forward(self, x):
360
+ return self.conv(x) + self.shortcut(x)
361
+
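GhostConv halves the cost of a standard convolution: a dense conv produces half of the output channels and a cheap 5x5 depthwise conv "ghosts" the other half. A quick parameter-count comparison, assuming `models.common` is importable (exact totals depend on the Conv block's BatchNorm):

```python
from models.common import Conv, GhostConv

def n_params(m):
    return sum(p.numel() for p in m.parameters())

print(n_params(Conv(128, 128, 3)))       # standard 3x3 conv block
print(n_params(GhostConv(128, 128, 3)))  # roughly half the parameters
```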
362
+
363
+ class Contract(nn.Module):
364
+ # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40)
365
+ def __init__(self, gain=2):
366
+ super().__init__()
367
+ self.gain = gain
368
+
369
+ def forward(self, x):
370
+ b, c, h, w = x.size() # assert h % s == 0 and w % s == 0, 'Indivisible gain'
371
+ s = self.gain
372
+ x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2)
373
+ x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40)
374
+ return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40)
375
+
376
+
377
+ class Expand(nn.Module):
378
+ # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160)
379
+ def __init__(self, gain=2):
380
+ super().__init__()
381
+ self.gain = gain
382
+
383
+ def forward(self, x):
384
+ b, c, h, w = x.size() # assert c % s ** 2 == 0, 'Indivisible gain'
385
+ s = self.gain
386
+ x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80)
387
+ x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2)
388
+ return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160)
389
+
390
+
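Contract and Expand are exact inverses for the same gain, which is easy to sanity-check:

```python
import torch
from models.common import Contract, Expand  # the two classes above

x = torch.randn(1, 64, 80, 80)
y = Contract(gain=2)(x)   # (1, 256, 40, 40)
z = Expand(gain=2)(y)     # (1, 64, 80, 80)
assert torch.equal(x, z)  # round-trip is lossless
```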
391
+ class Concat(nn.Module):
392
+ # Concatenate a list of tensors along dimension
393
+ def __init__(self, dimension=1):
394
+ super().__init__()
395
+ self.d = dimension
396
+
397
+ def forward(self, x):
398
+ return torch.cat(x, self.d)
399
+
400
+
401
+ class DetectMultiBackend(nn.Module):
402
+ # YOLOv5 MultiBackend class for python inference on various backends
403
+ def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):
404
+ # Usage:
405
+ # PyTorch: weights = *.pt
406
+ # TorchScript: *.torchscript
407
+ # ONNX Runtime: *.onnx
408
+ # ONNX OpenCV DNN: *.onnx --dnn
409
+ # OpenVINO: *_openvino_model
410
+ # CoreML: *.mlmodel
411
+ # TensorRT: *.engine
412
+ # TensorFlow SavedModel: *_saved_model
413
+ # TensorFlow GraphDef: *.pb
414
+ # TensorFlow Lite: *.tflite
415
+ # TensorFlow Edge TPU: *_edgetpu.tflite
416
+ # PaddlePaddle: *_paddle_model
417
+ from models.experimental import attempt_download, attempt_load # scoped to avoid circular import
418
+
419
+ super().__init__()
420
+ w = str(weights[0] if isinstance(weights, list) else weights)
421
+ pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)
422
+ fp16 &= pt or jit or onnx or engine # FP16
423
+ nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCHW)
424
+ stride = 32 # default stride
425
+ cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA
426
+ if not (pt or triton):
427
+ w = attempt_download(w) # download if not local
428
+
429
+ if pt: # PyTorch
430
+ model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)
431
+ stride = max(int(model.stride.max()), 32) # model stride
432
+ names = model.module.names if hasattr(model, 'module') else model.names # get class names
433
+ model.half() if fp16 else model.float()
434
+ self.model = model # explicitly assign for to(), cpu(), cuda(), half()
435
+ elif jit: # TorchScript
436
+ LOGGER.info(f'Loading {w} for TorchScript inference...')
437
+ extra_files = {'config.txt': ''} # model metadata
438
+ model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
439
+ model.half() if fp16 else model.float()
440
+ if extra_files['config.txt']: # load metadata dict
441
+ d = json.loads(extra_files['config.txt'],
442
+ object_hook=lambda d: {int(k) if k.isdigit() else k: v
443
+ for k, v in d.items()})
444
+ stride, names = int(d['stride']), d['names']
445
+ elif dnn: # ONNX OpenCV DNN
446
+ LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
447
+ check_requirements('opencv-python>=4.5.4')
448
+ net = cv2.dnn.readNetFromONNX(w)
449
+ elif onnx: # ONNX Runtime
450
+ LOGGER.info(f'Loading {w} for ONNX Runtime inference...')
451
+ check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))
452
+ import onnxruntime
453
+ providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
454
+ session = onnxruntime.InferenceSession(w, providers=providers)
455
+ output_names = [x.name for x in session.get_outputs()]
456
+ meta = session.get_modelmeta().custom_metadata_map # metadata
457
+ if 'stride' in meta:
458
+ stride, names = int(meta['stride']), eval(meta['names'])
459
+ elif xml: # OpenVINO
460
+ LOGGER.info(f'Loading {w} for OpenVINO inference...')
461
+ check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/
462
+ from openvino.runtime import Core, Layout, get_batch
463
+ ie = Core()
464
+ if not Path(w).is_file(): # if not *.xml
465
+ w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir
466
+ network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
467
+ if network.get_parameters()[0].get_layout().empty:
468
+ network.get_parameters()[0].set_layout(Layout('NCHW'))
469
+ batch_dim = get_batch(network)
470
+ if batch_dim.is_static:
471
+ batch_size = batch_dim.get_length()
472
+ executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2
473
+ stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata
474
+ elif engine: # TensorRT
475
+ LOGGER.info(f'Loading {w} for TensorRT inference...')
476
+ import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download
477
+ check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0
478
+ if device.type == 'cpu':
479
+ device = torch.device('cuda:0')
480
+ Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))
481
+ logger = trt.Logger(trt.Logger.INFO)
482
+ with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
483
+ model = runtime.deserialize_cuda_engine(f.read())
484
+ context = model.create_execution_context()
485
+ bindings = OrderedDict()
486
+ output_names = []
487
+ fp16 = False # default updated below
488
+ dynamic = False
489
+ for i in range(model.num_bindings):
490
+ name = model.get_binding_name(i)
491
+ dtype = trt.nptype(model.get_binding_dtype(i))
492
+ if model.binding_is_input(i):
493
+ if -1 in tuple(model.get_binding_shape(i)): # dynamic
494
+ dynamic = True
495
+ context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))
496
+ if dtype == np.float16:
497
+ fp16 = True
498
+ else: # output
499
+ output_names.append(name)
500
+ shape = tuple(context.get_binding_shape(i))
501
+ im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
502
+ bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
503
+ binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
504
+ batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size
505
+ elif coreml: # CoreML
506
+ LOGGER.info(f'Loading {w} for CoreML inference...')
507
+ import coremltools as ct
508
+ model = ct.models.MLModel(w)
509
+ elif saved_model: # TF SavedModel
510
+ LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
511
+ import tensorflow as tf
512
+ keras = False # assume TF1 saved_model
513
+ model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
514
+ elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
515
+ LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
516
+ import tensorflow as tf
517
+
518
+ def wrap_frozen_graph(gd, inputs, outputs):
519
+ x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped
520
+ ge = x.graph.as_graph_element
521
+ return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))
522
+
523
+ def gd_outputs(gd):
524
+ name_list, input_list = [], []
525
+ for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef
526
+ name_list.append(node.name)
527
+ input_list.extend(node.input)
528
+ return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))
529
+
530
+ gd = tf.Graph().as_graph_def() # TF GraphDef
531
+ with open(w, 'rb') as f:
532
+ gd.ParseFromString(f.read())
533
+ frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))
534
+ elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
535
+ try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
536
+ from tflite_runtime.interpreter import Interpreter, load_delegate
537
+ except ImportError:
538
+ import tensorflow as tf
539
+ Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate
540
+ if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime
541
+ LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')
542
+ delegate = {
543
+ 'Linux': 'libedgetpu.so.1',
544
+ 'Darwin': 'libedgetpu.1.dylib',
545
+ 'Windows': 'edgetpu.dll'}[platform.system()]
546
+ interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])
547
+ else: # TFLite
548
+ LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')
549
+ interpreter = Interpreter(model_path=w) # load TFLite model
550
+ interpreter.allocate_tensors() # allocate
551
+ input_details = interpreter.get_input_details() # inputs
552
+ output_details = interpreter.get_output_details() # outputs
553
+ # load metadata
554
+ with contextlib.suppress(zipfile.BadZipFile):
555
+ with zipfile.ZipFile(w, 'r') as model:
556
+ meta_file = model.namelist()[0]
557
+ meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
558
+ stride, names = int(meta['stride']), meta['names']
559
+ elif tfjs: # TF.js
560
+ raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')
561
+ elif paddle: # PaddlePaddle
562
+ LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
563
+ check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
564
+ import paddle.inference as pdi
565
+ if not Path(w).is_file(): # if not *.pdmodel
566
+ w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir
567
+ weights = Path(w).with_suffix('.pdiparams')
568
+ config = pdi.Config(str(w), str(weights))
569
+ if cuda:
570
+ config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
571
+ predictor = pdi.create_predictor(config)
572
+ input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
573
+ output_names = predictor.get_output_names()
574
+ elif triton: # NVIDIA Triton Inference Server
575
+ LOGGER.info(f'Using {w} as Triton Inference Server...')
576
+ check_requirements('tritonclient[all]')
577
+ from utils.triton import TritonRemoteModel
578
+ model = TritonRemoteModel(url=w)
579
+ nhwc = model.runtime.startswith('tensorflow')
580
+ else:
581
+ raise NotImplementedError(f'ERROR: {w} is not a supported format')
582
+
583
+ # class names
584
+ if 'names' not in locals():
585
+ names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}
586
+ if names[0] == 'n01440764' and len(names) == 1000: # ImageNet
587
+ names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names
588
+
589
+ self.__dict__.update(locals()) # assign all variables to self
590
+
591
+ def forward(self, im, augment=False, visualize=False):
592
+ # YOLOv5 MultiBackend inference
593
+ b, ch, h, w = im.shape # batch, channel, height, width
594
+ if self.fp16 and im.dtype != torch.float16:
595
+ im = im.half() # to FP16
596
+ if self.nhwc:
597
+ im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)
598
+
599
+ if self.pt: # PyTorch
600
+ y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)
601
+ elif self.jit: # TorchScript
602
+ y = self.model(im)
603
+ elif self.dnn: # ONNX OpenCV DNN
604
+ im = im.cpu().numpy() # torch to numpy
605
+ self.net.setInput(im)
606
+ y = self.net.forward()
607
+ elif self.onnx: # ONNX Runtime
608
+ im = im.cpu().numpy() # torch to numpy
609
+ y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
610
+ elif self.xml: # OpenVINO
611
+ im = im.cpu().numpy() # FP32
612
+ y = list(self.executable_network([im]).values())
613
+ elif self.engine: # TensorRT
614
+ if self.dynamic and im.shape != self.bindings['images'].shape:
615
+ i = self.model.get_binding_index('images')
616
+ self.context.set_binding_shape(i, im.shape) # reshape if dynamic
617
+ self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)
618
+ for name in self.output_names:
619
+ i = self.model.get_binding_index(name)
620
+ self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))
621
+ s = self.bindings['images'].shape
622
+ assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}"
623
+ self.binding_addrs['images'] = int(im.data_ptr())
624
+ self.context.execute_v2(list(self.binding_addrs.values()))
625
+ y = [self.bindings[x].data for x in sorted(self.output_names)]
626
+ elif self.coreml: # CoreML
627
+ im = im.cpu().numpy()
628
+ im = Image.fromarray((im[0] * 255).astype('uint8'))
629
+ # im = im.resize((192, 320), Image.ANTIALIAS)
630
+ y = self.model.predict({'image': im}) # coordinates are xywh normalized
631
+ if 'confidence' in y:
632
+ box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels
633
+ conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float32) # np.float removed in NumPy 1.24+
634
+ y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)
635
+ else:
636
+ y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)
637
+ elif self.paddle: # PaddlePaddle
638
+ im = im.cpu().numpy().astype(np.float32)
639
+ self.input_handle.copy_from_cpu(im)
640
+ self.predictor.run()
641
+ y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
642
+ elif self.triton: # NVIDIA Triton Inference Server
643
+ y = self.model(im)
644
+ else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)
645
+ im = im.cpu().numpy()
646
+ if self.saved_model: # SavedModel
647
+ y = self.model(im, training=False) if self.keras else self.model(im)
648
+ elif self.pb: # GraphDef
649
+ y = self.frozen_func(x=self.tf.constant(im))
650
+ else: # Lite or Edge TPU
651
+ input = self.input_details[0]
652
+ int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model
653
+ if int8:
654
+ scale, zero_point = input['quantization']
655
+ im = (im / scale + zero_point).astype(np.uint8) # de-scale
656
+ self.interpreter.set_tensor(input['index'], im)
657
+ self.interpreter.invoke()
658
+ y = []
659
+ for output in self.output_details:
660
+ x = self.interpreter.get_tensor(output['index'])
661
+ if int8:
662
+ scale, zero_point = output['quantization']
663
+ x = (x.astype(np.float32) - zero_point) * scale # re-scale
664
+ y.append(x)
665
+ y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]
666
+ y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels
667
+
668
+ if isinstance(y, (list, tuple)):
669
+ return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]
670
+ else:
671
+ return self.from_numpy(y)
672
+
673
+ def from_numpy(self, x):
674
+ return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x
675
+
676
+ def warmup(self, imgsz=(1, 3, 640, 640)):
677
+ # Warmup model by running inference once
678
+ warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton
679
+ if any(warmup_types) and (self.device.type != 'cpu' or self.triton):
680
+ im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input
681
+ for _ in range(2 if self.jit else 1): # extra pass lets the TorchScript JIT optimize
682
+ self.forward(im) # warmup
683
+
684
+ @staticmethod
685
+ def _model_type(p='path/to/model.pt'):
686
+ # Return model type from model path, i.e. path='path/to/model.onnx' -> type=onnx
687
+ # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]
688
+ from export import export_formats
689
+ from utils.downloads import is_url
690
+ sf = list(export_formats().Suffix) # export suffixes
691
+ if not is_url(p, check=False):
692
+ check_suffix(p, sf) # checks
693
+ url = urlparse(p) # if url may be Triton inference server
694
+ types = [s in Path(p).name for s in sf]
695
+ types[8] &= not types[9] # tflite &= not edgetpu
696
+ triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])
697
+ return types + [triton]
698
+
699
+ @staticmethod
700
+ def _load_metadata(f=Path('path/to/meta.yaml')):
701
+ # Load metadata from meta.yaml if it exists
702
+ if f.exists():
703
+ d = yaml_load(f)
704
+ return d['stride'], d['names'] # assign stride, names
705
+ return None, None
706
+
707
+
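A condensed sketch of how `detect.py` drives DetectMultiBackend, from loading through one raw forward pass (the image path is illustrative; NMS on `pred` is omitted):

```python
import cv2
import numpy as np
import torch
from models.common import DetectMultiBackend
from utils.augmentations import letterbox

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = DetectMultiBackend('yolov5s.pt', device=device)  # any supported format
model.warmup(imgsz=(1, 3, 640, 640))

im0 = cv2.imread('data/images/zidane.jpg')               # BGR HWC
im = letterbox(im0, 640, stride=model.stride, auto=model.pt)[0]
im = im.transpose((2, 0, 1))[::-1]                       # HWC BGR -> CHW RGB
im = torch.from_numpy(np.ascontiguousarray(im)).to(device).float() / 255
pred = model(im[None])                                   # raw predictions, pre-NMS
```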
708
+ class AutoShape(nn.Module):
709
+ # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS
710
+ conf = 0.25 # NMS confidence threshold
711
+ iou = 0.45 # NMS IoU threshold
712
+ agnostic = False # NMS class-agnostic
713
+ multi_label = False # NMS multiple labels per box
714
+ classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs
715
+ max_det = 1000 # maximum number of detections per image
716
+ amp = False # Automatic Mixed Precision (AMP) inference
717
+
718
+ def __init__(self, model, verbose=True):
719
+ super().__init__()
720
+ if verbose:
721
+ LOGGER.info('Adding AutoShape... ')
722
+ copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes
723
+ self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance
724
+ self.pt = not self.dmb or model.pt # PyTorch model
725
+ self.model = model.eval()
726
+ if self.pt:
727
+ m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
728
+ m.inplace = False # Detect.inplace=False for safe multithread inference
729
+ m.export = True # do not output loss values
730
+
731
+ def _apply(self, fn):
732
+ # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
733
+ self = super()._apply(fn)
734
+ if self.pt:
735
+ m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect()
736
+ m.stride = fn(m.stride)
737
+ m.grid = list(map(fn, m.grid))
738
+ if isinstance(m.anchor_grid, list):
739
+ m.anchor_grid = list(map(fn, m.anchor_grid))
740
+ return self
741
+
742
+ @smart_inference_mode()
743
+ def forward(self, ims, size=640, augment=False, profile=False):
744
+ # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are:
745
+ # file: ims = 'data/images/zidane.jpg' # str or PosixPath
746
+ # URI: = 'https://ultralytics.com/images/zidane.jpg'
747
+ # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3)
748
+ # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3)
749
+ # numpy: = np.zeros((640,1280,3)) # HWC
750
+ # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values)
751
+ # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images
752
+
753
+ dt = (Profile(), Profile(), Profile())
754
+ with dt[0]:
755
+ if isinstance(size, int): # expand
756
+ size = (size, size)
757
+ p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param
758
+ autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference
759
+ if isinstance(ims, torch.Tensor): # torch
760
+ with amp.autocast(autocast):
761
+ return self.model(ims.to(p.device).type_as(p), augment=augment) # inference
762
+
763
+ # Pre-process
764
+ n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images
765
+ shape0, shape1, files = [], [], [] # image and inference shapes, filenames
766
+ for i, im in enumerate(ims):
767
+ f = f'image{i}' # filename
768
+ if isinstance(im, (str, Path)): # filename or uri
769
+ im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im
770
+ im = np.asarray(exif_transpose(im))
771
+ elif isinstance(im, Image.Image): # PIL Image
772
+ im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f
773
+ files.append(Path(f).with_suffix('.jpg').name)
774
+ if im.shape[0] < 5: # image in CHW
775
+ im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1)
776
+ im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input
777
+ s = im.shape[:2] # HWC
778
+ shape0.append(s) # image shape
779
+ g = max(size) / max(s) # gain
780
+ shape1.append([int(y * g) for y in s])
781
+ ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update
782
+ shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape
783
+ x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad
784
+ x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW
785
+ x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32
786
+
787
+ with amp.autocast(autocast):
788
+ # Inference
789
+ with dt[1]:
790
+ y = self.model(x, augment=augment) # forward
791
+
792
+ # Post-process
793
+ with dt[2]:
794
+ y = non_max_suppression(y if self.dmb else y[0],
795
+ self.conf,
796
+ self.iou,
797
+ self.classes,
798
+ self.agnostic,
799
+ self.multi_label,
800
+ max_det=self.max_det) # NMS
801
+ for i in range(n):
802
+ scale_boxes(shape1, y[i][:, :4], shape0[i])
803
+
804
+ return Detections(ims, y, files, dt, self.names, x.shape)
805
+
806
+
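In practice AutoShape is the wrapper `torch.hub.load` returns, so callers can feed file paths, URLs, PIL/NumPy/torch images directly:

```python
import torch

model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # AutoShape-wrapped model
model.conf = 0.4                                         # raise NMS confidence threshold
results = model('https://ultralytics.com/images/zidane.jpg', size=640)
results.print()                                          # Detections object (see below)
```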
807
+ class Detections:
808
+ # YOLOv5 detections class for inference results
809
+ def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None):
810
+ super().__init__()
811
+ d = pred[0].device # device
812
+ gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations
813
+ self.ims = ims # list of images as numpy arrays
814
+ self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls)
815
+ self.names = names # class names
816
+ self.files = files # image filenames
817
+ self.times = times # profiling times
818
+ self.xyxy = pred # xyxy pixels
819
+ self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels
820
+ self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized
821
+ self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized
822
+ self.n = len(self.pred) # number of images (batch size)
823
+ self.t = tuple(x.t / self.n * 1E3 for x in times) # per-image times (ms)
824
+ self.s = tuple(shape) # inference BCHW shape
825
+
826
+ def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')):
827
+ s, crops = '', []
828
+ for i, (im, pred) in enumerate(zip(self.ims, self.pred)):
829
+ s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string
830
+ if pred.shape[0]:
831
+ for c in pred[:, -1].unique():
832
+ n = (pred[:, -1] == c).sum() # detections per class
833
+ s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string
834
+ s = s.rstrip(', ')
835
+ if show or save or render or crop:
836
+ annotator = Annotator(im, example=str(self.names))
837
+ for *box, conf, cls in reversed(pred): # xyxy, confidence, class
838
+ label = f'{self.names[int(cls)]} {conf:.2f}'
839
+ if crop:
840
+ file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None
841
+ crops.append({
842
+ 'box': box,
843
+ 'conf': conf,
844
+ 'cls': cls,
845
+ 'label': label,
846
+ 'im': save_one_box(box, im, file=file, save=save)})
847
+ else: # all others
848
+ annotator.box_label(box, label if labels else '', color=colors(cls))
849
+ im = annotator.im
850
+ else:
851
+ s += '(no detections)'
852
+
853
+ im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np
854
+ if show:
855
+ if is_jupyter():
856
+ from IPython.display import display
857
+ display(im)
858
+ else:
859
+ im.show(self.files[i])
860
+ if save:
861
+ f = self.files[i]
862
+ im.save(save_dir / f) # save
863
+ if i == self.n - 1:
864
+ LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}")
865
+ if render:
866
+ self.ims[i] = np.asarray(im)
867
+ if pprint:
868
+ s = s.lstrip('\n')
869
+ return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t
870
+ if crop:
871
+ if save:
872
+ LOGGER.info(f'Saved results to {save_dir}\n')
873
+ return crops
874
+
875
+ @TryExcept('Showing images is not supported in this environment')
876
+ def show(self, labels=True):
877
+ self._run(show=True, labels=labels) # show results
878
+
879
+ def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False):
880
+ save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir
881
+ self._run(save=True, labels=labels, save_dir=save_dir) # save results
882
+
883
+ def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False):
884
+ save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None
885
+ return self._run(crop=True, save=save, save_dir=save_dir) # crop results
886
+
887
+ def render(self, labels=True):
888
+ self._run(render=True, labels=labels) # render results
889
+ return self.ims
890
+
891
+ def pandas(self):
892
+ # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0])
893
+ new = copy(self) # return copy
894
+ ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns
895
+ cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns
896
+ for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]):
897
+ a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update
898
+ setattr(new, k, [pd.DataFrame(x, columns=c) for x in a])
899
+ return new
900
+
901
+ def tolist(self):
902
+ # return a list of Detections objects, i.e. 'for result in results.tolist():'
903
+ r = range(self.n) # iterable
904
+ x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r]
905
+ # for d in x:
906
+ # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']:
907
+ # setattr(d, k, getattr(d, k)[0]) # pop out of list
908
+ return x
909
+
910
+ def print(self):
911
+ LOGGER.info(self.__str__())
912
+
913
+ def __len__(self): # override len(results)
914
+ return self.n
915
+
916
+ def __str__(self): # override print(results)
917
+ return self._run(pprint=True) # print results
918
+
919
+ def __repr__(self):
920
+ return f'YOLOv5 {self.__class__} instance\n' + self.__str__()
921
+
922
+
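Continuing the hub example above, a few common ways to consume a Detections object:

```python
df = results.pandas().xyxy[0]        # one DataFrame per image: xmin..ymax, confidence, class, name
people = df[df['name'] == 'person']  # filter detections by class name
for r in results.tolist():           # split the batch into per-image Detections
    print(len(r), 'detections')
crops = results.crop(save=False)     # list of dicts with 'box', 'conf', 'label', 'im'
```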
923
+ class Proto(nn.Module):
924
+ # YOLOv5 mask Proto module for segmentation models
925
+ def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks
926
+ super().__init__()
927
+ self.cv1 = Conv(c1, c_, k=3)
928
+ self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
929
+ self.cv2 = Conv(c_, c_, k=3)
930
+ self.cv3 = Conv(c_, c2)
931
+
932
+ def forward(self, x):
933
+ return self.cv3(self.cv2(self.upsample(self.cv1(x))))
934
+
935
+
936
+ class Classify(nn.Module):
937
+ # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2)
938
+ def __init__(self,
939
+ c1,
940
+ c2,
941
+ k=1,
942
+ s=1,
943
+ p=None,
944
+ g=1,
945
+ dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability
946
+ super().__init__()
947
+ c_ = 1280 # efficientnet_b0 size
948
+ self.conv = Conv(c1, c_, k, s, autopad(k, p), g)
949
+ self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1)
950
+ self.drop = nn.Dropout(p=dropout_p, inplace=True)
951
+ self.linear = nn.Linear(c_, c2) # to x(b,c2)
952
+
953
+ def forward(self, x):
954
+ if isinstance(x, list):
955
+ x = torch.cat(x, 1)
956
+ return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
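A shape check for the classification head; the logits come back unnormalized (no softmax applied):

```python
import torch
from models.common import Classify

head = Classify(c1=512, c2=10)   # e.g. 10 classes on a 512-channel feature map
x = torch.randn(2, 512, 20, 20)
print(head(x).shape)             # torch.Size([2, 10])
```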
yolov5-code-main/models/experimental.py ADDED
@@ -0,0 +1,111 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ Experimental modules
4
+ """
5
+ import math
6
+
7
+ import numpy as np
8
+ import torch
9
+ import torch.nn as nn
10
+
11
+ from utils.downloads import attempt_download
12
+
13
+
14
+ class Sum(nn.Module):
15
+ # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
16
+ def __init__(self, n, weight=False): # n: number of inputs
17
+ super().__init__()
18
+ self.weight = weight # apply weights boolean
19
+ self.iter = range(n - 1) # iter object
20
+ if weight:
21
+ self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights
22
+
23
+ def forward(self, x):
24
+ y = x[0] # no weight
25
+ if self.weight:
26
+ w = torch.sigmoid(self.w) * 2
27
+ for i in self.iter:
28
+ y = y + x[i + 1] * w[i]
29
+ else:
30
+ for i in self.iter:
31
+ y = y + x[i + 1]
32
+ return y
33
+
34
+
35
+ class MixConv2d(nn.Module):
36
+ # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595
37
+ def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy
38
+ super().__init__()
39
+ n = len(k) # number of convolutions
40
+ if equal_ch: # equal c_ per group
41
+ i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices
42
+ c_ = [(i == g).sum() for g in range(n)] # intermediate channels
43
+ else: # equal weight.numel() per group
44
+ b = [c2] + [0] * n
45
+ a = np.eye(n + 1, n, k=-1)
46
+ a -= np.roll(a, 1, axis=1)
47
+ a *= np.array(k) ** 2
48
+ a[0] = 1
49
+ c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b
50
+
51
+ self.m = nn.ModuleList([
52
+ nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)])
53
+ self.bn = nn.BatchNorm2d(c2)
54
+ self.act = nn.SiLU()
55
+
56
+ def forward(self, x):
57
+ return self.act(self.bn(torch.cat([m(x) for m in self.m], 1)))
58
+
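The `equal_ch` branch splits the `c2` output channels as evenly as possible across the kernel sizes; the linspace/floor trick assigns each output channel a group index:

```python
import torch

n, c2 = 2, 64                                # two kernel sizes, 64 output channels
i = torch.linspace(0, n - 1E-6, c2).floor()  # group index per output channel
c_ = [(i == g).sum().item() for g in range(n)]
print(c_)  # [32, 32] -- even split between the 1x1 and 3x3 branches
```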
59
+
60
+ class Ensemble(nn.ModuleList):
61
+ # Ensemble of models
62
+ def __init__(self):
63
+ super().__init__()
64
+
65
+ def forward(self, x, augment=False, profile=False, visualize=False):
66
+ y = [module(x, augment, profile, visualize)[0] for module in self]
67
+ # y = torch.stack(y).max(0)[0] # max ensemble
68
+ # y = torch.stack(y).mean(0) # mean ensemble
69
+ y = torch.cat(y, 1) # nms ensemble
70
+ return y, None # inference, train output
71
+
72
+
73
+ def attempt_load(weights, device=None, inplace=True, fuse=True):
74
+ # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
75
+ from models.yolo import Detect, Model
76
+
77
+ model = Ensemble()
78
+ for w in weights if isinstance(weights, list) else [weights]:
79
+ ckpt = torch.load(attempt_download(w), map_location='cpu') # load
80
+ ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model
81
+
82
+ # Model compatibility updates
83
+ if not hasattr(ckpt, 'stride'):
84
+ ckpt.stride = torch.tensor([32.])
85
+ if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):
86
+ ckpt.names = dict(enumerate(ckpt.names)) # convert to dict
87
+
88
+ model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode
89
+
90
+ # Module compatibility updates
91
+ for m in model.modules():
92
+ t = type(m)
93
+ if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):
94
+ m.inplace = inplace # torch 1.7.0 compatibility
95
+ if t is Detect and not isinstance(m.anchor_grid, list):
96
+ delattr(m, 'anchor_grid')
97
+ setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)
98
+ elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):
99
+ m.recompute_scale_factor = None # torch 1.11.0 compatibility
100
+
101
+ # Return model
102
+ if len(model) == 1:
103
+ return model[-1]
104
+
105
+ # Return detection ensemble
106
+ print(f'Ensemble created with {weights}\n')
107
+ for k in 'names', 'nc', 'yaml':
108
+ setattr(model, k, getattr(model[0], k))
109
+ model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride
110
+ assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'
111
+ return model
yolov5-code-main/models/hub/anchors.yaml ADDED
@@ -0,0 +1,59 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ # Default anchors for COCO data
3
+
4
+
5
+ # P5 -------------------------------------------------------------------------------------------------------------------
6
+ # P5-640:
7
+ anchors_p5_640:
8
+ - [10,13, 16,30, 33,23] # P3/8
9
+ - [30,61, 62,45, 59,119] # P4/16
10
+ - [116,90, 156,198, 373,326] # P5/32
11
+
12
+
13
+ # P6 -------------------------------------------------------------------------------------------------------------------
14
+ # P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387
15
+ anchors_p6_640:
16
+ - [9,11, 21,19, 17,41] # P3/8
17
+ - [43,32, 39,70, 86,64] # P4/16
18
+ - [65,131, 134,130, 120,265] # P5/32
19
+ - [282,180, 247,354, 512,387] # P6/64
20
+
21
+ # P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792
22
+ anchors_p6_1280:
23
+ - [19,27, 44,40, 38,94] # P3/8
24
+ - [96,68, 86,152, 180,137] # P4/16
25
+ - [140,301, 303,264, 238,542] # P5/32
26
+ - [436,615, 739,380, 925,792] # P6/64
27
+
28
+ # P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187
29
+ anchors_p6_1920:
30
+ - [28,41, 67,59, 57,141] # P3/8
31
+ - [144,103, 129,227, 270,205] # P4/16
32
+ - [209,452, 455,396, 358,812] # P5/32
33
+ - [653,922, 1109,570, 1387,1187] # P6/64
34
+
35
+
36
+ # P7 -------------------------------------------------------------------------------------------------------------------
37
+ # P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372
38
+ anchors_p7_640:
39
+ - [11,11, 13,30, 29,20] # P3/8
40
+ - [30,46, 61,38, 39,92] # P4/16
41
+ - [78,80, 146,66, 79,163] # P5/32
42
+ - [149,150, 321,143, 157,303] # P6/64
43
+ - [257,402, 359,290, 524,372] # P7/128
44
+
45
+ # P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818
46
+ anchors_p7_1280:
47
+ - [19,22, 54,36, 32,77] # P3/8
48
+ - [70,83, 138,71, 75,173] # P4/16
49
+ - [165,159, 148,334, 375,151] # P5/32
50
+ - [334,317, 251,626, 499,474] # P6/64
51
+ - [750,326, 534,814, 1079,818] # P7/128
52
+
53
+ # P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227
54
+ anchors_p7_1920:
55
+ - [29,34, 81,55, 47,115] # P3/8
56
+ - [105,124, 207,107, 113,259] # P4/16
57
+ - [247,238, 222,500, 563,227] # P5/32
58
+ - [501,476, 376,939, 749,711] # P6/64
59
+ - [1126,489, 801,1222, 1618,1227] # P7/128
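Each row above is a flat list of (width, height) anchor pairs in input-image pixels, three per detection layer, ordered by stride. A small sketch of how a row decodes:

```python
anchors_p5_640 = [
    [10, 13, 16, 30, 33, 23],       # P3/8
    [30, 61, 62, 45, 59, 119],      # P4/16
    [116, 90, 156, 198, 373, 326],  # P5/32
]
for row, stride in zip(anchors_p5_640, (8, 16, 32)):
    pairs = list(zip(row[::2], row[1::2]))  # -> [(w1, h1), (w2, h2), (w3, h3)]
    print(f'stride {stride}: {pairs}')
```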
yolov5-code-main/models/hub/yolov3-spp.yaml ADDED
@@ -0,0 +1,51 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors:
8
+ - [10,13, 16,30, 33,23] # P3/8
9
+ - [30,61, 62,45, 59,119] # P4/16
10
+ - [116,90, 156,198, 373,326] # P5/32
11
+
12
+ # darknet53 backbone
13
+ backbone:
14
+ # [from, number, module, args]
15
+ [[-1, 1, Conv, [32, 3, 1]], # 0
16
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17
+ [-1, 1, Bottleneck, [64]],
18
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19
+ [-1, 2, Bottleneck, [128]],
20
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21
+ [-1, 8, Bottleneck, [256]],
22
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23
+ [-1, 8, Bottleneck, [512]],
24
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25
+ [-1, 4, Bottleneck, [1024]], # 10
26
+ ]
27
+
28
+ # YOLOv3-SPP head
29
+ head:
30
+ [[-1, 1, Bottleneck, [1024, False]],
31
+ [-1, 1, SPP, [512, [5, 9, 13]]],
32
+ [-1, 1, Conv, [1024, 3, 1]],
33
+ [-1, 1, Conv, [512, 1, 1]],
34
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35
+
36
+ [-2, 1, Conv, [256, 1, 1]],
37
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
39
+ [-1, 1, Bottleneck, [512, False]],
40
+ [-1, 1, Bottleneck, [512, False]],
41
+ [-1, 1, Conv, [256, 1, 1]],
42
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43
+
44
+ [-2, 1, Conv, [128, 1, 1]],
45
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
47
+ [-1, 1, Bottleneck, [256, False]],
48
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49
+
50
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51
+ ]
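Every backbone/head row in these configs follows `[from, number, module, args]`: `from` is the input layer index (-1 means the previous layer; a list feeds Concat/Detect), `number` is a repeat count scaled by `depth_multiple`, `module` is a class name resolved in `parse_model()`, and `args` are constructor arguments whose channel counts are scaled by `width_multiple`. A quick way to inspect the parsed structure:

```python
import yaml

cfg = yaml.safe_load(open('models/hub/yolov3-spp.yaml'))  # path is illustrative
for f, n, m, args in cfg['backbone'][:3]:
    print(f'from={f} number={n} module={m} args={args}')
```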
yolov5-code-main/models/hub/yolov3-tiny.yaml ADDED
@@ -0,0 +1,41 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors:
8
+ - [10,14, 23,27, 37,58] # P4/16
9
+ - [81,82, 135,169, 344,319] # P5/32
10
+
11
+ # YOLOv3-tiny backbone
12
+ backbone:
13
+ # [from, number, module, args]
14
+ [[-1, 1, Conv, [16, 3, 1]], # 0
15
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2
16
+ [-1, 1, Conv, [32, 3, 1]],
17
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4
18
+ [-1, 1, Conv, [64, 3, 1]],
19
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8
20
+ [-1, 1, Conv, [128, 3, 1]],
21
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16
22
+ [-1, 1, Conv, [256, 3, 1]],
23
+ [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32
24
+ [-1, 1, Conv, [512, 3, 1]],
25
+ [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11
26
+ [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12
27
+ ]
28
+
29
+ # YOLOv3-tiny head
30
+ head:
31
+ [[-1, 1, Conv, [1024, 3, 1]],
32
+ [-1, 1, Conv, [256, 1, 1]],
33
+ [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large)
34
+
35
+ [-2, 1, Conv, [128, 1, 1]],
36
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
38
+ [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium)
39
+
40
+ [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5)
41
+ ]
yolov5-code-main/models/hub/yolov3.yaml ADDED
@@ -0,0 +1,51 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors:
8
+ - [10,13, 16,30, 33,23] # P3/8
9
+ - [30,61, 62,45, 59,119] # P4/16
10
+ - [116,90, 156,198, 373,326] # P5/32
11
+
12
+ # darknet53 backbone
13
+ backbone:
14
+ # [from, number, module, args]
15
+ [[-1, 1, Conv, [32, 3, 1]], # 0
16
+ [-1, 1, Conv, [64, 3, 2]], # 1-P1/2
17
+ [-1, 1, Bottleneck, [64]],
18
+ [-1, 1, Conv, [128, 3, 2]], # 3-P2/4
19
+ [-1, 2, Bottleneck, [128]],
20
+ [-1, 1, Conv, [256, 3, 2]], # 5-P3/8
21
+ [-1, 8, Bottleneck, [256]],
22
+ [-1, 1, Conv, [512, 3, 2]], # 7-P4/16
23
+ [-1, 8, Bottleneck, [512]],
24
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32
25
+ [-1, 4, Bottleneck, [1024]], # 10
26
+ ]
27
+
28
+ # YOLOv3 head
29
+ head:
30
+ [[-1, 1, Bottleneck, [1024, False]],
31
+ [-1, 1, Conv, [512, 1, 1]],
32
+ [-1, 1, Conv, [1024, 3, 1]],
33
+ [-1, 1, Conv, [512, 1, 1]],
34
+ [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large)
35
+
36
+ [-2, 1, Conv, [256, 1, 1]],
37
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38
+ [[-1, 8], 1, Concat, [1]], # cat backbone P4
39
+ [-1, 1, Bottleneck, [512, False]],
40
+ [-1, 1, Bottleneck, [512, False]],
41
+ [-1, 1, Conv, [256, 1, 1]],
42
+ [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium)
43
+
44
+ [-2, 1, Conv, [128, 1, 1]],
45
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
46
+ [[-1, 6], 1, Concat, [1]], # cat backbone P3
47
+ [-1, 1, Bottleneck, [256, False]],
48
+ [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small)
49
+
50
+ [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
51
+ ]
yolov5-code-main/models/hub/yolov5-bifpn.yaml ADDED
@@ -0,0 +1,48 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors:
8
+ - [10,13, 16,30, 33,23] # P3/8
9
+ - [30,61, 62,45, 59,119] # P4/16
10
+ - [116,90, 156,198, 373,326] # P5/32
11
+
12
+ # YOLOv5 v6.0 backbone
13
+ backbone:
14
+ # [from, number, module, args]
15
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17
+ [-1, 3, C3, [128]],
18
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19
+ [-1, 6, C3, [256]],
20
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21
+ [-1, 9, C3, [512]],
22
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23
+ [-1, 3, C3, [1024]],
24
+ [-1, 1, SPPF, [1024, 5]], # 9
25
+ ]
26
+
27
+ # YOLOv5 v6.0 BiFPN head
28
+ head:
29
+ [[-1, 1, Conv, [512, 1, 1]],
30
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
32
+ [-1, 3, C3, [512, False]], # 13
33
+
34
+ [-1, 1, Conv, [256, 1, 1]],
35
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
37
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38
+
39
+ [-1, 1, Conv, [256, 3, 2]],
40
+ [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change
41
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42
+
43
+ [-1, 1, Conv, [512, 3, 2]],
44
+ [[-1, 10], 1, Concat, [1]], # cat head P5
45
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46
+
47
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48
+ ]
yolov5-code-main/models/hub/yolov5-fpn.yaml ADDED
@@ -0,0 +1,42 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors:
8
+ - [10,13, 16,30, 33,23] # P3/8
9
+ - [30,61, 62,45, 59,119] # P4/16
10
+ - [116,90, 156,198, 373,326] # P5/32
11
+
12
+ # YOLOv5 v6.0 backbone
13
+ backbone:
14
+ # [from, number, module, args]
15
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17
+ [-1, 3, C3, [128]],
18
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19
+ [-1, 6, C3, [256]],
20
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21
+ [-1, 9, C3, [512]],
22
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23
+ [-1, 3, C3, [1024]],
24
+ [-1, 1, SPPF, [1024, 5]], # 9
25
+ ]
26
+
27
+ # YOLOv5 v6.0 FPN head
28
+ head:
29
+ [[-1, 3, C3, [1024, False]], # 10 (P5/32-large)
30
+
31
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
33
+ [-1, 1, Conv, [512, 1, 1]],
34
+ [-1, 3, C3, [512, False]], # 14 (P4/16-medium)
35
+
36
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
38
+ [-1, 1, Conv, [256, 1, 1]],
39
+ [-1, 3, C3, [256, False]], # 18 (P3/8-small)
40
+
41
+ [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
42
+ ]
yolov5-code-main/models/hub/yolov5-p2.yaml ADDED
@@ -0,0 +1,54 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8
+
9
+ # YOLOv5 v6.0 backbone
10
+ backbone:
11
+ # [from, number, module, args]
12
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14
+ [-1, 3, C3, [128]],
15
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16
+ [-1, 6, C3, [256]],
17
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18
+ [-1, 9, C3, [512]],
19
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
20
+ [-1, 3, C3, [1024]],
21
+ [-1, 1, SPPF, [1024, 5]], # 9
22
+ ]
23
+
24
+ # YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs
25
+ head:
26
+ [[-1, 1, Conv, [512, 1, 1]],
27
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
28
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
29
+ [-1, 3, C3, [512, False]], # 13
30
+
31
+ [-1, 1, Conv, [256, 1, 1]],
32
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
33
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
34
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
35
+
36
+ [-1, 1, Conv, [128, 1, 1]],
37
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
38
+ [[-1, 2], 1, Concat, [1]], # cat backbone P2
39
+ [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall)
40
+
41
+ [-1, 1, Conv, [128, 3, 2]],
42
+ [[-1, 18], 1, Concat, [1]], # cat head P3
43
+ [-1, 3, C3, [256, False]], # 24 (P3/8-small)
44
+
45
+ [-1, 1, Conv, [256, 3, 2]],
46
+ [[-1, 14], 1, Concat, [1]], # cat head P4
47
+ [-1, 3, C3, [512, False]], # 27 (P4/16-medium)
48
+
49
+ [-1, 1, Conv, [512, 3, 2]],
50
+ [[-1, 10], 1, Concat, [1]], # cat head P5
51
+ [-1, 3, C3, [1024, False]], # 30 (P5/32-large)
52
+
53
+ [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5)
54
+ ]
yolov5-code-main/models/hub/yolov5-p34.yaml ADDED
@@ -0,0 +1,41 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 0.33 # model depth multiple
6
+ width_multiple: 0.50 # layer channel multiple
7
+ anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8
+
9
+ # YOLOv5 v6.0 backbone
10
+ backbone:
11
+ # [from, number, module, args]
12
+ [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2
13
+ [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4
14
+ [ -1, 3, C3, [ 128 ] ],
15
+ [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8
16
+ [ -1, 6, C3, [ 256 ] ],
17
+ [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16
18
+ [ -1, 9, C3, [ 512 ] ],
19
+ [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32
20
+ [ -1, 3, C3, [ 1024 ] ],
21
+ [ -1, 1, SPPF, [ 1024, 5 ] ], # 9
22
+ ]
23
+
24
+ # YOLOv5 v6.0 head with (P3, P4) outputs
25
+ head:
26
+ [ [ -1, 1, Conv, [ 512, 1, 1 ] ],
27
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
28
+ [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4
29
+ [ -1, 3, C3, [ 512, False ] ], # 13
30
+
31
+ [ -1, 1, Conv, [ 256, 1, 1 ] ],
32
+ [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ],
33
+ [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3
34
+ [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small)
35
+
36
+ [ -1, 1, Conv, [ 256, 3, 2 ] ],
37
+ [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4
38
+ [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium)
39
+
40
+ [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4)
41
+ ]
yolov5-code-main/models/hub/yolov5-p6.yaml ADDED
@@ -0,0 +1,56 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8
+
9
+ # YOLOv5 v6.0 backbone
10
+ backbone:
11
+ # [from, number, module, args]
12
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14
+ [-1, 3, C3, [128]],
15
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16
+ [-1, 6, C3, [256]],
17
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18
+ [-1, 9, C3, [512]],
19
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20
+ [-1, 3, C3, [768]],
21
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22
+ [-1, 3, C3, [1024]],
23
+ [-1, 1, SPPF, [1024, 5]], # 11
24
+ ]
25
+
26
+ # YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs
27
+ head:
28
+ [[-1, 1, Conv, [768, 1, 1]],
29
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
30
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
31
+ [-1, 3, C3, [768, False]], # 15
32
+
33
+ [-1, 1, Conv, [512, 1, 1]],
34
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
35
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
36
+ [-1, 3, C3, [512, False]], # 19
37
+
38
+ [-1, 1, Conv, [256, 1, 1]],
39
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
40
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
41
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
42
+
43
+ [-1, 1, Conv, [256, 3, 2]],
44
+ [[-1, 20], 1, Concat, [1]], # cat head P4
45
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
46
+
47
+ [-1, 1, Conv, [512, 3, 2]],
48
+ [[-1, 16], 1, Concat, [1]], # cat head P5
49
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
50
+
51
+ [-1, 1, Conv, [768, 3, 2]],
52
+ [[-1, 12], 1, Concat, [1]], # cat head P6
53
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
54
+
55
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
56
+ ]
yolov5-code-main/models/hub/yolov5-p7.yaml ADDED
@@ -0,0 +1,67 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors: 3 # AutoAnchor evolves 3 anchors per P output layer
8
+
9
+ # YOLOv5 v6.0 backbone
10
+ backbone:
11
+ # [from, number, module, args]
12
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
13
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
14
+ [-1, 3, C3, [128]],
15
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
16
+ [-1, 6, C3, [256]],
17
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
18
+ [-1, 9, C3, [512]],
19
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
20
+ [-1, 3, C3, [768]],
21
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
22
+ [-1, 3, C3, [1024]],
23
+ [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128
24
+ [-1, 3, C3, [1280]],
25
+ [-1, 1, SPPF, [1280, 5]], # 13
26
+ ]
27
+
28
+ # YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs
29
+ head:
30
+ [[-1, 1, Conv, [1024, 1, 1]],
31
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
32
+ [[-1, 10], 1, Concat, [1]], # cat backbone P6
33
+ [-1, 3, C3, [1024, False]], # 17
34
+
35
+ [-1, 1, Conv, [768, 1, 1]],
36
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
37
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
38
+ [-1, 3, C3, [768, False]], # 21
39
+
40
+ [-1, 1, Conv, [512, 1, 1]],
41
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
42
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
43
+ [-1, 3, C3, [512, False]], # 25
44
+
45
+ [-1, 1, Conv, [256, 1, 1]],
46
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
47
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
48
+ [-1, 3, C3, [256, False]], # 29 (P3/8-small)
49
+
50
+ [-1, 1, Conv, [256, 3, 2]],
51
+ [[-1, 26], 1, Concat, [1]], # cat head P4
52
+ [-1, 3, C3, [512, False]], # 32 (P4/16-medium)
53
+
54
+ [-1, 1, Conv, [512, 3, 2]],
55
+ [[-1, 22], 1, Concat, [1]], # cat head P5
56
+ [-1, 3, C3, [768, False]], # 35 (P5/32-large)
57
+
58
+ [-1, 1, Conv, [768, 3, 2]],
59
+ [[-1, 18], 1, Concat, [1]], # cat head P6
60
+ [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge)
61
+
62
+ [-1, 1, Conv, [1024, 3, 2]],
63
+ [[-1, 14], 1, Concat, [1]], # cat head P7
64
+ [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge)
65
+
66
+ [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7)
67
+ ]
yolov5-code-main/models/hub/yolov5-panet.yaml ADDED
@@ -0,0 +1,48 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors:
8
+ - [10,13, 16,30, 33,23] # P3/8
9
+ - [30,61, 62,45, 59,119] # P4/16
10
+ - [116,90, 156,198, 373,326] # P5/32
11
+
12
+ # YOLOv5 v6.0 backbone
13
+ backbone:
14
+ # [from, number, module, args]
15
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
16
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
17
+ [-1, 3, C3, [128]],
18
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
19
+ [-1, 6, C3, [256]],
20
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
21
+ [-1, 9, C3, [512]],
22
+ [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
23
+ [-1, 3, C3, [1024]],
24
+ [-1, 1, SPPF, [1024, 5]], # 9
25
+ ]
26
+
27
+ # YOLOv5 v6.0 PANet head
28
+ head:
29
+ [[-1, 1, Conv, [512, 1, 1]],
30
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
31
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
32
+ [-1, 3, C3, [512, False]], # 13
33
+
34
+ [-1, 1, Conv, [256, 1, 1]],
35
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
36
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
37
+ [-1, 3, C3, [256, False]], # 17 (P3/8-small)
38
+
39
+ [-1, 1, Conv, [256, 3, 2]],
40
+ [[-1, 14], 1, Concat, [1]], # cat head P4
41
+ [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
42
+
43
+ [-1, 1, Conv, [512, 3, 2]],
44
+ [[-1, 10], 1, Concat, [1]], # cat head P5
45
+ [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
46
+
47
+ [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
48
+ ]
yolov5-code-main/models/hub/yolov5l6.yaml ADDED
@@ -0,0 +1,60 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 1.0 # model depth multiple
6
+ width_multiple: 1.0 # layer channel multiple
7
+ anchors:
8
+ - [19,27, 44,40, 38,94] # P3/8
9
+ - [96,68, 86,152, 180,137] # P4/16
10
+ - [140,301, 303,264, 238,542] # P5/32
11
+ - [436,615, 739,380, 925,792] # P6/64
12
+
13
+ # YOLOv5 v6.0 backbone
14
+ backbone:
15
+ # [from, number, module, args]
16
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18
+ [-1, 3, C3, [128]],
19
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20
+ [-1, 6, C3, [256]],
21
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22
+ [-1, 9, C3, [512]],
23
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24
+ [-1, 3, C3, [768]],
25
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26
+ [-1, 3, C3, [1024]],
27
+ [-1, 1, SPPF, [1024, 5]], # 11
28
+ ]
29
+
30
+ # YOLOv5 v6.0 head
31
+ head:
32
+ [[-1, 1, Conv, [768, 1, 1]],
33
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
35
+ [-1, 3, C3, [768, False]], # 15
36
+
37
+ [-1, 1, Conv, [512, 1, 1]],
38
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
40
+ [-1, 3, C3, [512, False]], # 19
41
+
42
+ [-1, 1, Conv, [256, 1, 1]],
43
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
45
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46
+
47
+ [-1, 1, Conv, [256, 3, 2]],
48
+ [[-1, 20], 1, Concat, [1]], # cat head P4
49
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50
+
51
+ [-1, 1, Conv, [512, 3, 2]],
52
+ [[-1, 16], 1, Concat, [1]], # cat head P5
53
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54
+
55
+ [-1, 1, Conv, [768, 3, 2]],
56
+ [[-1, 12], 1, Concat, [1]], # cat head P6
57
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58
+
59
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60
+ ]
yolov5-code-main/models/hub/yolov5m6.yaml ADDED
@@ -0,0 +1,60 @@
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+
3
+ # Parameters
4
+ nc: 80 # number of classes
5
+ depth_multiple: 0.67 # model depth multiple
6
+ width_multiple: 0.75 # layer channel multiple
7
+ anchors:
8
+ - [19,27, 44,40, 38,94] # P3/8
9
+ - [96,68, 86,152, 180,137] # P4/16
10
+ - [140,301, 303,264, 238,542] # P5/32
11
+ - [436,615, 739,380, 925,792] # P6/64
12
+
13
+ # YOLOv5 v6.0 backbone
14
+ backbone:
15
+ # [from, number, module, args]
16
+ [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
17
+ [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
18
+ [-1, 3, C3, [128]],
19
+ [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
20
+ [-1, 6, C3, [256]],
21
+ [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
22
+ [-1, 9, C3, [512]],
23
+ [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
24
+ [-1, 3, C3, [768]],
25
+ [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
26
+ [-1, 3, C3, [1024]],
27
+ [-1, 1, SPPF, [1024, 5]], # 11
28
+ ]
29
+
30
+ # YOLOv5 v6.0 head
31
+ head:
32
+ [[-1, 1, Conv, [768, 1, 1]],
33
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
34
+ [[-1, 8], 1, Concat, [1]], # cat backbone P5
35
+ [-1, 3, C3, [768, False]], # 15
36
+
37
+ [-1, 1, Conv, [512, 1, 1]],
38
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
39
+ [[-1, 6], 1, Concat, [1]], # cat backbone P4
40
+ [-1, 3, C3, [512, False]], # 19
41
+
42
+ [-1, 1, Conv, [256, 1, 1]],
43
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']],
44
+ [[-1, 4], 1, Concat, [1]], # cat backbone P3
45
+ [-1, 3, C3, [256, False]], # 23 (P3/8-small)
46
+
47
+ [-1, 1, Conv, [256, 3, 2]],
48
+ [[-1, 20], 1, Concat, [1]], # cat head P4
49
+ [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
50
+
51
+ [-1, 1, Conv, [512, 3, 2]],
52
+ [[-1, 16], 1, Concat, [1]], # cat head P5
53
+ [-1, 3, C3, [768, False]], # 29 (P5/32-large)
54
+
55
+ [-1, 1, Conv, [768, 3, 2]],
56
+ [[-1, 12], 1, Concat, [1]], # cat head P6
57
+ [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
58
+
59
+ [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
60
+ ]
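The m6 file is identical to yolov5l6.yaml except for depth_multiple and width_multiple; parse_model scales every repeat count and channel width from the same layer table (the gain logic appears in models/tf.py's parse_model later in this upload). The arithmetic for two representative entries:

import math

def make_divisible(x, divisor=8):  # channels are rounded up to a multiple of 8
    return math.ceil(x / divisor) * divisor

gd, gw = 0.67, 0.75  # yolov5m6 depth/width multiples
print(max(round(9 * gd), 1))  # the 9-repeat C3 stage runs 6 times
print(make_divisible(1024 * gw))  # 1024 nominal channels become 768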
yolov5-code-main/models/hub/yolov5n6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.25 # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94] # P3/8
+   - [96,68, 86,152, 180,137] # P4/16
+   - [140,301, 303,264, 238,542] # P5/32
+   - [436,615, 739,380, 925,792] # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]], # cat backbone P5
+    [-1, 3, C3, [768, False]], # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]], # cat head P6
+    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+   ]
yolov5-code-main/models/hub/yolov5s-LeakyReLU.yaml ADDED
@@ -0,0 +1,49 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.50 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+   ]
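The optional activation: key swaps the default SiLU used by every Conv() in the network for another activation. Upstream parse_model eval()s this string once and installs it as the class-wide default; a minimal sketch of that mechanism (Conv here is a trimmed stand-in for models.common.Conv and its default_act attribute):

import torch.nn as nn

class Conv(nn.Module):
    default_act = nn.SiLU()  # shared default activation

    def __init__(self):
        super().__init__()
        self.act = self.default_act  # picked up at construction time

act = "nn.LeakyReLU(0.1)"  # string from the yaml
Conv.default_act = eval(act)  # every Conv built afterwards uses LeakyReLU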
yolov5-code-main/models/hub/yolov5s-ghost.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.50 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3Ghost, [128]],
+    [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3Ghost, [256]],
+    [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3Ghost, [512]],
+    [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3Ghost, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, GhostConv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3Ghost, [512, False]], # 13
+
+    [-1, 1, GhostConv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, GhostConv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, GhostConv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+   ]
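GhostConv and C3Ghost trade a little accuracy for far fewer FLOPs: a normal convolution produces half of the output channels and a cheap 5x5 depthwise convolution "ghosts" the other half. A self-contained sketch of the idea, following the shape of GhostConv in models/common.py (part of this same upload):

import torch
import torch.nn as nn

class GhostConvSketch(nn.Module):
    def __init__(self, c1, c2, k=1, s=1):
        super().__init__()
        c_ = c2 // 2  # hidden channels
        self.cv1 = nn.Conv2d(c1, c_, k, s, k // 2)  # primary conv
        self.cv2 = nn.Conv2d(c_, c_, 5, 1, 2, groups=c_)  # cheap depthwise op

    def forward(self, x):
        y = self.cv1(x)
        return torch.cat((y, self.cv2(y)), 1)  # c_ + c_ = c2 channels

print(GhostConvSketch(64, 128)(torch.zeros(1, 64, 8, 8)).shape)  # [1, 128, 8, 8]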
yolov5-code-main/models/hub/yolov5s-transformer.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.50 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3TR, [1024]], # 8 <--- C3TR() Transformer module
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
+   ]
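The only change from the stock yolov5s is layer 8, where C3TR swaps the C3 bottleneck stack for a transformer block running multi-head self-attention over the P5 feature map. A minimal sketch of that attention step, assuming the upstream C3TR's flattening of the map into a token sequence (512 is the scaled P5 width of the s model; 20x20 is the P5 grid for a 640-px input):

import torch
import torch.nn as nn

c, h, w = 512, 20, 20  # P5 channels and grid size
x = torch.zeros(1, c, h, w)
tokens = x.flatten(2).permute(2, 0, 1)  # (h*w, batch, c) token sequence
attn = nn.MultiheadAttention(c, num_heads=4)
out, _ = attn(tokens, tokens, tokens)  # self-attention over the 400 grid cells
print(out.shape)  # torch.Size([400, 1, 512])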
yolov5-code-main/models/hub/yolov5s6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.50 # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94] # P3/8
+   - [96,68, 86,152, 180,137] # P4/16
+   - [140,301, 303,264, 238,542] # P5/32
+   - [436,615, 739,380, 925,792] # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]], # cat backbone P5
+    [-1, 3, C3, [768, False]], # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]], # cat head P6
+    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+   ]
yolov5-code-main/models/hub/yolov5x6.yaml ADDED
@@ -0,0 +1,60 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.33 # model depth multiple
+ width_multiple: 1.25 # layer channel multiple
+ anchors:
+   - [19,27, 44,40, 38,94] # P3/8
+   - [96,68, 86,152, 180,137] # P4/16
+   - [140,301, 303,264, 238,542] # P5/32
+   - [436,615, 739,380, 925,792] # P6/64
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [768, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [768]],
+    [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 11
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [768, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 8], 1, Concat, [1]], # cat backbone P5
+    [-1, 3, C3, [768, False]], # 15
+
+    [-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 19
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 23 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 20], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 26 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 16], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [768, False]], # 29 (P5/32-large)
+
+    [-1, 1, Conv, [768, 3, 2]],
+    [[-1, 12], 1, Concat, [1]], # cat head P6
+    [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge)
+
+    [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6)
+   ]
yolov5-code-main/models/segment/yolov5l-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.0 # model depth multiple
+ width_multiple: 1.0 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+   ]
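The Segment rows take two extra args over Detect: nm=32 mask coefficients per box and npr=256 proto channels (see the Segment class in models/yolo.py below). The resulting per-anchor output width for the default COCO config:

nc, nm = 80, 32
no = 5 + nc + nm  # box(4) + objectness(1) + classes + mask coefficients
print(no)  # 117 outputs per anchor, vs 85 for a plain Detect head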
yolov5-code-main/models/segment/yolov5m-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.67 # model depth multiple
+ width_multiple: 0.75 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+   ]
yolov5-code-main/models/segment/yolov5n-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.25 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+   ]
yolov5-code-main/models/segment/yolov5s-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 0.33 # model depth multiple
+ width_multiple: 0.5 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+   ]
yolov5-code-main/models/segment/yolov5x-seg.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80 # number of classes
+ depth_multiple: 1.33 # model depth multiple
+ width_multiple: 1.25 # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23] # P3/8
+   - [30,61, 62,45, 59,119] # P4/16
+   - [116,90, 156,198, 373,326] # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]], # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]], # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]], # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]], # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]], # cat backbone P4
+    [-1, 3, C3, [512, False]], # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]], # cat backbone P3
+    [-1, 3, C3, [256, False]], # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]], # cat head P4
+    [-1, 3, C3, [512, False]], # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]], # cat head P5
+    [-1, 3, C3, [1024, False]], # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Segment(P3, P4, P5)
+   ]
yolov5-code-main/models/tf.py ADDED
@@ -0,0 +1,608 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+ """
+ TensorFlow, Keras and TFLite versions of YOLOv5
+ Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127
+
+ Usage:
+     $ python models/tf.py --weights yolov5s.pt
+
+ Export:
+     $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs
+ """
+
+ import argparse
+ import sys
+ from copy import deepcopy
+ from pathlib import Path
+
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[1] # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT)) # add ROOT to PATH
+ # ROOT = ROOT.relative_to(Path.cwd()) # relative
+
+ import numpy as np
+ import tensorflow as tf
+ import torch
+ import torch.nn as nn
+ from tensorflow import keras
+
+ from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv,
+                            DWConvTranspose2d, Focus, autopad)
+ from models.experimental import MixConv2d, attempt_load
+ from models.yolo import Detect, Segment
+ from utils.activations import SiLU
+ from utils.general import LOGGER, make_divisible, print_args
+
+
+ class TFBN(keras.layers.Layer):
+     # TensorFlow BatchNormalization wrapper
+     def __init__(self, w=None):
+         super().__init__()
+         self.bn = keras.layers.BatchNormalization(
+             beta_initializer=keras.initializers.Constant(w.bias.numpy()),
+             gamma_initializer=keras.initializers.Constant(w.weight.numpy()),
+             moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()),
+             moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()),
+             epsilon=w.eps)
+
+     def call(self, inputs):
+         return self.bn(inputs)
+
+
+ class TFPad(keras.layers.Layer):
+     # Pad inputs in spatial dimensions 1 and 2
+     def __init__(self, pad):
+         super().__init__()
+         if isinstance(pad, int):
+             self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]])
+         else: # tuple/list
+             self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]])
+
+     def call(self, inputs):
+         return tf.pad(inputs, self.pad, mode='constant', constant_values=0)
+
+
+ class TFConv(keras.layers.Layer):
+     # Standard convolution
+     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+         # ch_in, ch_out, weights, kernel, stride, padding, groups
+         super().__init__()
+         assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+         # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding)
+         # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch
+         conv = keras.layers.Conv2D(
+             filters=c2,
+             kernel_size=k,
+             strides=s,
+             padding='SAME' if s == 1 else 'VALID',
+             use_bias=not hasattr(w, 'bn'),
+             kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+             bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+         self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+         self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+         self.act = activations(w.act) if act else tf.identity
+
+     def call(self, inputs):
+         return self.act(self.bn(self.conv(inputs)))
+
+
+ class TFDWConv(keras.layers.Layer):
+     # Depthwise convolution
+     def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None):
+         # ch_in, ch_out, weights, kernel, stride, padding, groups
+         super().__init__()
+         assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels'
+         conv = keras.layers.DepthwiseConv2D(
+             kernel_size=k,
+             depth_multiplier=c2 // c1,
+             strides=s,
+             padding='SAME' if s == 1 else 'VALID',
+             use_bias=not hasattr(w, 'bn'),
+             depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()),
+             bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy()))
+         self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv])
+         self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity
+         self.act = activations(w.act) if act else tf.identity
+
+     def call(self, inputs):
+         return self.act(self.bn(self.conv(inputs)))
+
+
+ class TFDWConvTranspose2d(keras.layers.Layer):
+     # Depthwise ConvTranspose2d
+     def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None):
+         # ch_in, ch_out, weights, kernel, stride, padding, groups
+         super().__init__()
+         assert c1 == c2, f'TFDWConvTranspose2d() output={c2} must be equal to input={c1} channels'
+         assert k == 4 and p1 == 1, 'TFDWConvTranspose2d() only valid for k=4 and p1=1'
+         weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy()
+         self.c1 = c1
+         self.conv = [
+             keras.layers.Conv2DTranspose(filters=1,
+                                          kernel_size=k,
+                                          strides=s,
+                                          padding='VALID',
+                                          output_padding=p2,
+                                          use_bias=True,
+                                          kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]),
+                                          bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)]
+
+     def call(self, inputs):
+         return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1]
+
+
+ class TFFocus(keras.layers.Layer):
+     # Focus wh information into c-space
+     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None):
+         # ch_in, ch_out, kernel, stride, padding, groups
+         super().__init__()
+         self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv)
+
+     def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c)
+         # inputs = inputs / 255 # normalize 0-255 to 0-1
+         inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]]
+         return self.conv(tf.concat(inputs, 3))
+
+
+ class TFBottleneck(keras.layers.Layer):
+     # Standard bottleneck
+     def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion
+         super().__init__()
+         c_ = int(c2 * e) # hidden channels
+         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+         self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2)
+         self.add = shortcut and c1 == c2
+
+     def call(self, inputs):
+         return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
+
+
+ class TFCrossConv(keras.layers.Layer):
+     # Cross Convolution
+     def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None):
+         super().__init__()
+         c_ = int(c2 * e) # hidden channels
+         self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1)
+         self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2)
+         self.add = shortcut and c1 == c2
+
+     def call(self, inputs):
+         return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs))
+
+
+ class TFConv2d(keras.layers.Layer):
+     # Substitution for PyTorch nn.Conv2D
+     def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None):
+         super().__init__()
+         assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument"
+         self.conv = keras.layers.Conv2D(filters=c2,
+                                         kernel_size=k,
+                                         strides=s,
+                                         padding='VALID',
+                                         use_bias=bias,
+                                         kernel_initializer=keras.initializers.Constant(
+                                             w.weight.permute(2, 3, 1, 0).numpy()),
+                                         bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None)
+
+     def call(self, inputs):
+         return self.conv(inputs)
+
+
+ class TFBottleneckCSP(keras.layers.Layer):
+     # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks
+     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+         # ch_in, ch_out, number, shortcut, groups, expansion
+         super().__init__()
+         c_ = int(c2 * e) # hidden channels
+         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+         self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2)
+         self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3)
+         self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4)
+         self.bn = TFBN(w.bn)
+         self.act = lambda x: keras.activations.swish(x)
+         self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
+
+     def call(self, inputs):
+         y1 = self.cv3(self.m(self.cv1(inputs)))
+         y2 = self.cv2(inputs)
+         return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3))))
+
+
+ class TFC3(keras.layers.Layer):
+     # CSP Bottleneck with 3 convolutions
+     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+         # ch_in, ch_out, number, shortcut, groups, expansion
+         super().__init__()
+         c_ = int(c2 * e) # hidden channels
+         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+         self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
+         self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
+         self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)])
+
+     def call(self, inputs):
+         return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
+
+
+ class TFC3x(keras.layers.Layer):
+     # C3 module with cross-convolutions
+     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None):
+         # ch_in, ch_out, number, shortcut, groups, expansion
+         super().__init__()
+         c_ = int(c2 * e) # hidden channels
+         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+         self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2)
+         self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3)
+         self.m = keras.Sequential([
+             TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)])
+
+     def call(self, inputs):
+         return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3))
+
+
+ class TFSPP(keras.layers.Layer):
+     # Spatial pyramid pooling layer used in YOLOv3-SPP
+     def __init__(self, c1, c2, k=(5, 9, 13), w=None):
+         super().__init__()
+         c_ = c1 // 2 # hidden channels
+         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+         self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2)
+         self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k]
+
+     def call(self, inputs):
+         x = self.cv1(inputs)
+         return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3))
+
+
+ class TFSPPF(keras.layers.Layer):
+     # Spatial pyramid pooling-Fast layer
+     def __init__(self, c1, c2, k=5, w=None):
+         super().__init__()
+         c_ = c1 // 2 # hidden channels
+         self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1)
+         self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2)
+         self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME')
+
+     def call(self, inputs):
+         x = self.cv1(inputs)
+         y1 = self.m(x)
+         y2 = self.m(y1)
+         return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3))
+
+
+ class TFDetect(keras.layers.Layer):
+     # TF YOLOv5 Detect layer
+     def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer
+         super().__init__()
+         self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32)
+         self.nc = nc # number of classes
+         self.no = nc + 5 # number of outputs per anchor
+         self.nl = len(anchors) # number of detection layers
+         self.na = len(anchors[0]) // 2 # number of anchors
+         self.grid = [tf.zeros(1)] * self.nl # init grid
+         self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32)
+         self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2])
+         self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)]
+         self.training = False # set to False after building model
+         self.imgsz = imgsz
+         for i in range(self.nl):
+             ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
+             self.grid[i] = self._make_grid(nx, ny)
+
+     def call(self, inputs):
+         z = [] # inference output
+         x = []
+         for i in range(self.nl):
+             x.append(self.m[i](inputs[i]))
+             # x(bs,20,20,255) to x(bs,3,20,20,85)
+             ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i]
+             x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no])
+
+             if not self.training: # inference
+                 y = x[i]
+                 grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5
+                 anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4
+                 xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy
+                 wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid
+                 # Normalize xywh to 0-1 to reduce calibration error
+                 xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
+                 wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32)
+                 y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1)
+                 z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no]))
+
+         return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),)
+
+     @staticmethod
+     def _make_grid(nx=20, ny=20):
+         # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
+         # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
+         xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny))
+         return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32)
+
+
+ class TFSegment(TFDetect):
+     # YOLOv5 Segment head for segmentation models
+     def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None):
+         super().__init__(nc, anchors, ch, imgsz, w)
+         self.nm = nm # number of masks
+         self.npr = npr # number of protos
+         self.no = 5 + nc + self.nm # number of outputs per anchor
+         self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv
+         self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos
+         self.detect = TFDetect.call
+
+     def call(self, x):
+         p = self.proto(x[0])
+         # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos
+         p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160)
+         x = self.detect(self, x)
+         return (x, p) if self.training else (x[0], p)
+
+
+ class TFProto(keras.layers.Layer):
+
+     def __init__(self, c1, c_=256, c2=32, w=None):
+         super().__init__()
+         self.cv1 = TFConv(c1, c_, k=3, w=w.cv1)
+         self.upsample = TFUpsample(None, scale_factor=2, mode='nearest')
+         self.cv2 = TFConv(c_, c_, k=3, w=w.cv2)
+         self.cv3 = TFConv(c_, c2, w=w.cv3)
+
+     def call(self, inputs):
+         return self.cv3(self.cv2(self.upsample(self.cv1(inputs))))
+
+
+ class TFUpsample(keras.layers.Layer):
+     # TF version of torch.nn.Upsample()
+     def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w'
+         super().__init__()
+         assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2'
+         self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode)
+         # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode)
+         # with default arguments: align_corners=False, half_pixel_centers=False
+         # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x,
+         #                                                            size=(x.shape[1] * 2, x.shape[2] * 2))
+
+     def call(self, inputs):
+         return self.upsample(inputs)
+
+
+ class TFConcat(keras.layers.Layer):
+     # TF version of torch.concat()
+     def __init__(self, dimension=1, w=None):
+         super().__init__()
+         assert dimension == 1, 'convert only NCHW to NHWC concat'
+         self.d = 3
+
+     def call(self, inputs):
+         return tf.concat(inputs, self.d)
+
+
+ def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3)
+     LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}")
+     anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
+     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors
+     no = na * (nc + 5) # number of outputs = anchors * (classes + 5)
+
+     layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out
+     for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args
+         m_str = m
+         m = eval(m) if isinstance(m, str) else m # eval strings
+         for j, a in enumerate(args):
+             try:
+                 args[j] = eval(a) if isinstance(a, str) else a # eval strings
+             except NameError:
+                 pass
+
+         n = max(round(n * gd), 1) if n > 1 else n # depth gain
+         if m in [
+                 nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv,
+                 BottleneckCSP, C3, C3x]:
+             c1, c2 = ch[f], args[0]
+             c2 = make_divisible(c2 * gw, 8) if c2 != no else c2
+
+             args = [c1, c2, *args[1:]]
+             if m in [BottleneckCSP, C3, C3x]:
+                 args.insert(2, n)
+                 n = 1
+         elif m is nn.BatchNorm2d:
+             args = [ch[f]]
+         elif m is Concat:
+             c2 = sum(ch[-1 if x == -1 else x + 1] for x in f)
+         elif m in [Detect, Segment]:
+             args.append([ch[x + 1] for x in f])
+             if isinstance(args[1], int): # number of anchors
+                 args[1] = [list(range(args[1] * 2))] * len(f)
+             if m is Segment:
+                 args[3] = make_divisible(args[3] * gw, 8)
+             args.append(imgsz)
+         else:
+             c2 = ch[f]
+
+         tf_m = eval('TF' + m_str.replace('nn.', ''))
+         m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \
+             else tf_m(*args, w=model.model[i]) # module
+
+         torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module
+         t = str(m)[8:-2].replace('__main__.', '') # module type
+         np = sum(x.numel() for x in torch_m_.parameters()) # number params
+         m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params
+         LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print
+         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
+         layers.append(m_)
+         ch.append(c2)
+     return keras.Sequential(layers), sorted(save)
+
+
+ class TFModel:
+     # TF YOLOv5 model
+     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes
+         super().__init__()
+         if isinstance(cfg, dict):
+             self.yaml = cfg # model dict
+         else: # is *.yaml
+             import yaml # for torch hub
+             self.yaml_file = Path(cfg).name
+             with open(cfg) as f:
+                 self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict
+
+         # Define model
+         if nc and nc != self.yaml['nc']:
+             LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}")
+             self.yaml['nc'] = nc # override yaml value
+         self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz)
+
+     def predict(self,
+                 inputs,
+                 tf_nms=False,
+                 agnostic_nms=False,
+                 topk_per_class=100,
+                 topk_all=100,
+                 iou_thres=0.45,
+                 conf_thres=0.25):
+         y = [] # outputs
+         x = inputs
+         for m in self.model.layers:
+             if m.f != -1: # if not from previous layer
+                 x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
+
+             x = m(x) # run
+             y.append(x if m.i in self.savelist else None) # save output
+
+         # Add TensorFlow NMS
+         if tf_nms:
+             boxes = self._xywh2xyxy(x[0][..., :4])
+             probs = x[0][:, :, 4:5]
+             classes = x[0][:, :, 5:]
+             scores = probs * classes
+             if agnostic_nms:
+                 nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres)
+             else:
+                 boxes = tf.expand_dims(boxes, 2)
+                 nms = tf.image.combined_non_max_suppression(boxes,
+                                                             scores,
+                                                             topk_per_class,
+                                                             topk_all,
+                                                             iou_thres,
+                                                             conf_thres,
+                                                             clip_boxes=False)
+             return (nms,)
+         return x # output [1,6300,85] = [xywh, conf, class0, class1, ...]
+         # x = x[0] # [x(1,6300,85), ...] to x(6300,85)
+         # xywh = x[..., :4] # x(6300,4) boxes
+         # conf = x[..., 4:5] # x(6300,1) confidences
+         # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes
+         # return tf.concat([conf, cls, xywh], 1)
+
+     @staticmethod
+     def _xywh2xyxy(xywh):
+         # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+         x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1)
+         return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1)
+
+
+ class AgnosticNMS(keras.layers.Layer):
+     # TF Agnostic NMS
+     def call(self, input, topk_all, iou_thres, conf_thres):
+         # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450
+         return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres),
+                          input,
+                          fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32),
+                          name='agnostic_nms')
+
+     @staticmethod
+     def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS
+         boxes, classes, scores = x
+         class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32)
+         scores_inp = tf.reduce_max(scores, -1)
+         selected_inds = tf.image.non_max_suppression(boxes,
+                                                      scores_inp,
+                                                      max_output_size=topk_all,
+                                                      iou_threshold=iou_thres,
+                                                      score_threshold=conf_thres)
+         selected_boxes = tf.gather(boxes, selected_inds)
+         padded_boxes = tf.pad(selected_boxes,
+                               paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]],
+                               mode='CONSTANT',
+                               constant_values=0.0)
+         selected_scores = tf.gather(scores_inp, selected_inds)
+         padded_scores = tf.pad(selected_scores,
+                                paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+                                mode='CONSTANT',
+                                constant_values=-1.0)
+         selected_classes = tf.gather(class_inds, selected_inds)
+         padded_classes = tf.pad(selected_classes,
+                                 paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]],
+                                 mode='CONSTANT',
+                                 constant_values=-1.0)
+         valid_detections = tf.shape(selected_inds)[0]
+         return padded_boxes, padded_scores, padded_classes, valid_detections
+
+
+ def activations(act=nn.SiLU):
+     # Returns TF activation from input PyTorch activation
+     if isinstance(act, nn.LeakyReLU):
+         return lambda x: keras.activations.relu(x, alpha=0.1)
+     elif isinstance(act, nn.Hardswish):
+         return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667
+     elif isinstance(act, (nn.SiLU, SiLU)):
+         return lambda x: keras.activations.swish(x)
+     else:
+         raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}')
+
+
+ def representative_dataset_gen(dataset, ncalib=100):
+     # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays
+     for n, (path, img, im0s, vid_cap, string) in enumerate(dataset):
+         im = np.transpose(img, [1, 2, 0])
+         im = np.expand_dims(im, axis=0).astype(np.float32)
+         im /= 255
+         yield [im]
+         if n >= ncalib:
+             break
+
+
+ def run(
+         weights=ROOT / 'yolov5s.pt', # weights path
+         imgsz=(640, 640), # inference size h,w
+         batch_size=1, # batch size
+         dynamic=False, # dynamic batch size
+ ):
+     # PyTorch model
+     im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image
+     model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False)
+     _ = model(im) # inference
+     model.info()
+
+     # TensorFlow model
+     im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image
+     tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+     _ = tf_model.predict(im) # inference
+
+     # Keras model
+     im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
+     keras_model = keras.Model(inputs=im, outputs=tf_model.predict(im))
+     keras_model.summary()
+
+     LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.')
+
+
+ def parse_opt():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path')
+     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+     parser.add_argument('--batch-size', type=int, default=1, help='batch size')
+     parser.add_argument('--dynamic', action='store_true', help='dynamic batch size')
+     opt = parser.parse_args()
+     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
+     print_args(vars(opt))
+     return opt
+
+
+ def main(opt):
+     run(**vars(opt))
+
+
+ if __name__ == '__main__':
+     opt = parse_opt()
+     main(opt)
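TFDetect.call above and Detect.forward in models/yolo.py below decode raw head outputs with the same v5 box parameterization: xy = (2*sigmoid(t) + grid - 0.5) * stride and wh = (2*sigmoid(t))**2 * anchor (tf.py folds the factor 4 into its pre-scaled anchor_grid, which is equivalent). A worked example with neutral logits, where sigmoid(0) = 0.5:

sig = 0.5  # sigmoid of a zero logit
cx, stride = 10, 8  # grid cell 10 on the stride-8 P3 level
x_center = (2 * sig + (cx - 0.5)) * stride  # (1.0 + 9.5) * 8 = 84.0 px
w = (2 * sig) ** 2 * 10  # anchor width 10 stays 10.0 px
print(x_center, w)  # 84.0 10.0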
yolov5-code-main/models/yolo.py ADDED
@@ -0,0 +1,400 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
2
+ """
3
+ YOLO-specific modules
4
+
5
+ Usage:
6
+ $ python models/yolo.py --cfg yolov5s.yaml
7
+ """
8
+
9
+ import argparse
10
+ import contextlib
11
+ import os
12
+ import platform
13
+ import sys
14
+ from copy import deepcopy
15
+ from pathlib import Path
16
+
17
+ FILE = Path(__file__).resolve()
18
+ ROOT = FILE.parents[1] # YOLOv5 root directory
19
+ if str(ROOT) not in sys.path:
20
+ sys.path.append(str(ROOT)) # add ROOT to PATH
21
+ if platform.system() != 'Windows':
22
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
23
+
24
+ from models.common import *
25
+ from models.experimental import *
26
+ from utils.autoanchor import check_anchor_order
27
+ from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
28
+ from utils.plots import feature_visualization
29
+ from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device,
30
+ time_sync)
31
+
32
+ try:
33
+ import thop # for FLOPs computation
34
+ except ImportError:
35
+ thop = None
36
+
37
+
38
+ class Detect(nn.Module):
39
+ # YOLOv5 Detect head for detection models
40
+ stride = None # strides computed during build
41
+ dynamic = False # force grid reconstruction
42
+ export = False # export mode
43
+
44
+ def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer
45
+ super().__init__()
46
+ self.nc = nc # number of classes
47
+ self.no = nc + 5 # number of outputs per anchor
48
+ self.nl = len(anchors) # number of detection layers
49
+ self.na = len(anchors[0]) // 2 # number of anchors
50
+ self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid
51
+ self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid
52
+ self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2)
53
+ self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
54
+ self.inplace = inplace # use inplace ops (e.g. slice assignment)
55
+
56
+ def forward(self, x):
57
+ z = [] # inference output
58
+ for i in range(self.nl):
59
+ x[i] = self.m[i](x[i]) # conv
60
+ bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
61
+ x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
62
+
63
+ if not self.training: # inference
64
+ if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]:
65
+ self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i)
66
+
67
+ if isinstance(self, Segment): # (boxes + masks)
68
+ xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4)
69
+ xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy
70
+ wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh
71
+ y = torch.cat((xy, wh, conf.sigmoid(), mask), 4)
72
+ else: # Detect (boxes only)
73
+ xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4)
74
+ xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy
75
+ wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh
76
+ y = torch.cat((xy, wh, conf), 4)
77
+ z.append(y.view(bs, self.na * nx * ny, self.no))
78
+
79
+ return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x)
80
+
81
+ def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')):
82
+ d = self.anchors[i].device
83
+ t = self.anchors[i].dtype
84
+ shape = 1, self.na, ny, nx, 2 # grid shape
85
+ y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t)
86
+ yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility
87
+ grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5
88
+ anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape)
89
+ return grid, anchor_grid
90
+
91
+
92
+ class Segment(Detect):
93
+ # YOLOv5 Segment head for segmentation models
94
+ def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True):
95
+ super().__init__(nc, anchors, ch, inplace)
96
+ self.nm = nm # number of masks
97
+ self.npr = npr # number of protos
98
+ self.no = 5 + nc + self.nm # number of outputs per anchor
99
+ self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
100
+ self.proto = Proto(ch[0], self.npr, self.nm) # protos
101
+ self.detect = Detect.forward
102
+
103
+ def forward(self, x):
104
+ p = self.proto(x[0])
105
+ x = self.detect(self, x)
106
+ return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1])
107
+
108
+
109
+ class BaseModel(nn.Module):
110
+ # YOLOv5 base model
111
+ def forward(self, x, profile=False, visualize=False):
112
+ return self._forward_once(x, profile, visualize) # single-scale inference, train
113
+
114
+ def _forward_once(self, x, profile=False, visualize=False):
115
+ y, dt = [], [] # outputs
116
+ for m in self.model:
117
+ if m.f != -1: # if not from previous layer
118
+ x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
119
+ if profile:
120
+ self._profile_one_layer(m, x, dt)
121
+ x = m(x) # run
122
+ y.append(x if m.i in self.save else None) # save output
123
+ if visualize:
124
+ feature_visualization(x, m.type, m.i, save_dir=visualize)
125
+ return x
126
+
127
+ def _profile_one_layer(self, m, x, dt):
128
+ c = m == self.model[-1] # is final layer, copy input as inplace fix
129
+ o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs
130
+ t = time_sync()
131
+ for _ in range(10):
132
+ m(x.copy() if c else x)
133
+ dt.append((time_sync() - t) * 100)
134
+ if m == self.model[0]:
135
+ LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module")
136
+ LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}')
137
+ if c:
138
+ LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total")
139
+
140
+ def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
141
+ LOGGER.info('Fusing layers... ')
142
+ for m in self.model.modules():
143
+ if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'):
144
+ m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
145
+ delattr(m, 'bn') # remove batchnorm
146
+ m.forward = m.forward_fuse # update forward
147
+ self.info()
148
+ return self
149
+
150
+ def info(self, verbose=False, img_size=640): # print model information
151
+ model_info(self, verbose, img_size)
152
+
153
+ def _apply(self, fn):
154
+ # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers
155
+ self = super()._apply(fn)
156
+ m = self.model[-1] # Detect()
157
+ if isinstance(m, (Detect, Segment)):
158
+ m.stride = fn(m.stride)
159
+ m.grid = list(map(fn, m.grid))
160
+ if isinstance(m.anchor_grid, list):
161
+ m.anchor_grid = list(map(fn, m.anchor_grid))
162
+ return self
163
+
+
+ class DetectionModel(BaseModel):
+     # YOLOv5 detection model
+     def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
+         super().__init__()
+         if isinstance(cfg, dict):
+             self.yaml = cfg  # model dict
+         else:  # is *.yaml
+             import yaml  # for torch hub
+             self.yaml_file = Path(cfg).name
+             with open(cfg, encoding='ascii', errors='ignore') as f:
+                 self.yaml = yaml.safe_load(f)  # model dict
+
+         # Define model
+         ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
+         if nc and nc != self.yaml['nc']:
+             LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
+             self.yaml['nc'] = nc  # override yaml value
+         if anchors:
+             LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
+             self.yaml['anchors'] = round(anchors)  # override yaml value
+         self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
+         self.names = [str(i) for i in range(self.yaml['nc'])]  # default names
+         self.inplace = self.yaml.get('inplace', True)
+
+         # Build strides, anchors
+         m = self.model[-1]  # Detect()
+         if isinstance(m, (Detect, Segment)):
+             s = 256  # 2x min stride
+             m.inplace = self.inplace
+             forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x)
+             m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))])  # forward
+             check_anchor_order(m)
+             m.anchors /= m.stride.view(-1, 1, 1)
+             self.stride = m.stride
+             self._initialize_biases()  # only run once
+
+         # Init weights, biases
+         initialize_weights(self)
+         self.info()
+         LOGGER.info('')
+
+     def forward(self, x, augment=False, profile=False, visualize=False):
+         if augment:
+             return self._forward_augment(x)  # augmented inference, None
+         return self._forward_once(x, profile, visualize)  # single-scale inference, train
+
+     def _forward_augment(self, x):
+         img_size = x.shape[-2:]  # height, width
+         s = [1, 0.83, 0.67]  # scales
+         f = [None, 3, None]  # flips (2-ud, 3-lr)
+         y = []  # outputs
+         for si, fi in zip(s, f):
+             xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
+             yi = self._forward_once(xi)[0]  # forward
+             # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1])  # save
+             yi = self._descale_pred(yi, fi, si, img_size)
+             y.append(yi)
+         y = self._clip_augmented(y)  # clip augmented tails
+         return torch.cat(y, 1), None  # augmented inference, train
+
+     def _descale_pred(self, p, flips, scale, img_size):
+         # De-scale predictions following augmented inference (inverse operation)
+         if self.inplace:
+             p[..., :4] /= scale  # de-scale
+             if flips == 2:
+                 p[..., 1] = img_size[0] - p[..., 1]  # de-flip ud
+             elif flips == 3:
+                 p[..., 0] = img_size[1] - p[..., 0]  # de-flip lr
+         else:
+             x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale  # de-scale
+             if flips == 2:
+                 y = img_size[0] - y  # de-flip ud
+             elif flips == 3:
+                 x = img_size[1] - x  # de-flip lr
+             p = torch.cat((x, y, wh, p[..., 4:]), -1)
+         return p
+
+     def _clip_augmented(self, y):
+         # Clip YOLOv5 augmented inference tails
+         nl = self.model[-1].nl  # number of detection layers (P3-P5)
+         g = sum(4 ** x for x in range(nl))  # grid points
+         e = 1  # exclude layer count
+         i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e))  # indices
+         y[0] = y[0][:, :-i]  # large
+         i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e))  # indices
+         y[-1] = y[-1][:, i:]  # small
+         return y
+
+     def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
+         # https://arxiv.org/abs/1708.02002 section 3.3
+         # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
+         m = self.model[-1]  # Detect() module
+         for mi, s in zip(m.m, m.stride):  # from
+             b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
+             b.data[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
+             b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum())  # cls
+             mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
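+ # Editor's note (not part of the uploaded file): the priors set in
+ # _initialize_biases() above, in plain numbers for stride 8 and nc = 80:
+ #   obj: log(8 / (640/8)**2) = log(8/6400) ≈ -6.69, i.e. sigmoid(bias) ≈ 0.00125
+ #        (roughly 8 objects spread over the 80x80 grid of a 640px image)
+ #   cls: log(0.6 / (80 - 0.99999)) ≈ -4.88, i.e. ≈ 0.0076 prior per class,
+ # following the focal-loss prior initialization (arXiv:1708.02002, section 3.3).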
+
+ Model = DetectionModel  # retain YOLOv5 'Model' class for backwards compatibility
+
+
+ class SegmentationModel(DetectionModel):
+     # YOLOv5 segmentation model
+     def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):
+         super().__init__(cfg, ch, nc, anchors)
+
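+ # Editor's sketch (not part of the uploaded file): a quick shape check of the
+ # segmentation path, assuming the repo root is the working directory and using the
+ # yolov5s-seg.yaml shipped in this upload (nc=80, nm=32 -> no = 5 + 80 + 32 = 117):
+ def _editor_seg_shape_check():
+     model = SegmentationModel('models/segment/yolov5s-seg.yaml')
+     model.eval()
+     with torch.no_grad():
+         pred, proto, _ = model(torch.zeros(1, 3, 640, 640))
+     print(pred.shape)   # expected torch.Size([1, 25200, 117]); 25200 anchors at 640x640
+     print(proto.shape)  # expected torch.Size([1, 32, 160, 160]); protos at stride 4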
+
+ class ClassificationModel(BaseModel):
+     # YOLOv5 classification model
+     def __init__(self, cfg=None, model=None, nc=1000, cutoff=10):  # yaml, model, number of classes, cutoff index
+         super().__init__()
+         self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg)
+
+     def _from_detection_model(self, model, nc=1000, cutoff=10):
+         # Create a YOLOv5 classification model from a YOLOv5 detection model
+         if isinstance(model, DetectMultiBackend):
+             model = model.model  # unwrap DetectMultiBackend
+         model.model = model.model[:cutoff]  # backbone
+         m = model.model[-1]  # last layer
+         ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels  # ch into module
+         c = Classify(ch, nc)  # Classify()
+         c.i, c.f, c.type = m.i, m.f, 'models.common.Classify'  # index, from, type
+         model.model[-1] = c  # replace
+         self.model = model.model
+         self.stride = model.stride
+         self.save = []
+         self.nc = nc
+
+     def _from_yaml(self, cfg):
+         # Create a YOLOv5 classification model from a *.yaml file
+         self.model = None
+
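+ # Editor's sketch (not part of the uploaded file): the _from_detection_model()
+ # path above turns a detector into a classifier by truncating the backbone and
+ # appending a Classify head; a minimal use, with an illustrative nc of 10:
+ def _editor_classifier_from_detector():
+     det = DetectionModel('yolov5s.yaml')          # any detection cfg
+     return ClassificationModel(model=det, nc=10)  # keeps layers [0:10] as backbone
+ # Note: with the default cutoff=10, layers [0:10] are kept and the last of them is
+ # then replaced by Classify, whose input channels are read from that layer.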
+
+ def parse_model(d, ch):  # model_dict, input_channels(3)
+     # Parse a YOLOv5 model.yaml dictionary
+     LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10}  {'module':<40}{'arguments':<30}")
+     anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation')
+     if act:
+         Conv.default_act = eval(act)  # redefine default activation, i.e. Conv.default_act = nn.SiLU()
+         LOGGER.info(f"{colorstr('activation:')} {act}")  # print
+     na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
+     no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
+
+     layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
+     for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
+         m = eval(m) if isinstance(m, str) else m  # eval strings
+         for j, a in enumerate(args):
+             with contextlib.suppress(NameError):
+                 args[j] = eval(a) if isinstance(a, str) else a  # eval strings
+
+         n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain
+         if m in {
+                 Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
+                 BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, C2f}:
+             c1, c2 = ch[f], args[0]
+             if c2 != no:  # if not output
+                 c2 = make_divisible(c2 * gw, 8)
+
+             args = [c1, c2, *args[1:]]
+             if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x, C2f}:
+                 args.insert(2, n)  # number of repeats
+                 n = 1
+         elif m is nn.BatchNorm2d:
+             args = [ch[f]]
+         elif m is Concat:
+             c2 = sum(ch[x] for x in f)
+         # TODO: channel, gw, gd
+         elif m in {Detect, Segment}:
+             args.append([ch[x] for x in f])
+             if isinstance(args[1], int):  # number of anchors
+                 args[1] = [list(range(args[1] * 2))] * len(f)
+             if m is Segment:
+                 args[3] = make_divisible(args[3] * gw, 8)
+         elif m is Contract:
+             c2 = ch[f] * args[0] ** 2
+         elif m is Expand:
+             c2 = ch[f] // args[0] ** 2
+         elif m is SE:
+             c1 = ch[f]
+             c2 = args[0]
+             if c2 != no:  # if not output
+                 c2 = make_divisible(c2 * gw, 8)
+             args = [c1, args[1]]
+         elif m is MobileNetV3:
+             c2 = args[0]
+             args = args[1:]
+         else:
+             c2 = ch[f]
+
+         m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args)  # module
+         t = str(m)[8:-2].replace('__main__.', '')  # module type
+         np = sum(x.numel() for x in m_.parameters())  # number params
+         m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params
+         LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f}  {t:<40}{str(args):<30}')  # print
+         save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
+         layers.append(m_)
+         if i == 0:
+             ch = []
+         ch.append(c2)
+     return nn.Sequential(*layers), sorted(save)
+
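+ # Editor's note (not part of the uploaded file): how parse_model() above scales a
+ # yaml row, e.g. the backbone entry [-1, 9, C3, [512]] under yolov5s
+ # (depth_multiple gd=0.33, width_multiple gw=0.5):
+ #   n  = max(round(9 * 0.33), 1) = 3         # depth gain: 9 repeats -> 3
+ #   c2 = make_divisible(512 * 0.5, 8) = 256  # width gain, rounded to a multiple of 8
+ # For the C3-family modules the repeat count n is then moved into args and n reset
+ # to 1, since those blocks stack their bottlenecks internally.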
+
+ if __name__ == '__main__':
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
+     parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs')
+     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+     parser.add_argument('--profile', action='store_true', help='profile model speed')
+     parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer')
+     parser.add_argument('--test', action='store_true', help='test all yolo*.yaml')
+     opt = parser.parse_args()
+     opt.cfg = check_yaml(opt.cfg)  # check YAML
+     print_args(vars(opt))
+     device = select_device(opt.device)
+
+     # Create model
+     im = torch.rand(opt.batch_size, 3, 640, 640).to(device)
+     model = Model(opt.cfg).to(device)
+
+     # Options
+     if opt.line_profile:  # profile layer by layer
+         model(im, profile=True)
+
+     elif opt.profile:  # profile forward-backward
+         results = profile(input=im, ops=[model], n=3)
+
+     elif opt.test:  # test all models
+         for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'):
+             try:
+                 _ = Model(cfg)
+             except Exception as e:
+                 print(f'Error in {cfg}: {e}')
+
+     else:  # report fused model summary
+         model.fuse()
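+ # Editor's note (not part of the uploaded file): typical invocations of this entry
+ # point, using only the flags defined above:
+ #   python models/yolo.py --cfg yolov5s.yaml                 # fused model summary
+ #   python models/yolo.py --cfg yolov5s.yaml --profile       # forward/backward speed
+ #   python models/yolo.py --cfg yolov5s.yaml --line-profile  # per-layer timing
+ #   python models/yolo.py --test                             # build every yolo*.yaml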
yolov5-code-main/models/yolov5l.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 1.0  # model depth multiple
+ width_multiple: 1.0  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
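+ # Editor's note (not part of the uploaded file): with depth_multiple 1.0 and
+ # width_multiple 1.0, the repeat counts and channel widths above are used as-is;
+ # the smaller n/s/m variants reuse this exact layout and change only those two scalars.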
yolov5-code-main/models/yolov5m.yaml ADDED
@@ -0,0 +1,48 @@
+ # YOLOv5 🚀 by Ultralytics, GPL-3.0 license
+
+ # Parameters
+ nc: 80  # number of classes
+ depth_multiple: 0.67  # model depth multiple
+ width_multiple: 0.75  # layer channel multiple
+ anchors:
+   - [10,13, 16,30, 33,23]  # P3/8
+   - [30,61, 62,45, 59,119]  # P4/16
+   - [116,90, 156,198, 373,326]  # P5/32
+
+ # YOLOv5 v6.0 backbone
+ backbone:
+   # [from, number, module, args]
+   [[-1, 1, Conv, [64, 6, 2, 2]],  # 0-P1/2
+    [-1, 1, Conv, [128, 3, 2]],  # 1-P2/4
+    [-1, 3, C3, [128]],
+    [-1, 1, Conv, [256, 3, 2]],  # 3-P3/8
+    [-1, 6, C3, [256]],
+    [-1, 1, Conv, [512, 3, 2]],  # 5-P4/16
+    [-1, 9, C3, [512]],
+    [-1, 1, Conv, [1024, 3, 2]],  # 7-P5/32
+    [-1, 3, C3, [1024]],
+    [-1, 1, SPPF, [1024, 5]],  # 9
+   ]
+
+ # YOLOv5 v6.0 head
+ head:
+   [[-1, 1, Conv, [512, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 6], 1, Concat, [1]],  # cat backbone P4
+    [-1, 3, C3, [512, False]],  # 13
+
+    [-1, 1, Conv, [256, 1, 1]],
+    [-1, 1, nn.Upsample, [None, 2, 'nearest']],
+    [[-1, 4], 1, Concat, [1]],  # cat backbone P3
+    [-1, 3, C3, [256, False]],  # 17 (P3/8-small)
+
+    [-1, 1, Conv, [256, 3, 2]],
+    [[-1, 14], 1, Concat, [1]],  # cat head P4
+    [-1, 3, C3, [512, False]],  # 20 (P4/16-medium)
+
+    [-1, 1, Conv, [512, 3, 2]],
+    [[-1, 10], 1, Concat, [1]],  # cat head P5
+    [-1, 3, C3, [1024, False]],  # 23 (P5/32-large)
+
+    [[17, 20, 23], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
+   ]
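+ # Editor's note (not part of the uploaded file): worked scaling for this m-variant,
+ # applying parse_model()'s depth/width gains (gd=0.67, gw=0.75):
+ #   [-1, 9, C3, [512]]       -> n  = max(round(9 * 0.67), 1) = 6 repeats
+ #   [-1, 1, SPPF, [1024, 5]] -> c2 = make_divisible(1024 * 0.75, 8) = 768 channels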