hololens committed · Commit 016e2e5 · 1 Parent(s): d803dcf
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. iteach_toolkit/DHYOLO/__init__.py +1 -0
  2. iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-310.pyc +0 -0
  3. iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-311.pyc +0 -0
  4. iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-312.pyc +0 -0
  5. iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-38.pyc +0 -0
  6. iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-39.pyc +0 -0
  7. iteach_toolkit/DHYOLO/__pycache__/detect.cpython-310.pyc +0 -0
  8. iteach_toolkit/DHYOLO/__pycache__/detect.cpython-311.pyc +0 -0
  9. iteach_toolkit/DHYOLO/__pycache__/detect.cpython-312.pyc +0 -0
  10. iteach_toolkit/DHYOLO/__pycache__/detect.cpython-38.pyc +0 -0
  11. iteach_toolkit/DHYOLO/__pycache__/detect.cpython-39.pyc +0 -0
  12. iteach_toolkit/DHYOLO/__pycache__/export.cpython-310.pyc +0 -0
  13. iteach_toolkit/DHYOLO/__pycache__/export.cpython-311.pyc +0 -0
  14. iteach_toolkit/DHYOLO/__pycache__/export.cpython-312.pyc +0 -0
  15. iteach_toolkit/DHYOLO/__pycache__/export.cpython-38.pyc +0 -0
  16. iteach_toolkit/DHYOLO/__pycache__/export.cpython-39.pyc +0 -0
  17. iteach_toolkit/DHYOLO/__pycache__/hubconf.cpython-39.pyc +0 -0
  18. iteach_toolkit/DHYOLO/__pycache__/inference.cpython-310.pyc +0 -0
  19. iteach_toolkit/DHYOLO/__pycache__/load_model.cpython-310.pyc +0 -0
  20. iteach_toolkit/DHYOLO/__pycache__/load_model.cpython-39.pyc +0 -0
  21. iteach_toolkit/DHYOLO/__pycache__/model.cpython-310.pyc +0 -0
  22. iteach_toolkit/DHYOLO/__pycache__/model.cpython-311.pyc +0 -0
  23. iteach_toolkit/DHYOLO/__pycache__/model.cpython-312.pyc +0 -0
  24. iteach_toolkit/DHYOLO/__pycache__/model.cpython-38.pyc +0 -0
  25. iteach_toolkit/DHYOLO/__pycache__/model.cpython-39.pyc +0 -0
  26. iteach_toolkit/DHYOLO/detect.py +290 -0
  27. iteach_toolkit/DHYOLO/export.py +1537 -0
  28. iteach_toolkit/DHYOLO/hubconf.py +510 -0
  29. iteach_toolkit/DHYOLO/model.py +174 -0
  30. iteach_toolkit/DHYOLO/models/__init__.py +0 -0
  31. iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-310.pyc +0 -0
  32. iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-311.pyc +0 -0
  33. iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-312.pyc +0 -0
  34. iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-38.pyc +0 -0
  35. iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-39.pyc +0 -0
  36. iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-310.pyc +0 -0
  37. iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-311.pyc +0 -0
  38. iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-312.pyc +0 -0
  39. iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-38.pyc +0 -0
  40. iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-39.pyc +0 -0
  41. iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-310.pyc +0 -0
  42. iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-311.pyc +0 -0
  43. iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-312.pyc +0 -0
  44. iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-38.pyc +0 -0
  45. iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-39.pyc +0 -0
  46. iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-310.pyc +0 -0
  47. iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-311.pyc +0 -0
  48. iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-312.pyc +0 -0
  49. iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-38.pyc +0 -0
  50. iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-39.pyc +0 -0
iteach_toolkit/DHYOLO/__init__.py ADDED
@@ -0,0 +1 @@
+ from .model import DHYOLODetector
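The package's public surface is this single re-export. A minimal smoke test, assuming the wheel is installed as `iteach_toolkit` (the `DHYOLODetector` constructor arguments live in `model.py`, which this view shows only as added, so none are assumed here):

```python
# Smoke test for the entry point added in this commit.
# Assumes `iteach_toolkit` is installed; relies only on the re-export shown above.
from iteach_toolkit.DHYOLO import DHYOLODetector

print(DHYOLODetector)  # confirms the class is importable from the package root
```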
iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-310.pyc ADDED — Binary file (231 Bytes)
iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-311.pyc ADDED — Binary file (210 Bytes)
iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-312.pyc ADDED — Binary file (201 Bytes)
iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-38.pyc ADDED — Binary file (235 Bytes)
iteach_toolkit/DHYOLO/__pycache__/__init__.cpython-39.pyc ADDED — Binary file (178 Bytes)
iteach_toolkit/DHYOLO/__pycache__/detect.cpython-310.pyc ADDED — Binary file (10.6 kB)
iteach_toolkit/DHYOLO/__pycache__/detect.cpython-311.pyc ADDED — Binary file (20.6 kB)
iteach_toolkit/DHYOLO/__pycache__/detect.cpython-312.pyc ADDED — Binary file (18.1 kB)
iteach_toolkit/DHYOLO/__pycache__/detect.cpython-38.pyc ADDED — Binary file (10.6 kB)
iteach_toolkit/DHYOLO/__pycache__/detect.cpython-39.pyc ADDED — Binary file (10.6 kB)
iteach_toolkit/DHYOLO/__pycache__/export.cpython-310.pyc ADDED — Binary file (58.1 kB)
iteach_toolkit/DHYOLO/__pycache__/export.cpython-311.pyc ADDED — Binary file (87 kB)
iteach_toolkit/DHYOLO/__pycache__/export.cpython-312.pyc ADDED — Binary file (82.3 kB)
iteach_toolkit/DHYOLO/__pycache__/export.cpython-38.pyc ADDED — Binary file (58 kB)
iteach_toolkit/DHYOLO/__pycache__/export.cpython-39.pyc ADDED — Binary file (58.1 kB)
iteach_toolkit/DHYOLO/__pycache__/hubconf.cpython-39.pyc ADDED — Binary file (22.4 kB)
iteach_toolkit/DHYOLO/__pycache__/inference.cpython-310.pyc ADDED — Binary file (350 Bytes)
iteach_toolkit/DHYOLO/__pycache__/load_model.cpython-310.pyc ADDED — Binary file (629 Bytes)
iteach_toolkit/DHYOLO/__pycache__/load_model.cpython-39.pyc ADDED — Binary file (667 Bytes)
iteach_toolkit/DHYOLO/__pycache__/model.cpython-310.pyc ADDED — Binary file (806 Bytes)
iteach_toolkit/DHYOLO/__pycache__/model.cpython-311.pyc ADDED — Binary file (922 Bytes)
iteach_toolkit/DHYOLO/__pycache__/model.cpython-312.pyc ADDED — Binary file (2.8 kB)
iteach_toolkit/DHYOLO/__pycache__/model.cpython-38.pyc ADDED — Binary file (5.38 kB)
iteach_toolkit/DHYOLO/__pycache__/model.cpython-39.pyc ADDED — Binary file (684 Bytes)
iteach_toolkit/DHYOLO/detect.py ADDED
@@ -0,0 +1,290 @@
+ # YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
+ """
+ Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc.
+ 
+ Usage - sources:
+     $ python detect.py --weights yolov5s.pt --source 0                               # webcam
+                                                      img.jpg                         # image
+                                                      vid.mp4                         # video
+                                                      screen                          # screenshot
+                                                      path/                           # directory
+                                                      list.txt                        # list of images
+                                                      list.streams                    # list of streams
+                                                      'path/*.jpg'                    # glob
+                                                      'https://youtu.be/LNwODJXcvt4'  # YouTube
+                                                      'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP stream
+ 
+ Usage - formats:
+     $ python detect.py --weights yolov5s.pt                 # PyTorch
+                                  yolov5s.torchscript        # TorchScript
+                                  yolov5s.onnx               # ONNX Runtime or OpenCV DNN with --dnn
+                                  yolov5s_openvino_model     # OpenVINO
+                                  yolov5s.engine             # TensorRT
+                                  yolov5s.mlmodel            # CoreML (macOS-only)
+                                  yolov5s_saved_model        # TensorFlow SavedModel
+                                  yolov5s.pb                 # TensorFlow GraphDef
+                                  yolov5s.tflite             # TensorFlow Lite
+                                  yolov5s_edgetpu.tflite     # TensorFlow Edge TPU
+                                  yolov5s_paddle_model       # PaddlePaddle
+ """
+ 
+ import argparse
+ import csv
+ import os
+ import platform
+ import sys
+ from pathlib import Path
+ from PIL import Image as PILImg
+ import shutil
+ import torch
+ 
+ FILE = Path(__file__).resolve()
+ ROOT = FILE.parents[0]  # YOLOv5 root directory
+ if str(ROOT) not in sys.path:
+     sys.path.append(str(ROOT))  # add ROOT to PATH
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+ 
+ from ultralytics.utils.plotting import Annotator, colors, save_one_box
+ 
+ from models.common import DetectMultiBackend
+ from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
+ from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
+                            increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
+ from utils.torch_utils import select_device, smart_inference_mode
+ 
+ 
+ @smart_inference_mode()
+ def run(
+         weights=ROOT / 'yolov5s.pt',  # model path or triton URL
+         source=ROOT / 'data/images',  # file/dir/URL/glob/screen/0(webcam)
+         data=ROOT / 'data/coco128.yaml',  # dataset.yaml path
+         imgsz=(640, 640),  # inference size (height, width)
+         conf_thres=0.25,  # confidence threshold
+         iou_thres=0.45,  # NMS IOU threshold
+         max_det=1000,  # maximum detections per image
+         device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+         view_img=False,  # show results
+         save_txt=False,  # save results to *.txt
+         save_csv=False,  # save results in CSV format
+         save_conf=False,  # save confidences in --save-txt labels
+         save_crop=False,  # save cropped prediction boxes
+         nosave=False,  # do not save images/videos
+         classes=None,  # filter by class: --class 0, or --class 0 2 3
+         agnostic_nms=False,  # class-agnostic NMS
+         augment=False,  # augmented inference
+         visualize=False,  # visualize features
+         update=False,  # update all models
+         project=ROOT / 'inference',  # save results to project/name
+         name='_dhyolo',  # save results to project/name
+         exist_ok=False,  # existing project/name ok, do not increment
+         line_thickness=3,  # bounding box thickness (pixels)
+         hide_labels=False,  # hide labels
+         hide_conf=False,  # hide confidences
+         half=False,  # use FP16 half-precision inference
+         dnn=False,  # use OpenCV DNN for ONNX inference
+         vid_stride=1,  # video frame-rate stride
+ ):
+     source = str(source)
+     save_img = False  # not nosave and not source.endswith('.txt')  # save inference images
+     is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
+     is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
+     webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file)
+     screenshot = source.lower().startswith('screen')
+     if is_url and is_file:
+         source = check_file(source)  # download
+ 
+     # Directories
+     save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
+     (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir
+ 
+     # Load model
+     device = select_device(device)
+     model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
+     stride, names, pt = model.stride, model.names, model.pt
+     imgsz = check_img_size(imgsz, s=stride)  # check image size
+ 
+     # Dataloader
+     bs = 1  # batch_size
+     if webcam:
+         view_img = check_imshow(warn=True)
+         dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+         bs = len(dataset)
+     elif screenshot:
+         dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt)
+     else:
+         dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
+     vid_path, vid_writer = [None] * bs, [None] * bs
+ 
+     # Run inference
+     model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz))  # warmup
+     seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
+     for path, im, im0s, vid_cap, s in dataset:
+         with dt[0]:
+             im = torch.from_numpy(im).to(model.device)
+             im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+             im /= 255  # 0 - 255 to 0.0 - 1.0
+             if len(im.shape) == 3:
+                 im = im[None]  # expand for batch dim
+ 
+         # Inference
+         with dt[1]:
+             visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
+             pred = model(im, augment=augment, visualize=visualize)
+ 
+         # NMS
+         with dt[2]:
+             pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
+ 
+         # Second-stage classifier (optional)
+         # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
+ 
+         # Define the path for the CSV file
+         csv_path = save_dir / 'predictions.csv'
+ 
+         # Create or append to the CSV file
+         def write_to_csv(image_name, prediction, confidence):
+             data = {'Image Name': image_name, 'Prediction': prediction, 'Confidence': confidence}
+             with open(csv_path, mode='a', newline='') as f:
+                 writer = csv.DictWriter(f, fieldnames=data.keys())
+                 if not csv_path.is_file():
+                     writer.writeheader()
+                 writer.writerow(data)
+ 
+         # Process predictions
+         for i, det in enumerate(pred):  # per image
+             seen += 1
+             if webcam:  # batch_size >= 1
+                 p, im0, frame = path[i], im0s[i].copy(), dataset.count
+                 s += f'{i}: '
+             else:
+                 p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
+ 
+             p = Path(p)  # to Path
+             save_path = str(save_dir / p.name)  # im.jpg
+             txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # im.txt
+             s += '%gx%g ' % im.shape[2:]  # print string
+             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+             imc = im0.copy() if save_crop else im0  # for save_crop
+             annotator = Annotator(im0, line_width=line_thickness, example=str(names))
+             if len(det):
+                 # Rescale boxes from img_size to im0 size
+                 det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()
+ 
+                 # Print results
+                 for c in det[:, 5].unique():
+                     n = (det[:, 5] == c).sum()  # detections per class
+                     s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+ 
+                 # Write results
+                 for *xyxy, conf, cls in reversed(det):
+                     c = int(cls)  # integer class
+                     label = names[c] if hide_conf else f'{names[c]}'
+                     confidence = float(conf)
+                     confidence_str = f'{confidence:.2f}'
+ 
+                     if save_csv:
+                         write_to_csv(p.name, label, confidence_str)
+ 
+                     if save_txt:  # Write to file
+                         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                         line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
+                         with open(f'{txt_path}.txt', 'a') as f:
+                             f.write(('%g ' * len(line)).rstrip() % line + '\n')
+ 
+                     if save_img or save_crop or view_img:  # Add bbox to image
+                         c = int(cls)  # integer class
+                         label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
+                         annotator.box_label(xyxy, label, color=colors(c, True))
+                     if save_crop:
+                         save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
+ 
+             # Stream results
+             im0 = annotator.result()
+             if view_img:
+                 if platform.system() == 'Linux' and p not in windows:
+                     windows.append(p)
+                     cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+                     cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
+                 cv2.imshow(str(p), im0)
+                 cv2.waitKey(1)  # 1 millisecond
+ 
+             # Save results (image with detections)
+             if save_img:
+                 if dataset.mode == 'image':
+                     cv2.imwrite(save_path, im0)
+                 else:  # 'video' or 'stream'
+                     if vid_path[i] != save_path:  # new video
+                         vid_path[i] = save_path
+                         if isinstance(vid_writer[i], cv2.VideoWriter):
+                             vid_writer[i].release()  # release previous video writer
+                         if vid_cap:  # video
+                             fps = vid_cap.get(cv2.CAP_PROP_FPS)
+                             w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+                             h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+                         else:  # stream
+                             fps, w, h = 30, im0.shape[1], im0.shape[0]
+                         save_path = str(Path(save_path).with_suffix('.mp4'))  # force *.mp4 suffix on results videos
+                         vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+                     vid_writer[i].write(im0)
+ 
+         # Print time (inference-only)
+         LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
+ 
+     # Print results
+     t = tuple(x.t / seen * 1E3 for x in dt)  # speeds per image
+     LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
+     if save_txt or save_img:
+         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+         LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
+     if update:
+         strip_optimizer(weights[0])  # update model (to fix SourceChangeWarning)
+ 
+     # need to remove as we are not saving anything
+     shutil.rmtree(project, ignore_errors=True)
+ 
+     return pred
+ 
+ def parse_opt():
+     parser = argparse.ArgumentParser()
+     parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL')
+     parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)')
+     parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
+     parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
+     parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
+     parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
+     parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
+     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
+     parser.add_argument('--view-img', action='store_true', help='show results')
+     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
+     parser.add_argument('--save-csv', action='store_true', help='save results in CSV format')
+     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
+     parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
+     parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
+     parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
+     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
+     parser.add_argument('--augment', action='store_true', help='augmented inference')
+     parser.add_argument('--visualize', action='store_true', help='visualize features')
+     parser.add_argument('--update', action='store_true', help='update all models')
+     parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
+     parser.add_argument('--name', default='exp', help='save results to project/name')
+     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
+     parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
+     parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
+     parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
+     parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+     parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
+     parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
+     opt = parser.parse_args()
+     opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1  # expand
+     print_args(vars(opt))
+     return opt
+ 
+ 
+ def main(opt):
+     check_requirements(ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
+     run(**vars(opt))
+ 
+ 
+ if __name__ == '__main__':
+     opt = parse_opt()
+     main(opt)
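Note that this `run()` returns the raw post-NMS predictions and deletes its scratch `project` directory on the way out, so it can be driven directly from Python rather than the CLI. A minimal sketch, assuming the package's `sys.path` shim resolves its vendored `models`/`utils` modules; the weights and image filenames are hypothetical placeholders, not files shipped by this commit:

```python
# Programmatic use of the detect.run() added above.
from iteach_toolkit.DHYOLO.detect import run

# 'dhyolo_checkpoint.pt' and 'door_scene.jpg' are placeholder paths.
pred = run(weights='dhyolo_checkpoint.pt', source='door_scene.jpg',
           conf_thres=0.25, iou_thres=0.45)
print(pred)  # list of per-image tensors with (x1, y1, x2, y2, conf, cls) rows after NMS
```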
iteach_toolkit/DHYOLO/export.py ADDED
@@ -0,0 +1,1537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Ultralytics YOLOv5 🚀, AGPL-3.0 license
2
+ """
3
+ Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
4
+
5
+ Format | `export.py --include` | Model
6
+ --- | --- | ---
7
+ PyTorch | - | yolov5s.pt
8
+ TorchScript | `torchscript` | yolov5s.torchscript
9
+ ONNX | `onnx` | yolov5s.onnx
10
+ OpenVINO | `openvino` | yolov5s_openvino_model/
11
+ TensorRT | `engine` | yolov5s.engine
12
+ CoreML | `coreml` | yolov5s.mlmodel
13
+ TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
14
+ TensorFlow GraphDef | `pb` | yolov5s.pb
15
+ TensorFlow Lite | `tflite` | yolov5s.tflite
16
+ TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
17
+ TensorFlow.js | `tfjs` | yolov5s_web_model/
18
+ PaddlePaddle | `paddle` | yolov5s_paddle_model/
19
+
20
+ Requirements:
21
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU
22
+ $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU
23
+
24
+ Usage:
25
+ $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
26
+
27
+ Inference:
28
+ $ python detect.py --weights yolov5s.pt # PyTorch
29
+ yolov5s.torchscript # TorchScript
30
+ yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
31
+ yolov5s_openvino_model # OpenVINO
32
+ yolov5s.engine # TensorRT
33
+ yolov5s.mlmodel # CoreML (macOS-only)
34
+ yolov5s_saved_model # TensorFlow SavedModel
35
+ yolov5s.pb # TensorFlow GraphDef
36
+ yolov5s.tflite # TensorFlow Lite
37
+ yolov5s_edgetpu.tflite # TensorFlow Edge TPU
38
+ yolov5s_paddle_model # PaddlePaddle
39
+
40
+ TensorFlow.js:
41
+ $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
42
+ $ npm install
43
+ $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
44
+ $ npm start
45
+ """
46
+
47
+ import argparse
48
+ import contextlib
49
+ import json
50
+ import os
51
+ import platform
52
+ import re
53
+ import subprocess
54
+ import sys
55
+ import time
56
+ import warnings
57
+ from pathlib import Path
58
+
59
+ import pandas as pd
60
+ import torch
61
+ from torch.utils.mobile_optimizer import optimize_for_mobile
62
+
63
+ FILE = Path(__file__).resolve()
64
+ ROOT = FILE.parents[0] # YOLOv5 root directory
65
+ if str(ROOT) not in sys.path:
66
+ sys.path.append(str(ROOT)) # add ROOT to PATH
67
+ if platform.system() != "Windows":
68
+ ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
69
+
70
+ from models.experimental import attempt_load
71
+ from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel
72
+ from utils.dataloaders import LoadImages
73
+ from utils.general import (
74
+ LOGGER,
75
+ Profile,
76
+ check_dataset,
77
+ check_img_size,
78
+ check_requirements,
79
+ check_version,
80
+ check_yaml,
81
+ colorstr,
82
+ file_size,
83
+ get_default_args,
84
+ print_args,
85
+ url2file,
86
+ yaml_save,
87
+ )
88
+ from utils.torch_utils import select_device, smart_inference_mode
89
+
90
+ MACOS = platform.system() == "Darwin" # macOS environment
91
+
92
+
93
+ class iOSModel(torch.nn.Module):
94
+ def __init__(self, model, im):
95
+ """
96
+ Initializes an iOS compatible model with normalization based on image dimensions.
97
+
98
+ Args:
99
+ model (torch.nn.Module): The PyTorch model to be adapted for iOS compatibility.
100
+ im (torch.Tensor): An input tensor representing a batch of images with shape (B, C, H, W).
101
+
102
+ Returns:
103
+ None: This method does not return any value.
104
+
105
+ Notes:
106
+ This initializer configures normalization based on the input image dimensions, which is critical for
107
+ ensuring the model's compatibility and proper functionality on iOS devices. The normalization step
108
+ involves dividing by the image width if the image is square; otherwise, additional conditions might apply.
109
+ """
110
+ super().__init__()
111
+ b, c, h, w = im.shape # batch, channel, height, width
112
+ self.model = model
113
+ self.nc = model.nc # number of classes
114
+ if w == h:
115
+ self.normalize = 1.0 / w
116
+ else:
117
+ self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h]) # broadcast (slower, smaller)
118
+ # np = model(im)[0].shape[1] # number of points
119
+ # self.normalize = torch.tensor([1. / w, 1. / h, 1. / w, 1. / h]).expand(np, 4) # explicit (faster, larger)
120
+
121
+ def forward(self, x):
122
+ """
123
+ Run a forward pass on the input tensor, returning class confidences and normalized coordinates.
124
+
125
+ Args:
126
+ x (torch.Tensor): Input tensor containing the image data with shape (batch, channels, height, width).
127
+
128
+ Returns:
129
+ torch.Tensor: Concatenated tensor with normalized coordinates (xywh), confidence scores (conf),
130
+ and class probabilities (cls), having shape (N, 4 + 1 + C), where N is the number of predictions,
131
+ and C is the number of classes.
132
+
133
+ Examples:
134
+ ```python
135
+ model = iOSModel(pretrained_model, input_image)
136
+ output = model.forward(torch_input_tensor)
137
+ ```
138
+ """
139
+ xywh, conf, cls = self.model(x)[0].squeeze().split((4, 1, self.nc), 1)
140
+ return cls * conf, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4)
141
+
142
+
143
+ def export_formats():
144
+ """
145
+ Returns a DataFrame of supported YOLOv5 model export formats and their properties.
146
+
147
+ Returns:
148
+ pandas.DataFrame: A DataFrame containing supported export formats and their properties. The DataFrame
149
+ includes columns for format name, CLI argument suffix, file extension or directory name, and boolean flags
150
+ indicating if the export format supports training and detection.
151
+
152
+ Examples:
153
+ ```python
154
+ formats = export_formats()
155
+ print(f"Supported export formats:\n{formats}")
156
+ ```
157
+
158
+ Notes:
159
+ The DataFrame contains the following columns:
160
+ - Format: The name of the model format (e.g., PyTorch, TorchScript, ONNX, etc.).
161
+ - Include Argument: The argument to use with the export script to include this format.
162
+ - File Suffix: File extension or directory name associated with the format.
163
+ - Supports Training: Whether the format supports training.
164
+ - Supports Detection: Whether the format supports detection.
165
+ """
166
+ x = [
167
+ ["PyTorch", "-", ".pt", True, True],
168
+ ["TorchScript", "torchscript", ".torchscript", True, True],
169
+ ["ONNX", "onnx", ".onnx", True, True],
170
+ ["OpenVINO", "openvino", "_openvino_model", True, False],
171
+ ["TensorRT", "engine", ".engine", False, True],
172
+ ["CoreML", "coreml", ".mlpackage", True, False],
173
+ ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True],
174
+ ["TensorFlow GraphDef", "pb", ".pb", True, True],
175
+ ["TensorFlow Lite", "tflite", ".tflite", True, False],
176
+ ["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", False, False],
177
+ ["TensorFlow.js", "tfjs", "_web_model", False, False],
178
+ ["PaddlePaddle", "paddle", "_paddle_model", True, True],
179
+ ]
180
+ return pd.DataFrame(x, columns=["Format", "Argument", "Suffix", "CPU", "GPU"])
181
+
182
+
183
+ def try_export(inner_func):
184
+ """
185
+ Log success or failure, execution time, and file size for YOLOv5 model export functions wrapped with @try_export.
186
+
187
+ Args:
188
+ inner_func (Callable): The model export function to be wrapped by the decorator.
189
+
190
+ Returns:
191
+ Callable: The wrapped function that logs execution details. When executed, this wrapper function returns either:
192
+ - Tuple (str | torch.nn.Module): On success — the file path of the exported model and the model instance.
193
+ - Tuple (None, None): On failure — None values indicating export failure.
194
+
195
+ Examples:
196
+ ```python
197
+ @try_export
198
+ def export_onnx(model, filepath):
199
+ # implementation here
200
+ pass
201
+
202
+ exported_file, exported_model = export_onnx(yolo_model, 'path/to/save/model.onnx')
203
+ ```
204
+
205
+ Notes:
206
+ For additional requirements and model export formats, refer to the
207
+ [Ultralytics YOLOv5 GitHub repository](https://github.com/ultralytics/ultralytics).
208
+ """
209
+ inner_args = get_default_args(inner_func)
210
+
211
+ def outer_func(*args, **kwargs):
212
+ """Logs success/failure and execution details of model export functions wrapped with @try_export decorator."""
213
+ prefix = inner_args["prefix"]
214
+ try:
215
+ with Profile() as dt:
216
+ f, model = inner_func(*args, **kwargs)
217
+ LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)")
218
+ return f, model
219
+ except Exception as e:
220
+ LOGGER.info(f"{prefix} export failure ❌ {dt.t:.1f}s: {e}")
221
+ return None, None
222
+
223
+ return outer_func
224
+
225
+
226
+ @try_export
227
+ def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")):
228
+ """
229
+ Export a YOLOv5 model to the TorchScript format.
230
+
231
+ Args:
232
+ model (torch.nn.Module): The YOLOv5 model to be exported.
233
+ im (torch.Tensor): Example input tensor to be used for tracing the TorchScript model.
234
+ file (Path): File path where the exported TorchScript model will be saved.
235
+ optimize (bool): If True, applies optimizations for mobile deployment.
236
+ prefix (str): Optional prefix for log messages. Default is 'TorchScript:'.
237
+
238
+ Returns:
239
+ (str | None, torch.jit.ScriptModule | None): A tuple containing the file path of the exported model
240
+ (as a string) and the TorchScript model (as a torch.jit.ScriptModule). If the export fails, both elements
241
+ of the tuple will be None.
242
+
243
+ Notes:
244
+ - This function uses tracing to create the TorchScript model.
245
+ - Metadata, including the input image shape, model stride, and class names, is saved in an extra file (`config.txt`)
246
+ within the TorchScript model package.
247
+ - For mobile optimization, refer to the PyTorch tutorial: https://pytorch.org/tutorials/recipes/mobile_interpreter.html
248
+
249
+ Example:
250
+ ```python
251
+ from pathlib import Path
252
+ import torch
253
+ from models.experimental import attempt_load
254
+ from utils.torch_utils import select_device
255
+
256
+ # Load model
257
+ weights = 'yolov5s.pt'
258
+ device = select_device('')
259
+ model = attempt_load(weights, device=device)
260
+
261
+ # Example input tensor
262
+ im = torch.zeros(1, 3, 640, 640).to(device)
263
+
264
+ # Export model
265
+ file = Path('yolov5s.torchscript')
266
+ export_torchscript(model, im, file, optimize=False)
267
+ ```
268
+ """
269
+ LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
270
+ f = file.with_suffix(".torchscript")
271
+
272
+ ts = torch.jit.trace(model, im, strict=False)
273
+ d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
274
+ extra_files = {"config.txt": json.dumps(d)} # torch._C.ExtraFilesMap()
275
+ if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
276
+ optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
277
+ else:
278
+ ts.save(str(f), _extra_files=extra_files)
279
+ return f, None
280
+
281
+
282
+ @try_export
283
+ def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr("ONNX:")):
284
+ """
285
+ Export a YOLOv5 model to ONNX format with dynamic axes support and optional model simplification.
286
+
287
+ Args:
288
+ model (torch.nn.Module): The YOLOv5 model to be exported.
289
+ im (torch.Tensor): A sample input tensor for model tracing, usually the shape is (1, 3, height, width).
290
+ file (pathlib.Path | str): The output file path where the ONNX model will be saved.
291
+ opset (int): The ONNX opset version to use for export.
292
+ dynamic (bool): If True, enables dynamic axes for batch, height, and width dimensions.
293
+ simplify (bool): If True, applies ONNX model simplification for optimization.
294
+ prefix (str): A prefix string for logging messages, defaults to 'ONNX:'.
295
+
296
+ Returns:
297
+ tuple[pathlib.Path | str, None]: The path to the saved ONNX model file and None (consistent with decorator).
298
+
299
+ Raises:
300
+ ImportError: If required libraries for export (e.g., 'onnx', 'onnx-simplifier') are not installed.
301
+ AssertionError: If the simplification check fails.
302
+
303
+ Notes:
304
+ The required packages for this function can be installed via:
305
+ ```
306
+ pip install onnx onnx-simplifier onnxruntime onnxruntime-gpu
307
+ ```
308
+
309
+ Example:
310
+ ```python
311
+ from pathlib import Path
312
+ import torch
313
+ from models.experimental import attempt_load
314
+ from utils.torch_utils import select_device
315
+
316
+ # Load model
317
+ weights = 'yolov5s.pt'
318
+ device = select_device('')
319
+ model = attempt_load(weights, map_location=device)
320
+
321
+ # Example input tensor
322
+ im = torch.zeros(1, 3, 640, 640).to(device)
323
+
324
+ # Export model
325
+ file_path = Path('yolov5s.onnx')
326
+ export_onnx(model, im, file_path, opset=12, dynamic=True, simplify=True)
327
+ ```
328
+ """
329
+ check_requirements("onnx>=1.12.0")
330
+ import onnx
331
+
332
+ LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__}...")
333
+ f = str(file.with_suffix(".onnx"))
334
+
335
+ output_names = ["output0", "output1"] if isinstance(model, SegmentationModel) else ["output0"]
336
+ if dynamic:
337
+ dynamic = {"images": {0: "batch", 2: "height", 3: "width"}} # shape(1,3,640,640)
338
+ if isinstance(model, SegmentationModel):
339
+ dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85)
340
+ dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"} # shape(1,32,160,160)
341
+ elif isinstance(model, DetectionModel):
342
+ dynamic["output0"] = {0: "batch", 1: "anchors"} # shape(1,25200,85)
343
+
344
+ torch.onnx.export(
345
+ model.cpu() if dynamic else model, # --dynamic only compatible with cpu
346
+ im.cpu() if dynamic else im,
347
+ f,
348
+ verbose=False,
349
+ opset_version=opset,
350
+ do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
351
+ input_names=["images"],
352
+ output_names=output_names,
353
+ dynamic_axes=dynamic or None,
354
+ )
355
+
356
+ # Checks
357
+ model_onnx = onnx.load(f) # load onnx model
358
+ onnx.checker.check_model(model_onnx) # check onnx model
359
+
360
+ # Metadata
361
+ d = {"stride": int(max(model.stride)), "names": model.names}
362
+ for k, v in d.items():
363
+ meta = model_onnx.metadata_props.add()
364
+ meta.key, meta.value = k, str(v)
365
+ onnx.save(model_onnx, f)
366
+
367
+ # Simplify
368
+ if simplify:
369
+ try:
370
+ cuda = torch.cuda.is_available()
371
+ check_requirements(("onnxruntime-gpu" if cuda else "onnxruntime", "onnxslim"))
372
+ import onnxslim
373
+
374
+ LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
375
+ model_onnx = onnxslim.slim(model_onnx)
376
+ onnx.save(model_onnx, f)
377
+ except Exception as e:
378
+ LOGGER.info(f"{prefix} simplifier failure: {e}")
379
+ return f, model_onnx
380
+
381
+
382
+ @try_export
383
+ def export_openvino(file, metadata, half, int8, data, prefix=colorstr("OpenVINO:")):
384
+ """
385
+ Export a YOLOv5 model to OpenVINO format with optional FP16 and INT8 quantization.
386
+
387
+ Args:
388
+ file (Path): Path to the output file where the OpenVINO model will be saved.
389
+ metadata (dict): Dictionary including model metadata such as names and strides.
390
+ half (bool): If True, export the model with FP16 precision.
391
+ int8 (bool): If True, export the model with INT8 quantization.
392
+ data (str): Path to the dataset YAML file required for INT8 quantization.
393
+ prefix (str): Prefix string for logging purposes (default is "OpenVINO:").
394
+
395
+ Returns:
396
+ (str, openvino.runtime.Model | None): The OpenVINO model file path and openvino.runtime.Model object if export is
397
+ successful; otherwise, None.
398
+
399
+ Notes:
400
+ - Requires `openvino-dev` package version 2023.0 or higher. Install with:
401
+ `$ pip install openvino-dev>=2023.0`
402
+ - For INT8 quantization, also requires `nncf` library version 2.5.0 or higher. Install with:
403
+ `$ pip install nncf>=2.5.0`
404
+
405
+ Examples:
406
+ ```python
407
+ from pathlib import Path
408
+ from ultralytics import YOLOv5
409
+
410
+ model = YOLOv5('yolov5s.pt')
411
+ export_openvino(Path('yolov5s.onnx'), metadata={'names': model.names, 'stride': model.stride}, half=True,
412
+ int8=False, data='data.yaml')
413
+ ```
414
+
415
+ This will export the YOLOv5 model to OpenVINO with FP16 precision but without INT8 quantization, saving it to
416
+ the specified file path.
417
+ """
418
+ check_requirements("openvino-dev>=2023.0") # requires openvino-dev: https://pypi.org/project/openvino-dev/
419
+ import openvino.runtime as ov # noqa
420
+ from openvino.tools import mo # noqa
421
+
422
+ LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
423
+ f = str(file).replace(file.suffix, f"_{'int8_' if int8 else ''}openvino_model{os.sep}")
424
+ f_onnx = file.with_suffix(".onnx")
425
+ f_ov = str(Path(f) / file.with_suffix(".xml").name)
426
+
427
+ ov_model = mo.convert_model(f_onnx, model_name=file.stem, framework="onnx", compress_to_fp16=half) # export
428
+
429
+ if int8:
430
+ check_requirements("nncf>=2.5.0") # requires at least version 2.5.0 to use the post-training quantization
431
+ import nncf
432
+ import numpy as np
433
+
434
+ from utils.dataloaders import create_dataloader
435
+
436
+ def gen_dataloader(yaml_path, task="train", imgsz=640, workers=4):
437
+ """Generates a DataLoader for model training or validation based on the given YAML dataset configuration."""
438
+ data_yaml = check_yaml(yaml_path)
439
+ data = check_dataset(data_yaml)
440
+ dataloader = create_dataloader(
441
+ data[task], imgsz=imgsz, batch_size=1, stride=32, pad=0.5, single_cls=False, rect=False, workers=workers
442
+ )[0]
443
+ return dataloader
444
+
445
+ # noqa: F811
446
+
447
+ def transform_fn(data_item):
448
+ """
449
+ Quantization transform function.
450
+
451
+ Extracts and preprocess input data from dataloader item for quantization.
452
+ Parameters:
453
+ data_item: Tuple with data item produced by DataLoader during iteration
454
+ Returns:
455
+ input_tensor: Input data for quantization
456
+ """
457
+ assert data_item[0].dtype == torch.uint8, "input image must be uint8 for the quantization preprocessing"
458
+
459
+ img = data_item[0].numpy().astype(np.float32) # uint8 to fp16/32
460
+ img /= 255.0 # 0 - 255 to 0.0 - 1.0
461
+ return np.expand_dims(img, 0) if img.ndim == 3 else img
462
+
463
+ ds = gen_dataloader(data)
464
+ quantization_dataset = nncf.Dataset(ds, transform_fn)
465
+ ov_model = nncf.quantize(ov_model, quantization_dataset, preset=nncf.QuantizationPreset.MIXED)
466
+
467
+ ov.serialize(ov_model, f_ov) # save
468
+ yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml
469
+ return f, None
470
+
471
+
472
+ @try_export
473
+ def export_paddle(model, im, file, metadata, prefix=colorstr("PaddlePaddle:")):
474
+ """
475
+ Export a YOLOv5 PyTorch model to PaddlePaddle format using X2Paddle, saving the converted model and metadata.
476
+
477
+ Args:
478
+ model (torch.nn.Module): The YOLOv5 model to be exported.
479
+ im (torch.Tensor): Input tensor used for model tracing during export.
480
+ file (pathlib.Path): Path to the source file to be converted.
481
+ metadata (dict): Additional metadata to be saved alongside the model.
482
+ prefix (str): Prefix for logging information.
483
+
484
+ Returns:
485
+ tuple (str, None): A tuple where the first element is the path to the saved PaddlePaddle model, and the
486
+ second element is None.
487
+
488
+ Examples:
489
+ ```python
490
+ from pathlib import Path
491
+ import torch
492
+
493
+ # Assume 'model' is a pre-trained YOLOv5 model and 'im' is an example input tensor
494
+ model = ... # Load your model here
495
+ im = torch.randn((1, 3, 640, 640)) # Dummy input tensor for tracing
496
+ file = Path("yolov5s.pt")
497
+ metadata = {"stride": 32, "names": ["person", "bicycle", "car", "motorbike"]}
498
+
499
+ export_paddle(model=model, im=im, file=file, metadata=metadata)
500
+ ```
501
+
502
+ Notes:
503
+ Ensure that `paddlepaddle` and `x2paddle` are installed, as these are required for the export function. You can
504
+ install them via pip:
505
+ ```
506
+ $ pip install paddlepaddle x2paddle
507
+ ```
508
+ """
509
+ check_requirements(("paddlepaddle", "x2paddle"))
510
+ import x2paddle
511
+ from x2paddle.convert import pytorch2paddle
512
+
513
+ LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
514
+ f = str(file).replace(".pt", f"_paddle_model{os.sep}")
515
+
516
+ pytorch2paddle(module=model, save_dir=f, jit_type="trace", input_examples=[im]) # export
517
+ yaml_save(Path(f) / file.with_suffix(".yaml").name, metadata) # add metadata.yaml
518
+ return f, None
519
+
520
+
521
+ @try_export
522
+ def export_coreml(model, im, file, int8, half, nms, mlmodel, prefix=colorstr("CoreML:")):
523
+ """
524
+ Export a YOLOv5 model to CoreML format with optional NMS, INT8, and FP16 support.
525
+
526
+ Args:
527
+ model (torch.nn.Module): The YOLOv5 model to be exported.
528
+ im (torch.Tensor): Example input tensor to trace the model.
529
+ file (pathlib.Path): Path object where the CoreML model will be saved.
530
+ int8 (bool): Flag indicating whether to use INT8 quantization (default is False).
531
+ half (bool): Flag indicating whether to use FP16 quantization (default is False).
532
+ nms (bool): Flag indicating whether to include Non-Maximum Suppression (default is False).
533
+ mlmodel (bool): Flag indicating whether to export as older *.mlmodel format (default is False).
534
+ prefix (str): Prefix string for logging purposes (default is 'CoreML:').
535
+
536
+ Returns:
537
+ tuple[pathlib.Path | None, None]: The path to the saved CoreML model file, or (None, None) if there is an error.
538
+
539
+ Notes:
540
+ The exported CoreML model will be saved with a .mlmodel extension.
541
+ Quantization is supported only on macOS.
542
+
543
+ Example:
544
+ ```python
545
+ from pathlib import Path
546
+ import torch
547
+ from models.yolo import Model
548
+ model = Model(cfg, ch=3, nc=80)
549
+ im = torch.randn(1, 3, 640, 640)
550
+ file = Path("yolov5s_coreml")
551
+ export_coreml(model, im, file, int8=False, half=False, nms=True, mlmodel=False)
552
+ ```
553
+ """
554
+ check_requirements("coremltools")
555
+ import coremltools as ct
556
+
557
+ LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
558
+ if mlmodel:
559
+ f = file.with_suffix(".mlmodel")
560
+ convert_to = "neuralnetwork"
561
+ precision = None
562
+ else:
563
+ f = file.with_suffix(".mlpackage")
564
+ convert_to = "mlprogram"
565
+ if half:
566
+ precision = ct.precision.FLOAT16
567
+ else:
568
+ precision = ct.precision.FLOAT32
569
+
570
+ if nms:
571
+ model = iOSModel(model, im)
572
+ ts = torch.jit.trace(model, im, strict=False) # TorchScript model
573
+ ct_model = ct.convert(
574
+ ts,
575
+ inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])],
576
+ convert_to=convert_to,
577
+ compute_precision=precision,
578
+ )
579
+ bits, mode = (8, "kmeans") if int8 else (16, "linear") if half else (32, None)
580
+ if bits < 32:
581
+ if mlmodel:
582
+ with warnings.catch_warnings():
583
+ warnings.filterwarnings(
584
+ "ignore", category=DeprecationWarning
585
+ ) # suppress numpy==1.20 float warning, fixed in coremltools==7.0
586
+ ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
587
+ elif bits == 8:
588
+ op_config = ct.optimize.coreml.OpPalettizerConfig(mode=mode, nbits=bits, weight_threshold=512)
589
+ config = ct.optimize.coreml.OptimizationConfig(global_config=op_config)
590
+ ct_model = ct.optimize.coreml.palettize_weights(ct_model, config)
591
+ ct_model.save(f)
592
+ return f, ct_model
593
+
594
+
595
+ @try_export
596
+ def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr("TensorRT:")):
597
+ """
598
+ Export a YOLOv5 model to TensorRT engine format, requiring GPU and TensorRT>=7.0.0.
599
+
600
+ Args:
601
+ model (torch.nn.Module): YOLOv5 model to be exported.
602
+ im (torch.Tensor): Input tensor of shape (B, C, H, W).
603
+ file (pathlib.Path): Path to save the exported model.
604
+ half (bool): Set to True to export with FP16 precision.
605
+ dynamic (bool): Set to True to enable dynamic input shapes.
606
+ simplify (bool): Set to True to simplify the model during export.
607
+ workspace (int): Workspace size in GB (default is 4).
608
+ verbose (bool): Set to True for verbose logging output.
609
+ prefix (str): Log message prefix.
610
+
611
+ Returns:
612
+ (pathlib.Path, None): Tuple containing the path to the exported model and None.
613
+
614
+ Raises:
615
+ AssertionError: If executed on CPU instead of GPU.
616
+ RuntimeError: If there is a failure in parsing the ONNX file.
617
+
618
+ Example:
619
+ ```python
620
+ from ultralytics import YOLOv5
621
+ import torch
622
+ from pathlib import Path
623
+
624
+ model = YOLOv5('yolov5s.pt') # Load a pre-trained YOLOv5 model
625
+ input_tensor = torch.randn(1, 3, 640, 640).cuda() # example input tensor on GPU
626
+ export_path = Path('yolov5s.engine') # export destination
627
+
628
+ export_engine(model.model, input_tensor, export_path, half=True, dynamic=True, simplify=True, workspace=8, verbose=True)
629
+ ```
630
+ """
631
+ assert im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. `python export.py --device 0`"
632
+ try:
633
+ import tensorrt as trt
634
+ except Exception:
635
+ if platform.system() == "Linux":
636
+ check_requirements("nvidia-tensorrt", cmds="-U --index-url https://pypi.ngc.nvidia.com")
637
+ import tensorrt as trt
638
+
639
+ if trt.__version__[0] == "7": # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
640
+ grid = model.model[-1].anchor_grid
641
+ model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
642
+ export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
643
+ model.model[-1].anchor_grid = grid
644
+ else: # TensorRT >= 8
645
+ check_version(trt.__version__, "8.0.0", hard=True) # require tensorrt>=8.0.0
646
+ export_onnx(model, im, file, 12, dynamic, simplify) # opset 12
647
+ onnx = file.with_suffix(".onnx")
648
+
649
+ LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
650
+ is_trt10 = int(trt.__version__.split(".")[0]) >= 10 # is TensorRT >= 10
651
+ assert onnx.exists(), f"failed to export ONNX file: {onnx}"
652
+ f = file.with_suffix(".engine") # TensorRT engine file
653
+ logger = trt.Logger(trt.Logger.INFO)
654
+ if verbose:
655
+ logger.min_severity = trt.Logger.Severity.VERBOSE
656
+
657
+ builder = trt.Builder(logger)
658
+ config = builder.create_builder_config()
659
+ if is_trt10:
660
+ config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30)
661
+ else: # TensorRT versions 7, 8
662
+ config.max_workspace_size = workspace * 1 << 30
663
+ flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
664
+ network = builder.create_network(flag)
665
+ parser = trt.OnnxParser(network, logger)
666
+ if not parser.parse_from_file(str(onnx)):
667
+ raise RuntimeError(f"failed to load ONNX file: {onnx}")
668
+
669
+ inputs = [network.get_input(i) for i in range(network.num_inputs)]
670
+ outputs = [network.get_output(i) for i in range(network.num_outputs)]
671
+ for inp in inputs:
672
+ LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
673
+ for out in outputs:
674
+ LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
675
+
676
+ if dynamic:
677
+ if im.shape[0] <= 1:
678
+ LOGGER.warning(f"{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument")
679
+ profile = builder.create_optimization_profile()
680
+ for inp in inputs:
681
+ profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape)
682
+ config.add_optimization_profile(profile)
683
+
684
+ LOGGER.info(f"{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}")
685
+ if builder.platform_has_fast_fp16 and half:
686
+ config.set_flag(trt.BuilderFlag.FP16)
687
+
688
+ build = builder.build_serialized_network if is_trt10 else builder.build_engine
689
+ with build(network, config) as engine, open(f, "wb") as t:
690
+ t.write(engine if is_trt10 else engine.serialize())
691
+ return f, None
692
+
693
+
694
+ @try_export
695
+ def export_saved_model(
696
+ model,
697
+ im,
698
+ file,
699
+ dynamic,
700
+ tf_nms=False,
701
+ agnostic_nms=False,
702
+ topk_per_class=100,
703
+ topk_all=100,
704
+ iou_thres=0.45,
705
+ conf_thres=0.25,
706
+ keras=False,
707
+ prefix=colorstr("TensorFlow SavedModel:"),
708
+ ):
709
+ """
710
+ Export a YOLOv5 model to the TensorFlow SavedModel format, supporting dynamic axes and non-maximum suppression
711
+ (NMS).
712
+
713
+ Args:
714
+ model (torch.nn.Module): The PyTorch model to convert.
715
+ im (torch.Tensor): Sample input tensor with shape (B, C, H, W) for tracing.
716
+ file (pathlib.Path): File path to save the exported model.
717
+ dynamic (bool): Flag to indicate whether dynamic axes should be used.
718
+ tf_nms (bool, optional): Enable TensorFlow non-maximum suppression (NMS). Default is False.
719
+ agnostic_nms (bool, optional): Enable class-agnostic NMS. Default is False.
720
+ topk_per_class (int, optional): Top K detections per class to keep before applying NMS. Default is 100.
721
+ topk_all (int, optional): Top K detections across all classes to keep before applying NMS. Default is 100.
722
+ iou_thres (float, optional): IoU threshold for NMS. Default is 0.45.
723
+ conf_thres (float, optional): Confidence threshold for detections. Default is 0.25.
724
+ keras (bool, optional): Save the model in Keras format if True. Default is False.
725
+ prefix (str, optional): Prefix for logging messages. Default is "TensorFlow SavedModel:".
726
+
727
+ Returns:
728
+ tuple[str, tf.keras.Model | None]: A tuple containing the path to the saved model folder and the Keras model instance,
729
+ or None if TensorFlow export fails.
730
+
731
+ Notes:
732
+ - The method supports TensorFlow versions up to 2.15.1.
733
+ - TensorFlow NMS may not be supported in older TensorFlow versions.
734
+ - If the TensorFlow version exceeds 2.13.1, it might cause issues when exporting to TFLite.
735
+ Refer to: https://github.com/ultralytics/yolov5/issues/12489
736
+
737
+ Example:
738
+ ```python
739
+ model, im = ... # Initialize your PyTorch model and input tensor
740
+ export_saved_model(model, im, Path("yolov5_saved_model"), dynamic=True)
741
+ ```
742
+ """
743
+     # YOLOv5 TensorFlow SavedModel export
+     try:
+         import tensorflow as tf
+     except Exception:
+         check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}<=2.15.1")
+
+     import tensorflow as tf
+     from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
+
+     from models.tf import TFModel
+
+     LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+     if tf.__version__ > "2.13.1":
+         helper_url = "https://github.com/ultralytics/yolov5/issues/12489"
+         LOGGER.info(
+             f"WARNING ⚠️ using Tensorflow {tf.__version__} > 2.13.1 might cause issues when exporting the model to tflite {helper_url}"
+         )  # handling issue https://github.com/ultralytics/yolov5/issues/12489
+     f = str(file).replace(".pt", "_saved_model")
+     batch_size, ch, *imgsz = list(im.shape)  # BCHW
+
+     tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
+     im = tf.zeros((batch_size, *imgsz, ch))  # BHWC order for TensorFlow
+     _ = tf_model.predict(im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+     inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size)
+     outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres)
+     keras_model = tf.keras.Model(inputs=inputs, outputs=outputs)
+     keras_model.trainable = False
+     keras_model.summary()
+     if keras:
+         keras_model.save(f, save_format="tf")
+     else:
+         spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
+         m = tf.function(lambda x: keras_model(x))  # full model
+         m = m.get_concrete_function(spec)
+         frozen_func = convert_variables_to_constants_v2(m)
+         tfm = tf.Module()
+         tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec])
+         tfm.__call__(im)
+         tf.saved_model.save(
+             tfm,
+             f,
+             options=tf.saved_model.SaveOptions(experimental_custom_gradients=False)
+             if check_version(tf.__version__, "2.6")
+             else tf.saved_model.SaveOptions(),
+         )
+     return f, keras_model
+
+
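+ # A minimal reload sketch (an illustrative assumption, not part of the original file):
+ # the exported SavedModel can be restored for inference with TensorFlow directly.
+ #
+ #     import tensorflow as tf
+ #     loaded = tf.saved_model.load("yolov5s_saved_model")  # path produced by export_saved_model
+ #     im = tf.zeros((1, 640, 640, 3))  # BHWC layout, matching the export above
+ #     pred = loaded(im)
+
+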
+ @try_export
+ def export_pb(keras_model, file, prefix=colorstr("TensorFlow GraphDef:")):
+     """
+     Export YOLOv5 model to TensorFlow GraphDef (*.pb) format.
+
+     Args:
+         keras_model (tf.keras.Model): The Keras model to be converted.
+         file (Path): The output file path where the GraphDef will be saved.
+         prefix (str): Optional prefix string; defaults to a colored string indicating TensorFlow GraphDef export status.
+
+     Returns:
+         tuple[Path, None]: The file path where the GraphDef model was saved and a None placeholder.
+
+     Notes:
+         For more details, refer to the guide on frozen graphs: https://github.com/leimao/Frozen_Graph_TensorFlow
+
+     Example:
+         ```python
+         from pathlib import Path
+
+         keras_model = ...  # assume an existing Keras model
+         file = Path("model.pb")
+         export_pb(keras_model, file)
+         ```
+     """
+     import tensorflow as tf
+     from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
+
+     LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+     f = file.with_suffix(".pb")
+
+     m = tf.function(lambda x: keras_model(x))  # full model
+     m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype))
+     frozen_func = convert_variables_to_constants_v2(m)
+     frozen_func.graph.as_graph_def()
+     tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False)
+     return f, None
+
+
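+ # A reload sketch (an assumption, mirroring how YOLOv5's DetectMultiBackend consumes
+ # frozen *.pb graphs via the TF1 compatibility API):
+ #
+ #     import tensorflow as tf
+ #     gd = tf.Graph().as_graph_def()
+ #     with open("model.pb", "rb") as fh:
+ #         gd.ParseFromString(fh.read())
+ #     wrapped = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), [])
+ #     frozen_func = wrapped.prune("x:0", "Identity:0")  # tensor names here are assumptions
+
+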
+ @try_export
+ def export_tflite(
+     keras_model, im, file, int8, per_tensor, data, nms, agnostic_nms, prefix=colorstr("TensorFlow Lite:")
+ ):
+     # YOLOv5 TensorFlow Lite export
+     """
+     Export a YOLOv5 model to TensorFlow Lite format with optional INT8 quantization and NMS support.
+
+     Args:
+         keras_model (tf.keras.Model): The Keras model to be exported.
+         im (torch.Tensor): An input image tensor for normalization and model tracing.
+         file (Path): The file path to save the TensorFlow Lite model.
+         int8 (bool): Enables INT8 quantization if True.
+         per_tensor (bool): If True, disables per-channel quantization.
+         data (str): Path to the dataset for representative dataset generation in INT8 quantization.
+         nms (bool): Enables Non-Maximum Suppression (NMS) if True.
+         agnostic_nms (bool): Enables class-agnostic NMS if True.
+         prefix (str): Prefix for log messages.
+
+     Returns:
+         (str | None, None): The file path of the exported TFLite model and a None placeholder, or None if the
+             export failed.
+
+     Example:
+         ```python
+         from pathlib import Path
+         import torch
+         import tensorflow as tf
+
+         # Load a Keras model wrapping a YOLOv5 model
+         keras_model = tf.keras.models.load_model('path/to/keras_model.h5')
+
+         # Example input tensor
+         im = torch.zeros(1, 3, 640, 640)
+
+         # Export the model
+         export_tflite(keras_model, im, Path('model.tflite'), int8=True, per_tensor=False, data='data/coco.yaml',
+                       nms=True, agnostic_nms=False)
+         ```
+
+     Notes:
+         - Ensure TensorFlow and TensorFlow Lite dependencies are installed.
+         - INT8 quantization requires a representative dataset to achieve optimal accuracy.
+         - TensorFlow Lite models are suitable for efficient inference on mobile and edge devices.
+     """
+     import tensorflow as tf
+
+     LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+     batch_size, ch, *imgsz = list(im.shape)  # BCHW
+     f = str(file).replace(".pt", "-fp16.tflite")
+
+     converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
+     converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
+     converter.target_spec.supported_types = [tf.float16]
+     converter.optimizations = [tf.lite.Optimize.DEFAULT]
+     if int8:
+         from models.tf import representative_dataset_gen
+
+         dataset = LoadImages(check_dataset(check_yaml(data))["train"], img_size=imgsz, auto=False)
+         converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100)
+         converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+         converter.target_spec.supported_types = []
+         converter.inference_input_type = tf.uint8  # or tf.int8
+         converter.inference_output_type = tf.uint8  # or tf.int8
+         converter.experimental_new_quantizer = True
+         if per_tensor:
+             converter._experimental_disable_per_channel = True
+         f = str(file).replace(".pt", "-int8.tflite")
+     if nms or agnostic_nms:
+         converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS)
+
+     tflite_model = converter.convert()
+     with open(f, "wb") as fh:
+         fh.write(tflite_model)
+     return f, None
+
+
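+ # A brief inference sketch (an assumption; this is the standard tf.lite API rather
+ # than project code) for running the exported *.tflite file:
+ #
+ #     import numpy as np
+ #     import tensorflow as tf
+ #     interpreter = tf.lite.Interpreter(model_path="yolov5s-fp16.tflite")
+ #     interpreter.allocate_tensors()
+ #     inp = interpreter.get_input_details()[0]
+ #     out = interpreter.get_output_details()[0]
+ #     interpreter.set_tensor(inp["index"], np.zeros(inp["shape"], dtype=inp["dtype"]))
+ #     interpreter.invoke()
+ #     pred = interpreter.get_tensor(out["index"])
+
+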
+ @try_export
+ def export_edgetpu(file, prefix=colorstr("Edge TPU:")):
+     """
+     Exports a YOLOv5 model to Edge TPU compatible TFLite format; requires Linux and Edge TPU compiler.
+
+     Args:
+         file (Path): Path to the YOLOv5 model file to be exported (.pt format).
+         prefix (str, optional): Prefix for logging messages. Defaults to colorstr("Edge TPU:").
+
+     Returns:
+         tuple[Path, None]: Path to the exported Edge TPU compatible TFLite model, None.
+
+     Raises:
+         AssertionError: If the system is not Linux.
+         subprocess.CalledProcessError: If any subprocess call to install or run the Edge TPU compiler fails.
+
+     Notes:
+         To use this function, ensure you have the Edge TPU compiler installed on your Linux system. You can find
+         installation instructions here: https://coral.ai/docs/edgetpu/compiler/.
+
+     Example:
+         ```python
+         from pathlib import Path
+
+         file = Path('yolov5s.pt')
+         export_edgetpu(file)
+         ```
+     """
+     cmd = "edgetpu_compiler --version"
+     help_url = "https://coral.ai/docs/edgetpu/compiler/"
+     assert platform.system() == "Linux", f"export only supported on Linux. See {help_url}"
+     if subprocess.run(f"{cmd} > /dev/null 2>&1", shell=True).returncode != 0:
+         LOGGER.info(f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}")
+         sudo = subprocess.run("sudo --version >/dev/null", shell=True).returncode == 0  # sudo installed on system
+         for c in (
+             "curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -",
+             'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
+             "sudo apt-get update",
+             "sudo apt-get install edgetpu-compiler",
+         ):
+             subprocess.run(c if sudo else c.replace("sudo ", ""), shell=True, check=True)
+     ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]
+
+     LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
+     f = str(file).replace(".pt", "-int8_edgetpu.tflite")  # Edge TPU model
+     f_tfl = str(file).replace(".pt", "-int8.tflite")  # TFLite model
+
+     subprocess.run(
+         [
+             "edgetpu_compiler",
+             "-s",
+             "-d",
+             "-k",
+             "10",
+             "--out_dir",
+             str(file.parent),
+             f_tfl,
+         ],
+         check=True,
+     )
+     return f, None
+
+
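+ # Equivalent manual invocation (a sketch; the flags mirror the subprocess call above):
+ #
+ #     edgetpu_compiler -s -d -k 10 --out_dir . yolov5s-int8.tflite
+
+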
+ @try_export
+ def export_tfjs(file, int8, prefix=colorstr("TensorFlow.js:")):
+     """
+     Convert a YOLOv5 model to TensorFlow.js format with optional uint8 quantization.
+
+     Args:
+         file (Path): Path to the YOLOv5 model file to be converted, typically having a ".pt" or ".onnx" extension.
+         int8 (bool): If True, applies uint8 quantization during the conversion process.
+         prefix (str): Optional prefix for logging messages, default is 'TensorFlow.js:' with color formatting.
+
+     Returns:
+         (str, None): Tuple containing the output directory path as a string and None.
+
+     Notes:
+         - This function requires the `tensorflowjs` package. Install it using:
+           ```shell
+           pip install tensorflowjs
+           ```
+         - The converted TensorFlow.js model will be saved in a directory with the "_web_model" suffix appended to the
+           original file name.
+         - The conversion involves running shell commands that invoke the TensorFlow.js converter tool.
+
+     Example:
+         ```python
+         from pathlib import Path
+
+         file = Path('yolov5.onnx')
+         export_tfjs(file, int8=False)
+         ```
+     """
+     check_requirements("tensorflowjs")
+     import tensorflowjs as tfjs
+
+     LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
+     f = str(file).replace(".pt", "_web_model")  # js dir
+     f_pb = file.with_suffix(".pb")  # *.pb path
+     f_json = f"{f}/model.json"  # *.json path
+
+     args = [
+         "tensorflowjs_converter",
+         "--input_format=tf_frozen_model",
+         "--quantize_uint8" if int8 else "",
+         "--output_node_names=Identity,Identity_1,Identity_2,Identity_3",
+         str(f_pb),
+         f,
+     ]
+     subprocess.run([arg for arg in args if arg], check=True)
+
+     json = Path(f_json).read_text()
+     with open(f_json, "w") as j:  # sort JSON Identity_* in ascending order
+         subst = re.sub(
+             r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
+             r'"Identity.?.?": {"name": "Identity.?.?"}, '
+             r'"Identity.?.?": {"name": "Identity.?.?"}, '
+             r'"Identity.?.?": {"name": "Identity.?.?"}}}',
+             r'{"outputs": {"Identity": {"name": "Identity"}, '
+             r'"Identity_1": {"name": "Identity_1"}, '
+             r'"Identity_2": {"name": "Identity_2"}, '
+             r'"Identity_3": {"name": "Identity_3"}}}',
+             json,
+         )
+         j.write(subst)
+     return f, None
+
+
+ def add_tflite_metadata(file, metadata, num_outputs):
+     """
+     Adds metadata to a TensorFlow Lite (TFLite) model file, supporting multiple outputs according to TensorFlow
+     guidelines.
+
+     Args:
+         file (str): Path to the TFLite model file to which metadata will be added.
+         metadata (dict): Metadata information to be added to the model, structured as required by the TFLite metadata
+             schema. Common keys include "name", "description", "version", "author", and "license".
+         num_outputs (int): Number of output tensors the model has, used to configure the metadata properly.
+
+     Returns:
+         None
+
+     Example:
+         ```python
+         metadata = {
+             "name": "yolov5",
+             "description": "YOLOv5 object detection model",
+             "version": "1.0",
+             "author": "Ultralytics",
+             "license": "Apache License 2.0"
+         }
+         add_tflite_metadata("model.tflite", metadata, num_outputs=4)
+         ```
+
+     Note:
+         TFLite metadata can include information such as model name, version, author, and other relevant details.
+         For more details on the structure of the metadata, refer to the TensorFlow Lite
+         [metadata guidelines](https://www.tensorflow.org/lite/models/convert/metadata).
+     """
+     with contextlib.suppress(ImportError):
+         # check_requirements('tflite_support')
+         from tflite_support import flatbuffers
+         from tflite_support import metadata as _metadata
+         from tflite_support import metadata_schema_py_generated as _metadata_fb
+
+         tmp_file = Path("/tmp/meta.txt")
+         with open(tmp_file, "w") as meta_f:
+             meta_f.write(str(metadata))
+
+         model_meta = _metadata_fb.ModelMetadataT()
+         label_file = _metadata_fb.AssociatedFileT()
+         label_file.name = tmp_file.name
+         model_meta.associatedFiles = [label_file]
+
+         subgraph = _metadata_fb.SubGraphMetadataT()
+         subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()]
+         subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs
+         model_meta.subgraphMetadata = [subgraph]
+
+         b = flatbuffers.Builder(0)
+         b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
+         metadata_buf = b.Output()
+
+         populator = _metadata.MetadataPopulator.with_model_file(file)
+         populator.load_metadata_buffer(metadata_buf)
+         populator.load_associated_files([str(tmp_file)])
+         populator.populate()
+         tmp_file.unlink()
+
+
+ def pipeline_coreml(model, im, file, names, y, mlmodel, prefix=colorstr("CoreML Pipeline:")):
+     """
+     Convert a PyTorch YOLOv5 model to CoreML format with Non-Maximum Suppression (NMS), handling different input/output
+     shapes, and saving the model.
+
+     Args:
+         model (torch.nn.Module): The YOLOv5 PyTorch model to be converted.
+         im (torch.Tensor): Example input tensor with shape (N, C, H, W), where N is the batch size, C is the number of
+             channels, H is the height, and W is the width.
+         file (Path): Path to save the converted CoreML model.
+         names (dict[int, str]): Dictionary mapping class indices to class names.
+         y (torch.Tensor): Output tensor from the PyTorch model's forward pass.
+         mlmodel (bool): If True, export in the older *.mlmodel format; otherwise export as *.mlpackage.
+         prefix (str): Custom prefix for logging messages.
+
+     Returns:
+         None: The pipelined CoreML model is saved to a path derived from `file`.
+
+     Raises:
+         AssertionError: If the number of class names does not match the number of classes in the model.
+
+     Notes:
+         - This function requires `coremltools` to be installed.
+         - Running this function on a non-macOS environment might not support some features.
+         - Flexible input shapes and additional NMS options can be customized within the function.
+
+     Examples:
+         ```python
+         from pathlib import Path
+         import torch
+
+         model = torch.load('yolov5s.pt')  # Load YOLOv5 model
+         im = torch.zeros((1, 3, 640, 640))  # Example input tensor
+
+         names = {0: "person", 1: "bicycle", 2: "car", ...}  # Define class names
+
+         y = model(im)  # Perform forward pass to get model output
+
+         output_file = Path('yolov5s.mlmodel')  # Convert to CoreML
+         pipeline_coreml(model, im, output_file, names, y, mlmodel=True)
+         ```
+     """
+     import coremltools as ct
+     from PIL import Image
+
+     if mlmodel:
+         f = file.with_suffix(".mlmodel")  # filename
+     else:
+         f = file.with_suffix(".mlpackage")  # filename
+
+     print(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
+     batch_size, ch, h, w = list(im.shape)  # BCHW
+     t = time.time()
+
+     # YOLOv5 Output shapes
+     spec = model.get_spec()
+     out0, out1 = iter(spec.description.output)
+     if platform.system() == "Darwin":
+         img = Image.new("RGB", (w, h))  # img(192 width, 320 height)
+         # img = torch.zeros((*opt.img_size, 3)).numpy()  # img size(320,192,3) iDetection
+         out = model.predict({"image": img})
+         out0_shape, out1_shape = out[out0.name].shape, out[out1.name].shape
+     else:  # linux and windows can not run model.predict(), get sizes from pytorch output y
+         s = tuple(y[0].shape)
+         out0_shape, out1_shape = (s[1], s[2] - 5), (s[1], 4)  # (3780, 80), (3780, 4)
+
+     # Checks
+     nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
+     na, nc = out0_shape
+     # na, nc = out0.type.multiArrayType.shape  # number anchors, classes
+     assert len(names) == nc, f"{len(names)} names found for nc={nc}"  # check
+
+     # Define output shapes (missing)
+     out0.type.multiArrayType.shape[:] = out0_shape  # (3780, 80)
+     out1.type.multiArrayType.shape[:] = out1_shape  # (3780, 4)
+     # spec.neuralNetwork.preprocessing[0].featureName = '0'
+
+     # Flexible input shapes
+     # from coremltools.models.neural_network import flexible_shape_utils
+     # s = []  # shapes
+     # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192))
+     # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384))  # (height, width)
+     # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s)
+     # r = flexible_shape_utils.NeuralNetworkImageSizeRange()  # shape ranges
+     # r.add_height_range((192, 640))
+     # r.add_width_range((192, 640))
+     # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r)
+
+     # Print
+     print(spec.description)
+
+     # Model from spec
+     weights_dir = None if mlmodel else str(f / "Data/com.apple.CoreML/weights")
+     model = ct.models.MLModel(spec, weights_dir=weights_dir)
+
+     # 3. Create NMS protobuf
+     nms_spec = ct.proto.Model_pb2.Model()
+     nms_spec.specificationVersion = 5
+     for i in range(2):
+         decoder_output = model._spec.description.output[i].SerializeToString()
+         nms_spec.description.input.add()
+         nms_spec.description.input[i].ParseFromString(decoder_output)
+         nms_spec.description.output.add()
+         nms_spec.description.output[i].ParseFromString(decoder_output)
+
+     nms_spec.description.output[0].name = "confidence"
+     nms_spec.description.output[1].name = "coordinates"
+
+     output_sizes = [nc, 4]
+     for i in range(2):
+         ma_type = nms_spec.description.output[i].type.multiArrayType
+         ma_type.shapeRange.sizeRanges.add()
+         ma_type.shapeRange.sizeRanges[0].lowerBound = 0
+         ma_type.shapeRange.sizeRanges[0].upperBound = -1
+         ma_type.shapeRange.sizeRanges.add()
+         ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
+         ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
+         del ma_type.shape[:]
+
+     nms = nms_spec.nonMaximumSuppression
+     nms.confidenceInputFeatureName = out0.name  # 1x507x80
+     nms.coordinatesInputFeatureName = out1.name  # 1x507x4
+     nms.confidenceOutputFeatureName = "confidence"
+     nms.coordinatesOutputFeatureName = "coordinates"
+     nms.iouThresholdInputFeatureName = "iouThreshold"
+     nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
+     nms.iouThreshold = 0.45
+     nms.confidenceThreshold = 0.25
+     nms.pickTop.perClass = True
+     nms.stringClassLabels.vector.extend(names.values())
+     nms_model = ct.models.MLModel(nms_spec)
+
+     # 4. Pipeline models together
+     pipeline = ct.models.pipeline.Pipeline(
+         input_features=[
+             ("image", ct.models.datatypes.Array(3, ny, nx)),
+             ("iouThreshold", ct.models.datatypes.Double()),
+             ("confidenceThreshold", ct.models.datatypes.Double()),
+         ],
+         output_features=["confidence", "coordinates"],
+     )
+     pipeline.add_model(model)
+     pipeline.add_model(nms_model)
+
+     # Correct datatypes
+     pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
+     pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
+     pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
+
+     # Update metadata
+     pipeline.spec.specificationVersion = 5
+     pipeline.spec.description.metadata.versionString = "https://github.com/ultralytics/yolov5"
+     pipeline.spec.description.metadata.shortDescription = "https://github.com/ultralytics/yolov5"
+     pipeline.spec.description.metadata.author = "glenn.jocher@ultralytics.com"
+     pipeline.spec.description.metadata.license = "https://github.com/ultralytics/yolov5/blob/master/LICENSE"
+     pipeline.spec.description.metadata.userDefined.update(
+         {
+             "classes": ",".join(names.values()),
+             "iou_threshold": str(nms.iouThreshold),
+             "confidence_threshold": str(nms.confidenceThreshold),
+         }
+     )
+
+     # Save the model
+     model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
+     model.input_description["image"] = "Input image"
+     model.input_description["iouThreshold"] = f"(optional) IOU Threshold override (default: {nms.iouThreshold})"
+     model.input_description["confidenceThreshold"] = (
+         f"(optional) Confidence Threshold override (default: {nms.confidenceThreshold})"
+     )
+     model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
+     model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
+     model.save(f)  # pipelined
+     print(f"{prefix} pipeline success ({time.time() - t:.2f}s), saved as {f} ({file_size(f):.1f} MB)")
+
+
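+ # A small consumption sketch (an assumption; CoreML prediction only runs on macOS):
+ #
+ #     import coremltools as ct
+ #     from PIL import Image
+ #     pipe = ct.models.MLModel("yolov5s.mlpackage")
+ #     out = pipe.predict({"image": Image.new("RGB", (640, 640)),
+ #                         "iouThreshold": 0.45, "confidenceThreshold": 0.25})
+ #     scores, boxes = out["confidence"], out["coordinates"]
+
+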
+ @smart_inference_mode()
+ def run(
+     data=ROOT / "data/coco128.yaml",  # 'dataset.yaml path'
+     weights=ROOT / "yolov5s.pt",  # weights path
+     imgsz=(640, 640),  # image (height, width)
+     batch_size=1,  # batch size
+     device="cpu",  # cuda device, i.e. 0 or 0,1,2,3 or cpu
+     include=("torchscript", "onnx"),  # include formats
+     half=False,  # FP16 half-precision export
+     inplace=False,  # set YOLOv5 Detect() inplace=True
+     keras=False,  # use Keras
+     optimize=False,  # TorchScript: optimize for mobile
+     int8=False,  # CoreML/TF INT8 quantization
+     per_tensor=False,  # TF per tensor quantization
+     dynamic=False,  # ONNX/TF/TensorRT: dynamic axes
+     simplify=False,  # ONNX: simplify model
+     mlmodel=False,  # CoreML: Export in *.mlmodel format
+     opset=12,  # ONNX: opset version
+     verbose=False,  # TensorRT: verbose log
+     workspace=4,  # TensorRT: workspace size (GB)
+     nms=False,  # TF: add NMS to model
+     agnostic_nms=False,  # TF: add agnostic NMS to model
+     topk_per_class=100,  # TF.js NMS: topk per class to keep
+     topk_all=100,  # TF.js NMS: topk for all classes to keep
+     iou_thres=0.45,  # TF.js NMS: IoU threshold
+     conf_thres=0.25,  # TF.js NMS: confidence threshold
+ ):
+     """
+     Exports a YOLOv5 model to specified formats including ONNX, TensorRT, CoreML, and TensorFlow.
+
+     Args:
+         data (str | Path): Path to the dataset YAML configuration file. Default is 'data/coco128.yaml'.
+         weights (str | Path): Path to the pretrained model weights file. Default is 'yolov5s.pt'.
+         imgsz (tuple): Image size as (height, width). Default is (640, 640).
+         batch_size (int): Batch size for exporting the model. Default is 1.
+         device (str): Device to run the export on, e.g., '0' for GPU, 'cpu' for CPU. Default is 'cpu'.
+         include (tuple): Formats to include in the export. Default is ('torchscript', 'onnx').
+         half (bool): Flag to export model with FP16 half-precision. Default is False.
+         inplace (bool): Set the YOLOv5 Detect() module inplace=True. Default is False.
+         keras (bool): Flag to use Keras for TensorFlow SavedModel export. Default is False.
+         optimize (bool): Optimize TorchScript model for mobile deployment. Default is False.
+         int8 (bool): Apply INT8 quantization for CoreML or TensorFlow models. Default is False.
+         per_tensor (bool): Apply per tensor quantization for TensorFlow models. Default is False.
+         dynamic (bool): Enable dynamic axes for ONNX, TensorFlow, or TensorRT exports. Default is False.
+         simplify (bool): Simplify the ONNX model during export. Default is False.
+         mlmodel (bool): Flag to use *.mlmodel for CoreML export. Default is False.
+         opset (int): ONNX opset version. Default is 12.
+         verbose (bool): Enable verbose logging for TensorRT export. Default is False.
+         workspace (int): TensorRT workspace size in GB. Default is 4.
+         nms (bool): Add non-maximum suppression (NMS) to the TensorFlow model. Default is False.
+         agnostic_nms (bool): Add class-agnostic NMS to the TensorFlow model. Default is False.
+         topk_per_class (int): Top-K boxes per class to keep for TensorFlow.js NMS. Default is 100.
+         topk_all (int): Top-K boxes for all classes to keep for TensorFlow.js NMS. Default is 100.
+         iou_thres (float): IoU threshold for NMS. Default is 0.45.
+         conf_thres (float): Confidence threshold for NMS. Default is 0.25.
+
+     Returns:
+         list[str]: Paths of the exported files and/or directories, one entry per successfully exported format.
+
+     Notes:
+         - Model export is based on the specified formats in the 'include' argument.
+         - Be cautious of combinations where certain flags are mutually exclusive, such as `--half` and `--dynamic`.
+
+     Example:
+         ```python
+         run(
+             data="data/coco128.yaml",
+             weights="yolov5s.pt",
+             imgsz=(640, 640),
+             batch_size=1,
+             device="cpu",
+             include=("torchscript", "onnx"),
+             half=False,
+             inplace=False,
+             keras=False,
+             optimize=False,
+             int8=False,
+             per_tensor=False,
+             dynamic=False,
+             simplify=False,
+             opset=12,
+             verbose=False,
+             mlmodel=False,
+             workspace=4,
+             nms=False,
+             agnostic_nms=False,
+             topk_per_class=100,
+             topk_all=100,
+             iou_thres=0.45,
+             conf_thres=0.25,
+         )
+         ```
+     """
+     t = time.time()
+     include = [x.lower() for x in include]  # to lowercase
+     fmts = tuple(export_formats()["Argument"][1:])  # --include arguments
+     flags = [x in include for x in fmts]
+     assert sum(flags) == len(include), f"ERROR: Invalid --include {include}, valid --include arguments are {fmts}"
+     jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags  # export booleans
+     file = Path(url2file(weights) if str(weights).startswith(("http:/", "https:/")) else weights)  # PyTorch weights
+
+     # Load PyTorch model
+     device = select_device(device)
+     if half:
+         assert device.type != "cpu" or coreml, "--half only compatible with GPU export, i.e. use --device 0"
+         assert not dynamic, "--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both"
+     model = attempt_load(weights, device=device, inplace=True, fuse=True)  # load FP32 model
+
+     # Checks
+     imgsz *= 2 if len(imgsz) == 1 else 1  # expand
+     if optimize:
+         assert device.type == "cpu", "--optimize not compatible with cuda devices, i.e. use --device cpu"
+
+     # Input
+     gs = int(max(model.stride))  # grid size (max stride)
+     imgsz = [check_img_size(x, gs) for x in imgsz]  # verify img_size are gs-multiples
+     im = torch.zeros(batch_size, 3, *imgsz).to(device)  # image size(1,3,320,192) BCHW iDetection
+
+     # Update model
+     model.eval()
+     for k, m in model.named_modules():
+         if isinstance(m, Detect):
+             m.inplace = inplace
+             m.dynamic = dynamic
+             m.export = True
+
+     for _ in range(2):
+         y = model(im)  # dry runs
+     if half and not coreml:
+         im, model = im.half(), model.half()  # to FP16
+     shape = tuple((y[0] if isinstance(y, tuple) else y).shape)  # model output shape
+     metadata = {"stride": int(max(model.stride)), "names": model.names}  # model metadata
+     LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)")
+
+     # Exports
+     f = [""] * len(fmts)  # exported filenames
+     warnings.filterwarnings(action="ignore", category=torch.jit.TracerWarning)  # suppress TracerWarning
+     if jit:  # TorchScript
+         f[0], _ = export_torchscript(model, im, file, optimize)
+     if engine:  # TensorRT required before ONNX
+         f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose)
+     if onnx or xml:  # OpenVINO requires ONNX
+         f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify)
+     if xml:  # OpenVINO
+         f[3], _ = export_openvino(file, metadata, half, int8, data)
+     if coreml:  # CoreML
+         f[4], ct_model = export_coreml(model, im, file, int8, half, nms, mlmodel)
+         if nms:
+             pipeline_coreml(ct_model, im, file, model.names, y, mlmodel)
+     if any((saved_model, pb, tflite, edgetpu, tfjs)):  # TensorFlow formats
+         assert not tflite or not tfjs, "TFLite and TF.js models must be exported separately, please pass only one type."
+         assert not isinstance(model, ClassificationModel), "ClassificationModel export to TF formats not yet supported."
+         f[5], s_model = export_saved_model(
+             model.cpu(),
+             im,
+             file,
+             dynamic,
+             tf_nms=nms or agnostic_nms or tfjs,
+             agnostic_nms=agnostic_nms or tfjs,
+             topk_per_class=topk_per_class,
+             topk_all=topk_all,
+             iou_thres=iou_thres,
+             conf_thres=conf_thres,
+             keras=keras,
+         )
+         if pb or tfjs:  # pb prerequisite to tfjs
+             f[6], _ = export_pb(s_model, file)
+         if tflite or edgetpu:
+             f[7], _ = export_tflite(
+                 s_model, im, file, int8 or edgetpu, per_tensor, data=data, nms=nms, agnostic_nms=agnostic_nms
+             )
+             if edgetpu:
+                 f[8], _ = export_edgetpu(file)
+             add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs))
+         if tfjs:
+             f[9], _ = export_tfjs(file, int8)
+     if paddle:  # PaddlePaddle
+         f[10], _ = export_paddle(model, im, file, metadata)
+
+     # Finish
+     f = [str(x) for x in f if x]  # filter out '' and None
+     if any(f):
+         cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel))  # type
+         det &= not seg  # segmentation models inherit from SegmentationModel(DetectionModel)
+         dir = Path("segment" if seg else "classify" if cls else "")
+         h = "--half" if half else ""  # --half FP16 inference arg
+         s = (
+             "# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference"
+             if cls
+             else "# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference"
+             if seg
+             else ""
+         )
+         LOGGER.info(
+             f"\nExport complete ({time.time() - t:.1f}s)"
+             f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
+             f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}"
+             f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}"
+             f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}"
+             "\nVisualize: https://netron.app"
+         )
+     return f  # return list of exported files/dirs
+
+
+ def parse_opt(known=False):
+     """
+     Parse command-line options for YOLOv5 model export configurations.
+
+     Args:
+         known (bool): If True, uses `argparse.ArgumentParser.parse_known_args`; otherwise, uses
+             `argparse.ArgumentParser.parse_args`. Default is False.
+
+     Returns:
+         argparse.Namespace: Object containing parsed command-line arguments.
+
+     Example:
+         ```python
+         opts = parse_opt()
+         print(opts.data)
+         print(opts.weights)
+         ```
+     """
+     parser = argparse.ArgumentParser()
+     parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
+     parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)")
+     parser.add_argument("--imgsz", "--img", "--img-size", nargs="+", type=int, default=[640, 640], help="image (h, w)")
+     parser.add_argument("--batch-size", type=int, default=1, help="batch size")
+     parser.add_argument("--device", default="cpu", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
+     parser.add_argument("--half", action="store_true", help="FP16 half-precision export")
+     parser.add_argument("--inplace", action="store_true", help="set YOLOv5 Detect() inplace=True")
+     parser.add_argument("--keras", action="store_true", help="TF: use Keras")
+     parser.add_argument("--optimize", action="store_true", help="TorchScript: optimize for mobile")
+     parser.add_argument("--int8", action="store_true", help="CoreML/TF/OpenVINO INT8 quantization")
+     parser.add_argument("--per-tensor", action="store_true", help="TF per-tensor quantization")
+     parser.add_argument("--dynamic", action="store_true", help="ONNX/TF/TensorRT: dynamic axes")
+     parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model")
+     parser.add_argument("--mlmodel", action="store_true", help="CoreML: Export in *.mlmodel format")
+     parser.add_argument("--opset", type=int, default=17, help="ONNX: opset version")
+     parser.add_argument("--verbose", action="store_true", help="TensorRT: verbose log")
+     parser.add_argument("--workspace", type=int, default=4, help="TensorRT: workspace size (GB)")
+     parser.add_argument("--nms", action="store_true", help="TF: add NMS to model")
+     parser.add_argument("--agnostic-nms", action="store_true", help="TF: add agnostic NMS to model")
+     parser.add_argument("--topk-per-class", type=int, default=100, help="TF.js NMS: topk per class to keep")
+     parser.add_argument("--topk-all", type=int, default=100, help="TF.js NMS: topk for all classes to keep")
+     parser.add_argument("--iou-thres", type=float, default=0.45, help="TF.js NMS: IoU threshold")
+     parser.add_argument("--conf-thres", type=float, default=0.25, help="TF.js NMS: confidence threshold")
+     parser.add_argument(
+         "--include",
+         nargs="+",
+         default=["torchscript"],
+         help="torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle",
+     )
+     opt = parser.parse_known_args()[0] if known else parser.parse_args()
+     print_args(vars(opt))
+     return opt
+
+
+ def main(opt):
+     """Execute run(**vars(opt)) once for each weights file given in opt.weights."""
+     for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
+         run(**vars(opt))
+
+
+ if __name__ == "__main__":
+     opt = parse_opt()
+     main(opt)
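+
+
+ # Typical command-line usage (a sketch; the flags correspond to parse_opt above):
+ #
+ #     python export.py --weights yolov5s.pt --include torchscript onnx
+ #     python export.py --weights yolov5s.pt --include tflite --int8 --data data/coco128.yaml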
iteach_toolkit/DHYOLO/hubconf.py ADDED
@@ -0,0 +1,510 @@
+ # Ultralytics YOLOv5 🚀, AGPL-3.0 license
+ """
+ PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5
+
+ Usage:
+     import torch
+     model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # official model
+     model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')  # from branch
+     model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')  # custom/local model
+     model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')  # local repo
+ """
+
+ import torch
+
+
+ def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
+     """
+     Creates or loads a YOLOv5 model, with options for pretrained weights and model customization.
+
+     Args:
+         name (str): Model name (e.g., 'yolov5s') or path to the model checkpoint (e.g., 'path/to/best.pt').
+         pretrained (bool, optional): If True, loads pretrained weights into the model. Defaults to True.
+         channels (int, optional): Number of input channels the model expects. Defaults to 3.
+         classes (int, optional): Number of classes the model is expected to detect. Defaults to 80.
+         autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper for various input formats.
+             Defaults to True.
+         verbose (bool, optional): If True, prints detailed information during the model creation/loading process.
+             Defaults to True.
+         device (str | torch.device | None, optional): Device to use for model parameters (e.g., 'cpu', 'cuda'). If
+             None, selects the best available device. Defaults to None.
+
+     Returns:
+         (DetectMultiBackend | AutoShape): The loaded YOLOv5 model, potentially wrapped with AutoShape if specified.
+
+     Examples:
+         ```python
+         # Load an official YOLOv5s model with pretrained weights (this function is defined in hubconf.py)
+         model = _create('yolov5s')
+
+         # Load a custom model from a local checkpoint
+         model = _create('path/to/custom_model.pt', pretrained=False)
+
+         # Load a model with specific input channels and classes
+         model = _create('yolov5s', channels=1, classes=10)
+         ```
+
+     Notes:
+         For more information on model loading and customization, visit the
+         [YOLOv5 PyTorch Hub Documentation](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading).
+     """
+     from pathlib import Path
+
+     from models.common import AutoShape, DetectMultiBackend
+     from models.experimental import attempt_load
+     from models.yolo import ClassificationModel, DetectionModel, SegmentationModel
+     from utils.downloads import attempt_download
+     from utils.general import LOGGER, ROOT, check_requirements, intersect_dicts, logging
+     from utils.torch_utils import select_device
+
+     if not verbose:
+         LOGGER.setLevel(logging.WARNING)
+     check_requirements(ROOT / "requirements.txt", exclude=("opencv-python", "tensorboard", "thop"))
+     name = Path(name)
+     path = name.with_suffix(".pt") if name.suffix == "" and not name.is_dir() else name  # checkpoint path
+     try:
+         device = select_device(device)
+         if pretrained and channels == 3 and classes == 80:
+             try:
+                 model = DetectMultiBackend(path, device=device, fuse=autoshape)  # detection model
+                 if autoshape:
+                     if model.pt and isinstance(model.model, ClassificationModel):
+                         LOGGER.warning(
+                             "WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. "
+                             "You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224)."
+                         )
+                     elif model.pt and isinstance(model.model, SegmentationModel):
+                         LOGGER.warning(
+                             "WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. "
+                             "You will not be able to run inference with this model."
+                         )
+                     else:
+                         model = AutoShape(model)  # for file/URI/PIL/cv2/np inputs and NMS
+             except Exception:
+                 model = attempt_load(path, device=device, fuse=False)  # arbitrary model
+         else:
+             cfg = list((Path(__file__).parent / "models").rglob(f"{path.stem}.yaml"))[0]  # model.yaml path
+             model = DetectionModel(cfg, channels, classes)  # create model
+             if pretrained:
+                 ckpt = torch.load(attempt_download(path), map_location=device)  # load
+                 csd = ckpt["model"].float().state_dict()  # checkpoint state_dict as FP32
+                 csd = intersect_dicts(csd, model.state_dict(), exclude=["anchors"])  # intersect
+                 model.load_state_dict(csd, strict=False)  # load
+                 if len(ckpt["model"].names) == classes:
+                     model.names = ckpt["model"].names  # set class names attribute
+         if not verbose:
+             LOGGER.setLevel(logging.INFO)  # reset to default
+         return model.to(device)
+
+     except Exception as e:
+         help_url = "https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading"
+         s = f"{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help."
+         raise Exception(s) from e
+
+
+ def custom(path="path/to/model.pt", autoshape=True, _verbose=True, device=None):
+     """
+     Loads a custom or local YOLOv5 model from a given path with optional autoshaping and device specification.
+
+     Args:
+         path (str): Path to the custom model file (e.g., 'path/to/model.pt').
+         autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model if True, enabling compatibility with various input
+             types (default is True).
+         _verbose (bool): If True, prints all informational messages to the screen; otherwise, operates silently
+             (default is True).
+         device (str | torch.device | None): Device to load the model on, e.g., 'cpu', 'cuda', torch.device('cuda:0'),
+             etc. (default is None, which automatically selects the best available device).
+
+     Returns:
+         torch.nn.Module: A YOLOv5 model loaded with the specified parameters.
+
+     Notes:
+         For more details on loading models from PyTorch Hub:
+         https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading
+
+     Examples:
+         ```python
+         # Load model from a given path with autoshape enabled on the best available device
+         model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')
+
+         # Load model from a local path without autoshape on the CPU device
+         model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local', autoshape=False, device='cpu')
+         ```
+     """
+     return _create(path, autoshape=autoshape, verbose=_verbose, device=device)
+
+
+ def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Instantiates the YOLOv5-nano model with options for pretraining, input channels, class count, autoshaping,
+     verbosity, and device.
+
+     Args:
+         pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.
+         channels (int): Number of input channels for the model. Defaults to 3.
+         classes (int): Number of classes for object detection. Defaults to 80.
+         autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper to the model for various formats
+             (file/URI/PIL/cv2/np) and non-maximum suppression (NMS) during inference. Defaults to True.
+         _verbose (bool): If True, prints detailed information to the screen. Defaults to True.
+         device (str | torch.device | None): Specifies the device to use for model computation. If None, uses the best
+             device available (i.e., GPU if available, otherwise CPU). Defaults to None.
+
+     Returns:
+         DetectionModel | ClassificationModel | SegmentationModel: The instantiated YOLOv5-nano model, potentially with
+             pretrained weights and autoshaping applied.
+
+     Notes:
+         For further details on loading models from PyTorch Hub, refer to
+         [PyTorch Hub models](https://pytorch.org/hub/ultralytics_yolov5).
+
+     Examples:
+         ```python
+         # Load the YOLOv5-nano model with defaults (this function is defined in hubconf.py)
+         model = yolov5n()
+
+         # Load the YOLOv5-nano model with a specific device
+         model = yolov5n(device='cuda')
+         ```
+     """
+     return _create("yolov5n", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Create a YOLOv5-small (yolov5s) model with options for pretraining, input channels, class count, autoshaping,
+     verbosity, and device configuration.
+
+     Args:
+         pretrained (bool, optional): Flag to load pretrained weights into the model. Defaults to True.
+         channels (int, optional): Number of input channels. Defaults to 3.
+         classes (int, optional): Number of model classes. Defaults to 80.
+         autoshape (bool, optional): Whether to wrap the model with YOLOv5's .autoshape() for handling various input
+             formats. Defaults to True.
+         _verbose (bool, optional): Flag to print detailed information regarding model loading. Defaults to True.
+         device (str | torch.device | None, optional): Device to use for model computation, can be 'cpu', 'cuda', or
+             torch.device instances. If None, automatically selects the best available device. Defaults to None.
+
+     Returns:
+         torch.nn.Module: The YOLOv5-small model configured and loaded according to the specified parameters.
+
+     Example:
+         ```python
+         import torch
+
+         # Load the official YOLOv5-small model with pretrained weights
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
+
+         # Load the YOLOv5-small model from a specific branch
+         model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s')
+
+         # Load a custom YOLOv5-small model from a local checkpoint
+         model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt')
+
+         # Load a local YOLOv5-small model specifying source as local repository
+         model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local')
+         ```
+
+     Notes:
+         For more details on model loading and customization, visit
+         the [YOLOv5 PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5).
+     """
+     return _create("yolov5s", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Instantiates the YOLOv5-medium model with customizable pretraining, channel count, class count, autoshaping,
+     verbosity, and device.
+
+     Args:
+         pretrained (bool, optional): Whether to load pretrained weights into the model. Default is True.
+         channels (int, optional): Number of input channels. Default is 3.
+         classes (int, optional): Number of model classes. Default is 80.
+         autoshape (bool, optional): Apply YOLOv5 .autoshape() wrapper to the model for handling various input formats.
+             Default is True.
+         _verbose (bool, optional): Whether to print detailed information to the screen. Default is True.
+         device (str | torch.device | None, optional): Device specification to use for model parameters (e.g., 'cpu',
+             'cuda'). Default is None.
+
+     Returns:
+         torch.nn.Module: The instantiated YOLOv5-medium model.
+
+     Usage Example:
+         ```python
+         import torch
+
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5m')  # Load YOLOv5-medium from Ultralytics repository
+         model = torch.hub.load('ultralytics/yolov5:master', 'yolov5m')  # Load from the master branch
+         model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5m.pt')  # Load a custom/local YOLOv5-medium model
+         model = torch.hub.load('.', 'custom', 'yolov5m.pt', source='local')  # Load from a local repository
+         ```
+
+     For more information, visit https://pytorch.org/hub/ultralytics_yolov5.
+     """
+     return _create("yolov5m", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Creates YOLOv5-large model with options for pretraining, channels, classes, autoshaping, verbosity, and device
+     selection.
+
+     Args:
+         pretrained (bool): Load pretrained weights into the model. Default is True.
+         channels (int): Number of input channels. Default is 3.
+         classes (int): Number of model classes. Default is 80.
+         autoshape (bool): Apply YOLOv5 .autoshape() wrapper to model. Default is True.
+         _verbose (bool): Print all information to screen. Default is True.
+         device (str | torch.device | None): Device to use for model parameters, e.g., 'cpu', 'cuda', or a torch.device
+             instance. Default is None.
+
+     Returns:
+         YOLOv5 model (torch.nn.Module): The YOLOv5-large model instantiated with specified configurations and possibly
+             pretrained weights.
+
+     Examples:
+         ```python
+         import torch
+
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5l')
+         ```
+
+     Notes:
+         For additional details, refer to the PyTorch Hub models documentation:
+         https://pytorch.org/hub/ultralytics_yolov5
+     """
+     return _create("yolov5l", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Instantiates the YOLOv5-xlarge model for object detection, with options for pretraining, input channels, class
+     count, autoshaping, verbosity, and device specification.
+
+     Args:
+         pretrained (bool): If True, loads pretrained weights into the model. Defaults to True.
+         channels (int): Number of input channels for the model. Defaults to 3.
+         classes (int): Number of model classes for object detection. Defaults to 80.
+         autoshape (bool): If True, applies the YOLOv5 .autoshape() wrapper for handling different input formats.
+             Defaults to True.
+         _verbose (bool): If True, prints detailed information during model loading. Defaults to True.
+         device (str | torch.device | None): Device specification for computing the model, e.g., 'cpu', 'cuda:0',
+             torch.device('cuda'). Defaults to None.
+
+     Returns:
+         torch.nn.Module: The YOLOv5-xlarge model loaded with the specified parameters, optionally with pretrained
+             weights and autoshaping applied.
+
+     Example:
+         ```python
+         import torch
+
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5x')
+         ```
+
+     For additional details, refer to the official YOLOv5 PyTorch Hub models documentation:
+     https://pytorch.org/hub/ultralytics_yolov5
+     """
+     return _create("yolov5x", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Creates YOLOv5-nano-P6 model with options for pretraining, channels, classes, autoshaping, verbosity, and device.
+
+     Args:
+         pretrained (bool, optional): If True, loads pretrained weights into the model. Default is True.
+         channels (int, optional): Number of input channels. Default is 3.
+         classes (int, optional): Number of model classes. Default is 80.
+         autoshape (bool, optional): If True, applies the YOLOv5 .autoshape() wrapper to the model. Default is True.
+         _verbose (bool, optional): If True, prints all information to screen. Default is True.
+         device (str | torch.device | None, optional): Device to use for model parameters. Can be 'cpu', 'cuda', or
+             None. Default is None.
+
+     Returns:
+         torch.nn.Module: YOLOv5-nano-P6 model loaded with the specified configurations.
+
+     Example:
+         ```python
+         import torch
+
+         model = yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device='cuda')
+         ```
+
+     Notes:
+         For more information on PyTorch Hub models, visit: https://pytorch.org/hub/ultralytics_yolov5
+     """
+     return _create("yolov5n6", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Instantiate the YOLOv5-small-P6 model with options for pretraining, input channels, number of classes, autoshaping,
+     verbosity, and device selection.
+
+     Args:
+         pretrained (bool): If True, loads pretrained weights. Default is True.
+         channels (int): Number of input channels. Default is 3.
+         classes (int): Number of object detection classes. Default is 80.
+         autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model, allowing for varied input formats.
+             Default is True.
+         _verbose (bool): If True, prints detailed information during model loading. Default is True.
+         device (str | torch.device | None): Device specification for model parameters (e.g., 'cpu', 'cuda', or
+             torch.device). Default is None, which selects an available device automatically.
+
+     Returns:
+         torch.nn.Module: The YOLOv5-small-P6 model instance.
+
+     Usage:
+         ```python
+         import torch
+
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5s6')
+         model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s6')  # load from a specific branch
+         model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5s6.pt')  # custom/local model
+         model = torch.hub.load('.', 'custom', 'path/to/yolov5s6.pt', source='local')  # local repo model
+         ```
+
+     Notes:
+         - For more information, refer to the PyTorch Hub models documentation at
+           https://pytorch.org/hub/ultralytics_yolov5
+
+     Raises:
+         Exception: If there is an error during model creation or loading, with a suggestion to visit the YOLOv5
+             tutorials for help.
+     """
+     return _create("yolov5s6", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Create YOLOv5-medium-P6 model with options for pretraining, channel count, class count, autoshaping, verbosity,
+     and device.
+
+     Args:
+         pretrained (bool): If True, loads pretrained weights. Default is True.
+         channels (int): Number of input channels. Default is 3.
+         classes (int): Number of model classes. Default is 80.
+         autoshape (bool): Apply YOLOv5 .autoshape() wrapper to the model for file/URI/PIL/cv2/np inputs and NMS.
+             Default is True.
+         _verbose (bool): If True, prints detailed information to the screen. Default is True.
+         device (str | torch.device | None): Device to use for model parameters. Default is None, which uses the best
+             available device.
+
+     Returns:
+         torch.nn.Module: The YOLOv5-medium-P6 model.
+
+     Refer to the PyTorch Hub models documentation for additional details: https://pytorch.org/hub/ultralytics_yolov5
+
+     Example:
+         ```python
+         import torch
+
+         # Load YOLOv5-medium-P6 model
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5m6')
+         ```
+
+     Notes:
+         - The model can be loaded with pre-trained weights for better performance on specific tasks.
+         - The autoshape feature simplifies input handling by allowing various popular data formats.
+     """
+     return _create("yolov5m6", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Instantiate the YOLOv5-large-P6 model with options for pretraining, channel and class counts, autoshaping,
+     verbosity, and device selection.
+
+     Args:
+         pretrained (bool, optional): If True, load pretrained weights into the model. Default is True.
+         channels (int, optional): Number of input channels. Default is 3.
+         classes (int, optional): Number of model classes. Default is 80.
+         autoshape (bool, optional): If True, apply YOLOv5 .autoshape() wrapper to the model for input flexibility.
+             Default is True.
+         _verbose (bool, optional): If True, print all information to the screen. Default is True.
+         device (str | torch.device | None, optional): Device to use for model parameters, e.g., 'cpu', 'cuda', or
+             torch.device. If None, automatically selects the best available device. Default is None.
+
+     Returns:
+         torch.nn.Module: The instantiated YOLOv5-large-P6 model.
+
+     Example:
+         ```python
+         import torch
+
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5l6')  # official model
+         model = torch.hub.load('ultralytics/yolov5:master', 'yolov5l6')  # from specific branch
+         model = torch.hub.load('ultralytics/yolov5', 'custom', 'path/to/yolov5l6.pt')  # custom/local model
+         model = torch.hub.load('.', 'custom', 'path/to/yolov5l6.pt', source='local')  # local repository
+         ```
+
+     Note:
+         Refer to [PyTorch Hub Documentation](https://pytorch.org/hub/ultralytics_yolov5) for additional usage
+         instructions.
+     """
+     return _create("yolov5l6", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None):
+     """
+     Creates the YOLOv5-xlarge-P6 model with options for pretraining, number of input channels, class count,
+     autoshaping, verbosity, and device selection.
+
+     Args:
+         pretrained (bool): If True, loads pretrained weights into the model. Default is True.
+         channels (int): Number of input channels. Default is 3.
+         classes (int): Number of model classes. Default is 80.
+         autoshape (bool): If True, applies YOLOv5 .autoshape() wrapper to the model. Default is True.
+         _verbose (bool): If True, prints all information to the screen. Default is True.
+         device (str | torch.device | None): Device to use for model parameters, can be a string, torch.device object,
+             or None for default device selection. Default is None.
+
+     Returns:
+         torch.nn.Module: The instantiated YOLOv5-xlarge-P6 model.
+
+     Example:
+         ```python
+         import torch
+
+         model = torch.hub.load('ultralytics/yolov5', 'yolov5x6')  # load the YOLOv5-xlarge-P6 model
+         ```
+
+     Note:
+         For more information on YOLOv5 models, visit the official documentation:
+         https://docs.ultralytics.com/yolov5
+     """
+     return _create("yolov5x6", pretrained, channels, classes, autoshape, _verbose, device)
+
+
+ if __name__ == "__main__":
477
+ import argparse
478
+ from pathlib import Path
479
+
480
+ import numpy as np
481
+ from PIL import Image
482
+
483
+ from utils.general import cv2, print_args
484
+
485
+ # Argparser
486
+ parser = argparse.ArgumentParser()
487
+ parser.add_argument("--model", type=str, default="yolov5s", help="model name")
488
+ opt = parser.parse_args()
489
+ print_args(vars(opt))
490
+
491
+ # Model
492
+ model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True)
493
+ # model = custom(path='path/to/model.pt') # custom
494
+
495
+ # Images
496
+ imgs = [
497
+ "data/images/zidane.jpg", # filename
498
+ Path("data/images/zidane.jpg"), # Path
499
+ "https://ultralytics.com/images/zidane.jpg", # URI
500
+ cv2.imread("data/images/bus.jpg")[:, :, ::-1], # OpenCV
501
+ Image.open("data/images/bus.jpg"), # PIL
502
+ np.zeros((320, 640, 3)),
503
+ ] # numpy
504
+
505
+ # Inference
506
+ results = model(imgs, size=320) # batched inference
507
+
508
+ # Results
509
+ results.print()
510
+ results.save()
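The `__main__` smoke test above exercises the AutoShape pipeline end to end. As a minimal sketch of how the returned `Detections` object is typically consumed (assuming the standard Ultralytics hub entry point; the image URL and the pandas accessor are illustrative, not part of this diff):

```python
import torch

# Load the small P6 variant defined above via PyTorch Hub
model = torch.hub.load("ultralytics/yolov5", "yolov5s6", pretrained=True)

# AutoShape accepts filenames, URLs, PIL images, and OpenCV/numpy arrays
results = model("https://ultralytics.com/images/zidane.jpg", size=640)

results.print()                # per-image summary to stdout
df = results.pandas().xyxy[0]  # image 0 detections: xmin, ymin, xmax, ymax, confidence, class, name
print(df.head())
```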
iteach_toolkit/DHYOLO/model.py ADDED
@@ -0,0 +1,174 @@
+ from .detect import run as run_detection
+ import torch
+ import cv2
+ import logging
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ class DHYOLODetector:
+     """
+     A class to encapsulate the YOLO model prediction for object detection.
+
+     Attributes:
+         model_path (str): Path to the trained YOLO model weights.
+
+     Methods:
+         predict(image_path, conf_thres, iou_thres, max_det):
+             Runs object detection on the given image using the YOLO model.
+             Logs errors in case of failure and handles exceptions.
+         plot_bboxes():
+             Plots bounding boxes on the input image based on YOLO predictions and returns the modified image.
+     """
+
+     def __init__(self, model_path):
+         """
+         Initializes DHYOLODetector with the path to the YOLO model weights.
+
+         Args:
+             model_path (str): Path to the YOLO model weights.
+         """
+         self.model_path = model_path
+         self.image = None
+         self.preds = None
+
+     def predict(self, image_path, conf_thres=0.25, iou_thres=0.45, max_det=1000):
+         """
+         Runs object detection on the provided image using the YOLO model.
+
+         Args:
+             image_path (str): Path to a local image file (cv2.imread cannot fetch web URLs).
+             conf_thres (float): Confidence threshold for YOLO detections (default is 0.25).
+             iou_thres (float): IOU threshold for non-max suppression (default is 0.45).
+             max_det (int): Maximum number of detections per image (default is 1000).
+
+         Returns:
+             tuple: A tuple containing:
+                 - numpy.ndarray: The image read as a NumPy array (RGB).
+                 - dict: A dictionary with the bounding boxes in xyxy format, confidence scores, and class labels:
+                     - 'boxes': List of bounding boxes in xyxy format.
+                     - 'confidences': List of confidence scores for each detection.
+                     - 'class_labels': List of class labels corresponding to each detection.
+
+         Raises:
+             FileNotFoundError: If the image file does not exist or cannot be opened.
+             Exception: For any general errors during prediction.
+         """
+         try:
+             logger.info(f"Starting prediction for image: {image_path} with model: {self.model_path}")
+
+             self.image = cv2.imread(image_path)  # Read the image as a NumPy array (BGR)
+             if self.image is None:  # Check before converting; cv2.cvtColor would fail on None
+                 raise FileNotFoundError(f"Image file not found or cannot be opened: {image_path}")
+             self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)  # Convert BGR to RGB
+
+             # Run YOLO detection with custom parameters
+             result = run_detection(
+                 weights=self.model_path,
+                 source=image_path,
+                 conf_thres=conf_thres,  # Confidence threshold
+                 iou_thres=iou_thres,  # IOU threshold
+                 max_det=max_det,  # Maximum detections
+             )
+
+             self.preds = result[0]  # We are only running for one image
+
+             # Prepare predictions as a dictionary
+             detections_dict = {
+                 'boxes': [],  # List of bounding boxes in xyxy format
+                 'confidences': [],  # List of confidence scores
+                 'class_labels': [],  # List of class labels
+             }
+
+             for detection in self.preds:
+                 if isinstance(detection, torch.Tensor):
+                     detection = detection.cpu().numpy()
+
+                 # Extract details
+                 x1, y1, x2, y2, conf, cls = detection[:6]
+                 detections_dict['boxes'].append([float(x1), float(y1), float(x2), float(y2)])  # xyxy format
+                 detections_dict['confidences'].append(float(conf))
+                 detections_dict['class_labels'].append(int(cls))
+
+             logger.info(f"Prediction completed successfully for image: {image_path}")
+             return self.image, detections_dict  # Return image and detections as a tuple
+
+         except FileNotFoundError as e:
+             logger.error(f"Image file not found: {image_path}. Exception: {e}")
+             raise
+
+         except Exception as e:
+             logger.error(f"An error occurred during prediction. Exception: {e}")
+             raise
+
+     def plot_bboxes(self, attach_watermark=False):
+         """
+         Plots bounding boxes on the input image based on YOLO predictions and returns the modified image.
+         Must be called after predict(), since it draws on the most recently loaded image.
+
+         Args:
+             attach_watermark (bool): Whether to attach a watermark text to the image (default is False).
+
+         Returns:
+             tuple: The original image and the image with bounding boxes plotted.
+         """
+         class_labels = {0: "door", 1: "handle"}
+         class_colors = {
+             0: (255, 0, 0),  # Red in RGB format for doors
+             1: (255, 255, 0),  # Yellow in RGB format for handles
+         }
+
+         bbox_img = self.image.copy()  # Create a copy of the original image
+
+         # Check if there are predictions
+         if self.preds is None or len(self.preds) == 0:
+             logger.warning("No predictions to display.")
+             return bbox_img, bbox_img  # Return the original image if no predictions
+
+         # Iterate through detections and plot each bounding box
+         for detection in self.preds:
+             if isinstance(detection, torch.Tensor):
+                 detection = detection.cpu().numpy()
+
+             conf = detection[4]
+             x1, y1, x2, y2, _, cls = detection[:6].astype(float)  # Ensure float for bounding box coordinates
+             label = class_labels[int(cls)]
+
+             # Draw the rectangle on the bbox_img
+             cv2.rectangle(bbox_img, (int(x1), int(y1)), (int(x2), int(y2)), class_colors[int(cls)], 2)
+
+             # Prepare text with confidence score
+             text = f'{label} ({conf:.2f})'  # Include confidence score in the text
+             text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_DUPLEX, 0.5, 1)[0]
+
+             # Set text position directly above the bounding box
+             text_x = int(x1)
+             text_y = int(y1) - 2  # Adjust for a slight overlap with the bounding box
+
+             # Set text color based on class
+             text_color = (0, 0, 0) if cls == 1 else (255, 255, 255)  # Black for handle, white for door
+
+             # Draw a background rectangle for the text
+             cv2.rectangle(bbox_img, (text_x, text_y - text_size[1] - 2), (text_x + text_size[0], text_y), class_colors[int(cls)], cv2.FILLED)
+
+             # Put the label text on the bbox_img
+             cv2.putText(bbox_img, text, (text_x, text_y - 2), cv2.FONT_HERSHEY_DUPLEX, 0.5, text_color, 1, cv2.LINE_AA)
+
+         # Attach watermark if specified
+         if attach_watermark:
+             watermark_text = "Predictions by DH-YOLO"
+             watermark_color = (200, 200, 200)  # Greyish color for the watermark
+             watermark_scale = 0.4  # Reduced scale for the watermark text
+             watermark_thickness = 1  # Thin stroke for the watermark text
+
+             # Get the text size for positioning
+             text_size = cv2.getTextSize(watermark_text, cv2.FONT_HERSHEY_DUPLEX, watermark_scale, watermark_thickness)[0]
+             text_x = bbox_img.shape[1] - text_size[0] - 10  # 10 pixels from the right
+             text_y = bbox_img.shape[0] - 10  # 10 pixels from the bottom
+
+             # Put the watermark text on the bbox_img
+             cv2.putText(bbox_img, watermark_text, (text_x, text_y), cv2.FONT_HERSHEY_DUPLEX, watermark_scale, watermark_color, watermark_thickness, cv2.LINE_AA)
+
+         return self.image, bbox_img  # Return the original image and the image with bounding boxes
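A short usage sketch for the `DHYOLODetector` class added above (the weights path and filenames are placeholders; `predict` and `plot_bboxes` return RGB arrays, so convert back to BGR before saving with OpenCV):

```python
import cv2
from iteach_toolkit.DHYOLO.model import DHYOLODetector

detector = DHYOLODetector("path/to/dhyolo_weights.pt")  # placeholder weights path

# Run detection on a local image: returns the RGB image and a dict of boxes/confidences/labels
image, detections = detector.predict("path/to/door_scene.jpg", conf_thres=0.25)
print(detections["boxes"], detections["class_labels"])

# Draw the predictions (optionally watermarked) and save; cv2.imwrite expects BGR
_, bbox_img = detector.plot_bboxes(attach_watermark=True)
cv2.imwrite("prediction.jpg", cv2.cvtColor(bbox_img, cv2.COLOR_RGB2BGR))
```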
iteach_toolkit/DHYOLO/models/__init__.py ADDED
File without changes
iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (154 Bytes). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (162 Bytes). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (161 Bytes). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-38.pyc ADDED
Binary file (198 Bytes). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (148 Bytes). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-310.pyc ADDED
Binary file (37.2 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-311.pyc ADDED
Binary file (79.5 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-312.pyc ADDED
Binary file (66.3 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-38.pyc ADDED
Binary file (37.8 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/common.cpython-39.pyc ADDED
Binary file (37.8 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-310.pyc ADDED
Binary file (4.85 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-311.pyc ADDED
Binary file (9.72 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-312.pyc ADDED
Binary file (7.82 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-38.pyc ADDED
Binary file (4.91 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/experimental.cpython-39.pyc ADDED
Binary file (4.82 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-311.pyc ADDED
Binary file (34.1 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-312.pyc ADDED
Binary file (29.7 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-38.pyc ADDED
Binary file (16.2 kB). View file
 
iteach_toolkit/DHYOLO/models/__pycache__/yolo.cpython-39.pyc ADDED
Binary file (16.1 kB). View file