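"""YuNet face detection demo.

Example invocations (the script file name `demo.py` is an assumption; substitute your local name):
    python demo.py --input /path/to/image.jpg --save --vis   # detect faces in an image
    python demo.py                                           # detect faces from the default camera
"""
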
import argparse

import numpy as np
import cv2 as cv

opencv_python_version = lambda str_version: tuple(map(int, (str_version.split("."))))
assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \
    "Please install latest opencv-python for benchmark: python3 -m pip install --upgrade opencv-python"

from yunet import YuNet

# Valid combinations of backends and targets
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA],
    [cv.dnn.DNN_BACKEND_CUDA,   cv.dnn.DNN_TARGET_CUDA_FP16],
    [cv.dnn.DNN_BACKEND_TIMVX,  cv.dnn.DNN_TARGET_NPU],
    [cv.dnn.DNN_BACKEND_CANN,   cv.dnn.DNN_TARGET_NPU]
]
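
# The --backend_target option defined below indexes into this list; e.g. index 1 selects
# cv.dnn.DNN_BACKEND_CUDA with cv.dnn.DNN_TARGET_CUDA, which requires an OpenCV build with CUDA support.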
|
|
parser = argparse.ArgumentParser(description='YuNet: A Fast and Accurate CNN-based Face Detector (https://github.com/ShiqiYu/libfacedetection).')
parser.add_argument('--input', '-i', type=str,
                    help='Usage: Set input to a certain image; omit to use the default camera.')
parser.add_argument('--model', '-m', type=str, default='face_detection_yunet_2023mar.onnx',
                    help="Usage: Set the path to the model, defaults to 'face_detection_yunet_2023mar.onnx'.")
parser.add_argument('--backend_target', '-bt', type=int, default=0,
                    help='''Choose one of the backend-target pairs to run this demo:
                        {:d}: (default) OpenCV implementation + CPU,
                        {:d}: CUDA + GPU (CUDA),
                        {:d}: CUDA + GPU (CUDA FP16),
                        {:d}: TIM-VX + NPU,
                        {:d}: CANN + NPU
                    '''.format(*range(len(backend_target_pairs))))
parser.add_argument('--conf_threshold', type=float, default=0.9,
                    help='Usage: Set the minimum confidence for the model to identify a face, defaults to 0.9. Faces with confidence < conf_threshold are filtered out; lower values keep more detections but admit more false positives.')
parser.add_argument('--nms_threshold', type=float, default=0.3,
                    help='Usage: Suppress bounding boxes with IoU >= nms_threshold, defaults to 0.3.')
parser.add_argument('--top_k', type=int, default=5000,
                    help='Usage: Keep top_k bounding boxes before NMS.')
parser.add_argument('--save', '-s', action='store_true',
                    help='Usage: Specify to save the image with results (i.e. bounding boxes, confidence scores) drawn on it as result.jpg. Invalid in case of camera input.')
parser.add_argument('--vis', '-v', action='store_true',
                    help='Usage: Specify to open a new window to show results. Invalid in case of camera input.')
args = parser.parse_args()
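
# Note: --save and --vis only take effect for image input; camera input always opens a display window.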
|
|
def visualize(image, results, box_color=(0, 255, 0), text_color=(0, 0, 255), fps=None):
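    """Draw detection results on a copy of `image` and return it.

    For each detection `det` in `results`, det[0:4] is the [x, y, w, h] face
    bounding box, det[4:14] holds five (x, y) landmark points, and det[-1] is
    the confidence score. If `fps` is given, it is drawn in the top-left corner.
    """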
    output = image.copy()
    landmark_color = [
        (255,   0,   0),  # right eye
        (  0,   0, 255),  # left eye
        (  0, 255,   0),  # nose tip
        (255,   0, 255),  # right corner of mouth
        (  0, 255, 255)   # left corner of mouth
    ]

    if fps is not None:
        cv.putText(output, 'FPS: {:.2f}'.format(fps), (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, text_color)

    for det in results:
        # Draw the bounding box
        bbox = det[0:4].astype(np.int32)
        cv.rectangle(output, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox[3]), box_color, 2)

        # Put the confidence score next to the box
        conf = det[-1]
        cv.putText(output, '{:.4f}'.format(conf), (bbox[0], bbox[1] + 12), cv.FONT_HERSHEY_DUPLEX, 0.5, text_color)

        # Draw the five facial landmarks
        landmarks = det[4:14].astype(np.int32).reshape((5, 2))
        for idx, landmark in enumerate(landmarks):
            cv.circle(output, landmark, 2, landmark_color[idx], 2)

    return output
|
|
if __name__ == '__main__':
    backend_id = backend_target_pairs[args.backend_target][0]
    target_id = backend_target_pairs[args.backend_target][1]

    # Instantiate YuNet
    model = YuNet(modelPath=args.model,
                  inputSize=[320, 320],
                  confThreshold=args.conf_threshold,
                  nmsThreshold=args.nms_threshold,
                  topK=args.top_k,
                  backendId=backend_id,
                  targetId=target_id)

    # If input is an image
    if args.input is not None:
        image = cv.imread(args.input)
        h, w, _ = image.shape

        # Inference
        model.setInputSize([w, h])
        results = model.infer(image)

        # Print results
        print('{} faces detected.'.format(results.shape[0]))
        for idx, det in enumerate(results):
            print('{}: {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f} {:.0f}'.format(
                idx, *det[:-1])
            )

        # Draw results on the input image
        image = visualize(image, results)

        # Save results if --save is specified
        if args.save:
            print('Results saved to result.jpg\n')
            cv.imwrite('result.jpg', image)

        # Show results in a new window if --vis is specified
        if args.vis:
            cv.namedWindow(args.input, cv.WINDOW_AUTOSIZE)
            cv.imshow(args.input, image)
            cv.waitKey(0)
    else:  # Omit --input to call the default camera
        deviceId = 0
        cap = cv.VideoCapture(deviceId)
        w = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
        model.setInputSize([w, h])
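
        # Note: if the camera cannot be opened, cap.read() below returns (False, None),
        # so the loop exits right away with 'No frames grabbed!'.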
|
|
        tm = cv.TickMeter()
        while cv.waitKey(1) < 0:
            hasFrame, frame = cap.read()
            if not hasFrame:
                print('No frames grabbed!')
                break

            # Inference
            tm.start()
            results = model.infer(frame)
            tm.stop()

            # Draw results on the frame
            frame = visualize(frame, results, fps=tm.getFPS())

            # Visualize results in a new window
            cv.imshow('YuNet Demo', frame)

            tm.reset()
|
|