Upload 2 files
Browse files- cv2/Error/__init__.pyi +116 -0
- cv2/dnn/__init__.pyi +503 -0
cv2/Error/__init__.pyi
ADDED
|
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Enumerations
# Error status codes of the OpenCV C++ API (cv::Error::Code), re-exported as
# plain module-level ints.  Each code appears twice: the original C++
# CamelCase name and its generated UPPER_SNAKE_CASE alias (same value).
StsOk: int
STS_OK: int
StsBackTrace: int
STS_BACK_TRACE: int
StsError: int
STS_ERROR: int
StsInternal: int
STS_INTERNAL: int
StsNoMem: int
STS_NO_MEM: int
StsBadArg: int
STS_BAD_ARG: int
StsBadFunc: int
STS_BAD_FUNC: int
StsNoConv: int
STS_NO_CONV: int
StsAutoTrace: int
STS_AUTO_TRACE: int
HeaderIsNull: int
HEADER_IS_NULL: int
BadImageSize: int
BAD_IMAGE_SIZE: int
BadOffset: int
BAD_OFFSET: int
BadDataPtr: int
BAD_DATA_PTR: int
BadStep: int
BAD_STEP: int
BadModelOrChSeq: int
BAD_MODEL_OR_CH_SEQ: int
BadNumChannels: int
BAD_NUM_CHANNELS: int
BadNumChannel1U: int
BAD_NUM_CHANNEL1U: int
BadDepth: int
BAD_DEPTH: int
BadAlphaChannel: int
BAD_ALPHA_CHANNEL: int
BadOrder: int
BAD_ORDER: int
BadOrigin: int
BAD_ORIGIN: int
BadAlign: int
BAD_ALIGN: int
BadCallBack: int
BAD_CALL_BACK: int
BadTileSize: int
BAD_TILE_SIZE: int
BadCOI: int
BAD_COI: int
BadROISize: int
BAD_ROISIZE: int
MaskIsTiled: int
MASK_IS_TILED: int
StsNullPtr: int
STS_NULL_PTR: int
StsVecLengthErr: int
STS_VEC_LENGTH_ERR: int
StsFilterStructContentErr: int
STS_FILTER_STRUCT_CONTENT_ERR: int
StsKernelStructContentErr: int
STS_KERNEL_STRUCT_CONTENT_ERR: int
StsFilterOffsetErr: int
STS_FILTER_OFFSET_ERR: int
StsBadSize: int
STS_BAD_SIZE: int
StsDivByZero: int
STS_DIV_BY_ZERO: int
StsInplaceNotSupported: int
STS_INPLACE_NOT_SUPPORTED: int
StsObjectNotFound: int
STS_OBJECT_NOT_FOUND: int
StsUnmatchedFormats: int
STS_UNMATCHED_FORMATS: int
StsBadFlag: int
STS_BAD_FLAG: int
StsBadPoint: int
STS_BAD_POINT: int
StsBadMask: int
STS_BAD_MASK: int
StsUnmatchedSizes: int
STS_UNMATCHED_SIZES: int
StsUnsupportedFormat: int
STS_UNSUPPORTED_FORMAT: int
StsOutOfRange: int
STS_OUT_OF_RANGE: int
StsParseError: int
STS_PARSE_ERROR: int
StsNotImplemented: int
STS_NOT_IMPLEMENTED: int
StsBadMemBlock: int
STS_BAD_MEM_BLOCK: int
StsAssert: int
STS_ASSERT: int
GpuNotSupported: int
GPU_NOT_SUPPORTED: int
GpuApiCallError: int
GPU_API_CALL_ERROR: int
OpenGlNotSupported: int
OPEN_GL_NOT_SUPPORTED: int
OpenGlApiCallError: int
OPEN_GL_API_CALL_ERROR: int
OpenCLApiCallError: int
OPEN_CLAPI_CALL_ERROR: int
OpenCLDoubleNotSupported: int
OPEN_CLDOUBLE_NOT_SUPPORTED: int
OpenCLInitError: int
OPEN_CLINIT_ERROR: int
OpenCLNoAMDBlasFft: int
OPEN_CLNO_AMDBLAS_FFT: int
# Type alias: an error code is just an int; the docstring below enumerates
# the valid constants for type-checker/IDE discoverability.
Code = int
"""One of [StsOk, STS_OK, StsBackTrace, STS_BACK_TRACE, StsError, STS_ERROR, StsInternal, STS_INTERNAL, StsNoMem, STS_NO_MEM, StsBadArg, STS_BAD_ARG, StsBadFunc, STS_BAD_FUNC, StsNoConv, STS_NO_CONV, StsAutoTrace, STS_AUTO_TRACE, HeaderIsNull, HEADER_IS_NULL, BadImageSize, BAD_IMAGE_SIZE, BadOffset, BAD_OFFSET, BadDataPtr, BAD_DATA_PTR, BadStep, BAD_STEP, BadModelOrChSeq, BAD_MODEL_OR_CH_SEQ, BadNumChannels, BAD_NUM_CHANNELS, BadNumChannel1U, BAD_NUM_CHANNEL1U, BadDepth, BAD_DEPTH, BadAlphaChannel, BAD_ALPHA_CHANNEL, BadOrder, BAD_ORDER, BadOrigin, BAD_ORIGIN, BadAlign, BAD_ALIGN, BadCallBack, BAD_CALL_BACK, BadTileSize, BAD_TILE_SIZE, BadCOI, BAD_COI, BadROISize, BAD_ROISIZE, MaskIsTiled, MASK_IS_TILED, StsNullPtr, STS_NULL_PTR, StsVecLengthErr, STS_VEC_LENGTH_ERR, StsFilterStructContentErr, STS_FILTER_STRUCT_CONTENT_ERR, StsKernelStructContentErr, STS_KERNEL_STRUCT_CONTENT_ERR, StsFilterOffsetErr, STS_FILTER_OFFSET_ERR, StsBadSize, STS_BAD_SIZE, StsDivByZero, STS_DIV_BY_ZERO, StsInplaceNotSupported, STS_INPLACE_NOT_SUPPORTED, StsObjectNotFound, STS_OBJECT_NOT_FOUND, StsUnmatchedFormats, STS_UNMATCHED_FORMATS, StsBadFlag, STS_BAD_FLAG, StsBadPoint, STS_BAD_POINT, StsBadMask, STS_BAD_MASK, StsUnmatchedSizes, STS_UNMATCHED_SIZES, StsUnsupportedFormat, STS_UNSUPPORTED_FORMAT, StsOutOfRange, STS_OUT_OF_RANGE, StsParseError, STS_PARSE_ERROR, StsNotImplemented, STS_NOT_IMPLEMENTED, StsBadMemBlock, STS_BAD_MEM_BLOCK, StsAssert, STS_ASSERT, GpuNotSupported, GPU_NOT_SUPPORTED, GpuApiCallError, GPU_API_CALL_ERROR, OpenGlNotSupported, OPEN_GL_NOT_SUPPORTED, OpenGlApiCallError, OPEN_GL_API_CALL_ERROR, OpenCLApiCallError, OPEN_CLAPI_CALL_ERROR, OpenCLDoubleNotSupported, OPEN_CLDOUBLE_NOT_SUPPORTED, OpenCLInitError, OPEN_CLINIT_ERROR, OpenCLNoAMDBlasFft, OPEN_CLNO_AMDBLAS_FFT]"""
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
|
cv2/dnn/__init__.pyi
ADDED
|
@@ -0,0 +1,503 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
import cv2.typing
import numpy
import typing


# Enumerations
# Computation backends for cv2.dnn (cv::dnn::Backend).
DNN_BACKEND_DEFAULT: int
DNN_BACKEND_HALIDE: int
DNN_BACKEND_INFERENCE_ENGINE: int
DNN_BACKEND_OPENCV: int
DNN_BACKEND_VKCOM: int
DNN_BACKEND_CUDA: int
DNN_BACKEND_WEBNN: int
DNN_BACKEND_TIMVX: int
DNN_BACKEND_CANN: int
Backend = int
"""One of [DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE, DNN_BACKEND_OPENCV, DNN_BACKEND_VKCOM, DNN_BACKEND_CUDA, DNN_BACKEND_WEBNN, DNN_BACKEND_TIMVX, DNN_BACKEND_CANN]"""

# Target devices a network may run on (cv::dnn::Target).
DNN_TARGET_CPU: int
DNN_TARGET_OPENCL: int
DNN_TARGET_OPENCL_FP16: int
DNN_TARGET_MYRIAD: int
DNN_TARGET_VULKAN: int
DNN_TARGET_FPGA: int
DNN_TARGET_CUDA: int
DNN_TARGET_CUDA_FP16: int
DNN_TARGET_HDDL: int
DNN_TARGET_NPU: int
DNN_TARGET_CPU_FP16: int
Target = int
"""One of [DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16, DNN_TARGET_MYRIAD, DNN_TARGET_VULKAN, DNN_TARGET_FPGA, DNN_TARGET_CUDA, DNN_TARGET_CUDA_FP16, DNN_TARGET_HDDL, DNN_TARGET_NPU, DNN_TARGET_CPU_FP16]"""

# Tensor data layouts (cv::dnn::DataLayout).
DNN_LAYOUT_UNKNOWN: int
DNN_LAYOUT_ND: int
DNN_LAYOUT_NCHW: int
DNN_LAYOUT_NCDHW: int
DNN_LAYOUT_NHWC: int
DNN_LAYOUT_NDHWC: int
DNN_LAYOUT_PLANAR: int
DataLayout = int
"""One of [DNN_LAYOUT_UNKNOWN, DNN_LAYOUT_ND, DNN_LAYOUT_NCHW, DNN_LAYOUT_NCDHW, DNN_LAYOUT_NHWC, DNN_LAYOUT_NDHWC, DNN_LAYOUT_PLANAR]"""

# Padding modes used when an image is resized into a blob.
DNN_PMODE_NULL: int
DNN_PMODE_CROP_CENTER: int
DNN_PMODE_LETTERBOX: int
ImagePaddingMode = int
"""One of [DNN_PMODE_NULL, DNN_PMODE_CROP_CENTER, DNN_PMODE_LETTERBOX]"""

# Soft-NMS scoring methods (see softNMSBoxes below).
SoftNMSMethod_SOFTNMS_LINEAR: int
SOFT_NMSMETHOD_SOFTNMS_LINEAR: int
SoftNMSMethod_SOFTNMS_GAUSSIAN: int
SOFT_NMSMETHOD_SOFTNMS_GAUSSIAN: int
SoftNMSMethod = int
"""One of [SoftNMSMethod_SOFTNMS_LINEAR, SOFT_NMSMETHOD_SOFTNMS_LINEAR, SoftNMSMethod_SOFTNMS_GAUSSIAN, SOFT_NMSMETHOD_SOFTNMS_GAUSSIAN]"""
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# Classes
|
| 60 |
+
class DictValue:
    # Tagged scalar container holding an int, a float ("real") or a string;
    # the is*() predicates report which variant is stored and the
    # get*Value() accessors retrieve it (idx selects an element when the
    # underlying value is a list — presumably; confirm against cv::dnn::DictValue).

    # Functions
    @typing.overload
    def __init__(self, i: int) -> None: ...
    @typing.overload
    def __init__(self, p: float) -> None: ...
    @typing.overload
    def __init__(self, s: str) -> None: ...

    def isInt(self) -> bool: ...

    def isString(self) -> bool: ...

    def isReal(self) -> bool: ...

    def getIntValue(self, idx: int = ...) -> int: ...

    def getRealValue(self, idx: int = ...) -> float: ...

    def getStringValue(self, idx: int = ...) -> str: ...
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class Net:
    # Stub for cv2.dnn.Net: a loaded neural network.  Typical flow:
    # setInput(...) then forward(...); backend/target selection via
    # setPreferableBackend/setPreferableTarget.

    # Functions
    def __init__(self) -> None: ...

    # Alternate constructors from an OpenVINO Model Optimizer IR
    # (file paths or in-memory uint8 buffers).
    @classmethod
    @typing.overload
    def readFromModelOptimizer(cls, xml: str, bin: str) -> Net: ...
    @classmethod
    @typing.overload
    def readFromModelOptimizer(cls, bufferModelConfig: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...

    def empty(self) -> bool: ...

    def dump(self) -> str: ...

    def dumpToFile(self, path: str) -> None: ...

    def getLayerId(self, layer: str) -> int: ...

    def getLayerNames(self) -> typing.Sequence[str]: ...

    # Layer lookup by numeric id, by name, or by a LayerId handle.
    @typing.overload
    def getLayer(self, layerId: int) -> Layer: ...
    @typing.overload
    def getLayer(self, layerName: str) -> Layer: ...
    @typing.overload
    def getLayer(self, layerId: cv2.typing.LayerId) -> Layer: ...

    def connect(self, outPin: str, inpPin: str) -> None: ...

    def setInputsNames(self, inputBlobNames: typing.Sequence[str]) -> None: ...

    def setInputShape(self, inputName: str, shape: cv2.typing.MatShape) -> None: ...

    # forward(): single output blob, all-outputs variants (Mat or UMat),
    # or outputs restricted to a list of blob names.
    @typing.overload
    def forward(self, outputName: str = ...) -> cv2.typing.MatLike: ...
    @typing.overload
    def forward(self, outputBlobs: typing.Sequence[cv2.typing.MatLike] | None = ..., outputName: str = ...) -> typing.Sequence[cv2.typing.MatLike]: ...
    @typing.overload
    def forward(self, outputBlobs: typing.Sequence[cv2.UMat] | None = ..., outputName: str = ...) -> typing.Sequence[cv2.UMat]: ...
    @typing.overload
    def forward(self, outBlobNames: typing.Sequence[str], outputBlobs: typing.Sequence[cv2.typing.MatLike] | None = ...) -> typing.Sequence[cv2.typing.MatLike]: ...
    @typing.overload
    def forward(self, outBlobNames: typing.Sequence[str], outputBlobs: typing.Sequence[cv2.UMat] | None = ...) -> typing.Sequence[cv2.UMat]: ...

    def forwardAsync(self, outputName: str = ...) -> cv2.AsyncArray: ...

    def forwardAndRetrieve(self, outBlobNames: typing.Sequence[str]) -> typing.Sequence[typing.Sequence[cv2.typing.MatLike]]: ...

    # Produce a quantized copy of the network calibrated on sample data.
    @typing.overload
    def quantize(self, calibData: typing.Sequence[cv2.typing.MatLike], inputsDtype: int, outputsDtype: int, perChannel: bool = ...) -> Net: ...
    @typing.overload
    def quantize(self, calibData: typing.Sequence[cv2.UMat], inputsDtype: int, outputsDtype: int, perChannel: bool = ...) -> Net: ...

    # Both return (scales, zeropoints) — presumably quantization params;
    # confirm against cv::dnn::Net documentation.
    def getInputDetails(self) -> tuple[typing.Sequence[float], typing.Sequence[int]]: ...

    def getOutputDetails(self) -> tuple[typing.Sequence[float], typing.Sequence[int]]: ...

    def setHalideScheduler(self, scheduler: str) -> None: ...

    def setPreferableBackend(self, backendId: int) -> None: ...

    def setPreferableTarget(self, targetId: int) -> None: ...

    @typing.overload
    def setInput(self, blob: cv2.typing.MatLike, name: str = ..., scalefactor: float = ..., mean: cv2.typing.Scalar = ...) -> None: ...
    @typing.overload
    def setInput(self, blob: cv2.UMat, name: str = ..., scalefactor: float = ..., mean: cv2.typing.Scalar = ...) -> None: ...

    # Read/write a layer's learned parameter blob, addressed by layer id or name.
    @typing.overload
    def setParam(self, layer: int, numParam: int, blob: cv2.typing.MatLike) -> None: ...
    @typing.overload
    def setParam(self, layerName: str, numParam: int, blob: cv2.typing.MatLike) -> None: ...

    @typing.overload
    def getParam(self, layer: int, numParam: int = ...) -> cv2.typing.MatLike: ...
    @typing.overload
    def getParam(self, layerName: str, numParam: int = ...) -> cv2.typing.MatLike: ...

    def getUnconnectedOutLayers(self) -> typing.Sequence[int]: ...

    def getUnconnectedOutLayersNames(self) -> typing.Sequence[str]: ...

    # Returns (layerIds, inLayersShapes, outLayersShapes) for the given input shape(s).
    @typing.overload
    def getLayersShapes(self, netInputShapes: typing.Sequence[cv2.typing.MatShape]) -> tuple[typing.Sequence[int], typing.Sequence[typing.Sequence[cv2.typing.MatShape]], typing.Sequence[typing.Sequence[cv2.typing.MatShape]]]: ...
    @typing.overload
    def getLayersShapes(self, netInputShape: cv2.typing.MatShape) -> tuple[typing.Sequence[int], typing.Sequence[typing.Sequence[cv2.typing.MatShape]], typing.Sequence[typing.Sequence[cv2.typing.MatShape]]]: ...

    # FLOP count for the whole net or for one layer, given input shape(s).
    @typing.overload
    def getFLOPS(self, netInputShapes: typing.Sequence[cv2.typing.MatShape]) -> int: ...
    @typing.overload
    def getFLOPS(self, netInputShape: cv2.typing.MatShape) -> int: ...
    @typing.overload
    def getFLOPS(self, layerId: int, netInputShapes: typing.Sequence[cv2.typing.MatShape]) -> int: ...
    @typing.overload
    def getFLOPS(self, layerId: int, netInputShape: cv2.typing.MatShape) -> int: ...

    def getLayerTypes(self) -> typing.Sequence[str]: ...

    def getLayersCount(self, layerType: str) -> int: ...

    # Returns (weights, blobs) memory estimate in bytes — presumably;
    # confirm against cv::dnn::Net::getMemoryConsumption.
    @typing.overload
    def getMemoryConsumption(self, netInputShape: cv2.typing.MatShape) -> tuple[int, int]: ...
    @typing.overload
    def getMemoryConsumption(self, layerId: int, netInputShapes: typing.Sequence[cv2.typing.MatShape]) -> tuple[int, int]: ...
    @typing.overload
    def getMemoryConsumption(self, layerId: int, netInputShape: cv2.typing.MatShape) -> tuple[int, int]: ...

    def enableFusion(self, fusion: bool) -> None: ...

    def enableWinograd(self, useWinograd: bool) -> None: ...

    def getPerfProfile(self) -> tuple[int, typing.Sequence[float]]: ...
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
class Image2BlobParams:
    # Parameter bundle for blobFromImageWithParams/blobFromImagesWithParams.
    scalefactor: cv2.typing.Scalar      # per-channel multiplier applied to pixel values
    size: cv2.typing.Size               # spatial size of the output blob
    mean: cv2.typing.Scalar             # per-channel value subtracted before scaling
    swapRB: bool                        # swap first and last channels (BGR<->RGB)
    ddepth: int                         # output blob depth (an OpenCV depth constant)
    datalayout: DataLayout              # output tensor layout (e.g. DNN_LAYOUT_NCHW)
    paddingmode: ImagePaddingMode       # resize/padding strategy (e.g. DNN_PMODE_LETTERBOX)

    # Functions
    @typing.overload
    def __init__(self) -> None: ...
    @typing.overload
    def __init__(self, scalefactor: cv2.typing.Scalar, size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., ddepth: int = ..., datalayout: DataLayout = ..., mode: ImagePaddingMode = ...) -> None: ...
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
class Model:
    # High-level inference wrapper around a Net.  The setInput* methods
    # configure preprocessing and return the Model itself (fluent style).

    # Functions
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...
    @typing.overload
    def __init__(self, network: Net) -> None: ...

    @typing.overload
    def setInputSize(self, size: cv2.typing.Size) -> Model: ...
    @typing.overload
    def setInputSize(self, width: int, height: int) -> Model: ...

    def setInputMean(self, mean: cv2.typing.Scalar) -> Model: ...

    def setInputScale(self, scale: cv2.typing.Scalar) -> Model: ...

    def setInputCrop(self, crop: bool) -> Model: ...

    def setInputSwapRB(self, swapRB: bool) -> Model: ...

    # Convenience setter for all preprocessing parameters at once.
    def setInputParams(self, scale: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ...) -> None: ...

    @typing.overload
    def predict(self, frame: cv2.typing.MatLike, outs: typing.Sequence[cv2.typing.MatLike] | None = ...) -> typing.Sequence[cv2.typing.MatLike]: ...
    @typing.overload
    def predict(self, frame: cv2.UMat, outs: typing.Sequence[cv2.UMat] | None = ...) -> typing.Sequence[cv2.UMat]: ...

    def setPreferableBackend(self, backendId: Backend) -> Model: ...

    def setPreferableTarget(self, targetId: Target) -> Model: ...
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
class Layer(cv2.Algorithm):
    # Stub for a single network layer; learned parameters live in `blobs`.
    blobs: typing.Sequence[cv2.typing.MatLike]
    @property
    def name(self) -> str: ...
    @property
    def type(self) -> str: ...
    @property
    def preferableTarget(self) -> int: ...

    # Functions
    @typing.overload
    def finalize(self, inputs: typing.Sequence[cv2.typing.MatLike], outputs: typing.Sequence[cv2.typing.MatLike] | None = ...) -> typing.Sequence[cv2.typing.MatLike]: ...
    @typing.overload
    def finalize(self, inputs: typing.Sequence[cv2.UMat], outputs: typing.Sequence[cv2.UMat] | None = ...) -> typing.Sequence[cv2.UMat]: ...

    # Returns (outputs, internals).
    def run(self, inputs: typing.Sequence[cv2.typing.MatLike], internals: typing.Sequence[cv2.typing.MatLike], outputs: typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[typing.Sequence[cv2.typing.MatLike], typing.Sequence[cv2.typing.MatLike]]: ...

    def outputNameToIndex(self, outputName: str) -> int: ...
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
class ClassificationModel(Model):
    # Image-classification wrapper; classify() returns (classId, confidence).

    # Functions
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...
    @typing.overload
    def __init__(self, network: Net) -> None: ...

    def setEnableSoftmaxPostProcessing(self, enable: bool) -> ClassificationModel: ...

    def getEnableSoftmaxPostProcessing(self) -> bool: ...

    @typing.overload
    def classify(self, frame: cv2.typing.MatLike) -> tuple[int, float]: ...
    @typing.overload
    def classify(self, frame: cv2.UMat) -> tuple[int, float]: ...
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
class KeypointsModel(Model):
    # Keypoint-detection wrapper; estimate() returns the detected 2D points,
    # keeping those whose score exceeds `thresh`.

    # Functions
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...
    @typing.overload
    def __init__(self, network: Net) -> None: ...

    @typing.overload
    def estimate(self, frame: cv2.typing.MatLike, thresh: float = ...) -> typing.Sequence[cv2.typing.Point2f]: ...
    @typing.overload
    def estimate(self, frame: cv2.UMat, thresh: float = ...) -> typing.Sequence[cv2.typing.Point2f]: ...
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
class SegmentationModel(Model):
    # Semantic-segmentation wrapper; segment() returns a per-pixel class mask.

    # Functions
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...
    @typing.overload
    def __init__(self, network: Net) -> None: ...

    @typing.overload
    def segment(self, frame: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @typing.overload
    def segment(self, frame: cv2.UMat, mask: cv2.UMat | None = ...) -> cv2.UMat: ...
|
| 306 |
+
|
| 307 |
+
|
| 308 |
+
class DetectionModel(Model):
    # Object-detection wrapper; detect() returns (classIds, confidences, boxes).

    # Functions
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...
    @typing.overload
    def __init__(self, network: Net) -> None: ...

    # When enabled, non-maximum suppression is applied across all classes
    # rather than per class.
    def setNmsAcrossClasses(self, value: bool) -> DetectionModel: ...

    def getNmsAcrossClasses(self) -> bool: ...

    @typing.overload
    def detect(self, frame: cv2.typing.MatLike, confThreshold: float = ..., nmsThreshold: float = ...) -> tuple[typing.Sequence[int], typing.Sequence[float], typing.Sequence[cv2.typing.Rect]]: ...
    @typing.overload
    def detect(self, frame: cv2.UMat, confThreshold: float = ..., nmsThreshold: float = ...) -> tuple[typing.Sequence[int], typing.Sequence[float], typing.Sequence[cv2.typing.Rect]]: ...
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
class TextRecognitionModel(Model):
    # Text-recognition (OCR) wrapper.  Configure the CTC decode type and a
    # vocabulary, then call recognize() on a whole frame or on ROI regions.

    # Functions
    @typing.overload
    def __init__(self, network: Net) -> None: ...
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...

    def setDecodeType(self, decodeType: str) -> TextRecognitionModel: ...

    def getDecodeType(self) -> str: ...

    def setDecodeOptsCTCPrefixBeamSearch(self, beamSize: int, vocPruneSize: int = ...) -> TextRecognitionModel: ...

    def setVocabulary(self, vocabulary: typing.Sequence[str]) -> TextRecognitionModel: ...

    def getVocabulary(self) -> typing.Sequence[str]: ...

    # Whole-frame recognition returns one string; the roiRects overloads
    # recognize each region and return one string per region.
    @typing.overload
    def recognize(self, frame: cv2.typing.MatLike) -> str: ...
    @typing.overload
    def recognize(self, frame: cv2.UMat) -> str: ...
    @typing.overload
    def recognize(self, frame: cv2.typing.MatLike, roiRects: typing.Sequence[cv2.typing.MatLike]) -> typing.Sequence[str]: ...
    @typing.overload
    def recognize(self, frame: cv2.UMat, roiRects: typing.Sequence[cv2.UMat]) -> typing.Sequence[str]: ...
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
class TextDetectionModel(Model):
    # Base wrapper for text detectors.  NOTE(review): each method is declared
    # twice with the same parameters but different return types (with and
    # without per-detection confidences) — reproduced as generated; only the
    # first overload of each pair is reachable by a type checker.

    # Functions
    @typing.overload
    def detect(self, frame: cv2.typing.MatLike) -> tuple[typing.Sequence[typing.Sequence[cv2.typing.Point]], typing.Sequence[float]]: ...
    @typing.overload
    def detect(self, frame: cv2.UMat) -> tuple[typing.Sequence[typing.Sequence[cv2.typing.Point]], typing.Sequence[float]]: ...
    @typing.overload
    def detect(self, frame: cv2.typing.MatLike) -> typing.Sequence[typing.Sequence[cv2.typing.Point]]: ...
    @typing.overload
    def detect(self, frame: cv2.UMat) -> typing.Sequence[typing.Sequence[cv2.typing.Point]]: ...

    @typing.overload
    def detectTextRectangles(self, frame: cv2.typing.MatLike) -> tuple[typing.Sequence[cv2.typing.RotatedRect], typing.Sequence[float]]: ...
    @typing.overload
    def detectTextRectangles(self, frame: cv2.UMat) -> tuple[typing.Sequence[cv2.typing.RotatedRect], typing.Sequence[float]]: ...
    @typing.overload
    def detectTextRectangles(self, frame: cv2.typing.MatLike) -> typing.Sequence[cv2.typing.RotatedRect]: ...
    @typing.overload
    def detectTextRectangles(self, frame: cv2.UMat) -> typing.Sequence[cv2.typing.RotatedRect]: ...
|
| 371 |
+
|
| 372 |
+
|
| 373 |
+
class TextDetectionModel_EAST(TextDetectionModel):
    # EAST text detector; setters return self (fluent style).

    # Functions
    @typing.overload
    def __init__(self, network: Net) -> None: ...
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...

    def setConfidenceThreshold(self, confThreshold: float) -> TextDetectionModel_EAST: ...

    def getConfidenceThreshold(self) -> float: ...

    def setNMSThreshold(self, nmsThreshold: float) -> TextDetectionModel_EAST: ...

    def getNMSThreshold(self) -> float: ...
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class TextDetectionModel_DB(TextDetectionModel):
    # DB (Differentiable Binarization) text detector; setters return self.

    # Functions
    @typing.overload
    def __init__(self, network: Net) -> None: ...
    @typing.overload
    def __init__(self, model: str, config: str = ...) -> None: ...

    def setBinaryThreshold(self, binaryThreshold: float) -> TextDetectionModel_DB: ...

    def getBinaryThreshold(self) -> float: ...

    def setPolygonThreshold(self, polygonThreshold: float) -> TextDetectionModel_DB: ...

    def getPolygonThreshold(self) -> float: ...

    def setUnclipRatio(self, unclipRatio: float) -> TextDetectionModel_DB: ...

    def getUnclipRatio(self) -> float: ...

    def setMaxCandidates(self, maxCandidates: int) -> TextDetectionModel_DB: ...

    def getMaxCandidates(self) -> int: ...
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
# Functions
# Non-maximum suppression over axis-aligned boxes; returns kept indices.
def NMSBoxes(bboxes: typing.Sequence[cv2.typing.Rect2d], scores: typing.Sequence[float], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> typing.Sequence[int]: ...

# NMS applied per class (class_ids parallels bboxes/scores).
def NMSBoxesBatched(bboxes: typing.Sequence[cv2.typing.Rect2d], scores: typing.Sequence[float], class_ids: typing.Sequence[int], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> typing.Sequence[int]: ...

# NMS over rotated rectangles.
def NMSBoxesRotated(bboxes: typing.Sequence[cv2.typing.RotatedRect], scores: typing.Sequence[float], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> typing.Sequence[int]: ...

# Convert a single image to a 4D network input blob, with optional
# scaling, mean subtraction, channel swap and center crop.
@typing.overload
def blobFromImage(image: cv2.typing.MatLike, scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImage(image: cv2.UMat, scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...

# Same conversion driven by an Image2BlobParams bundle.
@typing.overload
def blobFromImageWithParams(image: cv2.typing.MatLike, param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImageWithParams(image: cv2.UMat, param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImageWithParams(image: cv2.typing.MatLike, blob: cv2.typing.MatLike | None = ..., param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImageWithParams(image: cv2.UMat, blob: cv2.UMat | None = ..., param: Image2BlobParams = ...) -> cv2.UMat: ...

# Batch variants: many images into one blob.
@typing.overload
def blobFromImages(images: typing.Sequence[cv2.typing.MatLike], scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImages(images: typing.Sequence[cv2.UMat], scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...

@typing.overload
def blobFromImagesWithParams(images: typing.Sequence[cv2.typing.MatLike], param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImagesWithParams(images: typing.Sequence[cv2.UMat], param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImagesWithParams(images: typing.Sequence[cv2.typing.MatLike], blob: cv2.typing.MatLike | None = ..., param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
@typing.overload
def blobFromImagesWithParams(images: typing.Sequence[cv2.UMat], blob: cv2.UMat | None = ..., param: Image2BlobParams = ...) -> cv2.UMat: ...

def getAvailableTargets(be: Backend) -> typing.Sequence[Target]: ...

# Inverse of blobFromImages: split a 4D blob back into images.
@typing.overload
def imagesFromBlob(blob_: cv2.typing.MatLike, images_: typing.Sequence[cv2.typing.MatLike] | None = ...) -> typing.Sequence[cv2.typing.MatLike]: ...
@typing.overload
def imagesFromBlob(blob_: cv2.typing.MatLike, images_: typing.Sequence[cv2.UMat] | None = ...) -> typing.Sequence[cv2.UMat]: ...

# Generic loader: framework is auto-detected from file extensions, or named
# explicitly when loading from in-memory buffers.
@typing.overload
def readNet(model: str, config: str = ..., framework: str = ...) -> Net: ...
@typing.overload
def readNet(framework: str, bufferModel: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...

# Framework-specific loaders; each accepts file paths or uint8 buffers.
@typing.overload
def readNetFromCaffe(prototxt: str, caffeModel: str = ...) -> Net: ...
@typing.overload
def readNetFromCaffe(bufferProto: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...

@typing.overload
def readNetFromDarknet(cfgFile: str, darknetModel: str = ...) -> Net: ...
@typing.overload
def readNetFromDarknet(bufferCfg: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...

@typing.overload
def readNetFromModelOptimizer(xml: str, bin: str) -> Net: ...
@typing.overload
def readNetFromModelOptimizer(bufferModelConfig: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...

@typing.overload
def readNetFromONNX(onnxFile: str) -> Net: ...
@typing.overload
def readNetFromONNX(buffer: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...

@typing.overload
def readNetFromTFLite(model: str) -> Net: ...
@typing.overload
def readNetFromTFLite(bufferModel: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...

@typing.overload
def readNetFromTensorflow(model: str, config: str = ...) -> Net: ...
@typing.overload
def readNetFromTensorflow(bufferModel: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...

def readNetFromTorch(model: str, isBinary: bool = ..., evaluate: bool = ...) -> Net: ...

def readTensorFromONNX(path: str) -> cv2.typing.MatLike: ...

def readTorchBlob(filename: str, isBinary: bool = ...) -> cv2.typing.MatLike: ...

def shrinkCaffeModel(src: str, dst: str, layersTypes: typing.Sequence[str] = ...) -> None: ...

# Soft-NMS: returns (updated_scores, kept_indices).
def softNMSBoxes(bboxes: typing.Sequence[cv2.typing.Rect], scores: typing.Sequence[float], score_threshold: float, nms_threshold: float, top_k: int = ..., sigma: float = ..., method: SoftNMSMethod = ...) -> tuple[typing.Sequence[float], typing.Sequence[int]]: ...

def writeTextGraph(model: str, output: str) -> None: ...
|
| 502 |
+
|
| 503 |
+
|