# Deliberately empty: this stub declares its public API via the explicit
# "from cv2 import X as X" re-exports below (the stub convention for
# re-exported names) rather than by populating __all__.
__all__: list[str] = []
|
|
| import cv2.aruco |
| import cv2.cuda |
| import cv2.detail |
| import cv2.dnn |
| import cv2.gapi |
| import cv2.gapi.ot |
| import cv2.gapi.streaming |
| import cv2.typing |
| import numpy |
| import typing as _typing |
|
|
|
|
| from cv2 import Error as Error |
| from cv2 import aruco as aruco |
| from cv2 import barcode as barcode |
| from cv2 import cuda as cuda |
| from cv2 import detail as detail |
| from cv2 import dnn as dnn |
| from cv2 import fisheye as fisheye |
| from cv2 import flann as flann |
| from cv2 import gapi as gapi |
| from cv2 import ipp as ipp |
| from cv2 import ml as ml |
| from cv2 import ocl as ocl |
| from cv2 import ogl as ogl |
| from cv2 import parallel as parallel |
| from cv2 import samples as samples |
| from cv2 import segmentation as segmentation |
| from cv2 import typing as typing |
| from cv2 import utils as utils |
| from cv2 import videoio_registry as videoio_registry |
| from cv2.mat_wrapper import Mat as Mat |
|
|
|
|
| |
# --- SortFlags: flags controlling row/column sorting (values supplied by the
# compiled cv2 module; this stub only declares their names and types). ---
SORT_EVERY_ROW: int
SORT_EVERY_COLUMN: int
SORT_ASCENDING: int
SORT_DESCENDING: int
SortFlags = int
"""One of [SORT_EVERY_ROW, SORT_EVERY_COLUMN, SORT_ASCENDING, SORT_DESCENDING]"""


# --- CovarFlags: covariance-matrix computation flags. ---
COVAR_SCRAMBLED: int
COVAR_NORMAL: int
COVAR_USE_AVG: int
COVAR_SCALE: int
COVAR_ROWS: int
COVAR_COLS: int
CovarFlags = int
"""One of [COVAR_SCRAMBLED, COVAR_NORMAL, COVAR_USE_AVG, COVAR_SCALE, COVAR_ROWS, COVAR_COLS]"""


# --- ReduceTypes: reduction operations (sum, average, max, min, sum of squares). ---
REDUCE_SUM: int
REDUCE_AVG: int
REDUCE_MAX: int
REDUCE_MIN: int
REDUCE_SUM2: int
ReduceTypes = int
"""One of [REDUCE_SUM, REDUCE_AVG, REDUCE_MAX, REDUCE_MIN, REDUCE_SUM2]"""


# --- RotateFlags: 90/180/270-degree rotation selectors. ---
ROTATE_90_CLOCKWISE: int
ROTATE_180: int
ROTATE_90_COUNTERCLOCKWISE: int
RotateFlags = int
"""One of [ROTATE_90_CLOCKWISE, ROTATE_180, ROTATE_90_COUNTERCLOCKWISE]"""


# --- KmeansFlags: k-means center initialization / label reuse flags. ---
KMEANS_RANDOM_CENTERS: int
KMEANS_PP_CENTERS: int
KMEANS_USE_INITIAL_LABELS: int
KmeansFlags = int
"""One of [KMEANS_RANDOM_CENTERS, KMEANS_PP_CENTERS, KMEANS_USE_INITIAL_LABELS]"""


# --- Param: algorithm-parameter type tags. Each constant is declared twice
# (CamelCase `Param_*` and all-caps `PARAM_*`) — the two spellings appear to be
# aliases of the same value exposed by the cv2 binary (confirm against cv2). ---
Param_INT: int
PARAM_INT: int
Param_BOOLEAN: int
PARAM_BOOLEAN: int
Param_REAL: int
PARAM_REAL: int
Param_STRING: int
PARAM_STRING: int
Param_MAT: int
PARAM_MAT: int
Param_MAT_VECTOR: int
PARAM_MAT_VECTOR: int
Param_ALGORITHM: int
PARAM_ALGORITHM: int
Param_FLOAT: int
PARAM_FLOAT: int
Param_UNSIGNED_INT: int
PARAM_UNSIGNED_INT: int
Param_UINT64: int
PARAM_UINT64: int
Param_UCHAR: int
PARAM_UCHAR: int
Param_SCALAR: int
PARAM_SCALAR: int
Param = int
"""One of [Param_INT, PARAM_INT, Param_BOOLEAN, PARAM_BOOLEAN, Param_REAL, PARAM_REAL, Param_STRING, PARAM_STRING, Param_MAT, PARAM_MAT, Param_MAT_VECTOR, PARAM_MAT_VECTOR, Param_ALGORITHM, PARAM_ALGORITHM, Param_FLOAT, PARAM_FLOAT, Param_UNSIGNED_INT, PARAM_UNSIGNED_INT, Param_UINT64, PARAM_UINT64, Param_UCHAR, PARAM_UCHAR, Param_SCALAR, PARAM_SCALAR]"""
|
|
# --- DecompTypes: matrix decomposition methods (LU, SVD, eigen, Cholesky, QR,
# plus the NORMAL modifier). ---
DECOMP_LU: int
DECOMP_SVD: int
DECOMP_EIG: int
DECOMP_CHOLESKY: int
DECOMP_QR: int
DECOMP_NORMAL: int
DecompTypes = int
"""One of [DECOMP_LU, DECOMP_SVD, DECOMP_EIG, DECOMP_CHOLESKY, DECOMP_QR, DECOMP_NORMAL]"""


# --- NormTypes: norm selectors plus the RELATIVE/MINMAX modifiers and the
# TYPE_MASK used to separate the base norm from its modifier bits. ---
NORM_INF: int
NORM_L1: int
NORM_L2: int
NORM_L2SQR: int
NORM_HAMMING: int
NORM_HAMMING2: int
NORM_TYPE_MASK: int
NORM_RELATIVE: int
NORM_MINMAX: int
NormTypes = int
"""One of [NORM_INF, NORM_L1, NORM_L2, NORM_L2SQR, NORM_HAMMING, NORM_HAMMING2, NORM_TYPE_MASK, NORM_RELATIVE, NORM_MINMAX]"""


# --- CmpTypes: per-element comparison operators (==, >, >=, <, <=, !=). ---
CMP_EQ: int
CMP_GT: int
CMP_GE: int
CMP_LT: int
CMP_LE: int
CMP_NE: int
CmpTypes = int
"""One of [CMP_EQ, CMP_GT, CMP_GE, CMP_LT, CMP_LE, CMP_NE]"""


# --- GemmFlags: transpose flags for the 1st/2nd/3rd GEMM operand. ---
GEMM_1_T: int
GEMM_2_T: int
GEMM_3_T: int
GemmFlags = int
"""One of [GEMM_1_T, GEMM_2_T, GEMM_3_T]"""


# --- DftFlags: DFT/DCT transform flags (the DCT_* values share this alias). ---
DFT_INVERSE: int
DFT_SCALE: int
DFT_ROWS: int
DFT_COMPLEX_OUTPUT: int
DFT_REAL_OUTPUT: int
DFT_COMPLEX_INPUT: int
DCT_INVERSE: int
DCT_ROWS: int
DftFlags = int
"""One of [DFT_INVERSE, DFT_SCALE, DFT_ROWS, DFT_COMPLEX_OUTPUT, DFT_REAL_OUTPUT, DFT_COMPLEX_INPUT, DCT_INVERSE, DCT_ROWS]"""


# --- BorderTypes: pixel-extrapolation modes at image borders. ---
BORDER_CONSTANT: int
BORDER_REPLICATE: int
BORDER_REFLECT: int
BORDER_WRAP: int
BORDER_REFLECT_101: int
BORDER_TRANSPARENT: int
BORDER_REFLECT101: int
BORDER_DEFAULT: int
BORDER_ISOLATED: int
BorderTypes = int
"""One of [BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, BORDER_REFLECT_101, BORDER_TRANSPARENT, BORDER_REFLECT101, BORDER_DEFAULT, BORDER_ISOLATED]"""


# --- AccessFlag: buffer access modes (read / write / read-write). ---
ACCESS_READ: int
ACCESS_WRITE: int
ACCESS_RW: int
ACCESS_MASK: int
ACCESS_FAST: int
AccessFlag = int
"""One of [ACCESS_READ, ACCESS_WRITE, ACCESS_RW, ACCESS_MASK, ACCESS_FAST]"""


# --- UMatUsageFlags: UMat memory-allocation hints. The dunder-prefixed name is
# declared as-is at module level (no class, so no name mangling applies). ---
USAGE_DEFAULT: int
USAGE_ALLOCATE_HOST_MEMORY: int
USAGE_ALLOCATE_DEVICE_MEMORY: int
USAGE_ALLOCATE_SHARED_MEMORY: int
__UMAT_USAGE_FLAGS_32BIT: int
UMatUsageFlags = int
"""One of [USAGE_DEFAULT, USAGE_ALLOCATE_HOST_MEMORY, USAGE_ALLOCATE_DEVICE_MEMORY, USAGE_ALLOCATE_SHARED_MEMORY, __UMAT_USAGE_FLAGS_32BIT]"""


# --- SolveLPResult: linear-programming solver outcome codes. ---
SOLVELP_LOST: int
SOLVELP_UNBOUNDED: int
SOLVELP_UNFEASIBLE: int
SOLVELP_SINGLE: int
SOLVELP_MULTI: int
SolveLPResult = int
"""One of [SOLVELP_LOST, SOLVELP_UNBOUNDED, SOLVELP_UNFEASIBLE, SOLVELP_SINGLE, SOLVELP_MULTI]"""


# --- QuatAssumeType: whether a quaternion may be assumed unit-length. ---
QUAT_ASSUME_NOT_UNIT: int
QUAT_ASSUME_UNIT: int
QuatAssumeType = int
"""One of [QUAT_ASSUME_NOT_UNIT, QUAT_ASSUME_UNIT]"""


# --- AlgorithmHint: accuracy-vs-speed hint (default / accurate / approximate). ---
ALGO_HINT_DEFAULT: int
ALGO_HINT_ACCURATE: int
ALGO_HINT_APPROX: int
AlgorithmHint = int
"""One of [ALGO_HINT_DEFAULT, ALGO_HINT_ACCURATE, ALGO_HINT_APPROX]"""
|
|
# --- SpecialFilter: single-member enum selecting the Scharr kernel. ---
FILTER_SCHARR: int
SpecialFilter = int
"""One of [FILTER_SCHARR]"""


# --- MorphTypes: morphological operations. ---
MORPH_ERODE: int
MORPH_DILATE: int
MORPH_OPEN: int
MORPH_CLOSE: int
MORPH_GRADIENT: int
MORPH_TOPHAT: int
MORPH_BLACKHAT: int
MORPH_HITMISS: int
MorphTypes = int
"""One of [MORPH_ERODE, MORPH_DILATE, MORPH_OPEN, MORPH_CLOSE, MORPH_GRADIENT, MORPH_TOPHAT, MORPH_BLACKHAT, MORPH_HITMISS]"""


# --- MorphShapes: structuring-element shapes. ---
MORPH_RECT: int
MORPH_CROSS: int
MORPH_ELLIPSE: int
MorphShapes = int
"""One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE]"""


# --- InterpolationFlags: resampling methods plus WARP_* modifier bits. ---
INTER_NEAREST: int
INTER_LINEAR: int
INTER_CUBIC: int
INTER_AREA: int
INTER_LANCZOS4: int
INTER_LINEAR_EXACT: int
INTER_NEAREST_EXACT: int
INTER_MAX: int
WARP_FILL_OUTLIERS: int
WARP_INVERSE_MAP: int
WARP_RELATIVE_MAP: int
InterpolationFlags = int
"""One of [INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA, INTER_LANCZOS4, INTER_LINEAR_EXACT, INTER_NEAREST_EXACT, INTER_MAX, WARP_FILL_OUTLIERS, WARP_INVERSE_MAP, WARP_RELATIVE_MAP]"""


# --- WarpPolarMode: linear vs logarithmic polar mapping. ---
WARP_POLAR_LINEAR: int
WARP_POLAR_LOG: int
WarpPolarMode = int
"""One of [WARP_POLAR_LINEAR, WARP_POLAR_LOG]"""


# --- InterpolationMasks: fixed-point interpolation table bit constants. ---
INTER_BITS: int
INTER_BITS2: int
INTER_TAB_SIZE: int
INTER_TAB_SIZE2: int
InterpolationMasks = int
"""One of [INTER_BITS, INTER_BITS2, INTER_TAB_SIZE, INTER_TAB_SIZE2]"""


# --- DistanceTypes: distance metrics. ---
DIST_USER: int
DIST_L1: int
DIST_L2: int
DIST_C: int
DIST_L12: int
DIST_FAIR: int
DIST_WELSCH: int
DIST_HUBER: int
DistanceTypes = int
"""One of [DIST_USER, DIST_L1, DIST_L2, DIST_C, DIST_L12, DIST_FAIR, DIST_WELSCH, DIST_HUBER]"""


# --- DistanceTransformMasks: distance-transform mask sizes. ---
DIST_MASK_3: int
DIST_MASK_5: int
DIST_MASK_PRECISE: int
DistanceTransformMasks = int
"""One of [DIST_MASK_3, DIST_MASK_5, DIST_MASK_PRECISE]"""


# --- ThresholdTypes: thresholding operations plus OTSU/TRIANGLE modifier bits. ---
THRESH_BINARY: int
THRESH_BINARY_INV: int
THRESH_TRUNC: int
THRESH_TOZERO: int
THRESH_TOZERO_INV: int
THRESH_MASK: int
THRESH_OTSU: int
THRESH_TRIANGLE: int
ThresholdTypes = int
"""One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, THRESH_TRIANGLE]"""


# --- AdaptiveThresholdTypes: mean vs Gaussian local threshold. ---
ADAPTIVE_THRESH_MEAN_C: int
ADAPTIVE_THRESH_GAUSSIAN_C: int
AdaptiveThresholdTypes = int
"""One of [ADAPTIVE_THRESH_MEAN_C, ADAPTIVE_THRESH_GAUSSIAN_C]"""


# --- GrabCutClasses: GrabCut pixel labels (sure/probable fg/bg). ---
GC_BGD: int
GC_FGD: int
GC_PR_BGD: int
GC_PR_FGD: int
GrabCutClasses = int
"""One of [GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD]"""


# --- GrabCutModes: GrabCut initialization / evaluation modes. ---
GC_INIT_WITH_RECT: int
GC_INIT_WITH_MASK: int
GC_EVAL: int
GC_EVAL_FREEZE_MODEL: int
GrabCutModes = int
"""One of [GC_INIT_WITH_RECT, GC_INIT_WITH_MASK, GC_EVAL, GC_EVAL_FREEZE_MODEL]"""


# --- DistanceTransformLabelTypes: label granularity for distance transform. ---
DIST_LABEL_CCOMP: int
DIST_LABEL_PIXEL: int
DistanceTransformLabelTypes = int
"""One of [DIST_LABEL_CCOMP, DIST_LABEL_PIXEL]"""


# --- FloodFillFlags: flood-fill behavior modifiers. ---
FLOODFILL_FIXED_RANGE: int
FLOODFILL_MASK_ONLY: int
FloodFillFlags = int
"""One of [FLOODFILL_FIXED_RANGE, FLOODFILL_MASK_ONLY]"""
|
|
# --- ConnectedComponentsTypes: column indices into the connected-components
# statistics output. ---
CC_STAT_LEFT: int
CC_STAT_TOP: int
CC_STAT_WIDTH: int
CC_STAT_HEIGHT: int
CC_STAT_AREA: int
CC_STAT_MAX: int
ConnectedComponentsTypes = int
"""One of [CC_STAT_LEFT, CC_STAT_TOP, CC_STAT_WIDTH, CC_STAT_HEIGHT, CC_STAT_AREA, CC_STAT_MAX]"""


# --- ConnectedComponentsAlgorithmsTypes: labeling algorithm selectors. ---
CCL_DEFAULT: int
CCL_WU: int
CCL_GRANA: int
CCL_BOLELLI: int
CCL_SAUF: int
CCL_BBDT: int
CCL_SPAGHETTI: int
ConnectedComponentsAlgorithmsTypes = int
"""One of [CCL_DEFAULT, CCL_WU, CCL_GRANA, CCL_BOLELLI, CCL_SAUF, CCL_BBDT, CCL_SPAGHETTI]"""


# --- RetrievalModes: contour-retrieval hierarchy modes. ---
RETR_EXTERNAL: int
RETR_LIST: int
RETR_CCOMP: int
RETR_TREE: int
RETR_FLOODFILL: int
RetrievalModes = int
"""One of [RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE, RETR_FLOODFILL]"""


# --- ContourApproximationModes: contour point-storage approximations. ---
CHAIN_APPROX_NONE: int
CHAIN_APPROX_SIMPLE: int
CHAIN_APPROX_TC89_L1: int
CHAIN_APPROX_TC89_KCOS: int
ContourApproximationModes = int
"""One of [CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS]"""


# --- ShapeMatchModes: Hu-moment shape comparison methods I1..I3. ---
CONTOURS_MATCH_I1: int
CONTOURS_MATCH_I2: int
CONTOURS_MATCH_I3: int
ShapeMatchModes = int
"""One of [CONTOURS_MATCH_I1, CONTOURS_MATCH_I2, CONTOURS_MATCH_I3]"""


# --- HoughModes: Hough transform variants. ---
HOUGH_STANDARD: int
HOUGH_PROBABILISTIC: int
HOUGH_MULTI_SCALE: int
HOUGH_GRADIENT: int
HOUGH_GRADIENT_ALT: int
HoughModes = int
"""One of [HOUGH_STANDARD, HOUGH_PROBABILISTIC, HOUGH_MULTI_SCALE, HOUGH_GRADIENT, HOUGH_GRADIENT_ALT]"""


# --- LineSegmentDetectorModes: LSD refinement levels. ---
LSD_REFINE_NONE: int
LSD_REFINE_STD: int
LSD_REFINE_ADV: int
LineSegmentDetectorModes = int
"""One of [LSD_REFINE_NONE, LSD_REFINE_STD, LSD_REFINE_ADV]"""


# --- HistCompMethods: histogram comparison metrics. ---
HISTCMP_CORREL: int
HISTCMP_CHISQR: int
HISTCMP_INTERSECT: int
HISTCMP_BHATTACHARYYA: int
HISTCMP_HELLINGER: int
HISTCMP_CHISQR_ALT: int
HISTCMP_KL_DIV: int
HistCompMethods = int
"""One of [HISTCMP_CORREL, HISTCMP_CHISQR, HISTCMP_INTERSECT, HISTCMP_BHATTACHARYYA, HISTCMP_HELLINGER, HISTCMP_CHISQR_ALT, HISTCMP_KL_DIV]"""
|
|
# --- ColorConversionCodes: color-space conversion codes (presumably consumed
# by cvtColor-style functions — confirm against the cv2 docs).
# NOTE(review): many codes are declared twice (e.g. COLOR_BGR2Lab/COLOR_BGR2LAB,
# COLOR_BayerBG2BGR/COLOR_BAYER_BG2BGR) — the all-caps form appears to be an
# alternative spelling of the same constant exposed by the cv2 binary. ---

# Channel add/drop/swap between BGR(A) and RGB(A).
COLOR_BGR2BGRA: int
COLOR_RGB2RGBA: int
COLOR_BGRA2BGR: int
COLOR_RGBA2RGB: int
COLOR_BGR2RGBA: int
COLOR_RGB2BGRA: int
COLOR_RGBA2BGR: int
COLOR_BGRA2RGB: int
COLOR_BGR2RGB: int
COLOR_RGB2BGR: int
COLOR_BGRA2RGBA: int
COLOR_RGBA2BGRA: int
# Grayscale conversions.
COLOR_BGR2GRAY: int
COLOR_RGB2GRAY: int
COLOR_GRAY2BGR: int
COLOR_GRAY2RGB: int
COLOR_GRAY2BGRA: int
COLOR_GRAY2RGBA: int
COLOR_BGRA2GRAY: int
COLOR_RGBA2GRAY: int
# 16-bit packed BGR565.
COLOR_BGR2BGR565: int
COLOR_RGB2BGR565: int
COLOR_BGR5652BGR: int
COLOR_BGR5652RGB: int
COLOR_BGRA2BGR565: int
COLOR_RGBA2BGR565: int
COLOR_BGR5652BGRA: int
COLOR_BGR5652RGBA: int
COLOR_GRAY2BGR565: int
COLOR_BGR5652GRAY: int
# 16-bit packed BGR555.
COLOR_BGR2BGR555: int
COLOR_RGB2BGR555: int
COLOR_BGR5552BGR: int
COLOR_BGR5552RGB: int
COLOR_BGRA2BGR555: int
COLOR_RGBA2BGR555: int
COLOR_BGR5552BGRA: int
COLOR_BGR5552RGBA: int
COLOR_GRAY2BGR555: int
COLOR_BGR5552GRAY: int
# CIE XYZ.
COLOR_BGR2XYZ: int
COLOR_RGB2XYZ: int
COLOR_XYZ2BGR: int
COLOR_XYZ2RGB: int
# YCrCb (each with an all-caps alias spelling).
COLOR_BGR2YCrCb: int
COLOR_BGR2YCR_CB: int
COLOR_RGB2YCrCb: int
COLOR_RGB2YCR_CB: int
COLOR_YCrCb2BGR: int
COLOR_YCR_CB2BGR: int
COLOR_YCrCb2RGB: int
COLOR_YCR_CB2RGB: int
# HSV / Lab / Luv / HLS (forward).
COLOR_BGR2HSV: int
COLOR_RGB2HSV: int
COLOR_BGR2Lab: int
COLOR_BGR2LAB: int
COLOR_RGB2Lab: int
COLOR_RGB2LAB: int
COLOR_BGR2Luv: int
COLOR_BGR2LUV: int
COLOR_RGB2Luv: int
COLOR_RGB2LUV: int
COLOR_BGR2HLS: int
COLOR_RGB2HLS: int
# HSV / Lab / Luv / HLS (inverse).
COLOR_HSV2BGR: int
COLOR_HSV2RGB: int
COLOR_Lab2BGR: int
COLOR_LAB2BGR: int
COLOR_Lab2RGB: int
COLOR_LAB2RGB: int
COLOR_Luv2BGR: int
COLOR_LUV2BGR: int
COLOR_Luv2RGB: int
COLOR_LUV2RGB: int
COLOR_HLS2BGR: int
COLOR_HLS2RGB: int
# Full-range (0..255 hue) HSV/HLS variants.
COLOR_BGR2HSV_FULL: int
COLOR_RGB2HSV_FULL: int
COLOR_BGR2HLS_FULL: int
COLOR_RGB2HLS_FULL: int
COLOR_HSV2BGR_FULL: int
COLOR_HSV2RGB_FULL: int
COLOR_HLS2BGR_FULL: int
COLOR_HLS2RGB_FULL: int
# Linear-RGB (LBGR/LRGB) to/from Lab and Luv.
COLOR_LBGR2Lab: int
COLOR_LBGR2LAB: int
COLOR_LRGB2Lab: int
COLOR_LRGB2LAB: int
COLOR_LBGR2Luv: int
COLOR_LBGR2LUV: int
COLOR_LRGB2Luv: int
COLOR_LRGB2LUV: int
COLOR_Lab2LBGR: int
COLOR_LAB2LBGR: int
COLOR_Lab2LRGB: int
COLOR_LAB2LRGB: int
COLOR_Luv2LBGR: int
COLOR_LUV2LBGR: int
COLOR_Luv2LRGB: int
COLOR_LUV2LRGB: int
# Packed YUV.
COLOR_BGR2YUV: int
COLOR_RGB2YUV: int
COLOR_YUV2BGR: int
COLOR_YUV2RGB: int
# YUV 4:2:0 (NV12/NV21/YV12/IYUV/I420 and 420sp/420p spellings) decodes.
COLOR_YUV2RGB_NV12: int
COLOR_YUV2BGR_NV12: int
COLOR_YUV2RGB_NV21: int
COLOR_YUV2BGR_NV21: int
COLOR_YUV420sp2RGB: int
COLOR_YUV420SP2RGB: int
COLOR_YUV420sp2BGR: int
COLOR_YUV420SP2BGR: int
COLOR_YUV2RGBA_NV12: int
COLOR_YUV2BGRA_NV12: int
COLOR_YUV2RGBA_NV21: int
COLOR_YUV2BGRA_NV21: int
COLOR_YUV420sp2RGBA: int
COLOR_YUV420SP2RGBA: int
COLOR_YUV420sp2BGRA: int
COLOR_YUV420SP2BGRA: int
COLOR_YUV2RGB_YV12: int
COLOR_YUV2BGR_YV12: int
COLOR_YUV2RGB_IYUV: int
COLOR_YUV2BGR_IYUV: int
COLOR_YUV2RGB_I420: int
COLOR_YUV2BGR_I420: int
COLOR_YUV420p2RGB: int
COLOR_YUV420P2RGB: int
COLOR_YUV420p2BGR: int
COLOR_YUV420P2BGR: int
COLOR_YUV2RGBA_YV12: int
COLOR_YUV2BGRA_YV12: int
COLOR_YUV2RGBA_IYUV: int
COLOR_YUV2BGRA_IYUV: int
COLOR_YUV2RGBA_I420: int
COLOR_YUV2BGRA_I420: int
COLOR_YUV420p2RGBA: int
COLOR_YUV420P2RGBA: int
COLOR_YUV420p2BGRA: int
COLOR_YUV420P2BGRA: int
COLOR_YUV2GRAY_420: int
COLOR_YUV2GRAY_NV21: int
COLOR_YUV2GRAY_NV12: int
COLOR_YUV2GRAY_YV12: int
COLOR_YUV2GRAY_IYUV: int
COLOR_YUV2GRAY_I420: int
COLOR_YUV420sp2GRAY: int
COLOR_YUV420SP2GRAY: int
COLOR_YUV420p2GRAY: int
COLOR_YUV420P2GRAY: int
# YUV 4:2:2 (UYVY/Y422/UYNV/YUY2/YVYU/YUYV/YUNV) decodes.
COLOR_YUV2RGB_UYVY: int
COLOR_YUV2BGR_UYVY: int
COLOR_YUV2RGB_Y422: int
COLOR_YUV2BGR_Y422: int
COLOR_YUV2RGB_UYNV: int
COLOR_YUV2BGR_UYNV: int
COLOR_YUV2RGBA_UYVY: int
COLOR_YUV2BGRA_UYVY: int
COLOR_YUV2RGBA_Y422: int
COLOR_YUV2BGRA_Y422: int
COLOR_YUV2RGBA_UYNV: int
COLOR_YUV2BGRA_UYNV: int
COLOR_YUV2RGB_YUY2: int
COLOR_YUV2BGR_YUY2: int
COLOR_YUV2RGB_YVYU: int
COLOR_YUV2BGR_YVYU: int
COLOR_YUV2RGB_YUYV: int
COLOR_YUV2BGR_YUYV: int
COLOR_YUV2RGB_YUNV: int
COLOR_YUV2BGR_YUNV: int
COLOR_YUV2RGBA_YUY2: int
COLOR_YUV2BGRA_YUY2: int
COLOR_YUV2RGBA_YVYU: int
COLOR_YUV2BGRA_YVYU: int
COLOR_YUV2RGBA_YUYV: int
COLOR_YUV2BGRA_YUYV: int
COLOR_YUV2RGBA_YUNV: int
COLOR_YUV2BGRA_YUNV: int
COLOR_YUV2GRAY_UYVY: int
COLOR_YUV2GRAY_YUY2: int
COLOR_YUV2GRAY_Y422: int
COLOR_YUV2GRAY_UYNV: int
COLOR_YUV2GRAY_YVYU: int
COLOR_YUV2GRAY_YUYV: int
COLOR_YUV2GRAY_YUNV: int
# Alpha-premultiplied RGBA (mRGBA) round trip.
COLOR_RGBA2mRGBA: int
COLOR_RGBA2M_RGBA: int
COLOR_mRGBA2RGBA: int
COLOR_M_RGBA2RGBA: int
# RGB(A)/BGR(A) -> YUV 4:2:0 encodes.
COLOR_RGB2YUV_I420: int
COLOR_BGR2YUV_I420: int
COLOR_RGB2YUV_IYUV: int
COLOR_BGR2YUV_IYUV: int
COLOR_RGBA2YUV_I420: int
COLOR_BGRA2YUV_I420: int
COLOR_RGBA2YUV_IYUV: int
COLOR_BGRA2YUV_IYUV: int
COLOR_RGB2YUV_YV12: int
COLOR_BGR2YUV_YV12: int
COLOR_RGBA2YUV_YV12: int
COLOR_BGRA2YUV_YV12: int
# Bayer demosaicing (default algorithm).
COLOR_BayerBG2BGR: int
COLOR_BAYER_BG2BGR: int
COLOR_BayerGB2BGR: int
COLOR_BAYER_GB2BGR: int
COLOR_BayerRG2BGR: int
COLOR_BAYER_RG2BGR: int
COLOR_BayerGR2BGR: int
COLOR_BAYER_GR2BGR: int
COLOR_BayerRGGB2BGR: int
COLOR_BAYER_RGGB2BGR: int
COLOR_BayerGRBG2BGR: int
COLOR_BAYER_GRBG2BGR: int
COLOR_BayerBGGR2BGR: int
COLOR_BAYER_BGGR2BGR: int
COLOR_BayerGBRG2BGR: int
COLOR_BAYER_GBRG2BGR: int
COLOR_BayerRGGB2RGB: int
COLOR_BAYER_RGGB2RGB: int
COLOR_BayerGRBG2RGB: int
COLOR_BAYER_GRBG2RGB: int
COLOR_BayerBGGR2RGB: int
COLOR_BAYER_BGGR2RGB: int
COLOR_BayerGBRG2RGB: int
COLOR_BAYER_GBRG2RGB: int
COLOR_BayerBG2RGB: int
COLOR_BAYER_BG2RGB: int
COLOR_BayerGB2RGB: int
COLOR_BAYER_GB2RGB: int
COLOR_BayerRG2RGB: int
COLOR_BAYER_RG2RGB: int
COLOR_BayerGR2RGB: int
COLOR_BAYER_GR2RGB: int
COLOR_BayerBG2GRAY: int
COLOR_BAYER_BG2GRAY: int
COLOR_BayerGB2GRAY: int
COLOR_BAYER_GB2GRAY: int
COLOR_BayerRG2GRAY: int
COLOR_BAYER_RG2GRAY: int
COLOR_BayerGR2GRAY: int
COLOR_BAYER_GR2GRAY: int
COLOR_BayerRGGB2GRAY: int
COLOR_BAYER_RGGB2GRAY: int
COLOR_BayerGRBG2GRAY: int
COLOR_BAYER_GRBG2GRAY: int
COLOR_BayerBGGR2GRAY: int
COLOR_BAYER_BGGR2GRAY: int
COLOR_BayerGBRG2GRAY: int
COLOR_BAYER_GBRG2GRAY: int
# Bayer demosaicing, VNG variant.
COLOR_BayerBG2BGR_VNG: int
COLOR_BAYER_BG2BGR_VNG: int
COLOR_BayerGB2BGR_VNG: int
COLOR_BAYER_GB2BGR_VNG: int
COLOR_BayerRG2BGR_VNG: int
COLOR_BAYER_RG2BGR_VNG: int
COLOR_BayerGR2BGR_VNG: int
COLOR_BAYER_GR2BGR_VNG: int
COLOR_BayerRGGB2BGR_VNG: int
COLOR_BAYER_RGGB2BGR_VNG: int
COLOR_BayerGRBG2BGR_VNG: int
COLOR_BAYER_GRBG2BGR_VNG: int
COLOR_BayerBGGR2BGR_VNG: int
COLOR_BAYER_BGGR2BGR_VNG: int
COLOR_BayerGBRG2BGR_VNG: int
COLOR_BAYER_GBRG2BGR_VNG: int
COLOR_BayerRGGB2RGB_VNG: int
COLOR_BAYER_RGGB2RGB_VNG: int
COLOR_BayerGRBG2RGB_VNG: int
COLOR_BAYER_GRBG2RGB_VNG: int
COLOR_BayerBGGR2RGB_VNG: int
COLOR_BAYER_BGGR2RGB_VNG: int
COLOR_BayerGBRG2RGB_VNG: int
COLOR_BAYER_GBRG2RGB_VNG: int
COLOR_BayerBG2RGB_VNG: int
COLOR_BAYER_BG2RGB_VNG: int
COLOR_BayerGB2RGB_VNG: int
COLOR_BAYER_GB2RGB_VNG: int
COLOR_BayerRG2RGB_VNG: int
COLOR_BAYER_RG2RGB_VNG: int
COLOR_BayerGR2RGB_VNG: int
COLOR_BAYER_GR2RGB_VNG: int
# Bayer demosaicing, EA (edge-aware) variant.
COLOR_BayerBG2BGR_EA: int
COLOR_BAYER_BG2BGR_EA: int
COLOR_BayerGB2BGR_EA: int
COLOR_BAYER_GB2BGR_EA: int
COLOR_BayerRG2BGR_EA: int
COLOR_BAYER_RG2BGR_EA: int
COLOR_BayerGR2BGR_EA: int
COLOR_BAYER_GR2BGR_EA: int
COLOR_BayerRGGB2BGR_EA: int
COLOR_BAYER_RGGB2BGR_EA: int
COLOR_BayerGRBG2BGR_EA: int
COLOR_BAYER_GRBG2BGR_EA: int
COLOR_BayerBGGR2BGR_EA: int
COLOR_BAYER_BGGR2BGR_EA: int
COLOR_BayerGBRG2BGR_EA: int
COLOR_BAYER_GBRG2BGR_EA: int
COLOR_BayerRGGB2RGB_EA: int
COLOR_BAYER_RGGB2RGB_EA: int
COLOR_BayerGRBG2RGB_EA: int
COLOR_BAYER_GRBG2RGB_EA: int
COLOR_BayerBGGR2RGB_EA: int
COLOR_BAYER_BGGR2RGB_EA: int
COLOR_BayerGBRG2RGB_EA: int
COLOR_BAYER_GBRG2RGB_EA: int
COLOR_BayerBG2RGB_EA: int
COLOR_BAYER_BG2RGB_EA: int
COLOR_BayerGB2RGB_EA: int
COLOR_BAYER_GB2RGB_EA: int
COLOR_BayerRG2RGB_EA: int
COLOR_BAYER_RG2RGB_EA: int
COLOR_BayerGR2RGB_EA: int
COLOR_BAYER_GR2RGB_EA: int
# Bayer demosaicing producing an alpha channel.
COLOR_BayerBG2BGRA: int
COLOR_BAYER_BG2BGRA: int
COLOR_BayerGB2BGRA: int
COLOR_BAYER_GB2BGRA: int
COLOR_BayerRG2BGRA: int
COLOR_BAYER_RG2BGRA: int
COLOR_BayerGR2BGRA: int
COLOR_BAYER_GR2BGRA: int
COLOR_BayerRGGB2BGRA: int
COLOR_BAYER_RGGB2BGRA: int
COLOR_BayerGRBG2BGRA: int
COLOR_BAYER_GRBG2BGRA: int
COLOR_BayerBGGR2BGRA: int
COLOR_BAYER_BGGR2BGRA: int
COLOR_BayerGBRG2BGRA: int
COLOR_BAYER_GBRG2BGRA: int
COLOR_BayerRGGB2RGBA: int
COLOR_BAYER_RGGB2RGBA: int
COLOR_BayerGRBG2RGBA: int
COLOR_BAYER_GRBG2RGBA: int
COLOR_BayerBGGR2RGBA: int
COLOR_BAYER_BGGR2RGBA: int
COLOR_BayerGBRG2RGBA: int
COLOR_BAYER_GBRG2RGBA: int
COLOR_BayerBG2RGBA: int
COLOR_BAYER_BG2RGBA: int
COLOR_BayerGB2RGBA: int
COLOR_BAYER_GB2RGBA: int
COLOR_BayerRG2RGBA: int
COLOR_BAYER_RG2RGBA: int
COLOR_BayerGR2RGBA: int
COLOR_BAYER_GR2RGBA: int
# RGB(A)/BGR(A) -> YUV 4:2:2 encodes.
COLOR_RGB2YUV_UYVY: int
COLOR_BGR2YUV_UYVY: int
COLOR_RGB2YUV_Y422: int
COLOR_BGR2YUV_Y422: int
COLOR_RGB2YUV_UYNV: int
COLOR_BGR2YUV_UYNV: int
COLOR_RGBA2YUV_UYVY: int
COLOR_BGRA2YUV_UYVY: int
COLOR_RGBA2YUV_Y422: int
COLOR_BGRA2YUV_Y422: int
COLOR_RGBA2YUV_UYNV: int
COLOR_BGRA2YUV_UYNV: int
COLOR_RGB2YUV_YUY2: int
COLOR_BGR2YUV_YUY2: int
COLOR_RGB2YUV_YVYU: int
COLOR_BGR2YUV_YVYU: int
COLOR_RGB2YUV_YUYV: int
COLOR_BGR2YUV_YUYV: int
COLOR_RGB2YUV_YUNV: int
COLOR_BGR2YUV_YUNV: int
COLOR_RGBA2YUV_YUY2: int
COLOR_BGRA2YUV_YUY2: int
COLOR_RGBA2YUV_YVYU: int
COLOR_BGRA2YUV_YVYU: int
COLOR_RGBA2YUV_YUYV: int
COLOR_BGRA2YUV_YUYV: int
COLOR_RGBA2YUV_YUNV: int
COLOR_BGRA2YUV_YUNV: int
# Sentinel: number of conversion codes.
COLOR_COLORCVT_MAX: int
ColorConversionCodes = int
| """One of [COLOR_BGR2BGRA, COLOR_RGB2RGBA, COLOR_BGRA2BGR, COLOR_RGBA2RGB, COLOR_BGR2RGBA, COLOR_RGB2BGRA, COLOR_RGBA2BGR, COLOR_BGRA2RGB, COLOR_BGR2RGB, COLOR_RGB2BGR, COLOR_BGRA2RGBA, COLOR_RGBA2BGRA, COLOR_BGR2GRAY, COLOR_RGB2GRAY, COLOR_GRAY2BGR, COLOR_GRAY2RGB, COLOR_GRAY2BGRA, COLOR_GRAY2RGBA, COLOR_BGRA2GRAY, COLOR_RGBA2GRAY, COLOR_BGR2BGR565, COLOR_RGB2BGR565, COLOR_BGR5652BGR, COLOR_BGR5652RGB, COLOR_BGRA2BGR565, COLOR_RGBA2BGR565, COLOR_BGR5652BGRA, COLOR_BGR5652RGBA, COLOR_GRAY2BGR565, COLOR_BGR5652GRAY, COLOR_BGR2BGR555, COLOR_RGB2BGR555, COLOR_BGR5552BGR, COLOR_BGR5552RGB, COLOR_BGRA2BGR555, COLOR_RGBA2BGR555, COLOR_BGR5552BGRA, COLOR_BGR5552RGBA, COLOR_GRAY2BGR555, COLOR_BGR5552GRAY, COLOR_BGR2XYZ, COLOR_RGB2XYZ, COLOR_XYZ2BGR, COLOR_XYZ2RGB, COLOR_BGR2YCrCb, COLOR_BGR2YCR_CB, COLOR_RGB2YCrCb, COLOR_RGB2YCR_CB, COLOR_YCrCb2BGR, COLOR_YCR_CB2BGR, COLOR_YCrCb2RGB, COLOR_YCR_CB2RGB, COLOR_BGR2HSV, COLOR_RGB2HSV, COLOR_BGR2Lab, COLOR_BGR2LAB, COLOR_RGB2Lab, COLOR_RGB2LAB, COLOR_BGR2Luv, COLOR_BGR2LUV, COLOR_RGB2Luv, COLOR_RGB2LUV, COLOR_BGR2HLS, COLOR_RGB2HLS, COLOR_HSV2BGR, COLOR_HSV2RGB, COLOR_Lab2BGR, COLOR_LAB2BGR, COLOR_Lab2RGB, COLOR_LAB2RGB, COLOR_Luv2BGR, COLOR_LUV2BGR, COLOR_Luv2RGB, COLOR_LUV2RGB, COLOR_HLS2BGR, COLOR_HLS2RGB, COLOR_BGR2HSV_FULL, COLOR_RGB2HSV_FULL, COLOR_BGR2HLS_FULL, COLOR_RGB2HLS_FULL, COLOR_HSV2BGR_FULL, COLOR_HSV2RGB_FULL, COLOR_HLS2BGR_FULL, COLOR_HLS2RGB_FULL, COLOR_LBGR2Lab, COLOR_LBGR2LAB, COLOR_LRGB2Lab, COLOR_LRGB2LAB, COLOR_LBGR2Luv, COLOR_LBGR2LUV, COLOR_LRGB2Luv, COLOR_LRGB2LUV, COLOR_Lab2LBGR, COLOR_LAB2LBGR, COLOR_Lab2LRGB, COLOR_LAB2LRGB, COLOR_Luv2LBGR, COLOR_LUV2LBGR, COLOR_Luv2LRGB, COLOR_LUV2LRGB, COLOR_BGR2YUV, COLOR_RGB2YUV, COLOR_YUV2BGR, COLOR_YUV2RGB, COLOR_YUV2RGB_NV12, COLOR_YUV2BGR_NV12, COLOR_YUV2RGB_NV21, COLOR_YUV2BGR_NV21, COLOR_YUV420sp2RGB, COLOR_YUV420SP2RGB, COLOR_YUV420sp2BGR, COLOR_YUV420SP2BGR, COLOR_YUV2RGBA_NV12, COLOR_YUV2BGRA_NV12, COLOR_YUV2RGBA_NV21, COLOR_YUV2BGRA_NV21, 
COLOR_YUV420sp2RGBA, COLOR_YUV420SP2RGBA, COLOR_YUV420sp2BGRA, COLOR_YUV420SP2BGRA, COLOR_YUV2RGB_YV12, COLOR_YUV2BGR_YV12, COLOR_YUV2RGB_IYUV, COLOR_YUV2BGR_IYUV, COLOR_YUV2RGB_I420, COLOR_YUV2BGR_I420, COLOR_YUV420p2RGB, COLOR_YUV420P2RGB, COLOR_YUV420p2BGR, COLOR_YUV420P2BGR, COLOR_YUV2RGBA_YV12, COLOR_YUV2BGRA_YV12, COLOR_YUV2RGBA_IYUV, COLOR_YUV2BGRA_IYUV, COLOR_YUV2RGBA_I420, COLOR_YUV2BGRA_I420, COLOR_YUV420p2RGBA, COLOR_YUV420P2RGBA, COLOR_YUV420p2BGRA, COLOR_YUV420P2BGRA, COLOR_YUV2GRAY_420, COLOR_YUV2GRAY_NV21, COLOR_YUV2GRAY_NV12, COLOR_YUV2GRAY_YV12, COLOR_YUV2GRAY_IYUV, COLOR_YUV2GRAY_I420, COLOR_YUV420sp2GRAY, COLOR_YUV420SP2GRAY, COLOR_YUV420p2GRAY, COLOR_YUV420P2GRAY, COLOR_YUV2RGB_UYVY, COLOR_YUV2BGR_UYVY, COLOR_YUV2RGB_Y422, COLOR_YUV2BGR_Y422, COLOR_YUV2RGB_UYNV, COLOR_YUV2BGR_UYNV, COLOR_YUV2RGBA_UYVY, COLOR_YUV2BGRA_UYVY, COLOR_YUV2RGBA_Y422, COLOR_YUV2BGRA_Y422, COLOR_YUV2RGBA_UYNV, COLOR_YUV2BGRA_UYNV, COLOR_YUV2RGB_YUY2, COLOR_YUV2BGR_YUY2, COLOR_YUV2RGB_YVYU, COLOR_YUV2BGR_YVYU, COLOR_YUV2RGB_YUYV, COLOR_YUV2BGR_YUYV, COLOR_YUV2RGB_YUNV, COLOR_YUV2BGR_YUNV, COLOR_YUV2RGBA_YUY2, COLOR_YUV2BGRA_YUY2, COLOR_YUV2RGBA_YVYU, COLOR_YUV2BGRA_YVYU, COLOR_YUV2RGBA_YUYV, COLOR_YUV2BGRA_YUYV, COLOR_YUV2RGBA_YUNV, COLOR_YUV2BGRA_YUNV, COLOR_YUV2GRAY_UYVY, COLOR_YUV2GRAY_YUY2, COLOR_YUV2GRAY_Y422, COLOR_YUV2GRAY_UYNV, COLOR_YUV2GRAY_YVYU, COLOR_YUV2GRAY_YUYV, COLOR_YUV2GRAY_YUNV, COLOR_RGBA2mRGBA, COLOR_RGBA2M_RGBA, COLOR_mRGBA2RGBA, COLOR_M_RGBA2RGBA, COLOR_RGB2YUV_I420, COLOR_BGR2YUV_I420, COLOR_RGB2YUV_IYUV, COLOR_BGR2YUV_IYUV, COLOR_RGBA2YUV_I420, COLOR_BGRA2YUV_I420, COLOR_RGBA2YUV_IYUV, COLOR_BGRA2YUV_IYUV, COLOR_RGB2YUV_YV12, COLOR_BGR2YUV_YV12, COLOR_RGBA2YUV_YV12, COLOR_BGRA2YUV_YV12, COLOR_BayerBG2BGR, COLOR_BAYER_BG2BGR, COLOR_BayerGB2BGR, COLOR_BAYER_GB2BGR, COLOR_BayerRG2BGR, COLOR_BAYER_RG2BGR, COLOR_BayerGR2BGR, COLOR_BAYER_GR2BGR, COLOR_BayerRGGB2BGR, COLOR_BAYER_RGGB2BGR, COLOR_BayerGRBG2BGR, COLOR_BAYER_GRBG2BGR, COLOR_BayerBGGR2BGR, 
COLOR_BAYER_BGGR2BGR, COLOR_BayerGBRG2BGR, COLOR_BAYER_GBRG2BGR, COLOR_BayerRGGB2RGB, COLOR_BAYER_RGGB2RGB, COLOR_BayerGRBG2RGB, COLOR_BAYER_GRBG2RGB, COLOR_BayerBGGR2RGB, COLOR_BAYER_BGGR2RGB, COLOR_BayerGBRG2RGB, COLOR_BAYER_GBRG2RGB, COLOR_BayerBG2RGB, COLOR_BAYER_BG2RGB, COLOR_BayerGB2RGB, COLOR_BAYER_GB2RGB, COLOR_BayerRG2RGB, COLOR_BAYER_RG2RGB, COLOR_BayerGR2RGB, COLOR_BAYER_GR2RGB, COLOR_BayerBG2GRAY, COLOR_BAYER_BG2GRAY, COLOR_BayerGB2GRAY, COLOR_BAYER_GB2GRAY, COLOR_BayerRG2GRAY, COLOR_BAYER_RG2GRAY, COLOR_BayerGR2GRAY, COLOR_BAYER_GR2GRAY, COLOR_BayerRGGB2GRAY, COLOR_BAYER_RGGB2GRAY, COLOR_BayerGRBG2GRAY, COLOR_BAYER_GRBG2GRAY, COLOR_BayerBGGR2GRAY, COLOR_BAYER_BGGR2GRAY, COLOR_BayerGBRG2GRAY, COLOR_BAYER_GBRG2GRAY, COLOR_BayerBG2BGR_VNG, COLOR_BAYER_BG2BGR_VNG, COLOR_BayerGB2BGR_VNG, COLOR_BAYER_GB2BGR_VNG, COLOR_BayerRG2BGR_VNG, COLOR_BAYER_RG2BGR_VNG, COLOR_BayerGR2BGR_VNG, COLOR_BAYER_GR2BGR_VNG, COLOR_BayerRGGB2BGR_VNG, COLOR_BAYER_RGGB2BGR_VNG, COLOR_BayerGRBG2BGR_VNG, COLOR_BAYER_GRBG2BGR_VNG, COLOR_BayerBGGR2BGR_VNG, COLOR_BAYER_BGGR2BGR_VNG, COLOR_BayerGBRG2BGR_VNG, COLOR_BAYER_GBRG2BGR_VNG, COLOR_BayerRGGB2RGB_VNG, COLOR_BAYER_RGGB2RGB_VNG, COLOR_BayerGRBG2RGB_VNG, COLOR_BAYER_GRBG2RGB_VNG, COLOR_BayerBGGR2RGB_VNG, COLOR_BAYER_BGGR2RGB_VNG, COLOR_BayerGBRG2RGB_VNG, COLOR_BAYER_GBRG2RGB_VNG, COLOR_BayerBG2RGB_VNG, COLOR_BAYER_BG2RGB_VNG, COLOR_BayerGB2RGB_VNG, COLOR_BAYER_GB2RGB_VNG, COLOR_BayerRG2RGB_VNG, COLOR_BAYER_RG2RGB_VNG, COLOR_BayerGR2RGB_VNG, COLOR_BAYER_GR2RGB_VNG, COLOR_BayerBG2BGR_EA, COLOR_BAYER_BG2BGR_EA, COLOR_BayerGB2BGR_EA, COLOR_BAYER_GB2BGR_EA, COLOR_BayerRG2BGR_EA, COLOR_BAYER_RG2BGR_EA, COLOR_BayerGR2BGR_EA, COLOR_BAYER_GR2BGR_EA, COLOR_BayerRGGB2BGR_EA, COLOR_BAYER_RGGB2BGR_EA, COLOR_BayerGRBG2BGR_EA, COLOR_BAYER_GRBG2BGR_EA, COLOR_BayerBGGR2BGR_EA, COLOR_BAYER_BGGR2BGR_EA, COLOR_BayerGBRG2BGR_EA, COLOR_BAYER_GBRG2BGR_EA, COLOR_BayerRGGB2RGB_EA, COLOR_BAYER_RGGB2RGB_EA, COLOR_BayerGRBG2RGB_EA, COLOR_BAYER_GRBG2RGB_EA, 
COLOR_BayerBGGR2RGB_EA, COLOR_BAYER_BGGR2RGB_EA, COLOR_BayerGBRG2RGB_EA, COLOR_BAYER_GBRG2RGB_EA, COLOR_BayerBG2RGB_EA, COLOR_BAYER_BG2RGB_EA, COLOR_BayerGB2RGB_EA, COLOR_BAYER_GB2RGB_EA, COLOR_BayerRG2RGB_EA, COLOR_BAYER_RG2RGB_EA, COLOR_BayerGR2RGB_EA, COLOR_BAYER_GR2RGB_EA, COLOR_BayerBG2BGRA, COLOR_BAYER_BG2BGRA, COLOR_BayerGB2BGRA, COLOR_BAYER_GB2BGRA, COLOR_BayerRG2BGRA, COLOR_BAYER_RG2BGRA, COLOR_BayerGR2BGRA, COLOR_BAYER_GR2BGRA, COLOR_BayerRGGB2BGRA, COLOR_BAYER_RGGB2BGRA, COLOR_BayerGRBG2BGRA, COLOR_BAYER_GRBG2BGRA, COLOR_BayerBGGR2BGRA, COLOR_BAYER_BGGR2BGRA, COLOR_BayerGBRG2BGRA, COLOR_BAYER_GBRG2BGRA, COLOR_BayerRGGB2RGBA, COLOR_BAYER_RGGB2RGBA, COLOR_BayerGRBG2RGBA, COLOR_BAYER_GRBG2RGBA, COLOR_BayerBGGR2RGBA, COLOR_BAYER_BGGR2RGBA, COLOR_BayerGBRG2RGBA, COLOR_BAYER_GBRG2RGBA, COLOR_BayerBG2RGBA, COLOR_BAYER_BG2RGBA, COLOR_BayerGB2RGBA, COLOR_BAYER_GB2RGBA, COLOR_BayerRG2RGBA, COLOR_BAYER_RG2RGBA, COLOR_BayerGR2RGBA, COLOR_BAYER_GR2RGBA, COLOR_RGB2YUV_UYVY, COLOR_BGR2YUV_UYVY, COLOR_RGB2YUV_Y422, COLOR_BGR2YUV_Y422, COLOR_RGB2YUV_UYNV, COLOR_BGR2YUV_UYNV, COLOR_RGBA2YUV_UYVY, COLOR_BGRA2YUV_UYVY, COLOR_RGBA2YUV_Y422, COLOR_BGRA2YUV_Y422, COLOR_RGBA2YUV_UYNV, COLOR_BGRA2YUV_UYNV, COLOR_RGB2YUV_YUY2, COLOR_BGR2YUV_YUY2, COLOR_RGB2YUV_YVYU, COLOR_BGR2YUV_YVYU, COLOR_RGB2YUV_YUYV, COLOR_BGR2YUV_YUYV, COLOR_RGB2YUV_YUNV, COLOR_BGR2YUV_YUNV, COLOR_RGBA2YUV_YUY2, COLOR_BGRA2YUV_YUY2, COLOR_RGBA2YUV_YVYU, COLOR_BGRA2YUV_YVYU, COLOR_RGBA2YUV_YUYV, COLOR_BGRA2YUV_YUYV, COLOR_RGBA2YUV_YUNV, COLOR_BGRA2YUV_YUNV, COLOR_COLORCVT_MAX]""" |
|
|
# --- RectanglesIntersectTypes: rotated-rectangle intersection outcomes. ---
INTERSECT_NONE: int
INTERSECT_PARTIAL: int
INTERSECT_FULL: int
RectanglesIntersectTypes = int
"""One of [INTERSECT_NONE, INTERSECT_PARTIAL, INTERSECT_FULL]"""


# --- LineTypes: drawing line connectivity / fill (FILLED shares this alias). ---
FILLED: int
LINE_4: int
LINE_8: int
LINE_AA: int
LineTypes = int
"""One of [FILLED, LINE_4, LINE_8, LINE_AA]"""


# --- HersheyFonts: built-in vector fonts plus the ITALIC modifier bit. ---
FONT_HERSHEY_SIMPLEX: int
FONT_HERSHEY_PLAIN: int
FONT_HERSHEY_DUPLEX: int
FONT_HERSHEY_COMPLEX: int
FONT_HERSHEY_TRIPLEX: int
FONT_HERSHEY_COMPLEX_SMALL: int
FONT_HERSHEY_SCRIPT_SIMPLEX: int
FONT_HERSHEY_SCRIPT_COMPLEX: int
FONT_ITALIC: int
HersheyFonts = int
"""One of [FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, FONT_HERSHEY_SCRIPT_COMPLEX, FONT_ITALIC]"""


# --- MarkerTypes: marker shapes for drawMarker-style drawing. ---
MARKER_CROSS: int
MARKER_TILTED_CROSS: int
MARKER_STAR: int
MARKER_DIAMOND: int
MARKER_SQUARE: int
MARKER_TRIANGLE_UP: int
MARKER_TRIANGLE_DOWN: int
MarkerTypes = int
"""One of [MARKER_CROSS, MARKER_TILTED_CROSS, MARKER_STAR, MARKER_DIAMOND, MARKER_SQUARE, MARKER_TRIANGLE_UP, MARKER_TRIANGLE_DOWN]"""


# --- TemplateMatchModes: template-matching similarity measures. ---
TM_SQDIFF: int
TM_SQDIFF_NORMED: int
TM_CCORR: int
TM_CCORR_NORMED: int
TM_CCOEFF: int
TM_CCOEFF_NORMED: int
TemplateMatchModes = int
"""One of [TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED]"""


# --- ColormapTypes: built-in false-color colormaps. ---
COLORMAP_AUTUMN: int
COLORMAP_BONE: int
COLORMAP_JET: int
COLORMAP_WINTER: int
COLORMAP_RAINBOW: int
COLORMAP_OCEAN: int
COLORMAP_SUMMER: int
COLORMAP_SPRING: int
COLORMAP_COOL: int
COLORMAP_HSV: int
COLORMAP_PINK: int
COLORMAP_HOT: int
COLORMAP_PARULA: int
COLORMAP_MAGMA: int
COLORMAP_INFERNO: int
COLORMAP_PLASMA: int
COLORMAP_VIRIDIS: int
COLORMAP_CIVIDIS: int
COLORMAP_TWILIGHT: int
COLORMAP_TWILIGHT_SHIFTED: int
COLORMAP_TURBO: int
COLORMAP_DEEPGREEN: int
ColormapTypes = int
"""One of [COLORMAP_AUTUMN, COLORMAP_BONE, COLORMAP_JET, COLORMAP_WINTER, COLORMAP_RAINBOW, COLORMAP_OCEAN, COLORMAP_SUMMER, COLORMAP_SPRING, COLORMAP_COOL, COLORMAP_HSV, COLORMAP_PINK, COLORMAP_HOT, COLORMAP_PARULA, COLORMAP_MAGMA, COLORMAP_INFERNO, COLORMAP_PLASMA, COLORMAP_VIRIDIS, COLORMAP_CIVIDIS, COLORMAP_TWILIGHT, COLORMAP_TWILIGHT_SHIFTED, COLORMAP_TURBO, COLORMAP_DEEPGREEN]"""
|
|
# Plain int constants with no enum alias in this stub; grouped by prefix below.

# Inpainting algorithms and photo-module filters.
INPAINT_NS: int
INPAINT_TELEA: int
LDR_SIZE: int
RECURS_FILTER: int
NORMCONV_FILTER: int
# DC1394 (FireWire) capture properties.
CAP_PROP_DC1394_OFF: int
CAP_PROP_DC1394_MODE_MANUAL: int
CAP_PROP_DC1394_MODE_AUTO: int
CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO: int
CAP_PROP_DC1394_MAX: int
# OpenNI / OpenNI2 depth-camera generators, properties and data channels.
CAP_OPENNI_DEPTH_GENERATOR: int
CAP_OPENNI_IMAGE_GENERATOR: int
CAP_OPENNI_IR_GENERATOR: int
CAP_OPENNI_GENERATORS_MASK: int
CAP_PROP_OPENNI_OUTPUT_MODE: int
CAP_PROP_OPENNI_FRAME_MAX_DEPTH: int
CAP_PROP_OPENNI_BASELINE: int
CAP_PROP_OPENNI_FOCAL_LENGTH: int
CAP_PROP_OPENNI_REGISTRATION: int
CAP_PROP_OPENNI_REGISTRATION_ON: int
CAP_PROP_OPENNI_APPROX_FRAME_SYNC: int
CAP_PROP_OPENNI_MAX_BUFFER_SIZE: int
CAP_PROP_OPENNI_CIRCLE_BUFFER: int
CAP_PROP_OPENNI_MAX_TIME_DURATION: int
CAP_PROP_OPENNI_GENERATOR_PRESENT: int
CAP_PROP_OPENNI2_SYNC: int
CAP_PROP_OPENNI2_MIRROR: int
CAP_OPENNI_IMAGE_GENERATOR_PRESENT: int
CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE: int
CAP_OPENNI_DEPTH_GENERATOR_PRESENT: int
CAP_OPENNI_DEPTH_GENERATOR_BASELINE: int
CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH: int
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION: int
CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON: int
CAP_OPENNI_IR_GENERATOR_PRESENT: int
CAP_OPENNI_DEPTH_MAP: int
CAP_OPENNI_POINT_CLOUD_MAP: int
CAP_OPENNI_DISPARITY_MAP: int
CAP_OPENNI_DISPARITY_MAP_32F: int
CAP_OPENNI_VALID_DEPTH_MASK: int
CAP_OPENNI_BGR_IMAGE: int
CAP_OPENNI_GRAY_IMAGE: int
CAP_OPENNI_IR_IMAGE: int
CAP_OPENNI_VGA_30HZ: int
CAP_OPENNI_SXGA_15HZ: int
CAP_OPENNI_SXGA_30HZ: int
CAP_OPENNI_QVGA_30HZ: int
CAP_OPENNI_QVGA_60HZ: int
# GStreamer backend property.
CAP_PROP_GSTREAMER_QUEUE_LENGTH: int
# PvAPI (Prosilica GigE) capture properties and pixel formats.
CAP_PROP_PVAPI_MULTICASTIP: int
CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE: int
CAP_PROP_PVAPI_DECIMATIONHORIZONTAL: int
CAP_PROP_PVAPI_DECIMATIONVERTICAL: int
CAP_PROP_PVAPI_BINNINGX: int
CAP_PROP_PVAPI_BINNINGY: int
CAP_PROP_PVAPI_PIXELFORMAT: int
CAP_PVAPI_FSTRIGMODE_FREERUN: int
CAP_PVAPI_FSTRIGMODE_SYNCIN1: int
CAP_PVAPI_FSTRIGMODE_SYNCIN2: int
CAP_PVAPI_FSTRIGMODE_FIXEDRATE: int
CAP_PVAPI_FSTRIGMODE_SOFTWARE: int
CAP_PVAPI_DECIMATION_OFF: int
CAP_PVAPI_DECIMATION_2OUTOF4: int
CAP_PVAPI_DECIMATION_2OUTOF8: int
CAP_PVAPI_DECIMATION_2OUTOF16: int
CAP_PVAPI_PIXELFORMAT_MONO8: int
CAP_PVAPI_PIXELFORMAT_MONO16: int
CAP_PVAPI_PIXELFORMAT_BAYER8: int
CAP_PVAPI_PIXELFORMAT_BAYER16: int
CAP_PVAPI_PIXELFORMAT_RGB24: int
CAP_PVAPI_PIXELFORMAT_BGR24: int
CAP_PVAPI_PIXELFORMAT_RGBA32: int
CAP_PVAPI_PIXELFORMAT_BGRA32: int
# XIMEA (XI API) camera properties.
CAP_PROP_XI_DOWNSAMPLING: int
CAP_PROP_XI_DATA_FORMAT: int
CAP_PROP_XI_OFFSET_X: int
CAP_PROP_XI_OFFSET_Y: int
CAP_PROP_XI_TRG_SOURCE: int
CAP_PROP_XI_TRG_SOFTWARE: int
CAP_PROP_XI_GPI_SELECTOR: int
CAP_PROP_XI_GPI_MODE: int
CAP_PROP_XI_GPI_LEVEL: int
CAP_PROP_XI_GPO_SELECTOR: int
CAP_PROP_XI_GPO_MODE: int
CAP_PROP_XI_LED_SELECTOR: int
CAP_PROP_XI_LED_MODE: int
CAP_PROP_XI_MANUAL_WB: int
CAP_PROP_XI_AUTO_WB: int
CAP_PROP_XI_AEAG: int
CAP_PROP_XI_EXP_PRIORITY: int
CAP_PROP_XI_AE_MAX_LIMIT: int
CAP_PROP_XI_AG_MAX_LIMIT: int
CAP_PROP_XI_AEAG_LEVEL: int
CAP_PROP_XI_TIMEOUT: int
CAP_PROP_XI_EXPOSURE: int
CAP_PROP_XI_EXPOSURE_BURST_COUNT: int
CAP_PROP_XI_GAIN_SELECTOR: int
CAP_PROP_XI_GAIN: int
CAP_PROP_XI_DOWNSAMPLING_TYPE: int
CAP_PROP_XI_BINNING_SELECTOR: int
CAP_PROP_XI_BINNING_VERTICAL: int
CAP_PROP_XI_BINNING_HORIZONTAL: int
CAP_PROP_XI_BINNING_PATTERN: int
CAP_PROP_XI_DECIMATION_SELECTOR: int
CAP_PROP_XI_DECIMATION_VERTICAL: int
CAP_PROP_XI_DECIMATION_HORIZONTAL: int
CAP_PROP_XI_DECIMATION_PATTERN: int
CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR: int
CAP_PROP_XI_TEST_PATTERN: int
CAP_PROP_XI_IMAGE_DATA_FORMAT: int
CAP_PROP_XI_SHUTTER_TYPE: int
CAP_PROP_XI_SENSOR_TAPS: int
CAP_PROP_XI_AEAG_ROI_OFFSET_X: int
CAP_PROP_XI_AEAG_ROI_OFFSET_Y: int
CAP_PROP_XI_AEAG_ROI_WIDTH: int
CAP_PROP_XI_AEAG_ROI_HEIGHT: int
CAP_PROP_XI_BPC: int
CAP_PROP_XI_WB_KR: int
CAP_PROP_XI_WB_KG: int
CAP_PROP_XI_WB_KB: int
CAP_PROP_XI_WIDTH: int
CAP_PROP_XI_HEIGHT: int
CAP_PROP_XI_REGION_SELECTOR: int
CAP_PROP_XI_REGION_MODE: int
CAP_PROP_XI_LIMIT_BANDWIDTH: int
CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH: int
CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH: int
CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH: int
CAP_PROP_XI_OUTPUT_DATA_PACKING: int
CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE: int
CAP_PROP_XI_IS_COOLED: int
CAP_PROP_XI_COOLING: int
CAP_PROP_XI_TARGET_TEMP: int
CAP_PROP_XI_CHIP_TEMP: int
CAP_PROP_XI_HOUS_TEMP: int
CAP_PROP_XI_HOUS_BACK_SIDE_TEMP: int
CAP_PROP_XI_SENSOR_BOARD_TEMP: int
CAP_PROP_XI_CMS: int
CAP_PROP_XI_APPLY_CMS: int
CAP_PROP_XI_IMAGE_IS_COLOR: int
CAP_PROP_XI_COLOR_FILTER_ARRAY: int
CAP_PROP_XI_GAMMAY: int
CAP_PROP_XI_GAMMAC: int
CAP_PROP_XI_SHARPNESS: int
CAP_PROP_XI_CC_MATRIX_00: int
CAP_PROP_XI_CC_MATRIX_01: int
CAP_PROP_XI_CC_MATRIX_02: int
CAP_PROP_XI_CC_MATRIX_03: int
CAP_PROP_XI_CC_MATRIX_10: int
CAP_PROP_XI_CC_MATRIX_11: int
CAP_PROP_XI_CC_MATRIX_12: int
CAP_PROP_XI_CC_MATRIX_13: int
CAP_PROP_XI_CC_MATRIX_20: int
CAP_PROP_XI_CC_MATRIX_21: int
CAP_PROP_XI_CC_MATRIX_22: int
CAP_PROP_XI_CC_MATRIX_23: int
CAP_PROP_XI_CC_MATRIX_30: int
CAP_PROP_XI_CC_MATRIX_31: int
CAP_PROP_XI_CC_MATRIX_32: int
CAP_PROP_XI_CC_MATRIX_33: int
CAP_PROP_XI_DEFAULT_CC_MATRIX: int
CAP_PROP_XI_TRG_SELECTOR: int
CAP_PROP_XI_ACQ_FRAME_BURST_COUNT: int
CAP_PROP_XI_DEBOUNCE_EN: int
CAP_PROP_XI_DEBOUNCE_T0: int
CAP_PROP_XI_DEBOUNCE_T1: int
CAP_PROP_XI_DEBOUNCE_POL: int
CAP_PROP_XI_LENS_MODE: int
CAP_PROP_XI_LENS_APERTURE_VALUE: int
CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE: int
CAP_PROP_XI_LENS_FOCUS_MOVE: int
CAP_PROP_XI_LENS_FOCUS_DISTANCE: int
CAP_PROP_XI_LENS_FOCAL_LENGTH: int
CAP_PROP_XI_LENS_FEATURE_SELECTOR: int
CAP_PROP_XI_LENS_FEATURE: int
CAP_PROP_XI_DEVICE_MODEL_ID: int
CAP_PROP_XI_DEVICE_SN: int
CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA: int
CAP_PROP_XI_IMAGE_PAYLOAD_SIZE: int
CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT: int
CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ: int
CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX: int
CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT: int
CAP_PROP_XI_FRAMERATE: int
CAP_PROP_XI_COUNTER_SELECTOR: int
CAP_PROP_XI_COUNTER_VALUE: int
CAP_PROP_XI_ACQ_TIMING_MODE: int
CAP_PROP_XI_AVAILABLE_BANDWIDTH: int
CAP_PROP_XI_BUFFER_POLICY: int
CAP_PROP_XI_LUT_EN: int
CAP_PROP_XI_LUT_INDEX: int
CAP_PROP_XI_LUT_VALUE: int
CAP_PROP_XI_TRG_DELAY: int
CAP_PROP_XI_TS_RST_MODE: int
CAP_PROP_XI_TS_RST_SOURCE: int
CAP_PROP_XI_IS_DEVICE_EXIST: int
CAP_PROP_XI_ACQ_BUFFER_SIZE: int
CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT: int
CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE: int
CAP_PROP_XI_BUFFERS_QUEUE_SIZE: int
CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT: int
CAP_PROP_XI_RECENT_FRAME: int
CAP_PROP_XI_DEVICE_RESET: int
CAP_PROP_XI_COLUMN_FPN_CORRECTION: int
CAP_PROP_XI_ROW_FPN_CORRECTION: int
CAP_PROP_XI_SENSOR_MODE: int
CAP_PROP_XI_HDR: int
CAP_PROP_XI_HDR_KNEEPOINT_COUNT: int
CAP_PROP_XI_HDR_T1: int
CAP_PROP_XI_HDR_T2: int
CAP_PROP_XI_KNEEPOINT1: int
CAP_PROP_XI_KNEEPOINT2: int
CAP_PROP_XI_IMAGE_BLACK_LEVEL: int
CAP_PROP_XI_HW_REVISION: int
CAP_PROP_XI_DEBUG_LEVEL: int
CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION: int
CAP_PROP_XI_FFS_FILE_ID: int
CAP_PROP_XI_FFS_FILE_SIZE: int
CAP_PROP_XI_FREE_FFS_SIZE: int
CAP_PROP_XI_USED_FFS_SIZE: int
CAP_PROP_XI_FFS_ACCESS_KEY: int
CAP_PROP_XI_SENSOR_FEATURE_SELECTOR: int
CAP_PROP_XI_SENSOR_FEATURE_VALUE: int
# Aravis, Android and iOS device capture properties.
CAP_PROP_ARAVIS_AUTOTRIGGER: int
CAP_PROP_ANDROID_DEVICE_TORCH: int
CAP_PROP_IOS_DEVICE_FOCUS: int
CAP_PROP_IOS_DEVICE_EXPOSURE: int
CAP_PROP_IOS_DEVICE_FLASH: int
CAP_PROP_IOS_DEVICE_WHITEBALANCE: int
CAP_PROP_IOS_DEVICE_TORCH: int
# Smartek Giganetix GigE camera properties.
# NOTE: "HEIGH" spellings below mirror the upstream OpenCV enum names; do not "fix".
CAP_PROP_GIGA_FRAME_OFFSET_X: int
CAP_PROP_GIGA_FRAME_OFFSET_Y: int
CAP_PROP_GIGA_FRAME_WIDTH_MAX: int
CAP_PROP_GIGA_FRAME_HEIGH_MAX: int
CAP_PROP_GIGA_FRAME_SENS_WIDTH: int
CAP_PROP_GIGA_FRAME_SENS_HEIGH: int
# Intel Perceptual Computing SDK properties, generators and data channels.
CAP_PROP_INTELPERC_PROFILE_COUNT: int
CAP_PROP_INTELPERC_PROFILE_IDX: int
CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE: int
CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE: int
CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD: int
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ: int
CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT: int
CAP_INTELPERC_DEPTH_GENERATOR: int
CAP_INTELPERC_IMAGE_GENERATOR: int
CAP_INTELPERC_IR_GENERATOR: int
CAP_INTELPERC_GENERATORS_MASK: int
CAP_INTELPERC_DEPTH_MAP: int
CAP_INTELPERC_UVDEPTH_MAP: int
CAP_INTELPERC_IR_MAP: int
CAP_INTELPERC_IMAGE: int
# gPhoto2 (tethered camera) capture properties.
CAP_PROP_GPHOTO2_PREVIEW: int
CAP_PROP_GPHOTO2_WIDGET_ENUMERATE: int
CAP_PROP_GPHOTO2_RELOAD_CONFIG: int
CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE: int
CAP_PROP_GPHOTO2_COLLECT_MSGS: int
CAP_PROP_GPHOTO2_FLUSH_MSGS: int
CAP_PROP_SPEED: int
CAP_PROP_APERTURE: int
CAP_PROP_EXPOSUREPROGRAM: int
CAP_PROP_VIEWFINDER: int
# Image-sequence backend properties.
CAP_PROP_IMAGES_BASE: int
CAP_PROP_IMAGES_LAST: int
# Robust estimation methods (e.g. for findHomography / findFundamentalMat).
LMEDS: int
RANSAC: int
RHO: int
USAC_DEFAULT: int
USAC_PARALLEL: int
USAC_FM_8PTS: int
USAC_FAST: int
USAC_ACCURATE: int
USAC_PROSAC: int
USAC_MAGSAC: int
# Chessboard / circles-grid detection flags (findChessboardCorners etc.).
CALIB_CB_ADAPTIVE_THRESH: int
CALIB_CB_NORMALIZE_IMAGE: int
CALIB_CB_FILTER_QUADS: int
CALIB_CB_FAST_CHECK: int
CALIB_CB_EXHAUSTIVE: int
CALIB_CB_ACCURACY: int
CALIB_CB_LARGER: int
CALIB_CB_MARKER: int
CALIB_CB_PLAIN: int
CALIB_CB_SYMMETRIC_GRID: int
CALIB_CB_ASYMMETRIC_GRID: int
CALIB_CB_CLUSTERING: int
# Camera calibration flags (calibrateCamera / stereoCalibrate).
CALIB_NINTRINSIC: int
CALIB_USE_INTRINSIC_GUESS: int
CALIB_FIX_ASPECT_RATIO: int
CALIB_FIX_PRINCIPAL_POINT: int
CALIB_ZERO_TANGENT_DIST: int
CALIB_FIX_FOCAL_LENGTH: int
CALIB_FIX_K1: int
CALIB_FIX_K2: int
CALIB_FIX_K3: int
CALIB_FIX_K4: int
CALIB_FIX_K5: int
CALIB_FIX_K6: int
CALIB_RATIONAL_MODEL: int
CALIB_THIN_PRISM_MODEL: int
CALIB_FIX_S1_S2_S3_S4: int
CALIB_TILTED_MODEL: int
CALIB_FIX_TAUX_TAUY: int
CALIB_USE_QR: int
CALIB_FIX_TANGENT_DIST: int
CALIB_FIX_INTRINSIC: int
CALIB_SAME_FOCAL_LENGTH: int
CALIB_ZERO_DISPARITY: int
CALIB_USE_LU: int
CALIB_USE_EXTRINSIC_GUESS: int
# Fundamental-matrix estimation methods (findFundamentalMat).
FM_7POINT: int
FM_8POINT: int
FM_LMEDS: int
FM_RANSAC: int
# Cascade-classifier detection flags.
CASCADE_DO_CANNY_PRUNING: int
CASCADE_SCALE_IMAGE: int
CASCADE_FIND_BIGGEST_OBJECT: int
CASCADE_DO_ROUGH_SEARCH: int
# Optical-flow flags (calcOpticalFlowPyrLK / calcOpticalFlowFarneback).
OPTFLOW_USE_INITIAL_FLOW: int
OPTFLOW_LK_GET_MIN_EIGENVALS: int
OPTFLOW_FARNEBACK_GAUSSIAN: int
# Motion models for findTransformECC.
MOTION_TRANSLATION: int
MOTION_EUCLIDEAN: int
MOTION_AFFINE: int
MOTION_HOMOGRAPHY: int
|
|
# SeamlessCloneFlags: cloning modes for seamlessClone (photo module).
NORMAL_CLONE: int
MIXED_CLONE: int
MONOCHROME_TRANSFER: int
NORMAL_CLONE_WIDE: int
MIXED_CLONE_WIDE: int
MONOCHROME_TRANSFER_WIDE: int
SeamlessCloneFlags = int
"""One of [NORMAL_CLONE, MIXED_CLONE, MONOCHROME_TRANSFER, NORMAL_CLONE_WIDE, MIXED_CLONE_WIDE, MONOCHROME_TRANSFER_WIDE]"""


# DrawMatchesFlags: drawing flags for drawMatches / drawKeypoints.
# Each value is exposed under two spellings (CamelCase_ and UPPER_SNAKE aliases).
DrawMatchesFlags_DEFAULT: int
DRAW_MATCHES_FLAGS_DEFAULT: int
DrawMatchesFlags_DRAW_OVER_OUTIMG: int
DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG: int
DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS: int
DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS: int
DrawMatchesFlags_DRAW_RICH_KEYPOINTS: int
DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS: int
DrawMatchesFlags = int
"""One of [DrawMatchesFlags_DEFAULT, DRAW_MATCHES_FLAGS_DEFAULT, DrawMatchesFlags_DRAW_OVER_OUTIMG, DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG, DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS, DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS, DrawMatchesFlags_DRAW_RICH_KEYPOINTS, DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS]"""


# ImreadModes: IMREAD_* flags for imread / imdecode.
IMREAD_UNCHANGED: int
IMREAD_GRAYSCALE: int
IMREAD_COLOR_BGR: int
IMREAD_COLOR: int
IMREAD_ANYDEPTH: int
IMREAD_ANYCOLOR: int
IMREAD_LOAD_GDAL: int
IMREAD_REDUCED_GRAYSCALE_2: int
IMREAD_REDUCED_COLOR_2: int
IMREAD_REDUCED_GRAYSCALE_4: int
IMREAD_REDUCED_COLOR_4: int
IMREAD_REDUCED_GRAYSCALE_8: int
IMREAD_REDUCED_COLOR_8: int
IMREAD_IGNORE_ORIENTATION: int
IMREAD_COLOR_RGB: int
ImreadModes = int
"""One of [IMREAD_UNCHANGED, IMREAD_GRAYSCALE, IMREAD_COLOR_BGR, IMREAD_COLOR, IMREAD_ANYDEPTH, IMREAD_ANYCOLOR, IMREAD_LOAD_GDAL, IMREAD_REDUCED_GRAYSCALE_2, IMREAD_REDUCED_COLOR_2, IMREAD_REDUCED_GRAYSCALE_4, IMREAD_REDUCED_COLOR_4, IMREAD_REDUCED_GRAYSCALE_8, IMREAD_REDUCED_COLOR_8, IMREAD_IGNORE_ORIENTATION, IMREAD_COLOR_RGB]"""
|
|
# ImwriteFlags: IMWRITE_* parameter keys for imwrite / imencode.
IMWRITE_JPEG_QUALITY: int
IMWRITE_JPEG_PROGRESSIVE: int
IMWRITE_JPEG_OPTIMIZE: int
IMWRITE_JPEG_RST_INTERVAL: int
IMWRITE_JPEG_LUMA_QUALITY: int
IMWRITE_JPEG_CHROMA_QUALITY: int
IMWRITE_JPEG_SAMPLING_FACTOR: int
IMWRITE_PNG_COMPRESSION: int
IMWRITE_PNG_STRATEGY: int
IMWRITE_PNG_BILEVEL: int
IMWRITE_PXM_BINARY: int
IMWRITE_EXR_TYPE: int
IMWRITE_EXR_COMPRESSION: int
IMWRITE_EXR_DWA_COMPRESSION_LEVEL: int
IMWRITE_WEBP_QUALITY: int
IMWRITE_HDR_COMPRESSION: int
IMWRITE_PAM_TUPLETYPE: int
IMWRITE_TIFF_RESUNIT: int
IMWRITE_TIFF_XDPI: int
IMWRITE_TIFF_YDPI: int
IMWRITE_TIFF_COMPRESSION: int
IMWRITE_TIFF_ROWSPERSTRIP: int
IMWRITE_TIFF_PREDICTOR: int
IMWRITE_JPEG2000_COMPRESSION_X1000: int
IMWRITE_AVIF_QUALITY: int
IMWRITE_AVIF_DEPTH: int
IMWRITE_AVIF_SPEED: int
IMWRITE_JPEGXL_QUALITY: int
IMWRITE_JPEGXL_EFFORT: int
IMWRITE_JPEGXL_DISTANCE: int
IMWRITE_JPEGXL_DECODING_SPEED: int
IMWRITE_GIF_LOOP: int
IMWRITE_GIF_SPEED: int
IMWRITE_GIF_QUALITY: int
IMWRITE_GIF_DITHER: int
IMWRITE_GIF_TRANSPARENCY: int
IMWRITE_GIF_COLORTABLE: int
ImwriteFlags = int
"""One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, IMWRITE_JPEG_RST_INTERVAL, IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, IMWRITE_JPEG_SAMPLING_FACTOR, IMWRITE_PNG_COMPRESSION, IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, IMWRITE_EXR_COMPRESSION, IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, IMWRITE_PAM_TUPLETYPE, IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, IMWRITE_TIFF_ROWSPERSTRIP, IMWRITE_TIFF_PREDICTOR, IMWRITE_JPEG2000_COMPRESSION_X1000, IMWRITE_AVIF_QUALITY, IMWRITE_AVIF_DEPTH, IMWRITE_AVIF_SPEED, IMWRITE_JPEGXL_QUALITY, IMWRITE_JPEGXL_EFFORT, IMWRITE_JPEGXL_DISTANCE, IMWRITE_JPEGXL_DECODING_SPEED, IMWRITE_GIF_LOOP, IMWRITE_GIF_SPEED, IMWRITE_GIF_QUALITY, IMWRITE_GIF_DITHER, IMWRITE_GIF_TRANSPARENCY, IMWRITE_GIF_COLORTABLE]"""


# ImwriteJPEGSamplingFactorParams: values for IMWRITE_JPEG_SAMPLING_FACTOR.
IMWRITE_JPEG_SAMPLING_FACTOR_411: int
IMWRITE_JPEG_SAMPLING_FACTOR_420: int
IMWRITE_JPEG_SAMPLING_FACTOR_422: int
IMWRITE_JPEG_SAMPLING_FACTOR_440: int
IMWRITE_JPEG_SAMPLING_FACTOR_444: int
ImwriteJPEGSamplingFactorParams = int
"""One of [IMWRITE_JPEG_SAMPLING_FACTOR_411, IMWRITE_JPEG_SAMPLING_FACTOR_420, IMWRITE_JPEG_SAMPLING_FACTOR_422, IMWRITE_JPEG_SAMPLING_FACTOR_440, IMWRITE_JPEG_SAMPLING_FACTOR_444]"""


# ImwriteTiffCompressionFlags: values for IMWRITE_TIFF_COMPRESSION.
IMWRITE_TIFF_COMPRESSION_NONE: int
IMWRITE_TIFF_COMPRESSION_CCITTRLE: int
IMWRITE_TIFF_COMPRESSION_CCITTFAX3: int
IMWRITE_TIFF_COMPRESSION_CCITT_T4: int
IMWRITE_TIFF_COMPRESSION_CCITTFAX4: int
IMWRITE_TIFF_COMPRESSION_CCITT_T6: int
IMWRITE_TIFF_COMPRESSION_LZW: int
IMWRITE_TIFF_COMPRESSION_OJPEG: int
IMWRITE_TIFF_COMPRESSION_JPEG: int
IMWRITE_TIFF_COMPRESSION_T85: int
IMWRITE_TIFF_COMPRESSION_T43: int
IMWRITE_TIFF_COMPRESSION_NEXT: int
IMWRITE_TIFF_COMPRESSION_CCITTRLEW: int
IMWRITE_TIFF_COMPRESSION_PACKBITS: int
IMWRITE_TIFF_COMPRESSION_THUNDERSCAN: int
IMWRITE_TIFF_COMPRESSION_IT8CTPAD: int
IMWRITE_TIFF_COMPRESSION_IT8LW: int
IMWRITE_TIFF_COMPRESSION_IT8MP: int
IMWRITE_TIFF_COMPRESSION_IT8BL: int
IMWRITE_TIFF_COMPRESSION_PIXARFILM: int
IMWRITE_TIFF_COMPRESSION_PIXARLOG: int
IMWRITE_TIFF_COMPRESSION_DEFLATE: int
IMWRITE_TIFF_COMPRESSION_ADOBE_DEFLATE: int
IMWRITE_TIFF_COMPRESSION_DCS: int
IMWRITE_TIFF_COMPRESSION_JBIG: int
IMWRITE_TIFF_COMPRESSION_SGILOG: int
IMWRITE_TIFF_COMPRESSION_SGILOG24: int
IMWRITE_TIFF_COMPRESSION_JP2000: int
IMWRITE_TIFF_COMPRESSION_LERC: int
IMWRITE_TIFF_COMPRESSION_LZMA: int
IMWRITE_TIFF_COMPRESSION_ZSTD: int
IMWRITE_TIFF_COMPRESSION_WEBP: int
IMWRITE_TIFF_COMPRESSION_JXL: int
ImwriteTiffCompressionFlags = int
"""One of [IMWRITE_TIFF_COMPRESSION_NONE, IMWRITE_TIFF_COMPRESSION_CCITTRLE, IMWRITE_TIFF_COMPRESSION_CCITTFAX3, IMWRITE_TIFF_COMPRESSION_CCITT_T4, IMWRITE_TIFF_COMPRESSION_CCITTFAX4, IMWRITE_TIFF_COMPRESSION_CCITT_T6, IMWRITE_TIFF_COMPRESSION_LZW, IMWRITE_TIFF_COMPRESSION_OJPEG, IMWRITE_TIFF_COMPRESSION_JPEG, IMWRITE_TIFF_COMPRESSION_T85, IMWRITE_TIFF_COMPRESSION_T43, IMWRITE_TIFF_COMPRESSION_NEXT, IMWRITE_TIFF_COMPRESSION_CCITTRLEW, IMWRITE_TIFF_COMPRESSION_PACKBITS, IMWRITE_TIFF_COMPRESSION_THUNDERSCAN, IMWRITE_TIFF_COMPRESSION_IT8CTPAD, IMWRITE_TIFF_COMPRESSION_IT8LW, IMWRITE_TIFF_COMPRESSION_IT8MP, IMWRITE_TIFF_COMPRESSION_IT8BL, IMWRITE_TIFF_COMPRESSION_PIXARFILM, IMWRITE_TIFF_COMPRESSION_PIXARLOG, IMWRITE_TIFF_COMPRESSION_DEFLATE, IMWRITE_TIFF_COMPRESSION_ADOBE_DEFLATE, IMWRITE_TIFF_COMPRESSION_DCS, IMWRITE_TIFF_COMPRESSION_JBIG, IMWRITE_TIFF_COMPRESSION_SGILOG, IMWRITE_TIFF_COMPRESSION_SGILOG24, IMWRITE_TIFF_COMPRESSION_JP2000, IMWRITE_TIFF_COMPRESSION_LERC, IMWRITE_TIFF_COMPRESSION_LZMA, IMWRITE_TIFF_COMPRESSION_ZSTD, IMWRITE_TIFF_COMPRESSION_WEBP, IMWRITE_TIFF_COMPRESSION_JXL]"""


# ImwriteTiffPredictorFlags: values for IMWRITE_TIFF_PREDICTOR.
IMWRITE_TIFF_PREDICTOR_NONE: int
IMWRITE_TIFF_PREDICTOR_HORIZONTAL: int
IMWRITE_TIFF_PREDICTOR_FLOATINGPOINT: int
ImwriteTiffPredictorFlags = int
"""One of [IMWRITE_TIFF_PREDICTOR_NONE, IMWRITE_TIFF_PREDICTOR_HORIZONTAL, IMWRITE_TIFF_PREDICTOR_FLOATINGPOINT]"""


# ImwriteEXRTypeFlags: values for IMWRITE_EXR_TYPE.
IMWRITE_EXR_TYPE_HALF: int
IMWRITE_EXR_TYPE_FLOAT: int
ImwriteEXRTypeFlags = int
"""One of [IMWRITE_EXR_TYPE_HALF, IMWRITE_EXR_TYPE_FLOAT]"""


# ImwriteEXRCompressionFlags: values for IMWRITE_EXR_COMPRESSION.
IMWRITE_EXR_COMPRESSION_NO: int
IMWRITE_EXR_COMPRESSION_RLE: int
IMWRITE_EXR_COMPRESSION_ZIPS: int
IMWRITE_EXR_COMPRESSION_ZIP: int
IMWRITE_EXR_COMPRESSION_PIZ: int
IMWRITE_EXR_COMPRESSION_PXR24: int
IMWRITE_EXR_COMPRESSION_B44: int
IMWRITE_EXR_COMPRESSION_B44A: int
IMWRITE_EXR_COMPRESSION_DWAA: int
IMWRITE_EXR_COMPRESSION_DWAB: int
ImwriteEXRCompressionFlags = int
"""One of [IMWRITE_EXR_COMPRESSION_NO, IMWRITE_EXR_COMPRESSION_RLE, IMWRITE_EXR_COMPRESSION_ZIPS, IMWRITE_EXR_COMPRESSION_ZIP, IMWRITE_EXR_COMPRESSION_PIZ, IMWRITE_EXR_COMPRESSION_PXR24, IMWRITE_EXR_COMPRESSION_B44, IMWRITE_EXR_COMPRESSION_B44A, IMWRITE_EXR_COMPRESSION_DWAA, IMWRITE_EXR_COMPRESSION_DWAB]"""


# ImwritePNGFlags: values for IMWRITE_PNG_STRATEGY.
IMWRITE_PNG_STRATEGY_DEFAULT: int
IMWRITE_PNG_STRATEGY_FILTERED: int
IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY: int
IMWRITE_PNG_STRATEGY_RLE: int
IMWRITE_PNG_STRATEGY_FIXED: int
ImwritePNGFlags = int
"""One of [IMWRITE_PNG_STRATEGY_DEFAULT, IMWRITE_PNG_STRATEGY_FILTERED, IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, IMWRITE_PNG_STRATEGY_RLE, IMWRITE_PNG_STRATEGY_FIXED]"""


# ImwritePAMFlags: values for IMWRITE_PAM_TUPLETYPE.
IMWRITE_PAM_FORMAT_NULL: int
IMWRITE_PAM_FORMAT_BLACKANDWHITE: int
IMWRITE_PAM_FORMAT_GRAYSCALE: int
IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA: int
IMWRITE_PAM_FORMAT_RGB: int
IMWRITE_PAM_FORMAT_RGB_ALPHA: int
ImwritePAMFlags = int
"""One of [IMWRITE_PAM_FORMAT_NULL, IMWRITE_PAM_FORMAT_BLACKANDWHITE, IMWRITE_PAM_FORMAT_GRAYSCALE, IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA, IMWRITE_PAM_FORMAT_RGB, IMWRITE_PAM_FORMAT_RGB_ALPHA]"""


# ImwriteHDRCompressionFlags: values for IMWRITE_HDR_COMPRESSION.
IMWRITE_HDR_COMPRESSION_NONE: int
IMWRITE_HDR_COMPRESSION_RLE: int
ImwriteHDRCompressionFlags = int
"""One of [IMWRITE_HDR_COMPRESSION_NONE, IMWRITE_HDR_COMPRESSION_RLE]"""


# ImwriteGIFCompressionFlags: GIF dithering / color-table-size options.
IMWRITE_GIF_FAST_NO_DITHER: int
IMWRITE_GIF_FAST_FLOYD_DITHER: int
IMWRITE_GIF_COLORTABLE_SIZE_8: int
IMWRITE_GIF_COLORTABLE_SIZE_16: int
IMWRITE_GIF_COLORTABLE_SIZE_32: int
IMWRITE_GIF_COLORTABLE_SIZE_64: int
IMWRITE_GIF_COLORTABLE_SIZE_128: int
IMWRITE_GIF_COLORTABLE_SIZE_256: int
ImwriteGIFCompressionFlags = int
"""One of [IMWRITE_GIF_FAST_NO_DITHER, IMWRITE_GIF_FAST_FLOYD_DITHER, IMWRITE_GIF_COLORTABLE_SIZE_8, IMWRITE_GIF_COLORTABLE_SIZE_16, IMWRITE_GIF_COLORTABLE_SIZE_32, IMWRITE_GIF_COLORTABLE_SIZE_64, IMWRITE_GIF_COLORTABLE_SIZE_128, IMWRITE_GIF_COLORTABLE_SIZE_256]"""
|
|
# VideoCaptureAPIs: CAP_* backend identifiers for VideoCapture / VideoWriter.
CAP_ANY: int
CAP_VFW: int
CAP_V4L: int
CAP_V4L2: int
CAP_FIREWIRE: int
CAP_FIREWARE: int
CAP_IEEE1394: int
CAP_DC1394: int
CAP_CMU1394: int
CAP_QT: int
CAP_UNICAP: int
CAP_DSHOW: int
CAP_PVAPI: int
CAP_OPENNI: int
CAP_OPENNI_ASUS: int
CAP_ANDROID: int
CAP_XIAPI: int
CAP_AVFOUNDATION: int
CAP_GIGANETIX: int
CAP_MSMF: int
CAP_WINRT: int
CAP_INTELPERC: int
CAP_REALSENSE: int
CAP_OPENNI2: int
CAP_OPENNI2_ASUS: int
CAP_OPENNI2_ASTRA: int
CAP_GPHOTO2: int
CAP_GSTREAMER: int
CAP_FFMPEG: int
CAP_IMAGES: int
CAP_ARAVIS: int
CAP_OPENCV_MJPEG: int
CAP_INTEL_MFX: int
CAP_XINE: int
CAP_UEYE: int
CAP_OBSENSOR: int
VideoCaptureAPIs = int
"""One of [CAP_ANY, CAP_VFW, CAP_V4L, CAP_V4L2, CAP_FIREWIRE, CAP_FIREWARE, CAP_IEEE1394, CAP_DC1394, CAP_CMU1394, CAP_QT, CAP_UNICAP, CAP_DSHOW, CAP_PVAPI, CAP_OPENNI, CAP_OPENNI_ASUS, CAP_ANDROID, CAP_XIAPI, CAP_AVFOUNDATION, CAP_GIGANETIX, CAP_MSMF, CAP_WINRT, CAP_INTELPERC, CAP_REALSENSE, CAP_OPENNI2, CAP_OPENNI2_ASUS, CAP_OPENNI2_ASTRA, CAP_GPHOTO2, CAP_GSTREAMER, CAP_FFMPEG, CAP_IMAGES, CAP_ARAVIS, CAP_OPENCV_MJPEG, CAP_INTEL_MFX, CAP_XINE, CAP_UEYE, CAP_OBSENSOR]"""


# VideoCaptureProperties: generic CAP_PROP_* ids for VideoCapture.get/set.
CAP_PROP_POS_MSEC: int
CAP_PROP_POS_FRAMES: int
CAP_PROP_POS_AVI_RATIO: int
CAP_PROP_FRAME_WIDTH: int
CAP_PROP_FRAME_HEIGHT: int
CAP_PROP_FPS: int
CAP_PROP_FOURCC: int
CAP_PROP_FRAME_COUNT: int
CAP_PROP_FORMAT: int
CAP_PROP_MODE: int
CAP_PROP_BRIGHTNESS: int
CAP_PROP_CONTRAST: int
CAP_PROP_SATURATION: int
CAP_PROP_HUE: int
CAP_PROP_GAIN: int
CAP_PROP_EXPOSURE: int
CAP_PROP_CONVERT_RGB: int
CAP_PROP_WHITE_BALANCE_BLUE_U: int
CAP_PROP_RECTIFICATION: int
CAP_PROP_MONOCHROME: int
CAP_PROP_SHARPNESS: int
CAP_PROP_AUTO_EXPOSURE: int
CAP_PROP_GAMMA: int
CAP_PROP_TEMPERATURE: int
CAP_PROP_TRIGGER: int
CAP_PROP_TRIGGER_DELAY: int
CAP_PROP_WHITE_BALANCE_RED_V: int
CAP_PROP_ZOOM: int
CAP_PROP_FOCUS: int
CAP_PROP_GUID: int
CAP_PROP_ISO_SPEED: int
CAP_PROP_BACKLIGHT: int
CAP_PROP_PAN: int
CAP_PROP_TILT: int
CAP_PROP_ROLL: int
CAP_PROP_IRIS: int
CAP_PROP_SETTINGS: int
CAP_PROP_BUFFERSIZE: int
CAP_PROP_AUTOFOCUS: int
CAP_PROP_SAR_NUM: int
CAP_PROP_SAR_DEN: int
CAP_PROP_BACKEND: int
CAP_PROP_CHANNEL: int
CAP_PROP_AUTO_WB: int
CAP_PROP_WB_TEMPERATURE: int
CAP_PROP_CODEC_PIXEL_FORMAT: int
CAP_PROP_BITRATE: int
CAP_PROP_ORIENTATION_META: int
CAP_PROP_ORIENTATION_AUTO: int
CAP_PROP_HW_ACCELERATION: int
CAP_PROP_HW_DEVICE: int
CAP_PROP_HW_ACCELERATION_USE_OPENCL: int
CAP_PROP_OPEN_TIMEOUT_MSEC: int
CAP_PROP_READ_TIMEOUT_MSEC: int
CAP_PROP_STREAM_OPEN_TIME_USEC: int
CAP_PROP_VIDEO_TOTAL_CHANNELS: int
CAP_PROP_VIDEO_STREAM: int
CAP_PROP_AUDIO_STREAM: int
CAP_PROP_AUDIO_POS: int
CAP_PROP_AUDIO_SHIFT_NSEC: int
CAP_PROP_AUDIO_DATA_DEPTH: int
CAP_PROP_AUDIO_SAMPLES_PER_SECOND: int
CAP_PROP_AUDIO_BASE_INDEX: int
CAP_PROP_AUDIO_TOTAL_CHANNELS: int
CAP_PROP_AUDIO_TOTAL_STREAMS: int
CAP_PROP_AUDIO_SYNCHRONIZE: int
CAP_PROP_LRF_HAS_KEY_FRAME: int
CAP_PROP_CODEC_EXTRADATA_INDEX: int
CAP_PROP_FRAME_TYPE: int
CAP_PROP_N_THREADS: int
CAP_PROP_PTS: int
CAP_PROP_DTS_DELAY: int
VideoCaptureProperties = int
"""One of [CAP_PROP_POS_MSEC, CAP_PROP_POS_FRAMES, CAP_PROP_POS_AVI_RATIO, CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS, CAP_PROP_FOURCC, CAP_PROP_FRAME_COUNT, CAP_PROP_FORMAT, CAP_PROP_MODE, CAP_PROP_BRIGHTNESS, CAP_PROP_CONTRAST, CAP_PROP_SATURATION, CAP_PROP_HUE, CAP_PROP_GAIN, CAP_PROP_EXPOSURE, CAP_PROP_CONVERT_RGB, CAP_PROP_WHITE_BALANCE_BLUE_U, CAP_PROP_RECTIFICATION, CAP_PROP_MONOCHROME, CAP_PROP_SHARPNESS, CAP_PROP_AUTO_EXPOSURE, CAP_PROP_GAMMA, CAP_PROP_TEMPERATURE, CAP_PROP_TRIGGER, CAP_PROP_TRIGGER_DELAY, CAP_PROP_WHITE_BALANCE_RED_V, CAP_PROP_ZOOM, CAP_PROP_FOCUS, CAP_PROP_GUID, CAP_PROP_ISO_SPEED, CAP_PROP_BACKLIGHT, CAP_PROP_PAN, CAP_PROP_TILT, CAP_PROP_ROLL, CAP_PROP_IRIS, CAP_PROP_SETTINGS, CAP_PROP_BUFFERSIZE, CAP_PROP_AUTOFOCUS, CAP_PROP_SAR_NUM, CAP_PROP_SAR_DEN, CAP_PROP_BACKEND, CAP_PROP_CHANNEL, CAP_PROP_AUTO_WB, CAP_PROP_WB_TEMPERATURE, CAP_PROP_CODEC_PIXEL_FORMAT, CAP_PROP_BITRATE, CAP_PROP_ORIENTATION_META, CAP_PROP_ORIENTATION_AUTO, CAP_PROP_HW_ACCELERATION, CAP_PROP_HW_DEVICE, CAP_PROP_HW_ACCELERATION_USE_OPENCL, CAP_PROP_OPEN_TIMEOUT_MSEC, CAP_PROP_READ_TIMEOUT_MSEC, CAP_PROP_STREAM_OPEN_TIME_USEC, CAP_PROP_VIDEO_TOTAL_CHANNELS, CAP_PROP_VIDEO_STREAM, CAP_PROP_AUDIO_STREAM, CAP_PROP_AUDIO_POS, CAP_PROP_AUDIO_SHIFT_NSEC, CAP_PROP_AUDIO_DATA_DEPTH, CAP_PROP_AUDIO_SAMPLES_PER_SECOND, CAP_PROP_AUDIO_BASE_INDEX, CAP_PROP_AUDIO_TOTAL_CHANNELS, CAP_PROP_AUDIO_TOTAL_STREAMS, CAP_PROP_AUDIO_SYNCHRONIZE, CAP_PROP_LRF_HAS_KEY_FRAME, CAP_PROP_CODEC_EXTRADATA_INDEX, CAP_PROP_FRAME_TYPE, CAP_PROP_N_THREADS, CAP_PROP_PTS, CAP_PROP_DTS_DELAY]"""


# VideoWriterProperties: VIDEOWRITER_PROP_* ids for VideoWriter.get/set.
VIDEOWRITER_PROP_QUALITY: int
VIDEOWRITER_PROP_FRAMEBYTES: int
VIDEOWRITER_PROP_NSTRIPES: int
VIDEOWRITER_PROP_IS_COLOR: int
VIDEOWRITER_PROP_DEPTH: int
VIDEOWRITER_PROP_HW_ACCELERATION: int
VIDEOWRITER_PROP_HW_DEVICE: int
VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL: int
VIDEOWRITER_PROP_RAW_VIDEO: int
VIDEOWRITER_PROP_KEY_INTERVAL: int
VIDEOWRITER_PROP_KEY_FLAG: int
VIDEOWRITER_PROP_PTS: int
VIDEOWRITER_PROP_DTS_DELAY: int
VideoWriterProperties = int
"""One of [VIDEOWRITER_PROP_QUALITY, VIDEOWRITER_PROP_FRAMEBYTES, VIDEOWRITER_PROP_NSTRIPES, VIDEOWRITER_PROP_IS_COLOR, VIDEOWRITER_PROP_DEPTH, VIDEOWRITER_PROP_HW_ACCELERATION, VIDEOWRITER_PROP_HW_DEVICE, VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL, VIDEOWRITER_PROP_RAW_VIDEO, VIDEOWRITER_PROP_KEY_INTERVAL, VIDEOWRITER_PROP_KEY_FLAG, VIDEOWRITER_PROP_PTS, VIDEOWRITER_PROP_DTS_DELAY]"""


# VideoAccelerationType: hardware acceleration backends for video I/O.
VIDEO_ACCELERATION_NONE: int
VIDEO_ACCELERATION_ANY: int
VIDEO_ACCELERATION_D3D11: int
VIDEO_ACCELERATION_VAAPI: int
VIDEO_ACCELERATION_MFX: int
VideoAccelerationType = int
"""One of [VIDEO_ACCELERATION_NONE, VIDEO_ACCELERATION_ANY, VIDEO_ACCELERATION_D3D11, VIDEO_ACCELERATION_VAAPI, VIDEO_ACCELERATION_MFX]"""


# VideoCaptureOBSensorDataType: data channels of the OBSENSOR backend.
CAP_OBSENSOR_DEPTH_MAP: int
CAP_OBSENSOR_BGR_IMAGE: int
CAP_OBSENSOR_IR_IMAGE: int
VideoCaptureOBSensorDataType = int
"""One of [CAP_OBSENSOR_DEPTH_MAP, CAP_OBSENSOR_BGR_IMAGE, CAP_OBSENSOR_IR_IMAGE]"""


# VideoCaptureOBSensorGenerators: OBSENSOR stream generators / mask.
CAP_OBSENSOR_DEPTH_GENERATOR: int
CAP_OBSENSOR_IMAGE_GENERATOR: int
CAP_OBSENSOR_IR_GENERATOR: int
CAP_OBSENSOR_GENERATORS_MASK: int
VideoCaptureOBSensorGenerators = int
"""One of [CAP_OBSENSOR_DEPTH_GENERATOR, CAP_OBSENSOR_IMAGE_GENERATOR, CAP_OBSENSOR_IR_GENERATOR, CAP_OBSENSOR_GENERATORS_MASK]"""


# VideoCaptureOBSensorProperties: OBSENSOR camera intrinsic properties.
CAP_PROP_OBSENSOR_INTRINSIC_FX: int
CAP_PROP_OBSENSOR_INTRINSIC_FY: int
CAP_PROP_OBSENSOR_INTRINSIC_CX: int
CAP_PROP_OBSENSOR_INTRINSIC_CY: int
VideoCaptureOBSensorProperties = int
"""One of [CAP_PROP_OBSENSOR_INTRINSIC_FX, CAP_PROP_OBSENSOR_INTRINSIC_FY, CAP_PROP_OBSENSOR_INTRINSIC_CX, CAP_PROP_OBSENSOR_INTRINSIC_CY]"""
|
|
# SolvePnPMethod: algorithms for solvePnP / solvePnPRansac.
SOLVEPNP_ITERATIVE: int
SOLVEPNP_EPNP: int
SOLVEPNP_P3P: int
SOLVEPNP_DLS: int
SOLVEPNP_UPNP: int
SOLVEPNP_AP3P: int
SOLVEPNP_IPPE: int
SOLVEPNP_IPPE_SQUARE: int
SOLVEPNP_SQPNP: int
SOLVEPNP_MAX_COUNT: int
SolvePnPMethod = int
"""One of [SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_DLS, SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE, SOLVEPNP_SQPNP, SOLVEPNP_MAX_COUNT]"""


# HandEyeCalibrationMethod: algorithms for calibrateHandEye.
CALIB_HAND_EYE_TSAI: int
CALIB_HAND_EYE_PARK: int
CALIB_HAND_EYE_HORAUD: int
CALIB_HAND_EYE_ANDREFF: int
CALIB_HAND_EYE_DANIILIDIS: int
HandEyeCalibrationMethod = int
"""One of [CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK, CALIB_HAND_EYE_HORAUD, CALIB_HAND_EYE_ANDREFF, CALIB_HAND_EYE_DANIILIDIS]"""


# RobotWorldHandEyeCalibrationMethod: algorithms for calibrateRobotWorldHandEye.
CALIB_ROBOT_WORLD_HAND_EYE_SHAH: int
CALIB_ROBOT_WORLD_HAND_EYE_LI: int
RobotWorldHandEyeCalibrationMethod = int
"""One of [CALIB_ROBOT_WORLD_HAND_EYE_SHAH, CALIB_ROBOT_WORLD_HAND_EYE_LI]"""


# SamplingMethod: USAC sampler choices (UsacParams).
SAMPLING_UNIFORM: int
SAMPLING_PROGRESSIVE_NAPSAC: int
SAMPLING_NAPSAC: int
SAMPLING_PROSAC: int
SamplingMethod = int
"""One of [SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC, SAMPLING_PROSAC]"""


# LocalOptimMethod: USAC local-optimization choices (UsacParams).
LOCAL_OPTIM_NULL: int
LOCAL_OPTIM_INNER_LO: int
LOCAL_OPTIM_INNER_AND_ITER_LO: int
LOCAL_OPTIM_GC: int
LOCAL_OPTIM_SIGMA: int
LocalOptimMethod = int
"""One of [LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO, LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA]"""


# ScoreMethod: USAC model-scoring choices (UsacParams).
SCORE_METHOD_RANSAC: int
SCORE_METHOD_MSAC: int
SCORE_METHOD_MAGSAC: int
SCORE_METHOD_LMEDS: int
ScoreMethod = int
"""One of [SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS]"""


# NeighborSearchMethod: USAC neighborhood-search choices (UsacParams).
NEIGH_FLANN_KNN: int
NEIGH_GRID: int
NEIGH_FLANN_RADIUS: int
NeighborSearchMethod = int
"""One of [NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS]"""


# PolishingMethod: USAC final-polish choices (UsacParams).
NONE_POLISHER: int
LSQ_POLISHER: int
MAGSAC: int
COV_POLISHER: int
PolishingMethod = int
"""One of [NONE_POLISHER, LSQ_POLISHER, MAGSAC, COV_POLISHER]"""


# UndistortTypes: spherical projection types for undistortion helpers.
PROJ_SPHERICAL_ORTHO: int
PROJ_SPHERICAL_EQRECT: int
UndistortTypes = int
"""One of [PROJ_SPHERICAL_ORTHO, PROJ_SPHERICAL_EQRECT]"""
|
|
# WindowFlags: flags for namedWindow.
WINDOW_NORMAL: int
WINDOW_AUTOSIZE: int
WINDOW_OPENGL: int
WINDOW_FULLSCREEN: int
WINDOW_FREERATIO: int
WINDOW_KEEPRATIO: int
WINDOW_GUI_EXPANDED: int
WINDOW_GUI_NORMAL: int
WindowFlags = int
"""One of [WINDOW_NORMAL, WINDOW_AUTOSIZE, WINDOW_OPENGL, WINDOW_FULLSCREEN, WINDOW_FREERATIO, WINDOW_KEEPRATIO, WINDOW_GUI_EXPANDED, WINDOW_GUI_NORMAL]"""


# WindowPropertyFlags: property ids for getWindowProperty / setWindowProperty.
WND_PROP_FULLSCREEN: int
WND_PROP_AUTOSIZE: int
WND_PROP_ASPECT_RATIO: int
WND_PROP_OPENGL: int
WND_PROP_VISIBLE: int
WND_PROP_TOPMOST: int
WND_PROP_VSYNC: int
WindowPropertyFlags = int
"""One of [WND_PROP_FULLSCREEN, WND_PROP_AUTOSIZE, WND_PROP_ASPECT_RATIO, WND_PROP_OPENGL, WND_PROP_VISIBLE, WND_PROP_TOPMOST, WND_PROP_VSYNC]"""


# MouseEventTypes: event codes delivered to setMouseCallback handlers.
EVENT_MOUSEMOVE: int
EVENT_LBUTTONDOWN: int
EVENT_RBUTTONDOWN: int
EVENT_MBUTTONDOWN: int
EVENT_LBUTTONUP: int
EVENT_RBUTTONUP: int
EVENT_MBUTTONUP: int
EVENT_LBUTTONDBLCLK: int
EVENT_RBUTTONDBLCLK: int
EVENT_MBUTTONDBLCLK: int
EVENT_MOUSEWHEEL: int
EVENT_MOUSEHWHEEL: int
MouseEventTypes = int
"""One of [EVENT_MOUSEMOVE, EVENT_LBUTTONDOWN, EVENT_RBUTTONDOWN, EVENT_MBUTTONDOWN, EVENT_LBUTTONUP, EVENT_RBUTTONUP, EVENT_MBUTTONUP, EVENT_LBUTTONDBLCLK, EVENT_RBUTTONDBLCLK, EVENT_MBUTTONDBLCLK, EVENT_MOUSEWHEEL, EVENT_MOUSEHWHEEL]"""


# MouseEventFlags: modifier/button state flags accompanying mouse events.
EVENT_FLAG_LBUTTON: int
EVENT_FLAG_RBUTTON: int
EVENT_FLAG_MBUTTON: int
EVENT_FLAG_CTRLKEY: int
EVENT_FLAG_SHIFTKEY: int
EVENT_FLAG_ALTKEY: int
MouseEventFlags = int
"""One of [EVENT_FLAG_LBUTTON, EVENT_FLAG_RBUTTON, EVENT_FLAG_MBUTTON, EVENT_FLAG_CTRLKEY, EVENT_FLAG_SHIFTKEY, EVENT_FLAG_ALTKEY]"""


# QtFontWeights: font weights for the Qt-based highgui text API.
QT_FONT_LIGHT: int
QT_FONT_NORMAL: int
QT_FONT_DEMIBOLD: int
QT_FONT_BOLD: int
QT_FONT_BLACK: int
QtFontWeights = int
"""One of [QT_FONT_LIGHT, QT_FONT_NORMAL, QT_FONT_DEMIBOLD, QT_FONT_BOLD, QT_FONT_BLACK]"""


# QtFontStyles: font slant styles for the Qt-based highgui text API.
QT_STYLE_NORMAL: int
QT_STYLE_ITALIC: int
QT_STYLE_OBLIQUE: int
QtFontStyles = int
"""One of [QT_STYLE_NORMAL, QT_STYLE_ITALIC, QT_STYLE_OBLIQUE]"""


# QtButtonTypes: button kinds for createButton (Qt backend).
QT_PUSH_BUTTON: int
QT_CHECKBOX: int
QT_RADIOBOX: int
QT_NEW_BUTTONBAR: int
QtButtonTypes = int
"""One of [QT_PUSH_BUTTON, QT_CHECKBOX, QT_RADIOBOX, QT_NEW_BUTTONBAR]"""
|
|
| GShape_GMAT: int |
| GSHAPE_GMAT: int |
| GShape_GSCALAR: int |
| GSHAPE_GSCALAR: int |
| GShape_GARRAY: int |
| GSHAPE_GARRAY: int |
| GShape_GOPAQUE: int |
| GSHAPE_GOPAQUE: int |
| GShape_GFRAME: int |
| GSHAPE_GFRAME: int |
| GShape = int |
| """One of [GShape_GMAT, GSHAPE_GMAT, GShape_GSCALAR, GSHAPE_GSCALAR, GShape_GARRAY, GSHAPE_GARRAY, GShape_GOPAQUE, GSHAPE_GOPAQUE, GShape_GFRAME, GSHAPE_GFRAME]""" |
|
|
| MediaFormat_BGR: int |
| MEDIA_FORMAT_BGR: int |
| MediaFormat_NV12: int |
| MEDIA_FORMAT_NV12: int |
| MediaFormat_GRAY: int |
| MEDIA_FORMAT_GRAY: int |
| MediaFormat = int |
| """One of [MediaFormat_BGR, MEDIA_FORMAT_BGR, MediaFormat_NV12, MEDIA_FORMAT_NV12, MediaFormat_GRAY, MEDIA_FORMAT_GRAY]""" |
|
|
|
|
| FileStorage_READ: int |
| FILE_STORAGE_READ: int |
| FileStorage_WRITE: int |
| FILE_STORAGE_WRITE: int |
| FileStorage_APPEND: int |
| FILE_STORAGE_APPEND: int |
| FileStorage_MEMORY: int |
| FILE_STORAGE_MEMORY: int |
| FileStorage_FORMAT_MASK: int |
| FILE_STORAGE_FORMAT_MASK: int |
| FileStorage_FORMAT_AUTO: int |
| FILE_STORAGE_FORMAT_AUTO: int |
| FileStorage_FORMAT_XML: int |
| FILE_STORAGE_FORMAT_XML: int |
| FileStorage_FORMAT_YAML: int |
| FILE_STORAGE_FORMAT_YAML: int |
| FileStorage_FORMAT_JSON: int |
| FILE_STORAGE_FORMAT_JSON: int |
| FileStorage_BASE64: int |
| FILE_STORAGE_BASE64: int |
| FileStorage_WRITE_BASE64: int |
| FILE_STORAGE_WRITE_BASE64: int |
| FileStorage_Mode = int |
| """One of [FileStorage_READ, FILE_STORAGE_READ, FileStorage_WRITE, FILE_STORAGE_WRITE, FileStorage_APPEND, FILE_STORAGE_APPEND, FileStorage_MEMORY, FILE_STORAGE_MEMORY, FileStorage_FORMAT_MASK, FILE_STORAGE_FORMAT_MASK, FileStorage_FORMAT_AUTO, FILE_STORAGE_FORMAT_AUTO, FileStorage_FORMAT_XML, FILE_STORAGE_FORMAT_XML, FileStorage_FORMAT_YAML, FILE_STORAGE_FORMAT_YAML, FileStorage_FORMAT_JSON, FILE_STORAGE_FORMAT_JSON, FileStorage_BASE64, FILE_STORAGE_BASE64, FileStorage_WRITE_BASE64, FILE_STORAGE_WRITE_BASE64]""" |
|
|
| FileStorage_UNDEFINED: int |
| FILE_STORAGE_UNDEFINED: int |
| FileStorage_VALUE_EXPECTED: int |
| FILE_STORAGE_VALUE_EXPECTED: int |
| FileStorage_NAME_EXPECTED: int |
| FILE_STORAGE_NAME_EXPECTED: int |
| FileStorage_INSIDE_MAP: int |
| FILE_STORAGE_INSIDE_MAP: int |
| FileStorage_State = int |
| """One of [FileStorage_UNDEFINED, FILE_STORAGE_UNDEFINED, FileStorage_VALUE_EXPECTED, FILE_STORAGE_VALUE_EXPECTED, FileStorage_NAME_EXPECTED, FILE_STORAGE_NAME_EXPECTED, FileStorage_INSIDE_MAP, FILE_STORAGE_INSIDE_MAP]""" |
|
|
| FileNode_NONE: int |
| FILE_NODE_NONE: int |
| FileNode_INT: int |
| FILE_NODE_INT: int |
| FileNode_REAL: int |
| FILE_NODE_REAL: int |
| FileNode_FLOAT: int |
| FILE_NODE_FLOAT: int |
| FileNode_STR: int |
| FILE_NODE_STR: int |
| FileNode_STRING: int |
| FILE_NODE_STRING: int |
| FileNode_SEQ: int |
| FILE_NODE_SEQ: int |
| FileNode_MAP: int |
| FILE_NODE_MAP: int |
| FileNode_TYPE_MASK: int |
| FILE_NODE_TYPE_MASK: int |
| FileNode_FLOW: int |
| FILE_NODE_FLOW: int |
| FileNode_UNIFORM: int |
| FILE_NODE_UNIFORM: int |
| FileNode_EMPTY: int |
| FILE_NODE_EMPTY: int |
| FileNode_NAMED: int |
| FILE_NODE_NAMED: int |
|
|
| UMat_MAGIC_VAL: int |
| UMAT_MAGIC_VAL: int |
| UMat_AUTO_STEP: int |
| UMAT_AUTO_STEP: int |
| UMat_CONTINUOUS_FLAG: int |
| UMAT_CONTINUOUS_FLAG: int |
| UMat_SUBMATRIX_FLAG: int |
| UMAT_SUBMATRIX_FLAG: int |
| UMat_MAGIC_MASK: int |
| UMAT_MAGIC_MASK: int |
| UMat_TYPE_MASK: int |
| UMAT_TYPE_MASK: int |
| UMat_DEPTH_MASK: int |
| UMAT_DEPTH_MASK: int |
|
|
| Subdiv2D_PTLOC_ERROR: int |
| SUBDIV2D_PTLOC_ERROR: int |
| Subdiv2D_PTLOC_OUTSIDE_RECT: int |
| SUBDIV2D_PTLOC_OUTSIDE_RECT: int |
| Subdiv2D_PTLOC_INSIDE: int |
| SUBDIV2D_PTLOC_INSIDE: int |
| Subdiv2D_PTLOC_VERTEX: int |
| SUBDIV2D_PTLOC_VERTEX: int |
| Subdiv2D_PTLOC_ON_EDGE: int |
| SUBDIV2D_PTLOC_ON_EDGE: int |
| Subdiv2D_NEXT_AROUND_ORG: int |
| SUBDIV2D_NEXT_AROUND_ORG: int |
| Subdiv2D_NEXT_AROUND_DST: int |
| SUBDIV2D_NEXT_AROUND_DST: int |
| Subdiv2D_PREV_AROUND_ORG: int |
| SUBDIV2D_PREV_AROUND_ORG: int |
| Subdiv2D_PREV_AROUND_DST: int |
| SUBDIV2D_PREV_AROUND_DST: int |
| Subdiv2D_NEXT_AROUND_LEFT: int |
| SUBDIV2D_NEXT_AROUND_LEFT: int |
| Subdiv2D_NEXT_AROUND_RIGHT: int |
| SUBDIV2D_NEXT_AROUND_RIGHT: int |
| Subdiv2D_PREV_AROUND_LEFT: int |
| SUBDIV2D_PREV_AROUND_LEFT: int |
| Subdiv2D_PREV_AROUND_RIGHT: int |
| SUBDIV2D_PREV_AROUND_RIGHT: int |
|
|
| ORB_HARRIS_SCORE: int |
| ORB_FAST_SCORE: int |
| ORB_ScoreType = int |
| """One of [ORB_HARRIS_SCORE, ORB_FAST_SCORE]""" |
|
|
| FastFeatureDetector_TYPE_5_8: int |
| FAST_FEATURE_DETECTOR_TYPE_5_8: int |
| FastFeatureDetector_TYPE_7_12: int |
| FAST_FEATURE_DETECTOR_TYPE_7_12: int |
| FastFeatureDetector_TYPE_9_16: int |
| FAST_FEATURE_DETECTOR_TYPE_9_16: int |
| FastFeatureDetector_DetectorType = int |
| """One of [FastFeatureDetector_TYPE_5_8, FAST_FEATURE_DETECTOR_TYPE_5_8, FastFeatureDetector_TYPE_7_12, FAST_FEATURE_DETECTOR_TYPE_7_12, FastFeatureDetector_TYPE_9_16, FAST_FEATURE_DETECTOR_TYPE_9_16]""" |
|
|
| FastFeatureDetector_THRESHOLD: int |
| FAST_FEATURE_DETECTOR_THRESHOLD: int |
| FastFeatureDetector_NONMAX_SUPPRESSION: int |
| FAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int |
| FastFeatureDetector_FAST_N: int |
| FAST_FEATURE_DETECTOR_FAST_N: int |
|
|
| AgastFeatureDetector_AGAST_5_8: int |
| AGAST_FEATURE_DETECTOR_AGAST_5_8: int |
| AgastFeatureDetector_AGAST_7_12d: int |
| AGAST_FEATURE_DETECTOR_AGAST_7_12D: int |
| AgastFeatureDetector_AGAST_7_12s: int |
| AGAST_FEATURE_DETECTOR_AGAST_7_12S: int |
| AgastFeatureDetector_OAST_9_16: int |
| AGAST_FEATURE_DETECTOR_OAST_9_16: int |
| AgastFeatureDetector_DetectorType = int |
| """One of [AgastFeatureDetector_AGAST_5_8, AGAST_FEATURE_DETECTOR_AGAST_5_8, AgastFeatureDetector_AGAST_7_12d, AGAST_FEATURE_DETECTOR_AGAST_7_12D, AgastFeatureDetector_AGAST_7_12s, AGAST_FEATURE_DETECTOR_AGAST_7_12S, AgastFeatureDetector_OAST_9_16, AGAST_FEATURE_DETECTOR_OAST_9_16]""" |
|
|
| AgastFeatureDetector_THRESHOLD: int |
| AGAST_FEATURE_DETECTOR_THRESHOLD: int |
| AgastFeatureDetector_NONMAX_SUPPRESSION: int |
| AGAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int |
|
|
| KAZE_DIFF_PM_G1: int |
| KAZE_DIFF_PM_G2: int |
| KAZE_DIFF_WEICKERT: int |
| KAZE_DIFF_CHARBONNIER: int |
| KAZE_DiffusivityType = int |
| """One of [KAZE_DIFF_PM_G1, KAZE_DIFF_PM_G2, KAZE_DIFF_WEICKERT, KAZE_DIFF_CHARBONNIER]""" |
|
|
| AKAZE_DESCRIPTOR_KAZE_UPRIGHT: int |
| AKAZE_DESCRIPTOR_KAZE: int |
| AKAZE_DESCRIPTOR_MLDB_UPRIGHT: int |
| AKAZE_DESCRIPTOR_MLDB: int |
| AKAZE_DescriptorType = int |
| """One of [AKAZE_DESCRIPTOR_KAZE_UPRIGHT, AKAZE_DESCRIPTOR_KAZE, AKAZE_DESCRIPTOR_MLDB_UPRIGHT, AKAZE_DESCRIPTOR_MLDB]""" |
|
|
| DescriptorMatcher_FLANNBASED: int |
| DESCRIPTOR_MATCHER_FLANNBASED: int |
| DescriptorMatcher_BRUTEFORCE: int |
| DESCRIPTOR_MATCHER_BRUTEFORCE: int |
| DescriptorMatcher_BRUTEFORCE_L1: int |
| DESCRIPTOR_MATCHER_BRUTEFORCE_L1: int |
| DescriptorMatcher_BRUTEFORCE_HAMMING: int |
| DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING: int |
| DescriptorMatcher_BRUTEFORCE_HAMMINGLUT: int |
| DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT: int |
| DescriptorMatcher_BRUTEFORCE_SL2: int |
| DESCRIPTOR_MATCHER_BRUTEFORCE_SL2: int |
| DescriptorMatcher_MatcherType = int |
| """One of [DescriptorMatcher_FLANNBASED, DESCRIPTOR_MATCHER_FLANNBASED, DescriptorMatcher_BRUTEFORCE, DESCRIPTOR_MATCHER_BRUTEFORCE, DescriptorMatcher_BRUTEFORCE_L1, DESCRIPTOR_MATCHER_BRUTEFORCE_L1, DescriptorMatcher_BRUTEFORCE_HAMMING, DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING, DescriptorMatcher_BRUTEFORCE_HAMMINGLUT, DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT, DescriptorMatcher_BRUTEFORCE_SL2, DESCRIPTOR_MATCHER_BRUTEFORCE_SL2]""" |
|
|
| CirclesGridFinderParameters_SYMMETRIC_GRID: int |
| CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID: int |
| CirclesGridFinderParameters_ASYMMETRIC_GRID: int |
| CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID: int |
| CirclesGridFinderParameters_GridType = int |
| """One of [CirclesGridFinderParameters_SYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID, CirclesGridFinderParameters_ASYMMETRIC_GRID, CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID]""" |
|
|
| StereoMatcher_DISP_SHIFT: int |
| STEREO_MATCHER_DISP_SHIFT: int |
| StereoMatcher_DISP_SCALE: int |
| STEREO_MATCHER_DISP_SCALE: int |
|
|
| StereoBM_PREFILTER_NORMALIZED_RESPONSE: int |
| STEREO_BM_PREFILTER_NORMALIZED_RESPONSE: int |
| StereoBM_PREFILTER_XSOBEL: int |
| STEREO_BM_PREFILTER_XSOBEL: int |
|
|
| StereoSGBM_MODE_SGBM: int |
| STEREO_SGBM_MODE_SGBM: int |
| StereoSGBM_MODE_HH: int |
| STEREO_SGBM_MODE_HH: int |
| StereoSGBM_MODE_SGBM_3WAY: int |
| STEREO_SGBM_MODE_SGBM_3WAY: int |
| StereoSGBM_MODE_HH4: int |
| STEREO_SGBM_MODE_HH4: int |
|
|
| HOGDescriptor_L2Hys: int |
| HOGDESCRIPTOR_L2HYS: int |
| HOGDescriptor_HistogramNormType = int |
| """One of [HOGDescriptor_L2Hys, HOGDESCRIPTOR_L2HYS]""" |
|
|
| HOGDescriptor_DEFAULT_NLEVELS: int |
| HOGDESCRIPTOR_DEFAULT_NLEVELS: int |
|
|
| HOGDescriptor_DESCR_FORMAT_COL_BY_COL: int |
| HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL: int |
| HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW: int |
| HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW: int |
| HOGDescriptor_DescriptorStorageFormat = int |
| """One of [HOGDescriptor_DESCR_FORMAT_COL_BY_COL, HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL, HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW, HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW]""" |
|
|
| QRCodeEncoder_MODE_AUTO: int |
| QRCODE_ENCODER_MODE_AUTO: int |
| QRCodeEncoder_MODE_NUMERIC: int |
| QRCODE_ENCODER_MODE_NUMERIC: int |
| QRCodeEncoder_MODE_ALPHANUMERIC: int |
| QRCODE_ENCODER_MODE_ALPHANUMERIC: int |
| QRCodeEncoder_MODE_BYTE: int |
| QRCODE_ENCODER_MODE_BYTE: int |
| QRCodeEncoder_MODE_ECI: int |
| QRCODE_ENCODER_MODE_ECI: int |
| QRCodeEncoder_MODE_KANJI: int |
| QRCODE_ENCODER_MODE_KANJI: int |
| QRCodeEncoder_MODE_STRUCTURED_APPEND: int |
| QRCODE_ENCODER_MODE_STRUCTURED_APPEND: int |
| QRCodeEncoder_EncodeMode = int |
| """One of [QRCodeEncoder_MODE_AUTO, QRCODE_ENCODER_MODE_AUTO, QRCodeEncoder_MODE_NUMERIC, QRCODE_ENCODER_MODE_NUMERIC, QRCodeEncoder_MODE_ALPHANUMERIC, QRCODE_ENCODER_MODE_ALPHANUMERIC, QRCodeEncoder_MODE_BYTE, QRCODE_ENCODER_MODE_BYTE, QRCodeEncoder_MODE_ECI, QRCODE_ENCODER_MODE_ECI, QRCodeEncoder_MODE_KANJI, QRCODE_ENCODER_MODE_KANJI, QRCodeEncoder_MODE_STRUCTURED_APPEND, QRCODE_ENCODER_MODE_STRUCTURED_APPEND]""" |
|
|
| QRCodeEncoder_CORRECT_LEVEL_L: int |
| QRCODE_ENCODER_CORRECT_LEVEL_L: int |
| QRCodeEncoder_CORRECT_LEVEL_M: int |
| QRCODE_ENCODER_CORRECT_LEVEL_M: int |
| QRCodeEncoder_CORRECT_LEVEL_Q: int |
| QRCODE_ENCODER_CORRECT_LEVEL_Q: int |
| QRCodeEncoder_CORRECT_LEVEL_H: int |
| QRCODE_ENCODER_CORRECT_LEVEL_H: int |
| QRCodeEncoder_CorrectionLevel = int |
| """One of [QRCodeEncoder_CORRECT_LEVEL_L, QRCODE_ENCODER_CORRECT_LEVEL_L, QRCodeEncoder_CORRECT_LEVEL_M, QRCODE_ENCODER_CORRECT_LEVEL_M, QRCodeEncoder_CORRECT_LEVEL_Q, QRCODE_ENCODER_CORRECT_LEVEL_Q, QRCodeEncoder_CORRECT_LEVEL_H, QRCODE_ENCODER_CORRECT_LEVEL_H]""" |
|
|
| QRCodeEncoder_ECI_UTF8: int |
| QRCODE_ENCODER_ECI_UTF8: int |
| QRCodeEncoder_ECIEncodings = int |
| """One of [QRCodeEncoder_ECI_UTF8, QRCODE_ENCODER_ECI_UTF8]""" |
|
|
| FaceRecognizerSF_FR_COSINE: int |
| FACE_RECOGNIZER_SF_FR_COSINE: int |
| FaceRecognizerSF_FR_NORM_L2: int |
| FACE_RECOGNIZER_SF_FR_NORM_L2: int |
| FaceRecognizerSF_DisType = int |
| """One of [FaceRecognizerSF_FR_COSINE, FACE_RECOGNIZER_SF_FR_COSINE, FaceRecognizerSF_FR_NORM_L2, FACE_RECOGNIZER_SF_FR_NORM_L2]""" |
|
|
| Stitcher_OK: int |
| STITCHER_OK: int |
| Stitcher_ERR_NEED_MORE_IMGS: int |
| STITCHER_ERR_NEED_MORE_IMGS: int |
| Stitcher_ERR_HOMOGRAPHY_EST_FAIL: int |
| STITCHER_ERR_HOMOGRAPHY_EST_FAIL: int |
| Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL: int |
| STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL: int |
| Stitcher_Status = int |
| """One of [Stitcher_OK, STITCHER_OK, Stitcher_ERR_NEED_MORE_IMGS, STITCHER_ERR_NEED_MORE_IMGS, Stitcher_ERR_HOMOGRAPHY_EST_FAIL, STITCHER_ERR_HOMOGRAPHY_EST_FAIL, Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL, STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL]""" |
|
|
| Stitcher_PANORAMA: int |
| STITCHER_PANORAMA: int |
| Stitcher_SCANS: int |
| STITCHER_SCANS: int |
| Stitcher_Mode = int |
| """One of [Stitcher_PANORAMA, STITCHER_PANORAMA, Stitcher_SCANS, STITCHER_SCANS]""" |
|
|
| DISOpticalFlow_PRESET_ULTRAFAST: int |
| DISOPTICAL_FLOW_PRESET_ULTRAFAST: int |
| DISOpticalFlow_PRESET_FAST: int |
| DISOPTICAL_FLOW_PRESET_FAST: int |
| DISOpticalFlow_PRESET_MEDIUM: int |
| DISOPTICAL_FLOW_PRESET_MEDIUM: int |
|
|
| PCA_DATA_AS_ROW: int |
| PCA_DATA_AS_COL: int |
| PCA_USE_AVG: int |
| PCA_Flags = int |
| """One of [PCA_DATA_AS_ROW, PCA_DATA_AS_COL, PCA_USE_AVG]""" |
|
|
| SVD_MODIFY_A: int |
| SVD_NO_UV: int |
| SVD_FULL_UV: int |
| SVD_Flags = int |
| """One of [SVD_MODIFY_A, SVD_NO_UV, SVD_FULL_UV]""" |
|
|
| RNG_UNIFORM: int |
| RNG_NORMAL: int |
|
|
| Formatter_FMT_DEFAULT: int |
| FORMATTER_FMT_DEFAULT: int |
| Formatter_FMT_MATLAB: int |
| FORMATTER_FMT_MATLAB: int |
| Formatter_FMT_CSV: int |
| FORMATTER_FMT_CSV: int |
| Formatter_FMT_PYTHON: int |
| FORMATTER_FMT_PYTHON: int |
| Formatter_FMT_NUMPY: int |
| FORMATTER_FMT_NUMPY: int |
| Formatter_FMT_C: int |
| FORMATTER_FMT_C: int |
| Formatter_FormatType = int |
| """One of [Formatter_FMT_DEFAULT, FORMATTER_FMT_DEFAULT, Formatter_FMT_MATLAB, FORMATTER_FMT_MATLAB, Formatter_FMT_CSV, FORMATTER_FMT_CSV, Formatter_FMT_PYTHON, FORMATTER_FMT_PYTHON, Formatter_FMT_NUMPY, FORMATTER_FMT_NUMPY, Formatter_FMT_C, FORMATTER_FMT_C]""" |
|
|
| _InputArray_KIND_SHIFT: int |
| _INPUT_ARRAY_KIND_SHIFT: int |
| _InputArray_FIXED_TYPE: int |
| _INPUT_ARRAY_FIXED_TYPE: int |
| _InputArray_FIXED_SIZE: int |
| _INPUT_ARRAY_FIXED_SIZE: int |
| _InputArray_KIND_MASK: int |
| _INPUT_ARRAY_KIND_MASK: int |
| _InputArray_NONE: int |
| _INPUT_ARRAY_NONE: int |
| _InputArray_MAT: int |
| _INPUT_ARRAY_MAT: int |
| _InputArray_MATX: int |
| _INPUT_ARRAY_MATX: int |
| _InputArray_STD_VECTOR: int |
| _INPUT_ARRAY_STD_VECTOR: int |
| _InputArray_STD_VECTOR_VECTOR: int |
| _INPUT_ARRAY_STD_VECTOR_VECTOR: int |
| _InputArray_STD_VECTOR_MAT: int |
| _INPUT_ARRAY_STD_VECTOR_MAT: int |
| _InputArray_EXPR: int |
| _INPUT_ARRAY_EXPR: int |
| _InputArray_OPENGL_BUFFER: int |
| _INPUT_ARRAY_OPENGL_BUFFER: int |
| _InputArray_CUDA_HOST_MEM: int |
| _INPUT_ARRAY_CUDA_HOST_MEM: int |
| _InputArray_CUDA_GPU_MAT: int |
| _INPUT_ARRAY_CUDA_GPU_MAT: int |
| _InputArray_UMAT: int |
| _INPUT_ARRAY_UMAT: int |
| _InputArray_STD_VECTOR_UMAT: int |
| _INPUT_ARRAY_STD_VECTOR_UMAT: int |
| _InputArray_STD_BOOL_VECTOR: int |
| _INPUT_ARRAY_STD_BOOL_VECTOR: int |
| _InputArray_STD_VECTOR_CUDA_GPU_MAT: int |
| _INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT: int |
| _InputArray_STD_ARRAY: int |
| _INPUT_ARRAY_STD_ARRAY: int |
| _InputArray_STD_ARRAY_MAT: int |
| _INPUT_ARRAY_STD_ARRAY_MAT: int |
| _InputArray_KindFlag = int |
| """One of [_InputArray_KIND_SHIFT, _INPUT_ARRAY_KIND_SHIFT, _InputArray_FIXED_TYPE, _INPUT_ARRAY_FIXED_TYPE, _InputArray_FIXED_SIZE, _INPUT_ARRAY_FIXED_SIZE, _InputArray_KIND_MASK, _INPUT_ARRAY_KIND_MASK, _InputArray_NONE, _INPUT_ARRAY_NONE, _InputArray_MAT, _INPUT_ARRAY_MAT, _InputArray_MATX, _INPUT_ARRAY_MATX, _InputArray_STD_VECTOR, _INPUT_ARRAY_STD_VECTOR, _InputArray_STD_VECTOR_VECTOR, _INPUT_ARRAY_STD_VECTOR_VECTOR, _InputArray_STD_VECTOR_MAT, _INPUT_ARRAY_STD_VECTOR_MAT, _InputArray_EXPR, _INPUT_ARRAY_EXPR, _InputArray_OPENGL_BUFFER, _INPUT_ARRAY_OPENGL_BUFFER, _InputArray_CUDA_HOST_MEM, _INPUT_ARRAY_CUDA_HOST_MEM, _InputArray_CUDA_GPU_MAT, _INPUT_ARRAY_CUDA_GPU_MAT, _InputArray_UMAT, _INPUT_ARRAY_UMAT, _InputArray_STD_VECTOR_UMAT, _INPUT_ARRAY_STD_VECTOR_UMAT, _InputArray_STD_BOOL_VECTOR, _INPUT_ARRAY_STD_BOOL_VECTOR, _InputArray_STD_VECTOR_CUDA_GPU_MAT, _INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT, _InputArray_STD_ARRAY, _INPUT_ARRAY_STD_ARRAY, _InputArray_STD_ARRAY_MAT, _INPUT_ARRAY_STD_ARRAY_MAT]""" |
|
|
| _OutputArray_DEPTH_MASK_8U: int |
| _OUTPUT_ARRAY_DEPTH_MASK_8U: int |
| _OutputArray_DEPTH_MASK_8S: int |
| _OUTPUT_ARRAY_DEPTH_MASK_8S: int |
| _OutputArray_DEPTH_MASK_16U: int |
| _OUTPUT_ARRAY_DEPTH_MASK_16U: int |
| _OutputArray_DEPTH_MASK_16S: int |
| _OUTPUT_ARRAY_DEPTH_MASK_16S: int |
| _OutputArray_DEPTH_MASK_32S: int |
| _OUTPUT_ARRAY_DEPTH_MASK_32S: int |
| _OutputArray_DEPTH_MASK_32F: int |
| _OUTPUT_ARRAY_DEPTH_MASK_32F: int |
| _OutputArray_DEPTH_MASK_64F: int |
| _OUTPUT_ARRAY_DEPTH_MASK_64F: int |
| _OutputArray_DEPTH_MASK_16F: int |
| _OUTPUT_ARRAY_DEPTH_MASK_16F: int |
| _OutputArray_DEPTH_MASK_ALL: int |
| _OUTPUT_ARRAY_DEPTH_MASK_ALL: int |
| _OutputArray_DEPTH_MASK_ALL_BUT_8S: int |
| _OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S: int |
| _OutputArray_DEPTH_MASK_ALL_16F: int |
| _OUTPUT_ARRAY_DEPTH_MASK_ALL_16F: int |
| _OutputArray_DEPTH_MASK_FLT: int |
| _OUTPUT_ARRAY_DEPTH_MASK_FLT: int |
| _OutputArray_DepthMask = int |
| """One of [_OutputArray_DEPTH_MASK_8U, _OUTPUT_ARRAY_DEPTH_MASK_8U, _OutputArray_DEPTH_MASK_8S, _OUTPUT_ARRAY_DEPTH_MASK_8S, _OutputArray_DEPTH_MASK_16U, _OUTPUT_ARRAY_DEPTH_MASK_16U, _OutputArray_DEPTH_MASK_16S, _OUTPUT_ARRAY_DEPTH_MASK_16S, _OutputArray_DEPTH_MASK_32S, _OUTPUT_ARRAY_DEPTH_MASK_32S, _OutputArray_DEPTH_MASK_32F, _OUTPUT_ARRAY_DEPTH_MASK_32F, _OutputArray_DEPTH_MASK_64F, _OUTPUT_ARRAY_DEPTH_MASK_64F, _OutputArray_DEPTH_MASK_16F, _OUTPUT_ARRAY_DEPTH_MASK_16F, _OutputArray_DEPTH_MASK_ALL, _OUTPUT_ARRAY_DEPTH_MASK_ALL, _OutputArray_DEPTH_MASK_ALL_BUT_8S, _OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S, _OutputArray_DEPTH_MASK_ALL_16F, _OUTPUT_ARRAY_DEPTH_MASK_ALL_16F, _OutputArray_DEPTH_MASK_FLT, _OUTPUT_ARRAY_DEPTH_MASK_FLT]""" |
|
|
| UMatData_COPY_ON_MAP: int |
| UMAT_DATA_COPY_ON_MAP: int |
| UMatData_HOST_COPY_OBSOLETE: int |
| UMAT_DATA_HOST_COPY_OBSOLETE: int |
| UMatData_DEVICE_COPY_OBSOLETE: int |
| UMAT_DATA_DEVICE_COPY_OBSOLETE: int |
| UMatData_TEMP_UMAT: int |
| UMAT_DATA_TEMP_UMAT: int |
| UMatData_TEMP_COPIED_UMAT: int |
| UMAT_DATA_TEMP_COPIED_UMAT: int |
| UMatData_USER_ALLOCATED: int |
| UMAT_DATA_USER_ALLOCATED: int |
| UMatData_DEVICE_MEM_MAPPED: int |
| UMAT_DATA_DEVICE_MEM_MAPPED: int |
| UMatData_ASYNC_CLEANUP: int |
| UMAT_DATA_ASYNC_CLEANUP: int |
| UMatData_MemoryFlag = int |
| """One of [UMatData_COPY_ON_MAP, UMAT_DATA_COPY_ON_MAP, UMatData_HOST_COPY_OBSOLETE, UMAT_DATA_HOST_COPY_OBSOLETE, UMatData_DEVICE_COPY_OBSOLETE, UMAT_DATA_DEVICE_COPY_OBSOLETE, UMatData_TEMP_UMAT, UMAT_DATA_TEMP_UMAT, UMatData_TEMP_COPIED_UMAT, UMAT_DATA_TEMP_COPIED_UMAT, UMatData_USER_ALLOCATED, UMAT_DATA_USER_ALLOCATED, UMatData_DEVICE_MEM_MAPPED, UMAT_DATA_DEVICE_MEM_MAPPED, UMatData_ASYNC_CLEANUP, UMAT_DATA_ASYNC_CLEANUP]""" |
|
|
| Mat_MAGIC_VAL: int |
| MAT_MAGIC_VAL: int |
| Mat_AUTO_STEP: int |
| MAT_AUTO_STEP: int |
| Mat_CONTINUOUS_FLAG: int |
| MAT_CONTINUOUS_FLAG: int |
| Mat_SUBMATRIX_FLAG: int |
| MAT_SUBMATRIX_FLAG: int |
| Mat_MAGIC_MASK: int |
| MAT_MAGIC_MASK: int |
| Mat_TYPE_MASK: int |
| MAT_TYPE_MASK: int |
| Mat_DEPTH_MASK: int |
| MAT_DEPTH_MASK: int |
|
|
| SparseMat_MAGIC_VAL: int |
| SPARSE_MAT_MAGIC_VAL: int |
| SparseMat_MAX_DIM: int |
| SPARSE_MAT_MAX_DIM: int |
| SparseMat_HASH_SCALE: int |
| SPARSE_MAT_HASH_SCALE: int |
| SparseMat_HASH_BIT: int |
| SPARSE_MAT_HASH_BIT: int |
|
|
| QuatEnum_INT_XYZ: int |
| QUAT_ENUM_INT_XYZ: int |
| QuatEnum_INT_XZY: int |
| QUAT_ENUM_INT_XZY: int |
| QuatEnum_INT_YXZ: int |
| QUAT_ENUM_INT_YXZ: int |
| QuatEnum_INT_YZX: int |
| QUAT_ENUM_INT_YZX: int |
| QuatEnum_INT_ZXY: int |
| QUAT_ENUM_INT_ZXY: int |
| QuatEnum_INT_ZYX: int |
| QUAT_ENUM_INT_ZYX: int |
| QuatEnum_INT_XYX: int |
| QUAT_ENUM_INT_XYX: int |
| QuatEnum_INT_XZX: int |
| QUAT_ENUM_INT_XZX: int |
| QuatEnum_INT_YXY: int |
| QUAT_ENUM_INT_YXY: int |
| QuatEnum_INT_YZY: int |
| QUAT_ENUM_INT_YZY: int |
| QuatEnum_INT_ZXZ: int |
| QUAT_ENUM_INT_ZXZ: int |
| QuatEnum_INT_ZYZ: int |
| QUAT_ENUM_INT_ZYZ: int |
| QuatEnum_EXT_XYZ: int |
| QUAT_ENUM_EXT_XYZ: int |
| QuatEnum_EXT_XZY: int |
| QUAT_ENUM_EXT_XZY: int |
| QuatEnum_EXT_YXZ: int |
| QUAT_ENUM_EXT_YXZ: int |
| QuatEnum_EXT_YZX: int |
| QUAT_ENUM_EXT_YZX: int |
| QuatEnum_EXT_ZXY: int |
| QUAT_ENUM_EXT_ZXY: int |
| QuatEnum_EXT_ZYX: int |
| QUAT_ENUM_EXT_ZYX: int |
| QuatEnum_EXT_XYX: int |
| QUAT_ENUM_EXT_XYX: int |
| QuatEnum_EXT_XZX: int |
| QUAT_ENUM_EXT_XZX: int |
| QuatEnum_EXT_YXY: int |
| QUAT_ENUM_EXT_YXY: int |
| QuatEnum_EXT_YZY: int |
| QUAT_ENUM_EXT_YZY: int |
| QuatEnum_EXT_ZXZ: int |
| QUAT_ENUM_EXT_ZXZ: int |
| QuatEnum_EXT_ZYZ: int |
| QUAT_ENUM_EXT_ZYZ: int |
| QuatEnum_EULER_ANGLES_MAX_VALUE: int |
| QUAT_ENUM_EULER_ANGLES_MAX_VALUE: int |
| QuatEnum_EulerAnglesType = int |
| """One of [QuatEnum_INT_XYZ, QUAT_ENUM_INT_XYZ, QuatEnum_INT_XZY, QUAT_ENUM_INT_XZY, QuatEnum_INT_YXZ, QUAT_ENUM_INT_YXZ, QuatEnum_INT_YZX, QUAT_ENUM_INT_YZX, QuatEnum_INT_ZXY, QUAT_ENUM_INT_ZXY, QuatEnum_INT_ZYX, QUAT_ENUM_INT_ZYX, QuatEnum_INT_XYX, QUAT_ENUM_INT_XYX, QuatEnum_INT_XZX, QUAT_ENUM_INT_XZX, QuatEnum_INT_YXY, QUAT_ENUM_INT_YXY, QuatEnum_INT_YZY, QUAT_ENUM_INT_YZY, QuatEnum_INT_ZXZ, QUAT_ENUM_INT_ZXZ, QuatEnum_INT_ZYZ, QUAT_ENUM_INT_ZYZ, QuatEnum_EXT_XYZ, QUAT_ENUM_EXT_XYZ, QuatEnum_EXT_XZY, QUAT_ENUM_EXT_XZY, QuatEnum_EXT_YXZ, QUAT_ENUM_EXT_YXZ, QuatEnum_EXT_YZX, QUAT_ENUM_EXT_YZX, QuatEnum_EXT_ZXY, QUAT_ENUM_EXT_ZXY, QuatEnum_EXT_ZYX, QUAT_ENUM_EXT_ZYX, QuatEnum_EXT_XYX, QUAT_ENUM_EXT_XYX, QuatEnum_EXT_XZX, QUAT_ENUM_EXT_XZX, QuatEnum_EXT_YXY, QUAT_ENUM_EXT_YXY, QuatEnum_EXT_YZY, QUAT_ENUM_EXT_YZY, QuatEnum_EXT_ZXZ, QUAT_ENUM_EXT_ZXZ, QuatEnum_EXT_ZYZ, QUAT_ENUM_EXT_ZYZ, QuatEnum_EULER_ANGLES_MAX_VALUE, QUAT_ENUM_EULER_ANGLES_MAX_VALUE]""" |
|
|
| TermCriteria_COUNT: int |
| TERM_CRITERIA_COUNT: int |
| TermCriteria_MAX_ITER: int |
| TERM_CRITERIA_MAX_ITER: int |
| TermCriteria_EPS: int |
| TERM_CRITERIA_EPS: int |
| TermCriteria_Type = int |
| """One of [TermCriteria_COUNT, TERM_CRITERIA_COUNT, TermCriteria_MAX_ITER, TERM_CRITERIA_MAX_ITER, TermCriteria_EPS, TERM_CRITERIA_EPS]""" |
|
|
| GFluidKernel_Kind_Filter: int |
| GFLUID_KERNEL_KIND_FILTER: int |
| GFluidKernel_Kind_Resize: int |
| GFLUID_KERNEL_KIND_RESIZE: int |
| GFluidKernel_Kind_YUV420toRGB: int |
| GFLUID_KERNEL_KIND_YUV420TO_RGB: int |
| GFluidKernel_Kind = int |
| """One of [GFluidKernel_Kind_Filter, GFLUID_KERNEL_KIND_FILTER, GFluidKernel_Kind_Resize, GFLUID_KERNEL_KIND_RESIZE, GFluidKernel_Kind_YUV420toRGB, GFLUID_KERNEL_KIND_YUV420TO_RGB]""" |
|
|
| MediaFrame_Access_R: int |
| MEDIA_FRAME_ACCESS_R: int |
| MediaFrame_Access_W: int |
| MEDIA_FRAME_ACCESS_W: int |
| MediaFrame_Access = int |
| """One of [MediaFrame_Access_R, MEDIA_FRAME_ACCESS_R, MediaFrame_Access_W, MEDIA_FRAME_ACCESS_W]""" |
|
|
| RMat_Access_R: int |
| RMAT_ACCESS_R: int |
| RMat_Access_W: int |
| RMAT_ACCESS_W: int |
| RMat_Access = int |
| """One of [RMat_Access_R, RMAT_ACCESS_R, RMat_Access_W, RMAT_ACCESS_W]""" |
|
|
|
|
| |
| CV_8U: int |
| CV_8UC1: int |
| CV_8UC2: int |
| CV_8UC3: int |
| CV_8UC4: int |
| CV_8S: int |
| CV_8SC1: int |
| CV_8SC2: int |
| CV_8SC3: int |
| CV_8SC4: int |
| CV_16U: int |
| CV_16UC1: int |
| CV_16UC2: int |
| CV_16UC3: int |
| CV_16UC4: int |
| CV_16S: int |
| CV_16SC1: int |
| CV_16SC2: int |
| CV_16SC3: int |
| CV_16SC4: int |
| CV_32S: int |
| CV_32SC1: int |
| CV_32SC2: int |
| CV_32SC3: int |
| CV_32SC4: int |
| CV_32F: int |
| CV_32FC1: int |
| CV_32FC2: int |
| CV_32FC3: int |
| CV_32FC4: int |
| CV_64F: int |
| CV_64FC1: int |
| CV_64FC2: int |
| CV_64FC3: int |
| CV_64FC4: int |
| CV_16F: int |
| CV_16FC1: int |
| CV_16FC2: int |
| CV_16FC3: int |
| CV_16FC4: int |
| __version__: str |
|
|
| |
class Algorithm:
    """Stub of ``cv2.Algorithm``, the common base class of OpenCV algorithms.

    Declares the shared state-serialization interface: an algorithm can be
    written to a ``FileStorage``, read back from a ``FileNode``, reset, and
    saved to a file. (Semantics per the OpenCV ``cv::Algorithm`` docs.)
    """
    
    # Releases/clears the algorithm's internal state.
    def clear(self) -> None: ...


    # Stores algorithm parameters into a file storage; the second overload
    # writes them under an explicit top-level node ``name``.
    @_typing.overload
    def write(self, fs: FileStorage) -> None: ...
    @_typing.overload
    def write(self, fs: FileStorage, name: str) -> None: ...


    # Reads algorithm parameters from a file-storage node.
    def read(self, fn: FileNode) -> None: ...


    # True when the algorithm holds no state (e.g. nothing trained/read).
    def empty(self) -> bool: ...


    # Convenience wrapper: serializes the algorithm state to ``filename``.
    def save(self, filename: str) -> None: ...


    # Name used as the default top-level node when saving.
    def getDefaultName(self) -> str: ...
|
|
|
|
class AsyncArray:
    """Stub of ``cv2.AsyncArray``: a future-like handle to the result of an
    asynchronous OpenCV operation (e.g. async DNN inference)."""
    
    def __init__(self) -> None: ...


    # Drops the reference to the (possibly still pending) result.
    def release(self) -> None: ...


    # Fetch the result. The two-argument forms block until ready; the
    # ``timeoutNs`` forms wait at most that many nanoseconds and return a
    # (ready, result) pair instead.  TODO confirm timeout unit against the
    # OpenCV AsyncArray documentation.
    @_typing.overload
    def get(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def get(self, dst: UMat | None = ...) -> UMat: ...
    @_typing.overload
    def get(self, timeoutNs: float, dst: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def get(self, timeoutNs: float, dst: UMat | None = ...) -> tuple[bool, UMat]: ...


    # Waits up to ``timeoutNs`` for the result; True when it became ready.
    def wait_for(self, timeoutNs: float) -> bool: ...


    # True if this handle is bound to an actual asynchronous operation.
    def valid(self) -> bool: ...
|
|
|
|
class FileStorage:
    """Stub of ``cv2.FileStorage``: XML/YAML/JSON serialization storage.

    ``flags`` for ``__init__``/``open`` are combinations of the module-level
    ``FILE_STORAGE_*`` constants (READ/WRITE/APPEND/MEMORY modes, FORMAT_*
    selectors, base64 options). Reading is done through ``FileNode`` objects
    obtained from ``root``/``getNode``; writing through the ``write`` /
    ``startWriteStruct`` family.
    """
    
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, filename: str, flags: int, encoding: str = ...) -> None: ...


    # Opens a file (or in-memory buffer) for reading/writing; True on success.
    def open(self, filename: str, flags: int, encoding: str = ...) -> bool: ...


    # True while a storage is open for reading or writing.
    def isOpened(self) -> bool: ...


    # Closes the storage, flushing any pending writes.
    def release(self) -> None: ...


    # Closes the storage; for MEMORY-mode storages returns the built text.
    def releaseAndGetString(self) -> str: ...


    # First element of the top-level mapping.
    def getFirstTopLevelNode(self) -> FileNode: ...


    # Top-level node of the ``streamidx``-th stream.
    def root(self, streamidx: int = ...) -> FileNode: ...


    # Named top-level node (same as ``root()[nodename]`` in the C++ API).
    def getNode(self, nodename: str) -> FileNode: ...


    # Writes a named value of the given type.
    # NOTE(review): the generated stub declared the ``val: int`` overload
    # twice with byte-identical signatures; the redundant duplicate was
    # removed — the set of accepted calls is unchanged.
    @_typing.overload
    def write(self, name: str, val: int) -> None: ...
    @_typing.overload
    def write(self, name: str, val: float) -> None: ...
    @_typing.overload
    def write(self, name: str, val: str) -> None: ...
    @_typing.overload
    def write(self, name: str, val: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def write(self, name: str, val: _typing.Sequence[str]) -> None: ...


    # Writes a comment (skipped when the file is read back).
    def writeComment(self, comment: str, append: bool = ...) -> None: ...


    # Begins writing a named map/sequence; must be paired with
    # ``endWriteStruct``.
    def startWriteStruct(self, name: str, flags: int, typeName: str = ...) -> None: ...


    def endWriteStruct(self) -> None: ...


    # Current storage format — one of the ``FILE_STORAGE_FORMAT_*`` values.
    def getFormat(self) -> int: ...
|
|
|
|
class FileNode:
    """Stub of ``cv2.FileNode``: one element (scalar, sequence, or map) of a
    ``FileStorage`` being read. Node kind is queried via the ``is*`` methods
    or ``type()`` against the module-level ``FILE_NODE_*`` constants."""
    
    def __init__(self) -> None: ...


    # Child node with the given key (map nodes).
    def getNode(self, nodename: str) -> FileNode: ...


    # i-th child node (sequence nodes).
    def at(self, i: int) -> FileNode: ...


    # Keys of a map node.
    def keys(self) -> _typing.Sequence[str]: ...


    # Node type — one of the FILE_NODE_* constants.
    def type(self) -> int: ...


    def empty(self) -> bool: ...


    # --- type predicates ---------------------------------------------------
    def isNone(self) -> bool: ...


    def isSeq(self) -> bool: ...


    def isMap(self) -> bool: ...


    def isInt(self) -> bool: ...


    def isReal(self) -> bool: ...


    def isString(self) -> bool: ...


    def isNamed(self) -> bool: ...


    # Name of the node inside its parent map (see ``isNamed``).
    def name(self) -> str: ...


    # Number of child elements.
    def size(self) -> int: ...


    def rawSize(self) -> int: ...


    # --- value accessors ---------------------------------------------------
    def real(self) -> float: ...


    def string(self) -> str: ...


    def mat(self) -> cv2.typing.MatLike: ...
|
|
|
|
class RotatedRect:
    """Stub of ``cv2.RotatedRect``: a rectangle with a rotation angle."""
    center: cv2.typing.Point2f  # mass center of the rectangle
    size: cv2.typing.Size2f     # width and height
    angle: float                # rotation angle, in degrees per OpenCV docs
    
    
    
    # Construct empty, from (center, size, angle), or from three corner
    # points (the third overload).
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, center: cv2.typing.Point2f, size: cv2.typing.Size2f, angle: float) -> None: ...
    @_typing.overload
    def __init__(self, point1: cv2.typing.Point2f, point2: cv2.typing.Point2f, point3: cv2.typing.Point2f) -> None: ...


    # The four corner points of the rotated rectangle.
    def points(self) -> _typing.Sequence[cv2.typing.Point2f]: ...


    # Minimal axis-aligned integer bounding rectangle.
    def boundingRect(self) -> cv2.typing.Rect: ...


    # Minimal axis-aligned float bounding rectangle.
    def boundingRect2f(self) -> cv2.typing.Rect2f: ...
|
|
|
|
class KeyPoint:
    """Stub of ``cv2.KeyPoint``: a salient image point as produced by
    feature detectors (ORB, FAST, AKAZE, ...)."""
    pt: cv2.typing.Point2f  # keypoint coordinates
    size: float             # diameter of the meaningful neighborhood
    angle: float            # orientation; -1 if not applicable (per OpenCV docs)
    response: float         # detector response (strength) of the keypoint
    octave: int             # pyramid octave the keypoint was detected in
    class_id: int           # object id the keypoint belongs to
    
    
    
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, x: float, y: float, size: float, angle: float = ..., response: float = ..., octave: int = ..., class_id: int = ...) -> None: ...


    # Converts keypoints -> points (first overload) or points -> keypoints
    # with the given shared attributes (second overload).
    @staticmethod
    @_typing.overload
    def convert(keypoints: _typing.Sequence[KeyPoint], keypointIndexes: _typing.Sequence[int] = ...) -> _typing.Sequence[cv2.typing.Point2f]: ...
    @staticmethod
    @_typing.overload
    def convert(points2f: _typing.Sequence[cv2.typing.Point2f], size: float = ..., response: float = ..., octave: int = ..., class_id: int = ...) -> _typing.Sequence[KeyPoint]: ...


    # Overlap measure between the regions of two keypoints.
    @staticmethod
    def overlap(kp1: KeyPoint, kp2: KeyPoint) -> float: ...
|
|
|
|
class DMatch:
    """Stub of ``cv2.DMatch``: one descriptor match produced by a
    ``DescriptorMatcher``."""
    queryIdx: int    # index of the descriptor in the query set
    trainIdx: int    # index of the descriptor in the train set
    imgIdx: int      # index of the train image (multi-image matching)
    distance: float  # distance between the matched descriptors (lower = better)
    
    
    
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, _queryIdx: int, _trainIdx: int, _distance: float) -> None: ...
    @_typing.overload
    def __init__(self, _queryIdx: int, _trainIdx: int, _imgIdx: int, _distance: float) -> None: ...
|
|
|
|
class TickMeter:
    """Stub of ``cv2.TickMeter``: a stopwatch accumulating time over
    repeated ``start``/``stop`` cycles, with per-lap and average queries."""
    
    def __init__(self) -> None: ...


    # Starts (or resumes) counting.
    def start(self) -> None: ...


    # Stops counting and accumulates the elapsed lap.
    def stop(self) -> None: ...


    # --- total accumulated time, in ticks / microseconds / ms / seconds ---
    def getTimeTicks(self) -> int: ...


    def getTimeMicro(self) -> float: ...


    def getTimeMilli(self) -> float: ...


    def getTimeSec(self) -> float: ...


    # --- duration of the most recent start/stop lap ------------------------
    def getLastTimeTicks(self) -> int: ...


    def getLastTimeMicro(self) -> float: ...


    def getLastTimeMilli(self) -> float: ...


    def getLastTimeSec(self) -> float: ...


    # Number of completed start/stop cycles.
    def getCounter(self) -> int: ...


    # Average frequency / average lap time over all counted cycles.
    def getFPS(self) -> float: ...


    def getAvgTimeSec(self) -> float: ...


    def getAvgTimeMilli(self) -> float: ...


    # Resets all accumulated counters.
    def reset(self) -> None: ...
|
|
|
|
class UMat:
    """Stub of ``cv2.UMat``: an OpenCL-backed matrix usable wherever a
    ``Mat`` is accepted; data may live on an OpenCL device."""
    offset: int  # offset of the submatrix start within the parent buffer
    
    
    
    # Construct empty, by (rows, cols) or Size plus element ``type`` (a
    # CV_* type constant), optionally filled with scalar ``s``; or as a
    # view/ROI of an existing UMat ``m``.
    @_typing.overload
    def __init__(self, usageFlags: UMatUsageFlags = ...) -> None: ...
    @_typing.overload
    def __init__(self, rows: int, cols: int, type: int, usageFlags: UMatUsageFlags = ...) -> None: ...
    @_typing.overload
    def __init__(self, size: cv2.typing.Size, type: int, usageFlags: UMatUsageFlags = ...) -> None: ...
    @_typing.overload
    def __init__(self, rows: int, cols: int, type: int, s: cv2.typing.Scalar, usageFlags: UMatUsageFlags = ...) -> None: ...
    @_typing.overload
    def __init__(self, size: cv2.typing.Size, type: int, s: cv2.typing.Scalar, usageFlags: UMatUsageFlags = ...) -> None: ...
    @_typing.overload
    def __init__(self, m: UMat) -> None: ...
    @_typing.overload
    def __init__(self, m: UMat, rowRange: cv2.typing.Range, colRange: cv2.typing.Range = ...) -> None: ...
    @_typing.overload
    def __init__(self, m: UMat, roi: cv2.typing.Rect) -> None: ...
    @_typing.overload
    def __init__(self, m: UMat, ranges: _typing.Sequence[cv2.typing.Range]) -> None: ...


    # Handle to the underlying OpenCL command queue.
    @staticmethod
    def queue() -> cv2.typing.IntPointer: ...


    # Handle to the underlying OpenCL context.
    @staticmethod
    def context() -> cv2.typing.IntPointer: ...


    # Downloads the data into a host-side Mat-like array.
    def get(self) -> cv2.typing.MatLike: ...


    # True when the matrix rows are stored without gaps.
    def isContinuous(self) -> bool: ...


    # True when this UMat is a view into a larger matrix.
    def isSubmatrix(self) -> bool: ...


    # Raw device handle for the given access mode.
    def handle(self, accessFlags: AccessFlag) -> cv2.typing.IntPointer: ...
|
|
|
|
class GeneralizedHough(Algorithm):
    """Stub of ``cv2.GeneralizedHough``: base class for Generalized Hough
    Transform template detectors (see Ballard/Guil subclasses)."""
    
    # Sets the template to search for — either a template image (with an
    # optional reference center) or precomputed edges plus x/y derivatives.
    @_typing.overload
    def setTemplate(self, templ: cv2.typing.MatLike, templCenter: cv2.typing.Point = ...) -> None: ...
    @_typing.overload
    def setTemplate(self, templ: UMat, templCenter: cv2.typing.Point = ...) -> None: ...
    @_typing.overload
    def setTemplate(self, edges: cv2.typing.MatLike, dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, templCenter: cv2.typing.Point = ...) -> None: ...
    @_typing.overload
    def setTemplate(self, edges: UMat, dx: UMat, dy: UMat, templCenter: cv2.typing.Point = ...) -> None: ...


    # Finds template positions in an image (or in precomputed edges +
    # derivatives); returns (positions, votes).
    @_typing.overload
    def detect(self, image: cv2.typing.MatLike, positions: cv2.typing.MatLike | None = ..., votes: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def detect(self, image: UMat, positions: UMat | None = ..., votes: UMat | None = ...) -> tuple[UMat, UMat]: ...
    @_typing.overload
    def detect(self, edges: cv2.typing.MatLike, dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, positions: cv2.typing.MatLike | None = ..., votes: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def detect(self, edges: UMat, dx: UMat, dy: UMat, positions: UMat | None = ..., votes: UMat | None = ...) -> tuple[UMat, UMat]: ...


    # --- tuning parameters (Canny thresholds, vote grid, buffer size) ------
    def setCannyLowThresh(self, cannyLowThresh: int) -> None: ...


    def getCannyLowThresh(self) -> int: ...


    def setCannyHighThresh(self, cannyHighThresh: int) -> None: ...


    def getCannyHighThresh(self) -> int: ...


    # Minimum distance between detected object centers.
    def setMinDist(self, minDist: float) -> None: ...


    def getMinDist(self) -> float: ...


    # Inverse ratio of the accumulator resolution to the image resolution.
    def setDp(self, dp: float) -> None: ...


    def getDp(self) -> float: ...


    def setMaxBufferSize(self, maxBufferSize: int) -> None: ...


    def getMaxBufferSize(self) -> int: ...
|
|
|
|
class GeneralizedHoughBallard(GeneralizedHough):
    """Ballard variant of the generalized Hough transform.

    Adds position-only detection tuning on top of GeneralizedHough:
    a pyramid ``levels`` count and a ``votesThreshold`` accumulator cutoff.
    """

    def setLevels(self, levels: int) -> None: ...


    def getLevels(self) -> int: ...


    def setVotesThreshold(self, votesThreshold: int) -> None: ...


    def getVotesThreshold(self) -> int: ...
|
|
|
|
class GeneralizedHoughGuil(GeneralizedHough):
    """Guil variant of the generalized Hough transform.

    Extends GeneralizedHough with rotation- and scale-invariant detection
    parameters: angle range/step/threshold, scale range/step/threshold,
    a position threshold, plus ``xi`` and ``levels`` tuning values.
    All methods are plain getter/setter pairs.
    """

    def setXi(self, xi: float) -> None: ...


    def getXi(self) -> float: ...


    def setLevels(self, levels: int) -> None: ...


    def getLevels(self) -> int: ...


    def setAngleEpsilon(self, angleEpsilon: float) -> None: ...


    def getAngleEpsilon(self) -> float: ...


    def setMinAngle(self, minAngle: float) -> None: ...


    def getMinAngle(self) -> float: ...


    def setMaxAngle(self, maxAngle: float) -> None: ...


    def getMaxAngle(self) -> float: ...


    def setAngleStep(self, angleStep: float) -> None: ...


    def getAngleStep(self) -> float: ...


    def setAngleThresh(self, angleThresh: int) -> None: ...


    def getAngleThresh(self) -> int: ...


    def setMinScale(self, minScale: float) -> None: ...


    def getMinScale(self) -> float: ...


    def setMaxScale(self, maxScale: float) -> None: ...


    def getMaxScale(self) -> float: ...


    def setScaleStep(self, scaleStep: float) -> None: ...


    def getScaleStep(self) -> float: ...


    def setScaleThresh(self, scaleThresh: int) -> None: ...


    def getScaleThresh(self) -> int: ...


    def setPosThresh(self, posThresh: int) -> None: ...


    def getPosThresh(self) -> int: ...
|
|
|
|
class CLAHE(Algorithm):
    """Contrast Limited Adaptive Histogram Equalization.

    ``apply`` equalizes a source image into an (optionally caller-provided)
    destination; ``clipLimit`` and ``tilesGridSize`` control the algorithm.
    ``collectGarbage`` releases internal buffers.
    """

    @_typing.overload
    def apply(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def apply(self, src: UMat, dst: UMat | None = ...) -> UMat: ...


    def setClipLimit(self, clipLimit: float) -> None: ...


    def getClipLimit(self) -> float: ...


    def setTilesGridSize(self, tileGridSize: cv2.typing.Size) -> None: ...


    def getTilesGridSize(self) -> cv2.typing.Size: ...


    def collectGarbage(self) -> None: ...
|
|
|
|
class Subdiv2D:
    """Planar subdivision for Delaunay triangulation / Voronoi tessellation.

    Points are inserted into a bounding ``rect`` (constructor or
    ``initDelaunay``); queries return triangle/edge/facet structure.
    Edge-walking methods (``getEdge``, ``nextEdge``, ``rotateEdge``,
    ``symEdge``, ``edgeOrg``, ``edgeDst``) operate on integer edge ids.
    """

    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, rect: cv2.typing.Rect) -> None: ...


    def initDelaunay(self, rect: cv2.typing.Rect) -> None: ...


    # Single-point insert returns the new vertex id; bulk insert returns None.
    @_typing.overload
    def insert(self, pt: cv2.typing.Point2f) -> int: ...
    @_typing.overload
    def insert(self, ptvec: _typing.Sequence[cv2.typing.Point2f]) -> None: ...


    def locate(self, pt: cv2.typing.Point2f) -> tuple[int, int, int]: ...


    def findNearest(self, pt: cv2.typing.Point2f) -> tuple[int, cv2.typing.Point2f]: ...


    def getEdgeList(self) -> _typing.Sequence[cv2.typing.Vec4f]: ...


    def getLeadingEdgeList(self) -> _typing.Sequence[int]: ...


    def getTriangleList(self) -> _typing.Sequence[cv2.typing.Vec6f]: ...


    def getVoronoiFacetList(self, idx: _typing.Sequence[int]) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point2f]], _typing.Sequence[cv2.typing.Point2f]]: ...


    def getVertex(self, vertex: int) -> tuple[cv2.typing.Point2f, int]: ...


    def getEdge(self, edge: int, nextEdgeType: int) -> int: ...


    def nextEdge(self, edge: int) -> int: ...


    def rotateEdge(self, edge: int, rotate: int) -> int: ...


    def symEdge(self, edge: int) -> int: ...


    def edgeOrg(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ...


    def edgeDst(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ...
|
|
|
|
class LineSegmentDetector(Algorithm):
    """Line segment detector.

    ``detect`` finds segments (with per-segment width, precision and NFA
    outputs); ``drawSegments`` renders them onto an image;
    ``compareSegments`` overlays two segment sets and returns a mismatch
    count alongside the image. MatLike/UMat overloads throughout.
    """

    @_typing.overload
    def detect(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike | None = ..., width: cv2.typing.MatLike | None = ..., prec: cv2.typing.MatLike | None = ..., nfa: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def detect(self, image: UMat, lines: UMat | None = ..., width: UMat | None = ..., prec: UMat | None = ..., nfa: UMat | None = ...) -> tuple[UMat, UMat, UMat, UMat]: ...


    @_typing.overload
    def drawSegments(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
    @_typing.overload
    def drawSegments(self, image: UMat, lines: UMat) -> UMat: ...


    @_typing.overload
    def compareSegments(self, size: cv2.typing.Size, lines1: cv2.typing.MatLike, lines2: cv2.typing.MatLike, image: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
    @_typing.overload
    def compareSegments(self, size: cv2.typing.Size, lines1: UMat, lines2: UMat, image: UMat | None = ...) -> tuple[int, UMat]: ...
|
|
|
|
class Tonemap(Algorithm):
    """Base class for tonemapping algorithms (HDR -> LDR).

    ``process`` maps a source image into a destination; ``gamma`` is the
    single tuning parameter shared by all subclasses.
    """

    @_typing.overload
    def process(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: UMat, dst: UMat | None = ...) -> UMat: ...


    def getGamma(self) -> float: ...


    def setGamma(self, gamma: float) -> None: ...
|
|
|
|
class TonemapDrago(Tonemap):
    """Drago tonemapping; adds ``saturation`` and ``bias`` parameters."""

    def getSaturation(self) -> float: ...


    def setSaturation(self, saturation: float) -> None: ...


    def getBias(self) -> float: ...


    def setBias(self, bias: float) -> None: ...
|
|
|
|
class TonemapReinhard(Tonemap):
    """Reinhard tonemapping; adds intensity, light- and color-adaptation
    parameters on top of the base ``gamma``."""

    def getIntensity(self) -> float: ...


    def setIntensity(self, intensity: float) -> None: ...


    def getLightAdaptation(self) -> float: ...


    def setLightAdaptation(self, light_adapt: float) -> None: ...


    def getColorAdaptation(self) -> float: ...


    def setColorAdaptation(self, color_adapt: float) -> None: ...
|
|
|
|
class TonemapMantiuk(Tonemap):
    """Mantiuk tonemapping; adds ``scale`` and ``saturation`` parameters."""

    def getScale(self) -> float: ...


    def setScale(self, scale: float) -> None: ...


    def getSaturation(self) -> float: ...


    def setSaturation(self, saturation: float) -> None: ...
|
|
|
|
class AlignExposures(Algorithm):
    """Base class for aligning an exposure bracket before HDR merging.

    ``process`` writes aligned images into ``dst`` in place and returns None.

    NOTE(review): in the UMat overload ``dst`` is typed
    ``Sequence[cv2.typing.MatLike]`` rather than ``Sequence[UMat]`` — this
    mirrors the generated upstream stub; confirm against the cv2 bindings
    before "fixing".
    """

    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], dst: _typing.Sequence[cv2.typing.MatLike], times: UMat, response: UMat) -> None: ...
|
|
|
|
class AlignMTB(AlignExposures):
    """Median-threshold-bitmap exposure alignment.

    Adds two-argument ``process`` overloads (no times/response needed),
    pairwise shift computation (``calculateShift``/``shiftMat``),
    bitmap extraction (``computeBitmaps``) and max_bits / exclude_range /
    cut tuning accessors.
    """

    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], dst: _typing.Sequence[cv2.typing.MatLike], times: UMat, response: UMat) -> None: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], dst: _typing.Sequence[cv2.typing.MatLike]) -> None: ...


    @_typing.overload
    def calculateShift(self, img0: cv2.typing.MatLike, img1: cv2.typing.MatLike) -> cv2.typing.Point: ...
    @_typing.overload
    def calculateShift(self, img0: UMat, img1: UMat) -> cv2.typing.Point: ...


    @_typing.overload
    def shiftMat(self, src: cv2.typing.MatLike, shift: cv2.typing.Point, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def shiftMat(self, src: UMat, shift: cv2.typing.Point, dst: UMat | None = ...) -> UMat: ...


    @_typing.overload
    def computeBitmaps(self, img: cv2.typing.MatLike, tb: cv2.typing.MatLike | None = ..., eb: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeBitmaps(self, img: UMat, tb: UMat | None = ..., eb: UMat | None = ...) -> tuple[UMat, UMat]: ...


    def getMaxBits(self) -> int: ...


    def setMaxBits(self, max_bits: int) -> None: ...


    def getExcludeRange(self) -> int: ...


    def setExcludeRange(self, exclude_range: int) -> None: ...


    def getCut(self) -> bool: ...


    def setCut(self, value: bool) -> None: ...
|
|
|
|
class CalibrateCRF(Algorithm):
    """Base class for camera-response-function calibration.

    ``process`` estimates the response curve from an exposure stack
    ``src`` with exposure ``times`` and returns it (optionally into ``dst``).
    """

    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
|
|
class CalibrateDebevec(CalibrateCRF):
    """Debevec CRF calibration; tunables are ``lambda`` (smoothness),
    ``samples`` (pixel sample count) and ``random`` (sampling mode)."""

    def getLambda(self) -> float: ...


    # Trailing underscore avoids the Python keyword `lambda`.
    def setLambda(self, lambda_: float) -> None: ...


    def getSamples(self) -> int: ...


    def setSamples(self, samples: int) -> None: ...


    def getRandom(self) -> bool: ...


    def setRandom(self, random: bool) -> None: ...
|
|
|
|
class CalibrateRobertson(CalibrateCRF):
    """Robertson CRF calibration; iterative, tuned by ``maxIter`` and
    ``threshold``; ``getRadiance`` exposes the recovered radiance map."""

    def getMaxIter(self) -> int: ...


    def setMaxIter(self, max_iter: int) -> None: ...


    def getThreshold(self) -> float: ...


    def setThreshold(self, threshold: float) -> None: ...


    def getRadiance(self) -> cv2.typing.MatLike: ...
|
|
|
|
class MergeExposures(Algorithm):
    """Base class for merging an exposure stack into a single image.

    ``process`` takes the image stack, exposure ``times`` and a camera
    ``response`` curve, and returns the merged result.
    """

    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
|
|
class MergeDebevec(MergeExposures):
    """Debevec HDR merge; adds overloads that omit the response curve."""

    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
|
|
class MergeMertens(MergeExposures):
    """Mertens exposure fusion; works without times/response (see the
    two-argument ``process`` overloads) and exposes contrast / saturation /
    exposure weight accessors."""

    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...


    def getContrastWeight(self) -> float: ...


    # NOTE(review): `contrast_weiht` (sic) matches the upstream binding's
    # keyword-argument name — do not "fix" the spelling here or keyword
    # calls against the real binding would diverge from this stub.
    def setContrastWeight(self, contrast_weiht: float) -> None: ...


    def getSaturationWeight(self) -> float: ...


    def setSaturationWeight(self, saturation_weight: float) -> None: ...


    def getExposureWeight(self) -> float: ...


    def setExposureWeight(self, exposure_weight: float) -> None: ...
|
|
|
|
class MergeRobertson(MergeExposures):
    """Robertson HDR merge; adds overloads that omit the response curve."""

    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, response: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[cv2.typing.MatLike], times: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def process(self, src: _typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
|
|
class Feature2D:
    """Abstract base for 2D feature detectors/descriptor extractors.

    ``detect`` finds keypoints (single image or batch), ``compute``
    produces descriptors for given keypoints, and ``detectAndCompute``
    does both in one call. ``write``/``read`` persist state to a file or
    FileStorage/FileNode. Every image-taking method has MatLike and UMat
    overloads.
    """

    @_typing.overload
    def detect(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> _typing.Sequence[KeyPoint]: ...
    @_typing.overload
    def detect(self, image: UMat, mask: UMat | None = ...) -> _typing.Sequence[KeyPoint]: ...
    @_typing.overload
    def detect(self, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[_typing.Sequence[KeyPoint]]: ...
    @_typing.overload
    def detect(self, images: _typing.Sequence[UMat], masks: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[_typing.Sequence[KeyPoint]]: ...


    # Returns (possibly adjusted) keypoints together with their descriptors.
    @_typing.overload
    def compute(self, image: cv2.typing.MatLike, keypoints: _typing.Sequence[KeyPoint], descriptors: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[KeyPoint], cv2.typing.MatLike]: ...
    @_typing.overload
    def compute(self, image: UMat, keypoints: _typing.Sequence[KeyPoint], descriptors: UMat | None = ...) -> tuple[_typing.Sequence[KeyPoint], UMat]: ...
    @_typing.overload
    def compute(self, images: _typing.Sequence[cv2.typing.MatLike], keypoints: _typing.Sequence[_typing.Sequence[KeyPoint]], descriptors: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[_typing.Sequence[KeyPoint]], _typing.Sequence[cv2.typing.MatLike]]: ...
    @_typing.overload
    def compute(self, images: _typing.Sequence[UMat], keypoints: _typing.Sequence[_typing.Sequence[KeyPoint]], descriptors: _typing.Sequence[UMat] | None = ...) -> tuple[_typing.Sequence[_typing.Sequence[KeyPoint]], _typing.Sequence[UMat]]: ...


    @_typing.overload
    def detectAndCompute(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike, descriptors: cv2.typing.MatLike | None = ..., useProvidedKeypoints: bool = ...) -> tuple[_typing.Sequence[KeyPoint], cv2.typing.MatLike]: ...
    @_typing.overload
    def detectAndCompute(self, image: UMat, mask: UMat, descriptors: UMat | None = ..., useProvidedKeypoints: bool = ...) -> tuple[_typing.Sequence[KeyPoint], UMat]: ...


    def descriptorSize(self) -> int: ...


    def descriptorType(self) -> int: ...


    def defaultNorm(self) -> int: ...


    @_typing.overload
    def write(self, fileName: str) -> None: ...
    @_typing.overload
    def write(self, fs: FileStorage, name: str) -> None: ...


    @_typing.overload
    def read(self, fileName: str) -> None: ...
    @_typing.overload
    def read(self, arg1: FileNode) -> None: ...


    def empty(self) -> bool: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class AffineFeature(Feature2D):
    """Affine-invariant wrapper around another Feature2D ``backend``
    (simulated tilts/rotations, configured via create() or setViewParams)."""

    @classmethod
    def create(cls, backend: Feature2D, maxTilt: int = ..., minTilt: int = ..., tiltStep: float = ..., rotateStepBase: float = ...) -> AffineFeature: ...


    def setViewParams(self, tilts: _typing.Sequence[float], rolls: _typing.Sequence[float]) -> None: ...


    # NOTE(review): upstream signature takes the sequences as output
    # parameters and returns None — mirrors the generated binding stub.
    def getViewParams(self, tilts: _typing.Sequence[float], rolls: _typing.Sequence[float]) -> None: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class SIFT(Feature2D):
    """SIFT keypoint detector / descriptor extractor.

    Constructed via ``create``; the second overload additionally fixes the
    ``descriptorType``. Remaining methods are parameter accessors.
    """

    @classmethod
    @_typing.overload
    def create(cls, nfeatures: int = ..., nOctaveLayers: int = ..., contrastThreshold: float = ..., edgeThreshold: float = ..., sigma: float = ..., enable_precise_upscale: bool = ...) -> SIFT: ...
    @classmethod
    @_typing.overload
    def create(cls, nfeatures: int, nOctaveLayers: int, contrastThreshold: float, edgeThreshold: float, sigma: float, descriptorType: int, enable_precise_upscale: bool = ...) -> SIFT: ...


    def getDefaultName(self) -> str: ...


    def setNFeatures(self, maxFeatures: int) -> None: ...


    def getNFeatures(self) -> int: ...


    def setNOctaveLayers(self, nOctaveLayers: int) -> None: ...


    def getNOctaveLayers(self) -> int: ...


    def setContrastThreshold(self, contrastThreshold: float) -> None: ...


    def getContrastThreshold(self) -> float: ...


    def setEdgeThreshold(self, edgeThreshold: float) -> None: ...


    def getEdgeThreshold(self) -> float: ...


    def setSigma(self, sigma: float) -> None: ...


    def getSigma(self) -> float: ...
|
|
|
|
class BRISK(Feature2D):
    """BRISK keypoint detector / descriptor extractor.

    ``create`` overloads: default thresholds, a custom sampling pattern
    (radius/number lists), or both combined.
    """

    @classmethod
    @_typing.overload
    def create(cls, thresh: int = ..., octaves: int = ..., patternScale: float = ...) -> BRISK: ...
    @classmethod
    @_typing.overload
    def create(cls, radiusList: _typing.Sequence[float], numberList: _typing.Sequence[int], dMax: float = ..., dMin: float = ..., indexChange: _typing.Sequence[int] = ...) -> BRISK: ...
    @classmethod
    @_typing.overload
    def create(cls, thresh: int, octaves: int, radiusList: _typing.Sequence[float], numberList: _typing.Sequence[int], dMax: float = ..., dMin: float = ..., indexChange: _typing.Sequence[int] = ...) -> BRISK: ...


    def getDefaultName(self) -> str: ...


    def setThreshold(self, threshold: int) -> None: ...


    def getThreshold(self) -> int: ...


    def setOctaves(self, octaves: int) -> None: ...


    def getOctaves(self) -> int: ...


    def setPatternScale(self, patternScale: float) -> None: ...


    def getPatternScale(self) -> float: ...
|
|
|
|
class ORB(Feature2D):
    """ORB keypoint detector / descriptor extractor.

    All construction parameters from ``create`` are also available as
    individual getter/setter pairs.
    """

    @classmethod
    def create(cls, nfeatures: int = ..., scaleFactor: float = ..., nlevels: int = ..., edgeThreshold: int = ..., firstLevel: int = ..., WTA_K: int = ..., scoreType: ORB_ScoreType = ..., patchSize: int = ..., fastThreshold: int = ...) -> ORB: ...


    def setMaxFeatures(self, maxFeatures: int) -> None: ...


    def getMaxFeatures(self) -> int: ...


    def setScaleFactor(self, scaleFactor: float) -> None: ...


    def getScaleFactor(self) -> float: ...


    def setNLevels(self, nlevels: int) -> None: ...


    def getNLevels(self) -> int: ...


    def setEdgeThreshold(self, edgeThreshold: int) -> None: ...


    def getEdgeThreshold(self) -> int: ...


    def setFirstLevel(self, firstLevel: int) -> None: ...


    def getFirstLevel(self) -> int: ...


    def setWTA_K(self, wta_k: int) -> None: ...


    def getWTA_K(self) -> int: ...


    def setScoreType(self, scoreType: ORB_ScoreType) -> None: ...


    def getScoreType(self) -> ORB_ScoreType: ...


    def setPatchSize(self, patchSize: int) -> None: ...


    def getPatchSize(self) -> int: ...


    def setFastThreshold(self, fastThreshold: int) -> None: ...


    def getFastThreshold(self) -> int: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class MSER(Feature2D):
    """Maximally Stable Extremal Region extractor.

    Besides the Feature2D interface, ``detectRegions`` returns the raw
    regions (point lists) and their bounding boxes. All ``create``
    parameters have getter/setter pairs.
    """

    @classmethod
    def create(cls, delta: int = ..., min_area: int = ..., max_area: int = ..., max_variation: float = ..., min_diversity: float = ..., max_evolution: int = ..., area_threshold: float = ..., min_margin: float = ..., edge_blur_size: int = ...) -> MSER: ...


    @_typing.overload
    def detectRegions(self, image: cv2.typing.MatLike) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[cv2.typing.Rect]]: ...
    @_typing.overload
    def detectRegions(self, image: UMat) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[cv2.typing.Rect]]: ...


    def setDelta(self, delta: int) -> None: ...


    def getDelta(self) -> int: ...


    def setMinArea(self, minArea: int) -> None: ...


    def getMinArea(self) -> int: ...


    def setMaxArea(self, maxArea: int) -> None: ...


    def getMaxArea(self) -> int: ...


    def setMaxVariation(self, maxVariation: float) -> None: ...


    def getMaxVariation(self) -> float: ...


    def setMinDiversity(self, minDiversity: float) -> None: ...


    def getMinDiversity(self) -> float: ...


    def setMaxEvolution(self, maxEvolution: int) -> None: ...


    def getMaxEvolution(self) -> int: ...


    def setAreaThreshold(self, areaThreshold: float) -> None: ...


    def getAreaThreshold(self) -> float: ...


    def setMinMargin(self, min_margin: float) -> None: ...


    def getMinMargin(self) -> float: ...


    def setEdgeBlurSize(self, edge_blur_size: int) -> None: ...


    def getEdgeBlurSize(self) -> int: ...


    def setPass2Only(self, f: bool) -> None: ...


    def getPass2Only(self) -> bool: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class FastFeatureDetector(Feature2D):
    """FAST corner detector; tunables: threshold, non-max suppression flag
    and detector ``type`` (a FastFeatureDetector_DetectorType enum)."""

    @classmethod
    def create(cls, threshold: int = ..., nonmaxSuppression: bool = ..., type: FastFeatureDetector_DetectorType = ...) -> FastFeatureDetector: ...


    def setThreshold(self, threshold: int) -> None: ...


    def getThreshold(self) -> int: ...


    def setNonmaxSuppression(self, f: bool) -> None: ...


    def getNonmaxSuppression(self) -> bool: ...


    def setType(self, type: FastFeatureDetector_DetectorType) -> None: ...


    def getType(self) -> FastFeatureDetector_DetectorType: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class AgastFeatureDetector(Feature2D):
    """AGAST corner detector; interface mirrors FastFeatureDetector but
    with an AgastFeatureDetector_DetectorType enum."""

    @classmethod
    def create(cls, threshold: int = ..., nonmaxSuppression: bool = ..., type: AgastFeatureDetector_DetectorType = ...) -> AgastFeatureDetector: ...


    def setThreshold(self, threshold: int) -> None: ...


    def getThreshold(self) -> int: ...


    def setNonmaxSuppression(self, f: bool) -> None: ...


    def getNonmaxSuppression(self) -> bool: ...


    def setType(self, type: AgastFeatureDetector_DetectorType) -> None: ...


    def getType(self) -> AgastFeatureDetector_DetectorType: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class GFTTDetector(Feature2D):
    """Good-Features-To-Track (Shi-Tomasi / Harris) corner detector.

    The second ``create`` overload adds an explicit gradient size.
    """

    @classmethod
    @_typing.overload
    def create(cls, maxCorners: int = ..., qualityLevel: float = ..., minDistance: float = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> GFTTDetector: ...
    # NOTE(review): `gradiantSize` (sic) matches the upstream binding's
    # keyword-argument name — keep the spelling as-is.
    @classmethod
    @_typing.overload
    def create(cls, maxCorners: int, qualityLevel: float, minDistance: float, blockSize: int, gradiantSize: int, useHarrisDetector: bool = ..., k: float = ...) -> GFTTDetector: ...


    def setMaxFeatures(self, maxFeatures: int) -> None: ...


    def getMaxFeatures(self) -> int: ...


    def setQualityLevel(self, qlevel: float) -> None: ...


    def getQualityLevel(self) -> float: ...


    def setMinDistance(self, minDistance: float) -> None: ...


    def getMinDistance(self) -> float: ...


    def setBlockSize(self, blockSize: int) -> None: ...


    def getBlockSize(self) -> int: ...


    def setGradientSize(self, gradientSize_: int) -> None: ...


    def getGradientSize(self) -> int: ...


    def setHarrisDetector(self, val: bool) -> None: ...


    def getHarrisDetector(self) -> bool: ...


    def setK(self, k: float) -> None: ...


    def getK(self) -> float: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class SimpleBlobDetector(Feature2D):
    """Blob detector driven by a nested ``Params`` configuration object.

    ``create`` takes a Params instance; ``getBlobContours`` returns the
    contours collected when ``Params.collectContours`` is enabled.
    """

    class Params:
        """Plain configuration record for SimpleBlobDetector.

        Thresholding range, repeatability/distance constraints, and the
        per-criterion filter toggles (color, area, circularity, inertia,
        convexity) with their min/max bounds.
        """
        # Thresholding sweep: [minThreshold, maxThreshold] stepped by thresholdStep.
        thresholdStep: float
        minThreshold: float
        maxThreshold: float
        minRepeatability: int
        minDistBetweenBlobs: float
        # Each filterBy* flag enables the corresponding min/max criterion below.
        filterByColor: bool
        blobColor: int
        filterByArea: bool
        minArea: float
        maxArea: float
        filterByCircularity: bool
        minCircularity: float
        maxCircularity: float
        filterByInertia: bool
        minInertiaRatio: float
        maxInertiaRatio: float
        filterByConvexity: bool
        minConvexity: float
        maxConvexity: float
        # When True, detected blob contours are retained (see getBlobContours).
        collectContours: bool


    
        def __init__(self) -> None: ...




    
    @classmethod
    def create(cls, parameters: SimpleBlobDetector.Params = ...) -> SimpleBlobDetector: ...


    def setParams(self, params: SimpleBlobDetector.Params) -> None: ...


    def getParams(self) -> SimpleBlobDetector.Params: ...


    def getDefaultName(self) -> str: ...


    def getBlobContours(self) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...
|
|
|
|
class KAZE(Feature2D):
    """KAZE keypoint detector / descriptor extractor.

    Tunables: extended/upright descriptor flags, detection threshold,
    octave structure, and nonlinear diffusion type (KAZE_DiffusivityType).
    """

    @classmethod
    def create(cls, extended: bool = ..., upright: bool = ..., threshold: float = ..., nOctaves: int = ..., nOctaveLayers: int = ..., diffusivity: KAZE_DiffusivityType = ...) -> KAZE: ...


    def setExtended(self, extended: bool) -> None: ...


    def getExtended(self) -> bool: ...


    def setUpright(self, upright: bool) -> None: ...


    def getUpright(self) -> bool: ...


    def setThreshold(self, threshold: float) -> None: ...


    def getThreshold(self) -> float: ...


    def setNOctaves(self, octaves: int) -> None: ...


    def getNOctaves(self) -> int: ...


    def setNOctaveLayers(self, octaveLayers: int) -> None: ...


    def getNOctaveLayers(self) -> int: ...


    def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ...


    def getDiffusivity(self) -> KAZE_DiffusivityType: ...


    def getDefaultName(self) -> str: ...
|
|
|
|
class AKAZE(Feature2D):
    """AKAZE keypoint detector / descriptor extractor.

    Like KAZE but with a configurable descriptor (type, size, channel
    count) and a ``max_points`` cap on returned keypoints.
    """

    @classmethod
    def create(cls, descriptor_type: AKAZE_DescriptorType = ..., descriptor_size: int = ..., descriptor_channels: int = ..., threshold: float = ..., nOctaves: int = ..., nOctaveLayers: int = ..., diffusivity: KAZE_DiffusivityType = ..., max_points: int = ...) -> AKAZE: ...


    def setDescriptorType(self, dtype: AKAZE_DescriptorType) -> None: ...


    def getDescriptorType(self) -> AKAZE_DescriptorType: ...


    def setDescriptorSize(self, dsize: int) -> None: ...


    def getDescriptorSize(self) -> int: ...


    def setDescriptorChannels(self, dch: int) -> None: ...


    def getDescriptorChannels(self) -> int: ...


    def setThreshold(self, threshold: float) -> None: ...


    def getThreshold(self) -> float: ...


    def setNOctaves(self, octaves: int) -> None: ...


    def getNOctaves(self) -> int: ...


    def setNOctaveLayers(self, octaveLayers: int) -> None: ...


    def getNOctaveLayers(self) -> int: ...


    def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ...


    def getDiffusivity(self) -> KAZE_DiffusivityType: ...


    def getDefaultName(self) -> str: ...


    def setMaxPoints(self, max_points: int) -> None: ...


    def getMaxPoints(self) -> int: ...
|
|
|
|
class DescriptorMatcher(Algorithm):
    """Abstract base for descriptor matchers.

    Train descriptors are accumulated with ``add`` (+ ``train``); matching
    comes in three flavors: best match (``match``), k best matches
    (``knnMatch``) and all matches within a distance (``radiusMatch``).
    Each flavor has explicit-train-set and stored-train-set overloads, in
    MatLike and UMat variants. ``create`` builds a concrete matcher from a
    name string or a DescriptorMatcher_MatcherType enum.
    """

    @_typing.overload
    def add(self, descriptors: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
    @_typing.overload
    def add(self, descriptors: _typing.Sequence[UMat]) -> None: ...


    def getTrainDescriptors(self) -> _typing.Sequence[cv2.typing.MatLike]: ...


    def clear(self) -> None: ...


    def empty(self) -> bool: ...


    def isMaskSupported(self) -> bool: ...


    def train(self) -> None: ...


    @_typing.overload
    def match(self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> _typing.Sequence[DMatch]: ...
    @_typing.overload
    def match(self, queryDescriptors: UMat, trainDescriptors: UMat, mask: UMat | None = ...) -> _typing.Sequence[DMatch]: ...
    @_typing.overload
    def match(self, queryDescriptors: cv2.typing.MatLike, masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[DMatch]: ...
    @_typing.overload
    def match(self, queryDescriptors: UMat, masks: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[DMatch]: ...


    @_typing.overload
    def knnMatch(self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike, k: int, mask: cv2.typing.MatLike | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...
    @_typing.overload
    def knnMatch(self, queryDescriptors: UMat, trainDescriptors: UMat, k: int, mask: UMat | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...
    @_typing.overload
    def knnMatch(self, queryDescriptors: cv2.typing.MatLike, k: int, masks: _typing.Sequence[cv2.typing.MatLike] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...
    @_typing.overload
    def knnMatch(self, queryDescriptors: UMat, k: int, masks: _typing.Sequence[UMat] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...


    @_typing.overload
    def radiusMatch(self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike, maxDistance: float, mask: cv2.typing.MatLike | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...
    @_typing.overload
    def radiusMatch(self, queryDescriptors: UMat, trainDescriptors: UMat, maxDistance: float, mask: UMat | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...
    @_typing.overload
    def radiusMatch(self, queryDescriptors: cv2.typing.MatLike, maxDistance: float, masks: _typing.Sequence[cv2.typing.MatLike] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...
    @_typing.overload
    def radiusMatch(self, queryDescriptors: UMat, maxDistance: float, masks: _typing.Sequence[UMat] | None = ..., compactResult: bool = ...) -> _typing.Sequence[_typing.Sequence[DMatch]]: ...


    @_typing.overload
    def write(self, fileName: str) -> None: ...
    @_typing.overload
    def write(self, fs: FileStorage, name: str) -> None: ...


    @_typing.overload
    def read(self, fileName: str) -> None: ...
    @_typing.overload
    def read(self, arg1: FileNode) -> None: ...


    def clone(self, emptyTrainData: bool = ...) -> DescriptorMatcher: ...


    @classmethod
    @_typing.overload
    def create(cls, descriptorMatcherType: str) -> DescriptorMatcher: ...
    @classmethod
    @_typing.overload
    def create(cls, matcherType: DescriptorMatcher_MatcherType) -> DescriptorMatcher: ...
|
|
|
|
class BFMatcher(DescriptorMatcher):
    """Brute-force descriptor matcher, configured by a norm type and an
    optional cross-check flag."""

    def __init__(self, normType: int = ..., crossCheck: bool = ...) -> None: ...


    @classmethod
    def create(cls, normType: int = ..., crossCheck: bool = ...) -> BFMatcher: ...
|
|
|
|
class FlannBasedMatcher(DescriptorMatcher):
    """FLANN-based descriptor matcher, configured by index and search
    parameter dictionaries."""

    def __init__(self, indexParams: cv2.typing.IndexParams = ..., searchParams: cv2.typing.SearchParams = ...) -> None: ...


    @classmethod
    def create(cls) -> FlannBasedMatcher: ...
|
|
|
|
class BOWTrainer:
    """Abstract trainer for bag-of-visual-words vocabularies.

    Descriptor matrices are accumulated with ``add``; ``cluster`` builds
    the vocabulary either from the accumulated set or from an explicitly
    supplied descriptor matrix.
    """

    def add(self, descriptors: cv2.typing.MatLike) -> None: ...


    def getDescriptors(self) -> _typing.Sequence[cv2.typing.MatLike]: ...


    def descriptorsCount(self) -> int: ...


    def clear(self) -> None: ...


    @_typing.overload
    def cluster(self) -> cv2.typing.MatLike: ...
    @_typing.overload
    def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
|
|
class BOWKMeansTrainer(BOWTrainer):
    """k-means based bag-of-words trainer; ``clusterCount`` fixes the
    vocabulary size, with standard k-means termination/attempt/flags
    options."""

    def __init__(self, clusterCount: int, termcrit: cv2.typing.TermCriteria = ..., attempts: int = ..., flags: int = ...) -> None: ...


    @_typing.overload
    def cluster(self) -> cv2.typing.MatLike: ...
    @_typing.overload
    def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
|
|
class BOWImgDescriptorExtractor:
    """Computes a bag-of-words image descriptor.

    Built from a Feature2D descriptor extractor and a DescriptorMatcher;
    after ``setVocabulary``, ``compute`` turns an image's keypoints into a
    single BoW histogram descriptor.
    """

    def __init__(self, dextractor: Feature2D, dmatcher: DescriptorMatcher) -> None: ...


    def setVocabulary(self, vocabulary: cv2.typing.MatLike) -> None: ...


    def getVocabulary(self) -> cv2.typing.MatLike: ...


    def compute(self, image: cv2.typing.MatLike, keypoints: _typing.Sequence[KeyPoint], imgDescriptor: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...


    def descriptorSize(self) -> int: ...


    def descriptorType(self) -> int: ...
|
|
|
|
class Animation:
    """Type stub for ``cv2.Animation``: a frame sequence with timing metadata."""
    loop_count: int  # number of playback loops; exact sentinel values not visible here
    bgcolor: cv2.typing.Scalar  # background color
    durations: _typing.Sequence[int]  # per-frame durations (units not visible in this stub)
    frames: _typing.Sequence[cv2.typing.MatLike]  # frame images, parallel to `durations`
|
|
class IStreamReader:
    """Type stub for ``cv2.IStreamReader``: opaque stream-source interface
    accepted by ``VideoCapture`` (no members exposed to Python)."""
    ...
|
|
class VideoCapture:
    """Type stub for ``cv2.VideoCapture``.

    A capture source may be constructed/opened from a filename/URL, a device
    index, or an ``IStreamReader``, optionally with an API-preference backend
    id and extra integer parameters.
    """

    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, filename: str, apiPreference: int = ...) -> None: ...
    @_typing.overload
    def __init__(self, filename: str, apiPreference: int, params: _typing.Sequence[int]) -> None: ...
    @_typing.overload
    def __init__(self, index: int, apiPreference: int = ...) -> None: ...
    @_typing.overload
    def __init__(self, index: int, apiPreference: int, params: _typing.Sequence[int]) -> None: ...
    @_typing.overload
    def __init__(self, source: IStreamReader, apiPreference: int, params: _typing.Sequence[int]) -> None: ...


    # open() mirrors the non-default __init__ overloads and reports success.
    @_typing.overload
    def open(self, filename: str, apiPreference: int = ...) -> bool: ...
    @_typing.overload
    def open(self, filename: str, apiPreference: int, params: _typing.Sequence[int]) -> bool: ...
    @_typing.overload
    def open(self, index: int, apiPreference: int = ...) -> bool: ...
    @_typing.overload
    def open(self, index: int, apiPreference: int, params: _typing.Sequence[int]) -> bool: ...
    @_typing.overload
    def open(self, source: IStreamReader, apiPreference: int, params: _typing.Sequence[int]) -> bool: ...


    def isOpened(self) -> bool: ...


    def release(self) -> None: ...


    def grab(self) -> bool: ...


    # retrieve()/read() return (success, frame); the frame type follows the
    # optional output-buffer argument (MatLike vs UMat).
    @_typing.overload
    def retrieve(self, image: cv2.typing.MatLike | None = ..., flag: int = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def retrieve(self, image: UMat | None = ..., flag: int = ...) -> tuple[bool, UMat]: ...


    @_typing.overload
    def read(self, image: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def read(self, image: UMat | None = ...) -> tuple[bool, UMat]: ...


    # Property access by CAP_PROP_* id; values are exchanged as floats.
    def set(self, propId: int, value: float) -> bool: ...


    def get(self, propId: int) -> float: ...


    def getBackendName(self) -> str: ...


    def setExceptionMode(self, enable: bool) -> None: ...


    def getExceptionMode(self) -> bool: ...


    @staticmethod
    def waitAny(streams: _typing.Sequence[VideoCapture], timeoutNs: int = ...) -> tuple[bool, _typing.Sequence[int]]: ...
|
|
|
|
class VideoWriter:
    """Type stub for ``cv2.VideoWriter``.

    Writers are constructed/opened from a filename plus a fourcc code, fps
    and frame size, optionally with an API-preference backend id and either
    an ``isColor`` flag or a sequence of extra integer parameters.
    """

    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> None: ...
    @_typing.overload
    def __init__(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> None: ...
    @_typing.overload
    def __init__(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> None: ...
    @_typing.overload
    def __init__(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> None: ...


    # open() mirrors the non-default __init__ overloads and reports success.
    @_typing.overload
    def open(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> bool: ...
    @_typing.overload
    def open(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> bool: ...
    @_typing.overload
    def open(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> bool: ...
    @_typing.overload
    def open(self, filename: str, apiPreference: int, fourcc: int, fps: float, frameSize: cv2.typing.Size, params: _typing.Sequence[int]) -> bool: ...


    def isOpened(self) -> bool: ...


    def release(self) -> None: ...


    @_typing.overload
    def write(self, image: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def write(self, image: UMat) -> None: ...


    # Property access by VIDEOWRITER_PROP_* id; values are exchanged as floats.
    def set(self, propId: int, value: float) -> bool: ...


    def get(self, propId: int) -> float: ...


    # Packs four characters into an integer fourcc codec code.
    @staticmethod
    def fourcc(c1: str, c2: str, c3: str, c4: str) -> int: ...


    def getBackendName(self) -> str: ...
|
|
|
|
class UsacParams:
    """Type stub for ``cv2.UsacParams``: parameter bag for the USAC robust
    estimation framework. Field semantics live in the OpenCV calib3d docs,
    not in this stub."""
    confidence: float
    isParallel: bool
    loIterations: int  # lo* fields configure local optimization
    loMethod: LocalOptimMethod
    loSampleSize: int
    maxIterations: int
    neighborsSearch: NeighborSearchMethod
    randomGeneratorState: int
    sampler: SamplingMethod
    score: ScoreMethod
    threshold: float
    final_polisher: PolishingMethod
    final_polisher_iterations: int


    
    def __init__(self) -> None: ...
|
|
|
|
class CirclesGridFinderParameters:
    """Type stub for ``cv2.CirclesGridFinderParameters``: tuning knobs for
    circles-grid calibration-pattern detection. Field semantics live in the
    OpenCV calib3d docs, not in this stub."""
    densityNeighborhoodSize: cv2.typing.Size2f
    minDensity: float
    kmeansAttempts: int
    minDistanceToAddKeypoint: int
    keypointScale: int
    minGraphConfidence: float
    vertexGain: float
    vertexPenalty: float
    existingVertexGain: float
    edgeGain: float
    edgePenalty: float
    convexHullFactor: float
    minRNGEdgeSwitchDist: float
    squareSize: float
    maxRectifiedDistance: float


    
    def __init__(self) -> None: ...
|
|
|
|
class StereoMatcher(Algorithm):
    """Type stub for ``cv2.StereoMatcher``: base interface for stereo
    disparity computation (see ``StereoBM`` / ``StereoSGBM`` subclasses).

    :meth:`compute` takes left/right images and returns the disparity map;
    the remaining methods are getter/setter pairs for tuning parameters.
    """

    @_typing.overload
    def compute(self, left: cv2.typing.MatLike, right: cv2.typing.MatLike, disparity: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def compute(self, left: UMat, right: UMat, disparity: UMat | None = ...) -> UMat: ...


    def getMinDisparity(self) -> int: ...


    def setMinDisparity(self, minDisparity: int) -> None: ...


    def getNumDisparities(self) -> int: ...


    def setNumDisparities(self, numDisparities: int) -> None: ...


    def getBlockSize(self) -> int: ...


    def setBlockSize(self, blockSize: int) -> None: ...


    def getSpeckleWindowSize(self) -> int: ...


    def setSpeckleWindowSize(self, speckleWindowSize: int) -> None: ...


    def getSpeckleRange(self) -> int: ...


    def setSpeckleRange(self, speckleRange: int) -> None: ...


    def getDisp12MaxDiff(self) -> int: ...


    def setDisp12MaxDiff(self, disp12MaxDiff: int) -> None: ...
|
|
|
|
class StereoBM(StereoMatcher):
    """Type stub for ``cv2.StereoBM``: block-matching ``StereoMatcher``.

    All methods are getter/setter pairs for BM-specific tuning parameters;
    instances are obtained via :meth:`create`.
    """

    def getPreFilterType(self) -> int: ...


    def setPreFilterType(self, preFilterType: int) -> None: ...


    def getPreFilterSize(self) -> int: ...


    def setPreFilterSize(self, preFilterSize: int) -> None: ...


    def getPreFilterCap(self) -> int: ...


    def setPreFilterCap(self, preFilterCap: int) -> None: ...


    def getTextureThreshold(self) -> int: ...


    def setTextureThreshold(self, textureThreshold: int) -> None: ...


    def getUniquenessRatio(self) -> int: ...


    def setUniquenessRatio(self, uniquenessRatio: int) -> None: ...


    def getSmallerBlockSize(self) -> int: ...


    def setSmallerBlockSize(self, blockSize: int) -> None: ...


    def getROI1(self) -> cv2.typing.Rect: ...


    def setROI1(self, roi1: cv2.typing.Rect) -> None: ...


    def getROI2(self) -> cv2.typing.Rect: ...


    def setROI2(self, roi2: cv2.typing.Rect) -> None: ...


    @classmethod
    def create(cls, numDisparities: int = ..., blockSize: int = ...) -> StereoBM: ...
|
|
|
|
class StereoSGBM(StereoMatcher):
    """Type stub for ``cv2.StereoSGBM``: semi-global block-matching
    ``StereoMatcher`` (per the class name; algorithm details are in the
    OpenCV docs). Instances are obtained via :meth:`create`.
    """

    def getPreFilterCap(self) -> int: ...


    def setPreFilterCap(self, preFilterCap: int) -> None: ...


    def getUniquenessRatio(self) -> int: ...


    def setUniquenessRatio(self, uniquenessRatio: int) -> None: ...


    # P1/P2 are the SGBM smoothness penalty parameters.
    def getP1(self) -> int: ...


    def setP1(self, P1: int) -> None: ...


    def getP2(self) -> int: ...


    def setP2(self, P2: int) -> None: ...


    def getMode(self) -> int: ...


    def setMode(self, mode: int) -> None: ...


    @classmethod
    def create(cls, minDisparity: int = ..., numDisparities: int = ..., blockSize: int = ..., P1: int = ..., P2: int = ..., disp12MaxDiff: int = ..., preFilterCap: int = ..., uniquenessRatio: int = ..., speckleWindowSize: int = ..., speckleRange: int = ..., mode: int = ...) -> StereoSGBM: ...
|
|
|
|
class BaseCascadeClassifier(Algorithm):
    """Type stub for ``cv2.BaseCascadeClassifier``: opaque base interface
    (no members exposed to Python)."""
    ...
|
|
class CascadeClassifier:
    """Type stub for ``cv2.CascadeClassifier``.

    Loads a cascade from an XML file (constructor, :meth:`load`, or
    :meth:`read`) and detects objects with the ``detectMultiScale`` family:
    the base variant returns rectangles only, ``detectMultiScale2`` adds
    per-rectangle neighbor counts, ``detectMultiScale3`` adds reject levels
    and weights.
    """

    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, filename: str) -> None: ...


    def empty(self) -> bool: ...


    def load(self, filename: str) -> bool: ...


    def read(self, node: FileNode) -> bool: ...


    @_typing.overload
    def detectMultiScale(self, image: cv2.typing.MatLike, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> _typing.Sequence[cv2.typing.Rect]: ...
    @_typing.overload
    def detectMultiScale(self, image: UMat, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> _typing.Sequence[cv2.typing.Rect]: ...


    @_typing.overload
    def detectMultiScale2(self, image: cv2.typing.MatLike, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int]]: ...
    @_typing.overload
    def detectMultiScale2(self, image: UMat, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int]]: ...


    @_typing.overload
    def detectMultiScale3(self, image: cv2.typing.MatLike, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ..., outputRejectLevels: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int], _typing.Sequence[float]]: ...
    @_typing.overload
    def detectMultiScale3(self, image: UMat, scaleFactor: float = ..., minNeighbors: int = ..., flags: int = ..., minSize: cv2.typing.Size = ..., maxSize: cv2.typing.Size = ..., outputRejectLevels: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int], _typing.Sequence[float]]: ...


    def isOldFormatCascade(self) -> bool: ...


    def getOriginalWindowSize(self) -> cv2.typing.Size: ...


    def getFeatureType(self) -> int: ...


    # Converts an old-format cascade file to the new format (file paths in/out).
    @staticmethod
    def convert(oldcascade: str, newcascade: str) -> bool: ...
|
|
|
|
class HOGDescriptor:
    """Type stub for ``cv2.HOGDescriptor`` (histogram-of-oriented-gradients
    descriptor/detector).

    Configuration fields are exposed as read-only properties (set only via
    ``__init__``); detection uses an SVM coefficient vector installed with
    :meth:`setSVMDetector` (e.g. :meth:`getDefaultPeopleDetector`).
    """
    @property
    def winSize(self) -> cv2.typing.Size: ...
    @property
    def blockSize(self) -> cv2.typing.Size: ...
    @property
    def blockStride(self) -> cv2.typing.Size: ...
    @property
    def cellSize(self) -> cv2.typing.Size: ...
    @property
    def nbins(self) -> int: ...
    @property
    def derivAperture(self) -> int: ...
    @property
    def winSigma(self) -> float: ...
    @property
    def histogramNormType(self) -> HOGDescriptor_HistogramNormType: ...
    @property
    def L2HysThreshold(self) -> float: ...
    @property
    def gammaCorrection(self) -> bool: ...
    @property
    def svmDetector(self) -> _typing.Sequence[float]: ...
    @property
    def nlevels(self) -> int: ...
    @property
    def signedGradient(self) -> bool: ...


    
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, _winSize: cv2.typing.Size, _blockSize: cv2.typing.Size, _blockStride: cv2.typing.Size, _cellSize: cv2.typing.Size, _nbins: int, _derivAperture: int = ..., _winSigma: float = ..., _histogramNormType: HOGDescriptor_HistogramNormType = ..., _L2HysThreshold: float = ..., _gammaCorrection: bool = ..., _nlevels: int = ..., _signedGradient: bool = ...) -> None: ...
    @_typing.overload
    def __init__(self, filename: str) -> None: ...


    def getDescriptorSize(self) -> int: ...


    def checkDetectorSize(self) -> bool: ...


    def getWinSigma(self) -> float: ...


    @_typing.overload
    def setSVMDetector(self, svmdetector: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def setSVMDetector(self, svmdetector: UMat) -> None: ...


    def load(self, filename: str, objname: str = ...) -> bool: ...


    def save(self, filename: str, objname: str = ...) -> None: ...


    @_typing.overload
    def compute(self, img: cv2.typing.MatLike, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., locations: _typing.Sequence[cv2.typing.Point] = ...) -> _typing.Sequence[float]: ...
    @_typing.overload
    def compute(self, img: UMat, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., locations: _typing.Sequence[cv2.typing.Point] = ...) -> _typing.Sequence[float]: ...


    # detect() returns hit points + weights; detectMultiScale() returns hit
    # rectangles + weights across scales.
    @_typing.overload
    def detect(self, img: cv2.typing.MatLike, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., searchLocations: _typing.Sequence[cv2.typing.Point] = ...) -> tuple[_typing.Sequence[cv2.typing.Point], _typing.Sequence[float]]: ...
    @_typing.overload
    def detect(self, img: UMat, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., searchLocations: _typing.Sequence[cv2.typing.Point] = ...) -> tuple[_typing.Sequence[cv2.typing.Point], _typing.Sequence[float]]: ...


    @_typing.overload
    def detectMultiScale(self, img: cv2.typing.MatLike, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., scale: float = ..., groupThreshold: float = ..., useMeanshiftGrouping: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ...
    @_typing.overload
    def detectMultiScale(self, img: UMat, hitThreshold: float = ..., winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., scale: float = ..., groupThreshold: float = ..., useMeanshiftGrouping: bool = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ...


    @_typing.overload
    def computeGradient(self, img: cv2.typing.MatLike, grad: cv2.typing.MatLike, angleOfs: cv2.typing.MatLike, paddingTL: cv2.typing.Size = ..., paddingBR: cv2.typing.Size = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeGradient(self, img: UMat, grad: UMat, angleOfs: UMat, paddingTL: cv2.typing.Size = ..., paddingBR: cv2.typing.Size = ...) -> tuple[UMat, UMat]: ...


    @staticmethod
    def getDefaultPeopleDetector() -> _typing.Sequence[float]: ...


    @staticmethod
    def getDaimlerPeopleDetector() -> _typing.Sequence[float]: ...
|
|
|
|
class QRCodeEncoder:
    """Type stub for ``cv2.QRCodeEncoder``: encodes strings into QR code
    images. Instances are obtained via :meth:`create`."""

    class Params:
        # Encoder configuration; field semantics per the OpenCV objdetect docs.
        version: int
        correction_level: QRCodeEncoder_CorrectionLevel
        mode: QRCodeEncoder_EncodeMode
        structure_number: int


        
        def __init__(self) -> None: ...




    
    @classmethod
    def create(cls, parameters: QRCodeEncoder.Params = ...) -> QRCodeEncoder: ...


    @_typing.overload
    def encode(self, encoded_info: str, qrcode: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def encode(self, encoded_info: str, qrcode: UMat | None = ...) -> UMat: ...


    # Structured-append mode splits the payload across multiple QR images.
    @_typing.overload
    def encodeStructuredAppend(self, encoded_info: str, qrcodes: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @_typing.overload
    def encodeStructuredAppend(self, encoded_info: str, qrcodes: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[UMat]: ...
|
|
|
|
class QRCodeDetector(GraphicalCodeDetector):
    """Type stub for ``cv2.QRCodeDetector``.

    Inherits the detect/decode API from ``GraphicalCodeDetector`` and adds
    QR-specific tuning (``setEpsX``/``setEpsY``/``setUseAlignmentMarkers``,
    fluent: each returns the detector) plus the ``*Curved`` variants for
    QR codes on curved surfaces.
    """

    def __init__(self) -> None: ...


    def setEpsX(self, epsX: float) -> QRCodeDetector: ...


    def setEpsY(self, epsY: float) -> QRCodeDetector: ...


    def setUseAlignmentMarkers(self, useAlignmentMarkers: bool) -> QRCodeDetector: ...


    @_typing.overload
    def decodeCurved(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_qrcode: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike]: ...
    @_typing.overload
    def decodeCurved(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ...


    @_typing.overload
    def detectAndDecodeCurved(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_qrcode: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def detectAndDecodeCurved(self, img: UMat, points: UMat | None = ..., straight_qrcode: UMat | None = ...) -> tuple[str, UMat, UMat]: ...
|
|
|
|
class GraphicalCodeDetector:
    """Type stub for ``cv2.GraphicalCodeDetector``: shared detect/decode API
    for graphical codes (base of ``QRCodeDetector`` and
    ``QRCodeDetectorAruco`` in this module).

    Single-code methods return one payload string; the ``*Multi`` variants
    return sequences for all codes found in the image.
    """

    @_typing.overload
    def detect(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def detect(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ...


    @_typing.overload
    def decode(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_code: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike]: ...
    @_typing.overload
    def decode(self, img: UMat, points: UMat, straight_code: UMat | None = ...) -> tuple[str, UMat]: ...


    @_typing.overload
    def detectAndDecode(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_code: cv2.typing.MatLike | None = ...) -> tuple[str, cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def detectAndDecode(self, img: UMat, points: UMat | None = ..., straight_code: UMat | None = ...) -> tuple[str, UMat, UMat]: ...


    @_typing.overload
    def detectMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def detectMulti(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ...


    @_typing.overload
    def decodeMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_code: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[cv2.typing.MatLike]]: ...
    @_typing.overload
    def decodeMulti(self, img: UMat, points: UMat, straight_code: _typing.Sequence[UMat] | None = ...) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[UMat]]: ...


    @_typing.overload
    def detectAndDecodeMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_code: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[str], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ...
    @_typing.overload
    def detectAndDecodeMulti(self, img: UMat, points: UMat | None = ..., straight_code: _typing.Sequence[UMat] | None = ...) -> tuple[bool, _typing.Sequence[str], UMat, _typing.Sequence[UMat]]: ...
|
|
|
|
class QRCodeDetectorAruco(GraphicalCodeDetector):
    """Type stub for ``cv2.QRCodeDetectorAruco``: QR detector built on the
    ArUco detection machinery; inherits the detect/decode API from
    ``GraphicalCodeDetector``."""

    class Params:
        # Detector tuning; field semantics per the OpenCV objdetect docs.
        minModuleSizeInPyramid: float
        maxRotation: float
        maxModuleSizeMismatch: float
        maxTimingPatternMismatch: float
        maxPenalties: float
        maxColorsMismatch: float
        scaleTimingPatternScore: float


        
        def __init__(self) -> None: ...




    
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, params: QRCodeDetectorAruco.Params) -> None: ...


    def getDetectorParameters(self) -> QRCodeDetectorAruco.Params: ...


    # Fluent: returns the detector itself.
    def setDetectorParameters(self, params: QRCodeDetectorAruco.Params) -> QRCodeDetectorAruco: ...


    def getArucoParameters(self) -> cv2.aruco.DetectorParameters: ...


    def setArucoParameters(self, params: cv2.aruco.DetectorParameters) -> None: ...
|
|
|
|
class FaceDetectorYN:
    """Type stub for ``cv2.FaceDetectorYN``: DNN-based face detector.

    Instances come from :meth:`create` (model file path, or in-memory model
    buffers); :meth:`detect` returns ``(num_faces, faces_matrix)``.
    """

    def setInputSize(self, input_size: cv2.typing.Size) -> None: ...


    def getInputSize(self) -> cv2.typing.Size: ...


    def setScoreThreshold(self, score_threshold: float) -> None: ...


    def getScoreThreshold(self) -> float: ...


    def setNMSThreshold(self, nms_threshold: float) -> None: ...


    def getNMSThreshold(self) -> float: ...


    def setTopK(self, top_k: int) -> None: ...


    def getTopK(self) -> int: ...


    @_typing.overload
    def detect(self, image: cv2.typing.MatLike, faces: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
    @_typing.overload
    def detect(self, image: UMat, faces: UMat | None = ...) -> tuple[int, UMat]: ...


    @classmethod
    @_typing.overload
    def create(cls, model: str, config: str, input_size: cv2.typing.Size, score_threshold: float = ..., nms_threshold: float = ..., top_k: int = ..., backend_id: int = ..., target_id: int = ...) -> FaceDetectorYN: ...
    @classmethod
    @_typing.overload
    def create(cls, framework: str, bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], input_size: cv2.typing.Size, score_threshold: float = ..., nms_threshold: float = ..., top_k: int = ..., backend_id: int = ..., target_id: int = ...) -> FaceDetectorYN: ...
|
|
|
|
class FaceRecognizerSF:
    """Type stub for ``cv2.FaceRecognizerSF``: DNN-based face recognizer.

    Pipeline visible here: :meth:`alignCrop` a detected face box,
    :meth:`feature` to extract an embedding, :meth:`match` to score two
    embeddings. Instances come from :meth:`create`.
    """

    @_typing.overload
    def alignCrop(self, src_img: cv2.typing.MatLike, face_box: cv2.typing.MatLike, aligned_img: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def alignCrop(self, src_img: UMat, face_box: UMat, aligned_img: UMat | None = ...) -> UMat: ...


    @_typing.overload
    def feature(self, aligned_img: cv2.typing.MatLike, face_feature: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def feature(self, aligned_img: UMat, face_feature: UMat | None = ...) -> UMat: ...


    @_typing.overload
    def match(self, face_feature1: cv2.typing.MatLike, face_feature2: cv2.typing.MatLike, dis_type: int = ...) -> float: ...
    @_typing.overload
    def match(self, face_feature1: UMat, face_feature2: UMat, dis_type: int = ...) -> float: ...


    @classmethod
    @_typing.overload
    def create(cls, model: str, config: str, backend_id: int = ..., target_id: int = ...) -> FaceRecognizerSF: ...
    @classmethod
    @_typing.overload
    def create(cls, framework: str, bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], backend_id: int = ..., target_id: int = ...) -> FaceRecognizerSF: ...
|
|
|
|
class Stitcher:
    """Type stub for ``cv2.Stitcher``: high-level image stitching pipeline.

    Instances come from :meth:`create`. :meth:`stitch` runs the full
    pipeline; :meth:`estimateTransform` + :meth:`composePanorama` split it
    into registration and compositing stages. Status codes are reported via
    ``Stitcher_Status``.
    """

    @classmethod
    def create(cls, mode: Stitcher_Mode = ...) -> Stitcher: ...


    # Resolution / threshold / flag accessors for pipeline configuration.
    def registrationResol(self) -> float: ...


    def setRegistrationResol(self, resol_mpx: float) -> None: ...


    def seamEstimationResol(self) -> float: ...


    def setSeamEstimationResol(self, resol_mpx: float) -> None: ...


    def compositingResol(self) -> float: ...


    def setCompositingResol(self, resol_mpx: float) -> None: ...


    def panoConfidenceThresh(self) -> float: ...


    def setPanoConfidenceThresh(self, conf_thresh: float) -> None: ...


    def waveCorrection(self) -> bool: ...


    def setWaveCorrection(self, flag: bool) -> None: ...


    def interpolationFlags(self) -> InterpolationFlags: ...


    def setInterpolationFlags(self, interp_flags: InterpolationFlags) -> None: ...


    @_typing.overload
    def estimateTransform(self, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> Stitcher_Status: ...
    @_typing.overload
    def estimateTransform(self, images: _typing.Sequence[UMat], masks: _typing.Sequence[UMat] | None = ...) -> Stitcher_Status: ...


    @_typing.overload
    def composePanorama(self, pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ...
    @_typing.overload
    def composePanorama(self, pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ...
    @_typing.overload
    def composePanorama(self, images: _typing.Sequence[cv2.typing.MatLike], pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ...
    @_typing.overload
    def composePanorama(self, images: _typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ...


    @_typing.overload
    def stitch(self, images: _typing.Sequence[cv2.typing.MatLike], pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ...
    @_typing.overload
    def stitch(self, images: _typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ...
    @_typing.overload
    def stitch(self, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike], pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ...
    @_typing.overload
    def stitch(self, images: _typing.Sequence[UMat], masks: _typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ...


    def component(self) -> _typing.Sequence[int]: ...


    def cameras(self) -> _typing.Sequence[cv2.detail.CameraParams]: ...


    def workScale(self) -> float: ...
|
|
|
|
class PyRotationWarper:
    """Type stub for ``cv2.PyRotationWarper``: Python wrapper over the
    stitching module's rotation warpers, selected by name via
    ``PyRotationWarper(type, scale)``.

    ``K`` and ``R`` arguments are the camera intrinsic and rotation
    matrices (per parameter naming; semantics live in the OpenCV docs).

    NOTE(review): the original stub declared the two ``warpPointBackward``
    overloads twice (four declarations, two distinct signatures); the
    redundant duplicates were removed. No interface change -- the surviving
    overload set is identical.
    """

    @_typing.overload
    def __init__(self, type: str, scale: float) -> None: ...
    @_typing.overload
    def __init__(self) -> None: ...


    @_typing.overload
    def warpPoint(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ...
    @_typing.overload
    def warpPoint(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ...


    @_typing.overload
    def warpPointBackward(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ...
    @_typing.overload
    def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ...


    # Returns (dst_roi, xmap, ymap) projection maps for remapping.
    @_typing.overload
    def buildMaps(self, src_size: cv2.typing.Size, K: cv2.typing.MatLike, R: cv2.typing.MatLike, xmap: cv2.typing.MatLike | None = ..., ymap: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Rect, cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def buildMaps(self, src_size: cv2.typing.Size, K: UMat, R: UMat, xmap: UMat | None = ..., ymap: UMat | None = ...) -> tuple[cv2.typing.Rect, UMat, UMat]: ...


    # Returns (top-left corner of the warped image, warped image).
    @_typing.overload
    def warp(self, src: cv2.typing.MatLike, K: cv2.typing.MatLike, R: cv2.typing.MatLike, interp_mode: int, border_mode: int, dst: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Point, cv2.typing.MatLike]: ...
    @_typing.overload
    def warp(self, src: UMat, K: UMat, R: UMat, interp_mode: int, border_mode: int, dst: UMat | None = ...) -> tuple[cv2.typing.Point, UMat]: ...


    @_typing.overload
    def warpBackward(self, src: cv2.typing.MatLike, K: cv2.typing.MatLike, R: cv2.typing.MatLike, interp_mode: int, border_mode: int, dst_size: cv2.typing.Size, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def warpBackward(self, src: UMat, K: UMat, R: UMat, interp_mode: int, border_mode: int, dst_size: cv2.typing.Size, dst: UMat | None = ...) -> UMat: ...


    @_typing.overload
    def warpRoi(self, src_size: cv2.typing.Size, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Rect: ...
    @_typing.overload
    def warpRoi(self, src_size: cv2.typing.Size, K: UMat, R: UMat) -> cv2.typing.Rect: ...


    def getScale(self) -> float: ...


    def setScale(self, arg1: float) -> None: ...
|
|
|
|
class WarperCreator:
    """Type stub for ``cv2.WarperCreator``: opaque warper-factory interface
    (no members exposed to Python)."""
    ...
|
|
class BackgroundSubtractor(Algorithm):
    """Type stub for ``cv2.BackgroundSubtractor``: base interface for
    background/foreground segmentation.

    :meth:`apply` feeds one frame and returns the foreground mask;
    :meth:`getBackgroundImage` returns the current background model image.
    """

    @_typing.overload
    def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ...


    @_typing.overload
    def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getBackgroundImage(self, backgroundImage: UMat | None = ...) -> UMat: ...
|
|
|
|
class BackgroundSubtractorMOG2(BackgroundSubtractor):
    """Type stub for ``cv2.BackgroundSubtractorMOG2``: Gaussian-mixture
    ``BackgroundSubtractor`` (MOG2). All methods besides :meth:`apply` are
    getter/setter pairs for model parameters; semantics per OpenCV docs.
    """

    def getHistory(self) -> int: ...


    def setHistory(self, history: int) -> None: ...


    def getNMixtures(self) -> int: ...


    def setNMixtures(self, nmixtures: int) -> None: ...


    def getBackgroundRatio(self) -> float: ...


    def setBackgroundRatio(self, ratio: float) -> None: ...


    def getVarThreshold(self) -> float: ...


    def setVarThreshold(self, varThreshold: float) -> None: ...


    def getVarThresholdGen(self) -> float: ...


    def setVarThresholdGen(self, varThresholdGen: float) -> None: ...


    def getVarInit(self) -> float: ...


    def setVarInit(self, varInit: float) -> None: ...


    def getVarMin(self) -> float: ...


    def setVarMin(self, varMin: float) -> None: ...


    def getVarMax(self) -> float: ...


    def setVarMax(self, varMax: float) -> None: ...


    def getComplexityReductionThreshold(self) -> float: ...


    def setComplexityReductionThreshold(self, ct: float) -> None: ...


    def getDetectShadows(self) -> bool: ...


    def setDetectShadows(self, detectShadows: bool) -> None: ...


    def getShadowValue(self) -> int: ...


    def setShadowValue(self, value: int) -> None: ...


    def getShadowThreshold(self) -> float: ...


    def setShadowThreshold(self, threshold: float) -> None: ...


    # Re-declared from the base class (same overload set).
    @_typing.overload
    def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ...
|
|
|
|
class BackgroundSubtractorKNN(BackgroundSubtractor):
    """Type stub for ``cv2.BackgroundSubtractorKNN``: K-nearest-neighbours
    ``BackgroundSubtractor``. All methods are getter/setter pairs for model
    parameters; semantics per OpenCV docs.
    """

    def getHistory(self) -> int: ...


    def setHistory(self, history: int) -> None: ...


    def getNSamples(self) -> int: ...


    def setNSamples(self, _nN: int) -> None: ...


    def getDist2Threshold(self) -> float: ...


    def setDist2Threshold(self, _dist2Threshold: float) -> None: ...


    def getkNNSamples(self) -> int: ...


    def setkNNSamples(self, _nkNN: int) -> None: ...


    def getDetectShadows(self) -> bool: ...


    def setDetectShadows(self, detectShadows: bool) -> None: ...


    def getShadowValue(self) -> int: ...


    def setShadowValue(self, value: int) -> None: ...


    def getShadowThreshold(self) -> float: ...


    def setShadowThreshold(self, threshold: float) -> None: ...
|
|
|
|
class KalmanFilter:
    """Type stub for ``cv2.KalmanFilter``: standard Kalman filter with the
    usual predict/correct cycle; state and covariance matrices are exposed
    as mutable attributes."""
    statePre: cv2.typing.MatLike  # predicted state x'(k)
    statePost: cv2.typing.MatLike  # corrected state x(k)
    transitionMatrix: cv2.typing.MatLike  # A
    controlMatrix: cv2.typing.MatLike  # B
    measurementMatrix: cv2.typing.MatLike  # H
    processNoiseCov: cv2.typing.MatLike  # Q
    measurementNoiseCov: cv2.typing.MatLike  # R
    errorCovPre: cv2.typing.MatLike  # P'(k)
    gain: cv2.typing.MatLike  # Kalman gain K(k)
    errorCovPost: cv2.typing.MatLike  # P(k)


    
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, dynamParams: int, measureParams: int, controlParams: int = ..., type: int = ...) -> None: ...


    def predict(self, control: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...


    def correct(self, measurement: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
|
|
class DenseOpticalFlow(Algorithm):
    """Type stub for ``cv2.DenseOpticalFlow``: base interface for dense
    optical-flow algorithms. :meth:`calc` computes flow between two frames
    into/returning ``flow``."""

    @_typing.overload
    def calc(self, I0: cv2.typing.MatLike, I1: cv2.typing.MatLike, flow: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
    @_typing.overload
    def calc(self, I0: UMat, I1: UMat, flow: UMat) -> UMat: ...


    def collectGarbage(self) -> None: ...
|
|
|
|
class SparseOpticalFlow(Algorithm):
    """Stub of the base class for sparse optical flow algorithms.

    ``calc`` tracks the points ``prevPts`` from ``prevImg`` into ``nextImg``
    and returns ``(nextPts, status, err)``.
    """

    @_typing.overload
    def calc(self, prevImg: cv2.typing.MatLike, nextImg: cv2.typing.MatLike, prevPts: cv2.typing.MatLike, nextPts: cv2.typing.MatLike, status: cv2.typing.MatLike | None = ..., err: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def calc(self, prevImg: UMat, nextImg: UMat, prevPts: UMat, nextPts: UMat, status: UMat | None = ..., err: UMat | None = ...) -> tuple[UMat, UMat, UMat]: ...
|
|
|
|
class FarnebackOpticalFlow(DenseOpticalFlow):
    """Stub of the Farneback dense optical flow algorithm.

    Getter/setter pairs expose the pyramid and polynomial-expansion
    parameters; ``create`` is the factory with the same parameter set.
    """

    def getNumLevels(self) -> int: ...
    def setNumLevels(self, numLevels: int) -> None: ...

    def getPyrScale(self) -> float: ...
    def setPyrScale(self, pyrScale: float) -> None: ...

    def getFastPyramids(self) -> bool: ...
    def setFastPyramids(self, fastPyramids: bool) -> None: ...

    def getWinSize(self) -> int: ...
    def setWinSize(self, winSize: int) -> None: ...

    def getNumIters(self) -> int: ...
    def setNumIters(self, numIters: int) -> None: ...

    def getPolyN(self) -> int: ...
    def setPolyN(self, polyN: int) -> None: ...

    def getPolySigma(self) -> float: ...
    def setPolySigma(self, polySigma: float) -> None: ...

    def getFlags(self) -> int: ...
    def setFlags(self, flags: int) -> None: ...

    @classmethod
    def create(cls, numLevels: int = ..., pyrScale: float = ..., fastPyramids: bool = ..., winSize: int = ..., numIters: int = ..., polyN: int = ..., polySigma: float = ..., flags: int = ...) -> FarnebackOpticalFlow: ...
|
|
|
|
class VariationalRefinement(DenseOpticalFlow):
    """Stub of the variational refinement post-processing for optical flow.

    ``calcUV`` refines a flow split into separate horizontal/vertical
    components; the remaining getter/setter pairs expose solver parameters.
    """

    @_typing.overload
    def calcUV(self, I0: cv2.typing.MatLike, I1: cv2.typing.MatLike, flow_u: cv2.typing.MatLike, flow_v: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def calcUV(self, I0: UMat, I1: UMat, flow_u: UMat, flow_v: UMat) -> tuple[UMat, UMat]: ...

    def getFixedPointIterations(self) -> int: ...
    def setFixedPointIterations(self, val: int) -> None: ...

    # SOR = successive over-relaxation solver iterations; omega is its
    # relaxation factor.
    def getSorIterations(self) -> int: ...
    def setSorIterations(self, val: int) -> None: ...

    def getOmega(self) -> float: ...
    def setOmega(self, val: float) -> None: ...

    def getAlpha(self) -> float: ...
    def setAlpha(self, val: float) -> None: ...

    def getDelta(self) -> float: ...
    def setDelta(self, val: float) -> None: ...

    def getGamma(self) -> float: ...
    def setGamma(self, val: float) -> None: ...

    def getEpsilon(self) -> float: ...
    def setEpsilon(self, val: float) -> None: ...

    @classmethod
    def create(cls) -> VariationalRefinement: ...
|
|
|
|
class DISOpticalFlow(DenseOpticalFlow):
    """Stub of the DIS (Dense Inverse Search) optical flow algorithm.

    Getter/setter pairs expose the patch/gradient-descent parameters and the
    embedded variational-refinement parameters; ``create`` takes a preset id.
    """

    def getFinestScale(self) -> int: ...
    def setFinestScale(self, val: int) -> None: ...

    def getPatchSize(self) -> int: ...
    def setPatchSize(self, val: int) -> None: ...

    def getPatchStride(self) -> int: ...
    def setPatchStride(self, val: int) -> None: ...

    def getGradientDescentIterations(self) -> int: ...
    def setGradientDescentIterations(self, val: int) -> None: ...

    def getVariationalRefinementIterations(self) -> int: ...
    def setVariationalRefinementIterations(self, val: int) -> None: ...

    def getVariationalRefinementAlpha(self) -> float: ...
    def setVariationalRefinementAlpha(self, val: float) -> None: ...

    def getVariationalRefinementDelta(self) -> float: ...
    def setVariationalRefinementDelta(self, val: float) -> None: ...

    def getVariationalRefinementGamma(self) -> float: ...
    def setVariationalRefinementGamma(self, val: float) -> None: ...

    def getVariationalRefinementEpsilon(self) -> float: ...
    def setVariationalRefinementEpsilon(self, val: float) -> None: ...

    def getUseMeanNormalization(self) -> bool: ...
    def setUseMeanNormalization(self, val: bool) -> None: ...

    def getUseSpatialPropagation(self) -> bool: ...
    def setUseSpatialPropagation(self, val: bool) -> None: ...

    @classmethod
    def create(cls, preset: int = ...) -> DISOpticalFlow: ...
|
|
|
|
class SparsePyrLKOpticalFlow(SparseOpticalFlow):
    """Stub of the pyramidal Lucas-Kanade sparse optical flow algorithm.

    Getter/setter pairs expose window size, pyramid depth, termination
    criteria, flags and the minimum eigenvalue threshold.
    """

    def getWinSize(self) -> cv2.typing.Size: ...
    def setWinSize(self, winSize: cv2.typing.Size) -> None: ...

    def getMaxLevel(self) -> int: ...
    def setMaxLevel(self, maxLevel: int) -> None: ...

    def getTermCriteria(self) -> cv2.typing.TermCriteria: ...
    def setTermCriteria(self, crit: cv2.typing.TermCriteria) -> None: ...

    def getFlags(self) -> int: ...
    def setFlags(self, flags: int) -> None: ...

    def getMinEigThreshold(self) -> float: ...
    def setMinEigThreshold(self, minEigThreshold: float) -> None: ...

    @classmethod
    def create(cls, winSize: cv2.typing.Size = ..., maxLevel: int = ..., crit: cv2.typing.TermCriteria = ..., flags: int = ..., minEigThreshold: float = ...) -> SparsePyrLKOpticalFlow: ...
|
|
|
|
class Tracker:
    """Stub of the base class for single-object trackers.

    ``init`` seeds the tracker with an image and a bounding box; ``update``
    processes a new frame and returns ``(found, boundingBox)``.
    """

    @_typing.overload
    def init(self, image: cv2.typing.MatLike, boundingBox: cv2.typing.Rect) -> None: ...
    @_typing.overload
    def init(self, image: UMat, boundingBox: cv2.typing.Rect) -> None: ...

    @_typing.overload
    def update(self, image: cv2.typing.MatLike) -> tuple[bool, cv2.typing.Rect]: ...
    @_typing.overload
    def update(self, image: UMat) -> tuple[bool, cv2.typing.Rect]: ...
|
|
|
|
class TrackerMIL(Tracker):
    """Stub of the MIL (Multiple Instance Learning) tracker."""

    class Params:
        # Sampler radii/window sizes and feature-set size used by the tracker.
        samplerInitInRadius: float
        samplerInitMaxNegNum: int
        samplerSearchWinSize: float
        samplerTrackInRadius: float
        samplerTrackMaxPosNum: int
        samplerTrackMaxNegNum: int
        featureSetNumFeatures: int

        def __init__(self) -> None: ...

    # Factory: builds a TrackerMIL from an optional Params instance.
    @classmethod
    def create(cls, parameters: TrackerMIL.Params = ...) -> TrackerMIL: ...
|
|
|
|
class TrackerGOTURN(Tracker):
    """Stub of the GOTURN deep-learning tracker."""

    class Params:
        # Paths to the network definition (modelTxt) and weights (modelBin).
        modelTxt: str
        modelBin: str

        def __init__(self) -> None: ...

    # Factory: builds a TrackerGOTURN from an optional Params instance.
    @classmethod
    def create(cls, parameters: TrackerGOTURN.Params = ...) -> TrackerGOTURN: ...
|
|
|
|
class TrackerDaSiamRPN(Tracker):
    """Stub of the DaSiamRPN tracker."""

    class Params:
        # Model/kernel file paths plus DNN backend/target identifiers.
        model: str
        kernel_cls1: str
        kernel_r1: str
        backend: int
        target: int

        def __init__(self) -> None: ...

    # Factory: builds a TrackerDaSiamRPN from an optional Params instance.
    @classmethod
    def create(cls, parameters: TrackerDaSiamRPN.Params = ...) -> TrackerDaSiamRPN: ...

    # Confidence score of the most recent update.
    def getTrackingScore(self) -> float: ...
|
|
|
|
class TrackerNano(Tracker):
    """Stub of the NanoTrack tracker."""

    class Params:
        # Backbone/neck-head model paths plus DNN backend/target identifiers.
        backbone: str
        neckhead: str
        backend: int
        target: int

        def __init__(self) -> None: ...

    # Factory: builds a TrackerNano from an optional Params instance.
    @classmethod
    def create(cls, parameters: TrackerNano.Params = ...) -> TrackerNano: ...

    # Confidence score of the most recent update.
    def getTrackingScore(self) -> float: ...
|
|
|
|
class TrackerVit(Tracker):
    """Stub of the ViT-based tracker."""

    class Params:
        # Model path, DNN backend/target, input normalization constants and
        # the score threshold used to accept a tracking result.
        net: str
        backend: int
        target: int
        meanvalue: cv2.typing.Scalar
        stdvalue: cv2.typing.Scalar
        tracking_score_threshold: float

        def __init__(self) -> None: ...

    # Factory: builds a TrackerVit from an optional Params instance.
    @classmethod
    def create(cls, parameters: TrackerVit.Params = ...) -> TrackerVit: ...

    # Confidence score of the most recent update.
    def getTrackingScore(self) -> float: ...
|
|
|
|
class GArrayDesc:
    """Stub of the G-API array descriptor (opaque marker type)."""
    ...
|
|
class GComputation:
    """Stub of a G-API computation graph.

    Constructors accept either generic protocol in/out argument bundles or
    concrete ``GMat``/``GScalar`` endpoints; ``apply`` executes the graph and
    ``compileStreaming`` produces a ``GStreamingCompiled`` pipeline.
    """

    @_typing.overload
    def __init__(self, ins: cv2.typing.GProtoInputArgs, outs: cv2.typing.GProtoOutputArgs) -> None: ...
    @_typing.overload
    def __init__(self, in_: GMat, out: GMat) -> None: ...
    @_typing.overload
    def __init__(self, in_: GMat, out: GScalar) -> None: ...
    @_typing.overload
    def __init__(self, in1: GMat, in2: GMat, out: GMat) -> None: ...

    def apply(self, callback: cv2.typing.ExtractArgsCallback, args: _typing.Sequence[GCompileArg] = ...) -> _typing.Sequence[cv2.typing.GRunArg]: ...

    @_typing.overload
    def compileStreaming(self, in_metas: _typing.Sequence[cv2.typing.GMetaArg], args: _typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ...
    @_typing.overload
    def compileStreaming(self, args: _typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ...
    @_typing.overload
    def compileStreaming(self, callback: cv2.typing.ExtractMetaCallback, args: _typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ...
|
|
|
|
class GFrame:
    """Stub of a G-API frame placeholder."""

    def __init__(self) -> None: ...
|
|
|
|
class GKernelPackage:
    """Stub of a collection of G-API kernels; ``size`` returns the count."""

    def size(self) -> int: ...
|
|
|
|
class GMat:
    """Stub of a G-API matrix placeholder; optionally seeded from a MatLike."""

    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, m: cv2.typing.MatLike) -> None: ...
|
|
|
|
class GMatDesc:
    """Stub of the G-API matrix descriptor.

    Read-only properties expose depth, channel count, 2D size, planarity and
    an n-dimensional ``dims`` sequence.  The ``with*``/``as*`` methods return
    new descriptors derived from this one.

    Note: the original generated stub declared the ``(d, dd)`` ``__init__``
    overload twice with byte-identical signatures; the redundant duplicate
    (flagged by type checkers as an identical overload) has been removed.
    """
    @property
    def depth(self) -> int: ...
    @property
    def chan(self) -> int: ...
    @property
    def size(self) -> cv2.typing.Size: ...
    @property
    def planar(self) -> bool: ...
    @property
    def dims(self) -> _typing.Sequence[int]: ...

    @_typing.overload
    def __init__(self, d: int, c: int, s: cv2.typing.Size, p: bool = ...) -> None: ...
    @_typing.overload
    def __init__(self, d: int, dd: _typing.Sequence[int]) -> None: ...
    @_typing.overload
    def __init__(self) -> None: ...

    # Derive a descriptor with the size shifted by a delta (Size or dx/dy).
    @_typing.overload
    def withSizeDelta(self, delta: cv2.typing.Size) -> GMatDesc: ...
    @_typing.overload
    def withSizeDelta(self, dx: int, dy: int) -> GMatDesc: ...

    def withSize(self, sz: cv2.typing.Size) -> GMatDesc: ...

    def withDepth(self, ddepth: int) -> GMatDesc: ...

    def withType(self, ddepth: int, dchan: int) -> GMatDesc: ...

    # Derive a planar descriptor (optionally with an explicit plane count).
    @_typing.overload
    def asPlanar(self) -> GMatDesc: ...
    @_typing.overload
    def asPlanar(self, planes: int) -> GMatDesc: ...

    def asInterleaved(self) -> GMatDesc: ...
|
|
|
|
class GOpaqueDesc:
    """Stub of the G-API opaque-value descriptor (opaque marker type)."""
    ...
|
|
class GScalar:
    """Stub of a G-API scalar placeholder; optionally seeded from a Scalar."""

    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, s: cv2.typing.Scalar) -> None: ...
|
|
|
|
class GScalarDesc:
    """Stub of the G-API scalar descriptor (opaque marker type)."""
    ...
|
|
class GStreamingCompiled:
    """Stub of a compiled G-API streaming pipeline.

    Lifecycle: ``setSource`` -> ``start`` -> repeated ``pull`` (returns a
    success flag plus the run/optional-run arguments) -> ``stop``;
    ``running`` reports whether the pipeline is active.
    """

    def __init__(self) -> None: ...

    def setSource(self, callback: cv2.typing.ExtractArgsCallback) -> None: ...

    def start(self) -> None: ...

    def pull(self) -> tuple[bool, _typing.Sequence[cv2.typing.GRunArg] | _typing.Sequence[cv2.typing.GOptRunArg]]: ...

    def stop(self) -> None: ...

    def running(self) -> bool: ...
|
|
|
|
class GOpaqueT:
    """Stub of a typed G-API opaque container; ``type`` echoes the ArgType."""

    def __init__(self, type: cv2.gapi.ArgType) -> None: ...

    def type(self) -> cv2.gapi.ArgType: ...
|
|
|
|
class GArrayT:
    """Stub of a typed G-API array container; ``type`` echoes the ArgType."""

    def __init__(self, type: cv2.gapi.ArgType) -> None: ...

    def type(self) -> cv2.gapi.ArgType: ...
|
|
|
|
class GCompileArg:
    """Stub of a G-API compile argument wrapper.

    Accepts a kernel package, a network package, a streaming queue-capacity
    setting, or object-tracker parameters.
    """

    @_typing.overload
    def __init__(self, arg: GKernelPackage) -> None: ...
    @_typing.overload
    def __init__(self, arg: cv2.gapi.GNetPackage) -> None: ...
    @_typing.overload
    def __init__(self, arg: cv2.gapi.streaming.queue_capacity) -> None: ...
    @_typing.overload
    def __init__(self, arg: cv2.gapi.ot.ObjectTrackerParams) -> None: ...
|
|
|
|
class GInferInputs:
    """Stub of the named-input bundle for G-API inference.

    ``setInput`` binds a name to a ``GMat`` or ``GFrame`` and returns the
    bundle for chaining.
    """

    def __init__(self) -> None: ...

    @_typing.overload
    def setInput(self, name: str, value: GMat) -> GInferInputs: ...
    @_typing.overload
    def setInput(self, name: str, value: GFrame) -> GInferInputs: ...
|
|
|
|
class GInferListInputs:
    """Stub of the named-input bundle for list-mode G-API inference.

    ``setInput`` binds a name to a ``GArrayT`` and returns the bundle for
    chaining.

    Note: the original generated stub declared ``setInput`` as two
    byte-identical ``@overload``\\ s; since both signatures were the same,
    they have been collapsed into a single declaration (identical overloads
    are redundant and flagged by type checkers).
    """

    def __init__(self) -> None: ...

    def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ...
|
|
|
|
class GInferOutputs:
    """Stub of the named-output bundle for G-API inference; ``at`` looks up
    an output ``GMat`` by name."""

    def __init__(self) -> None: ...

    def at(self, name: str) -> GMat: ...
|
|
|
|
class GInferListOutputs:
    """Stub of the named-output bundle for list-mode G-API inference;
    ``at`` looks up an output ``GArrayT`` by name."""

    def __init__(self) -> None: ...

    def at(self, name: str) -> GArrayT: ...
|
|
|
|
class error(Exception):
    """Stub of the exception raised by OpenCV on native-level failures."""
    code: int   # OpenCV error code
    err: str    # error description
    file: str   # source file where the error was raised
    func: str   # function where the error was raised
    line: int   # source line where the error was raised
    msg: str    # full formatted message
|
|
|
|
| |
# ---------------------------------------------------------------------------
# Module-level function stubs (CamShift .. HuMoments).
# Each function is declared once per input kind via @_typing.overload: a
# cv2.typing.MatLike (numpy) variant and a UMat (OpenCL-backed) variant.
# Bodies are stub-only (`...`); optional output arrays default to `...`.
# ---------------------------------------------------------------------------
@_typing.overload
def CamShift(probImage: cv2.typing.MatLike, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[cv2.typing.RotatedRect, cv2.typing.Rect]: ...
@_typing.overload
def CamShift(probImage: UMat, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[cv2.typing.RotatedRect, cv2.typing.Rect]: ...


# Canny: the first overload pair takes an image plus thresholds; the second
# pair takes precomputed x/y derivatives (dx, dy) instead of an image.
@_typing.overload
def Canny(image: cv2.typing.MatLike, threshold1: float, threshold2: float, edges: cv2.typing.MatLike | None = ..., apertureSize: int = ..., L2gradient: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def Canny(image: UMat, threshold1: float, threshold2: float, edges: UMat | None = ..., apertureSize: int = ..., L2gradient: bool = ...) -> UMat: ...
@_typing.overload
def Canny(dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, threshold1: float, threshold2: float, edges: cv2.typing.MatLike | None = ..., L2gradient: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def Canny(dx: UMat, dy: UMat, threshold1: float, threshold2: float, edges: UMat | None = ..., L2gradient: bool = ...) -> UMat: ...


@_typing.overload
def EMD(signature1: cv2.typing.MatLike, signature2: cv2.typing.MatLike, distType: int, cost: cv2.typing.MatLike | None = ..., lowerBound: float | None = ..., flow: cv2.typing.MatLike | None = ...) -> tuple[float, float, cv2.typing.MatLike]: ...
@_typing.overload
def EMD(signature1: UMat, signature2: UMat, distType: int, cost: UMat | None = ..., lowerBound: float | None = ..., flow: UMat | None = ...) -> tuple[float, float, UMat]: ...


@_typing.overload
def GaussianBlur(src: cv2.typing.MatLike, ksize: cv2.typing.Size, sigmaX: float, dst: cv2.typing.MatLike | None = ..., sigmaY: float = ..., borderType: int = ..., hint: AlgorithmHint = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def GaussianBlur(src: UMat, ksize: cv2.typing.Size, sigmaX: float, dst: UMat | None = ..., sigmaY: float = ..., borderType: int = ..., hint: AlgorithmHint = ...) -> UMat: ...


@_typing.overload
def HoughCircles(image: cv2.typing.MatLike, method: int, dp: float, minDist: float, circles: cv2.typing.MatLike | None = ..., param1: float = ..., param2: float = ..., minRadius: int = ..., maxRadius: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def HoughCircles(image: UMat, method: int, dp: float, minDist: float, circles: UMat | None = ..., param1: float = ..., param2: float = ..., minRadius: int = ..., maxRadius: int = ...) -> UMat: ...


@_typing.overload
def HoughLines(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ..., use_edgeval: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def HoughLines(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ..., use_edgeval: bool = ...) -> UMat: ...


@_typing.overload
def HoughLinesP(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., minLineLength: float = ..., maxLineGap: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def HoughLinesP(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., minLineLength: float = ..., maxLineGap: float = ...) -> UMat: ...


@_typing.overload
def HoughLinesPointSet(point: cv2.typing.MatLike, lines_max: int, threshold: int, min_rho: float, max_rho: float, rho_step: float, min_theta: float, max_theta: float, theta_step: float, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def HoughLinesPointSet(point: UMat, lines_max: int, threshold: int, min_rho: float, max_rho: float, rho_step: float, min_theta: float, max_theta: float, theta_step: float, lines: UMat | None = ...) -> UMat: ...


@_typing.overload
def HoughLinesWithAccumulator(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def HoughLinesWithAccumulator(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> UMat: ...


# HuMoments: both overloads take a Moments record; they differ only in the
# optional output array kind.
@_typing.overload
def HuMoments(m: cv2.typing.Moments, hu: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def HuMoments(m: cv2.typing.Moments, hu: UMat | None = ...) -> UMat: ...
|
|
# ---------------------------------------------------------------------------
# Module-level function stubs (LUT .. Sobel), MatLike/UMat overload pairs.
# ---------------------------------------------------------------------------
@_typing.overload
def LUT(src: cv2.typing.MatLike, lut: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def LUT(src: UMat, lut: UMat, dst: UMat | None = ...) -> UMat: ...


@_typing.overload
def Laplacian(src: cv2.typing.MatLike, ddepth: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def Laplacian(src: UMat, ddepth: int, dst: UMat | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> UMat: ...


@_typing.overload
def Mahalanobis(v1: cv2.typing.MatLike, v2: cv2.typing.MatLike, icovar: cv2.typing.MatLike) -> float: ...
@_typing.overload
def Mahalanobis(v1: UMat, v2: UMat, icovar: UMat) -> float: ...


@_typing.overload
def PCABackProject(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def PCABackProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ...


# PCACompute: the first overload pair selects components by count
# (maxComponents); the second selects them by retainedVariance.
@_typing.overload
def PCACompute(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike | None = ..., maxComponents: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def PCACompute(data: UMat, mean: UMat, eigenvectors: UMat | None = ..., maxComponents: int = ...) -> tuple[UMat, UMat]: ...
@_typing.overload
def PCACompute(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, retainedVariance: float, eigenvectors: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def PCACompute(data: UMat, mean: UMat, retainedVariance: float, eigenvectors: UMat | None = ...) -> tuple[UMat, UMat]: ...


# PCACompute2: same selection variants as PCACompute, but also returns the
# eigenvalues.
@_typing.overload
def PCACompute2(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike | None = ..., eigenvalues: cv2.typing.MatLike | None = ..., maxComponents: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def PCACompute2(data: UMat, mean: UMat, eigenvectors: UMat | None = ..., eigenvalues: UMat | None = ..., maxComponents: int = ...) -> tuple[UMat, UMat, UMat]: ...
@_typing.overload
def PCACompute2(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, retainedVariance: float, eigenvectors: cv2.typing.MatLike | None = ..., eigenvalues: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def PCACompute2(data: UMat, mean: UMat, retainedVariance: float, eigenvectors: UMat | None = ..., eigenvalues: UMat | None = ...) -> tuple[UMat, UMat, UMat]: ...


@_typing.overload
def PCAProject(data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def PCAProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ...


@_typing.overload
def PSNR(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, R: float = ...) -> float: ...
@_typing.overload
def PSNR(src1: UMat, src2: UMat, R: float = ...) -> float: ...


@_typing.overload
def RQDecomp3x3(src: cv2.typing.MatLike, mtxR: cv2.typing.MatLike | None = ..., mtxQ: cv2.typing.MatLike | None = ..., Qx: cv2.typing.MatLike | None = ..., Qy: cv2.typing.MatLike | None = ..., Qz: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Vec3d, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def RQDecomp3x3(src: UMat, mtxR: UMat | None = ..., mtxQ: UMat | None = ..., Qx: UMat | None = ..., Qy: UMat | None = ..., Qz: UMat | None = ...) -> tuple[cv2.typing.Vec3d, UMat, UMat, UMat, UMat, UMat]: ...


@_typing.overload
def Rodrigues(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., jacobian: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def Rodrigues(src: UMat, dst: UMat | None = ..., jacobian: UMat | None = ...) -> tuple[UMat, UMat]: ...


@_typing.overload
def SVBackSubst(w: cv2.typing.MatLike, u: cv2.typing.MatLike, vt: cv2.typing.MatLike, rhs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def SVBackSubst(w: UMat, u: UMat, vt: UMat, rhs: UMat, dst: UMat | None = ...) -> UMat: ...


@_typing.overload
def SVDecomp(src: cv2.typing.MatLike, w: cv2.typing.MatLike | None = ..., u: cv2.typing.MatLike | None = ..., vt: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def SVDecomp(src: UMat, w: UMat | None = ..., u: UMat | None = ..., vt: UMat | None = ..., flags: int = ...) -> tuple[UMat, UMat, UMat]: ...


@_typing.overload
def Scharr(src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def Scharr(src: UMat, ddepth: int, dx: int, dy: int, dst: UMat | None = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> UMat: ...


@_typing.overload
def Sobel(src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def Sobel(src: UMat, ddepth: int, dx: int, dy: int, dst: UMat | None = ..., ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> UMat: ...
|
|
# ---------------------------------------------------------------------------
# Module-level function stubs (absdiff .. arrowedLine), MatLike/UMat pairs.
# ---------------------------------------------------------------------------
@_typing.overload
def absdiff(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def absdiff(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ...


@_typing.overload
def accumulate(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def accumulate(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ...


@_typing.overload
def accumulateProduct(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def accumulateProduct(src1: UMat, src2: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ...


@_typing.overload
def accumulateSquare(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def accumulateSquare(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ...


@_typing.overload
def accumulateWeighted(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def accumulateWeighted(src: UMat, dst: UMat, alpha: float, mask: UMat | None = ...) -> UMat: ...


@_typing.overload
def adaptiveThreshold(src: cv2.typing.MatLike, maxValue: float, adaptiveMethod: int, thresholdType: int, blockSize: int, C: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def adaptiveThreshold(src: UMat, maxValue: float, adaptiveMethod: int, thresholdType: int, blockSize: int, C: float, dst: UMat | None = ...) -> UMat: ...


@_typing.overload
def add(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def add(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ...


# addText: single (non-overloaded) declaration; draws in place, returns None.
def addText(img: cv2.typing.MatLike, text: str, org: cv2.typing.Point, nameFont: str, pointSize: int = ..., color: cv2.typing.Scalar = ..., weight: int = ..., style: int = ..., spacing: int = ...) -> None: ...


@_typing.overload
def addWeighted(src1: cv2.typing.MatLike, alpha: float, src2: cv2.typing.MatLike, beta: float, gamma: float, dst: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def addWeighted(src1: UMat, alpha: float, src2: UMat, beta: float, gamma: float, dst: UMat | None = ..., dtype: int = ...) -> UMat: ...


# applyColorMap: first overload pair takes a built-in colormap id, the second
# a user-supplied color lookup table.
@_typing.overload
def applyColorMap(src: cv2.typing.MatLike, colormap: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def applyColorMap(src: UMat, colormap: int, dst: UMat | None = ...) -> UMat: ...
@_typing.overload
def applyColorMap(src: cv2.typing.MatLike, userColor: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def applyColorMap(src: UMat, userColor: UMat, dst: UMat | None = ...) -> UMat: ...


@_typing.overload
def approxPolyDP(curve: cv2.typing.MatLike, epsilon: float, closed: bool, approxCurve: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def approxPolyDP(curve: UMat, epsilon: float, closed: bool, approxCurve: UMat | None = ...) -> UMat: ...


@_typing.overload
def approxPolyN(curve: cv2.typing.MatLike, nsides: int, approxCurve: cv2.typing.MatLike | None = ..., epsilon_percentage: float = ..., ensure_convex: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def approxPolyN(curve: UMat, nsides: int, approxCurve: UMat | None = ..., epsilon_percentage: float = ..., ensure_convex: bool = ...) -> UMat: ...


@_typing.overload
def arcLength(curve: cv2.typing.MatLike, closed: bool) -> float: ...
@_typing.overload
def arcLength(curve: UMat, closed: bool) -> float: ...


@_typing.overload
def arrowedLine(img: cv2.typing.MatLike, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., line_type: int = ..., shift: int = ..., tipLength: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def arrowedLine(img: UMat, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., line_type: int = ..., shift: int = ..., tipLength: float = ...) -> UMat: ...
|
|
| @_typing.overload |
| def batchDistance(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dtype: int, dist: cv2.typing.MatLike | None = ..., nidx: cv2.typing.MatLike | None = ..., normType: int = ..., K: int = ..., mask: cv2.typing.MatLike | None = ..., update: int = ..., crosscheck: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ... |
| @_typing.overload |
| def batchDistance(src1: UMat, src2: UMat, dtype: int, dist: UMat | None = ..., nidx: UMat | None = ..., normType: int = ..., K: int = ..., mask: UMat | None = ..., update: int = ..., crosscheck: bool = ...) -> tuple[UMat, UMat]: ... |
|
|
| @_typing.overload |
| def bilateralFilter(src: cv2.typing.MatLike, d: int, sigmaColor: float, sigmaSpace: float, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def bilateralFilter(src: UMat, d: int, sigmaColor: float, sigmaSpace: float, dst: UMat | None = ..., borderType: int = ...) -> UMat: ... |
|
|
| @_typing.overload |
| def bitwise_and(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def bitwise_and(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... |
|
|
| @_typing.overload |
| def bitwise_not(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def bitwise_not(src: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... |
|
|
| @_typing.overload |
| def bitwise_or(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def bitwise_or(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... |
|
|
| @_typing.overload |
| def bitwise_xor(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def bitwise_xor(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... |
|
|
| @_typing.overload |
| def blendLinear(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, weights1: cv2.typing.MatLike, weights2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def blendLinear(src1: UMat, src2: UMat, weights1: UMat, weights2: UMat, dst: UMat | None = ...) -> UMat: ... |
|
|
# Generated stub signatures. Each function below is declared twice: once for
# numpy-backed inputs (cv2.typing.MatLike) and once for OpenCL-backed cv2.UMat;
# the returned array family always matches the input family.

# blur — smoothing filter; optional `dst`, `anchor`, `borderType` keyword args.
@_typing.overload
def blur(src: cv2.typing.MatLike, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def blur(src: UMat, ksize: cv2.typing.Size, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., borderType: int = ...) -> UMat: ...


# borderInterpolate — pure-int computation, no overloads needed.
# NOTE: parameter name `len` shadows the builtin, but it mirrors the C++ API
# argument name and callers may pass it by keyword, so it must not be renamed.
def borderInterpolate(p: int, len: int, borderType: int) -> int: ...


# boundingRect — returns a Rect for either input family.
@_typing.overload
def boundingRect(array: cv2.typing.MatLike) -> cv2.typing.Rect: ...
@_typing.overload
def boundingRect(array: UMat) -> cv2.typing.Rect: ...


# boxFilter — like blur but with explicit output depth and `normalize` flag.
@_typing.overload
def boxFilter(src: cv2.typing.MatLike, ddepth: int, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def boxFilter(src: UMat, ddepth: int, ksize: cv2.typing.Size, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> UMat: ...


# boxPoints — overloads differ only in the optional `points` output buffer type.
@_typing.overload
def boxPoints(box: cv2.typing.RotatedRect, points: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def boxPoints(box: cv2.typing.RotatedRect, points: UMat | None = ...) -> UMat: ...


# broadcast — expands `src` to `shape`; both arrays must be of the same family.
@_typing.overload
def broadcast(src: cv2.typing.MatLike, shape: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def broadcast(src: UMat, shape: UMat, dst: UMat | None = ...) -> UMat: ...


# buildOpticalFlowPyramid — returns (maxLevel, pyramid) as a tuple; the pyramid
# element type follows the input image family.
@_typing.overload
def buildOpticalFlowPyramid(img: cv2.typing.MatLike, winSize: cv2.typing.Size, maxLevel: int, pyramid: _typing.Sequence[cv2.typing.MatLike] | None = ..., withDerivatives: bool = ..., pyrBorder: int = ..., derivBorder: int = ..., tryReuseInputImage: bool = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike]]: ...
@_typing.overload
def buildOpticalFlowPyramid(img: UMat, winSize: cv2.typing.Size, maxLevel: int, pyramid: _typing.Sequence[UMat] | None = ..., withDerivatives: bool = ..., pyrBorder: int = ..., derivBorder: int = ..., tryReuseInputImage: bool = ...) -> tuple[int, _typing.Sequence[UMat]]: ...
|
|
# Generated stub signatures: MatLike/UMat overload pairs; result type tracks input type.

# calcBackProject — takes a sequence of images plus a precomputed histogram.
@_typing.overload
def calcBackProject(images: _typing.Sequence[cv2.typing.MatLike], channels: _typing.Sequence[int], hist: cv2.typing.MatLike, ranges: _typing.Sequence[float], scale: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def calcBackProject(images: _typing.Sequence[UMat], channels: _typing.Sequence[int], hist: UMat, ranges: _typing.Sequence[float], scale: float, dst: UMat | None = ...) -> UMat: ...


# calcCovarMatrix — returns the (covar, mean) pair.
@_typing.overload
def calcCovarMatrix(samples: cv2.typing.MatLike, mean: cv2.typing.MatLike, flags: int, covar: cv2.typing.MatLike | None = ..., ctype: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def calcCovarMatrix(samples: UMat, mean: UMat, flags: int, covar: UMat | None = ..., ctype: int = ...) -> tuple[UMat, UMat]: ...


# calcHist — `mask` is a required positional argument but may be None.
@_typing.overload
def calcHist(images: _typing.Sequence[cv2.typing.MatLike], channels: _typing.Sequence[int], mask: cv2.typing.MatLike | None, histSize: _typing.Sequence[int], ranges: _typing.Sequence[float], hist: cv2.typing.MatLike | None = ..., accumulate: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def calcHist(images: _typing.Sequence[UMat], channels: _typing.Sequence[int], mask: UMat | None, histSize: _typing.Sequence[int], ranges: _typing.Sequence[float], hist: UMat | None = ..., accumulate: bool = ...) -> UMat: ...


# calcOpticalFlowFarneback — all parameters are required (no defaults in the C++ API).
# NOTE: `next` shadows the builtin; kept to match the C++ argument name.
@_typing.overload
def calcOpticalFlowFarneback(prev: cv2.typing.MatLike, next: cv2.typing.MatLike, flow: cv2.typing.MatLike, pyr_scale: float, levels: int, winsize: int, iterations: int, poly_n: int, poly_sigma: float, flags: int) -> cv2.typing.MatLike: ...
@_typing.overload
def calcOpticalFlowFarneback(prev: UMat, next: UMat, flow: UMat, pyr_scale: float, levels: int, winsize: int, iterations: int, poly_n: int, poly_sigma: float, flags: int) -> UMat: ...


# calcOpticalFlowPyrLK — returns (nextPts, status, err).
@_typing.overload
def calcOpticalFlowPyrLK(prevImg: cv2.typing.MatLike, nextImg: cv2.typing.MatLike, prevPts: cv2.typing.MatLike, nextPts: cv2.typing.MatLike, status: cv2.typing.MatLike | None = ..., err: cv2.typing.MatLike | None = ..., winSize: cv2.typing.Size = ..., maxLevel: int = ..., criteria: cv2.typing.TermCriteria = ..., flags: int = ..., minEigThreshold: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def calcOpticalFlowPyrLK(prevImg: UMat, nextImg: UMat, prevPts: UMat, nextPts: UMat, status: UMat | None = ..., err: UMat | None = ..., winSize: cv2.typing.Size = ..., maxLevel: int = ..., criteria: cv2.typing.TermCriteria = ..., flags: int = ..., minEigThreshold: float = ...) -> tuple[UMat, UMat, UMat]: ...
|
|
# Generated stubs for the calibrateCamera family. All variants return a tuple whose
# first element is the RMS reprojection error (float), followed by the refined
# cameraMatrix, distCoeffs, and per-view rvecs/tvecs sequences; the "Extended"
# variants append std-deviation and per-view-error arrays, and the "RO" variants
# insert `iFixedPoint` and return `newObjPoints`.

@_typing.overload
def calibrateCamera(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
@_typing.overload
def calibrateCamera(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat]]: ...


@_typing.overload
def calibrateCameraExtended(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def calibrateCameraExtended(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., stdDeviationsIntrinsics: UMat | None = ..., stdDeviationsExtrinsics: UMat | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat, UMat, UMat]: ...


@_typing.overload
def calibrateCameraRO(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., newObjPoints: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
@_typing.overload
def calibrateCameraRO(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., newObjPoints: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat]: ...


@_typing.overload
def calibrateCameraROExtended(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., newObjPoints: cv2.typing.MatLike | None = ..., stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., stdDeviationsObjPoints: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def calibrateCameraROExtended(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, iFixedPoint: int, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., newObjPoints: UMat | None = ..., stdDeviationsIntrinsics: UMat | None = ..., stdDeviationsExtrinsics: UMat | None = ..., stdDeviationsObjPoints: UMat | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat, UMat, UMat, UMat, UMat]: ...
|
|
# Generated stubs: hand-eye / robot-world calibration and camera-matrix inspection.

# calibrateHandEye — returns (R_cam2gripper, t_cam2gripper); `method` takes a
# HandEyeCalibrationMethod enum value.
@_typing.overload
def calibrateHandEye(R_gripper2base: _typing.Sequence[cv2.typing.MatLike], t_gripper2base: _typing.Sequence[cv2.typing.MatLike], R_target2cam: _typing.Sequence[cv2.typing.MatLike], t_target2cam: _typing.Sequence[cv2.typing.MatLike], R_cam2gripper: cv2.typing.MatLike | None = ..., t_cam2gripper: cv2.typing.MatLike | None = ..., method: HandEyeCalibrationMethod = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def calibrateHandEye(R_gripper2base: _typing.Sequence[UMat], t_gripper2base: _typing.Sequence[UMat], R_target2cam: _typing.Sequence[UMat], t_target2cam: _typing.Sequence[UMat], R_cam2gripper: UMat | None = ..., t_cam2gripper: UMat | None = ..., method: HandEyeCalibrationMethod = ...) -> tuple[UMat, UMat]: ...


# calibrateRobotWorldHandEye — returns (R_base2world, t_base2world, R_gripper2cam, t_gripper2cam).
@_typing.overload
def calibrateRobotWorldHandEye(R_world2cam: _typing.Sequence[cv2.typing.MatLike], t_world2cam: _typing.Sequence[cv2.typing.MatLike], R_base2gripper: _typing.Sequence[cv2.typing.MatLike], t_base2gripper: _typing.Sequence[cv2.typing.MatLike], R_base2world: cv2.typing.MatLike | None = ..., t_base2world: cv2.typing.MatLike | None = ..., R_gripper2cam: cv2.typing.MatLike | None = ..., t_gripper2cam: cv2.typing.MatLike | None = ..., method: RobotWorldHandEyeCalibrationMethod = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def calibrateRobotWorldHandEye(R_world2cam: _typing.Sequence[UMat], t_world2cam: _typing.Sequence[UMat], R_base2gripper: _typing.Sequence[UMat], t_base2gripper: _typing.Sequence[UMat], R_base2world: UMat | None = ..., t_base2world: UMat | None = ..., R_gripper2cam: UMat | None = ..., t_gripper2cam: UMat | None = ..., method: RobotWorldHandEyeCalibrationMethod = ...) -> tuple[UMat, UMat, UMat, UMat]: ...


# calibrationMatrixValues — both overloads return the same scalar tuple
# (fovx, fovy, focalLength, principalPoint, aspectRatio); only the matrix input type differs.
@_typing.overload
def calibrationMatrixValues(cameraMatrix: cv2.typing.MatLike, imageSize: cv2.typing.Size, apertureWidth: float, apertureHeight: float) -> tuple[float, float, float, cv2.typing.Point2d, float]: ...
@_typing.overload
def calibrationMatrixValues(cameraMatrix: UMat, imageSize: cv2.typing.Size, apertureWidth: float, apertureHeight: float) -> tuple[float, float, float, cv2.typing.Point2d, float]: ...
|
|
# Generated stubs: MatLike/UMat overload pairs; outputs follow the input family.

# cartToPolar — returns (magnitude, angle).
@_typing.overload
def cartToPolar(x: cv2.typing.MatLike, y: cv2.typing.MatLike, magnitude: cv2.typing.MatLike | None = ..., angle: cv2.typing.MatLike | None = ..., angleInDegrees: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def cartToPolar(x: UMat, y: UMat, magnitude: UMat | None = ..., angle: UMat | None = ..., angleInDegrees: bool = ...) -> tuple[UMat, UMat]: ...


# checkChessboard — boolean predicate; overloads differ only in image type.
@_typing.overload
def checkChessboard(img: cv2.typing.MatLike, size: cv2.typing.Size) -> bool: ...
@_typing.overload
def checkChessboard(img: UMat, size: cv2.typing.Size) -> bool: ...


# checkHardwareSupport — plain int/bool signature, no overloads.
def checkHardwareSupport(feature: int) -> bool: ...


# checkRange — returns (ok, pos); `pos` is a Point regardless of input family.
@_typing.overload
def checkRange(a: cv2.typing.MatLike, quiet: bool = ..., minVal: float = ..., maxVal: float = ...) -> tuple[bool, cv2.typing.Point]: ...
@_typing.overload
def checkRange(a: UMat, quiet: bool = ..., minVal: float = ..., maxVal: float = ...) -> tuple[bool, cv2.typing.Point]: ...


# circle — drawing primitive; draws in place and returns the (same-family) image.
@_typing.overload
def circle(img: cv2.typing.MatLike, center: cv2.typing.Point, radius: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def circle(img: UMat, center: cv2.typing.Point, radius: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ...


# clipLine — geometry-only helper; returns (inside, pt1, pt2).
def clipLine(imgRect: cv2.typing.Rect, pt1: cv2.typing.Point, pt2: cv2.typing.Point) -> tuple[bool, cv2.typing.Point, cv2.typing.Point]: ...
|
|
# Generated stubs: MatLike/UMat overload pairs; outputs follow the input family.

# colorChange — seamless-cloning style edit controlled by per-channel multipliers.
@_typing.overload
def colorChange(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., red_mul: float = ..., green_mul: float = ..., blue_mul: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def colorChange(src: UMat, mask: UMat, dst: UMat | None = ..., red_mul: float = ..., green_mul: float = ..., blue_mul: float = ...) -> UMat: ...


# compare — element-wise comparison with operator code `cmpop`.
@_typing.overload
def compare(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, cmpop: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def compare(src1: UMat, src2: UMat, cmpop: int, dst: UMat | None = ...) -> UMat: ...


# compareHist — scalar distance/similarity; only input types differ between overloads.
@_typing.overload
def compareHist(H1: cv2.typing.MatLike, H2: cv2.typing.MatLike, method: int) -> float: ...
@_typing.overload
def compareHist(H1: UMat, H2: UMat, method: int) -> float: ...


# completeSymm — symmetrizes a square matrix in place and returns it.
@_typing.overload
def completeSymm(m: cv2.typing.MatLike, lowerToUpper: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def completeSymm(m: UMat, lowerToUpper: bool = ...) -> UMat: ...


# composeRT — combines two rotation/translation pairs; returns (rvec3, tvec3)
# followed by the eight optional derivative matrices, always as a 10-tuple.
@_typing.overload
def composeRT(rvec1: cv2.typing.MatLike, tvec1: cv2.typing.MatLike, rvec2: cv2.typing.MatLike, tvec2: cv2.typing.MatLike, rvec3: cv2.typing.MatLike | None = ..., tvec3: cv2.typing.MatLike | None = ..., dr3dr1: cv2.typing.MatLike | None = ..., dr3dt1: cv2.typing.MatLike | None = ..., dr3dr2: cv2.typing.MatLike | None = ..., dr3dt2: cv2.typing.MatLike | None = ..., dt3dr1: cv2.typing.MatLike | None = ..., dt3dt1: cv2.typing.MatLike | None = ..., dt3dr2: cv2.typing.MatLike | None = ..., dt3dt2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def composeRT(rvec1: UMat, tvec1: UMat, rvec2: UMat, tvec2: UMat, rvec3: UMat | None = ..., tvec3: UMat | None = ..., dr3dr1: UMat | None = ..., dr3dt1: UMat | None = ..., dr3dr2: UMat | None = ..., dr3dt2: UMat | None = ..., dt3dr1: UMat | None = ..., dt3dt1: UMat | None = ..., dt3dr2: UMat | None = ..., dt3dt2: UMat | None = ...) -> tuple[UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ...
|
|
# Generated stubs: epipolar-geometry helpers and the connectedComponents family.
# All connectedComponents* variants return the label count first, then the label
# image (and stats/centroids where applicable), typed to match the input family.

@_typing.overload
def computeCorrespondEpilines(points: cv2.typing.MatLike, whichImage: int, F: cv2.typing.MatLike, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def computeCorrespondEpilines(points: UMat, whichImage: int, F: UMat, lines: UMat | None = ...) -> UMat: ...


# computeECC — scalar correlation coefficient; only input types differ.
@_typing.overload
def computeECC(templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike, inputMask: cv2.typing.MatLike | None = ...) -> float: ...
@_typing.overload
def computeECC(templateImage: UMat, inputImage: UMat, inputMask: UMat | None = ...) -> float: ...


@_typing.overload
def connectedComponents(image: cv2.typing.MatLike, labels: cv2.typing.MatLike | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, cv2.typing.MatLike]: ...
@_typing.overload
def connectedComponents(image: UMat, labels: UMat | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, UMat]: ...


# "WithAlgorithm" variants make connectivity/ltype/ccltype required positionals.
@_typing.overload
def connectedComponentsWithAlgorithm(image: cv2.typing.MatLike, connectivity: int, ltype: int, ccltype: int, labels: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
@_typing.overload
def connectedComponentsWithAlgorithm(image: UMat, connectivity: int, ltype: int, ccltype: int, labels: UMat | None = ...) -> tuple[int, UMat]: ...


@_typing.overload
def connectedComponentsWithStats(image: cv2.typing.MatLike, labels: cv2.typing.MatLike | None = ..., stats: cv2.typing.MatLike | None = ..., centroids: cv2.typing.MatLike | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def connectedComponentsWithStats(image: UMat, labels: UMat | None = ..., stats: UMat | None = ..., centroids: UMat | None = ..., connectivity: int = ..., ltype: int = ...) -> tuple[int, UMat, UMat, UMat]: ...


@_typing.overload
def connectedComponentsWithStatsWithAlgorithm(image: cv2.typing.MatLike, connectivity: int, ltype: int, ccltype: int, labels: cv2.typing.MatLike | None = ..., stats: cv2.typing.MatLike | None = ..., centroids: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def connectedComponentsWithStatsWithAlgorithm(image: UMat, connectivity: int, ltype: int, ccltype: int, labels: UMat | None = ..., stats: UMat | None = ..., centroids: UMat | None = ...) -> tuple[int, UMat, UMat, UMat]: ...
|
|
# Generated stubs: MatLike/UMat overload pairs.

# contourArea — scalar result; only the contour input type differs.
@_typing.overload
def contourArea(contour: cv2.typing.MatLike, oriented: bool = ...) -> float: ...
@_typing.overload
def contourArea(contour: UMat, oriented: bool = ...) -> float: ...


# convertFp16 — simple src -> dst conversion signature.
@_typing.overload
def convertFp16(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def convertFp16(src: UMat, dst: UMat | None = ...) -> UMat: ...


# convertMaps — returns the converted (dstmap1, dstmap2) pair for remap().
@_typing.overload
def convertMaps(map1: cv2.typing.MatLike, map2: cv2.typing.MatLike, dstmap1type: int, dstmap1: cv2.typing.MatLike | None = ..., dstmap2: cv2.typing.MatLike | None = ..., nninterpolation: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def convertMaps(map1: UMat, map2: UMat, dstmap1type: int, dstmap1: UMat | None = ..., dstmap2: UMat | None = ..., nninterpolation: bool = ...) -> tuple[UMat, UMat]: ...


# convertPointsFromHomogeneous / convertPointsToHomogeneous — mirror-image
# conversions sharing the same src/dst signature shape.
@_typing.overload
def convertPointsFromHomogeneous(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def convertPointsFromHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ...


@_typing.overload
def convertPointsToHomogeneous(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def convertPointsToHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Generated stubs: MatLike/UMat overload pairs; outputs follow the input family.

# convertScaleAbs — scale/shift then absolute value; alpha/beta default to the C++ values.
@_typing.overload
def convertScaleAbs(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., alpha: float = ..., beta: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def convertScaleAbs(src: UMat, dst: UMat | None = ..., alpha: float = ..., beta: float = ...) -> UMat: ...


# convexHull — hull of a point set; `returnPoints` selects points vs. indices output.
@_typing.overload
def convexHull(points: cv2.typing.MatLike, hull: cv2.typing.MatLike | None = ..., clockwise: bool = ..., returnPoints: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def convexHull(points: UMat, hull: UMat | None = ..., clockwise: bool = ..., returnPoints: bool = ...) -> UMat: ...


# convexityDefects — requires a precomputed convex hull of the same family.
@_typing.overload
def convexityDefects(contour: cv2.typing.MatLike, convexhull: cv2.typing.MatLike, convexityDefects: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def convexityDefects(contour: UMat, convexhull: UMat, convexityDefects: UMat | None = ...) -> UMat: ...


# copyMakeBorder — pads src by top/bottom/left/right using `borderType`;
# `value` is used for constant borders.
@_typing.overload
def copyMakeBorder(src: cv2.typing.MatLike, top: int, bottom: int, left: int, right: int, borderType: int, dst: cv2.typing.MatLike | None = ..., value: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def copyMakeBorder(src: UMat, top: int, bottom: int, left: int, right: int, borderType: int, dst: UMat | None = ..., value: cv2.typing.Scalar = ...) -> UMat: ...


# copyTo — masked copy; mask is required (unlike Mat::copyTo's optional mask).
@_typing.overload
def copyTo(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def copyTo(src: UMat, mask: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Generated stubs: corner-detection helpers and related utilities;
# MatLike/UMat overload pairs with outputs following the input family.

@_typing.overload
def cornerEigenValsAndVecs(src: cv2.typing.MatLike, blockSize: int, ksize: int, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def cornerEigenValsAndVecs(src: UMat, blockSize: int, ksize: int, dst: UMat | None = ..., borderType: int = ...) -> UMat: ...


# cornerHarris — adds the free parameter `k` over cornerEigenValsAndVecs.
@_typing.overload
def cornerHarris(src: cv2.typing.MatLike, blockSize: int, ksize: int, k: float, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def cornerHarris(src: UMat, blockSize: int, ksize: int, k: float, dst: UMat | None = ..., borderType: int = ...) -> UMat: ...


# cornerMinEigenVal — here `ksize` is optional, unlike the two functions above.
@_typing.overload
def cornerMinEigenVal(src: cv2.typing.MatLike, blockSize: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def cornerMinEigenVal(src: UMat, blockSize: int, dst: UMat | None = ..., ksize: int = ..., borderType: int = ...) -> UMat: ...


# cornerSubPix — refines `corners` in place and returns them; all args required.
@_typing.overload
def cornerSubPix(image: cv2.typing.MatLike, corners: cv2.typing.MatLike, winSize: cv2.typing.Size, zeroZone: cv2.typing.Size, criteria: cv2.typing.TermCriteria) -> cv2.typing.MatLike: ...
@_typing.overload
def cornerSubPix(image: UMat, corners: UMat, winSize: cv2.typing.Size, zeroZone: cv2.typing.Size, criteria: cv2.typing.TermCriteria) -> UMat: ...


# correctMatches — returns the corrected (newPoints1, newPoints2) pair.
@_typing.overload
def correctMatches(F: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, newPoints1: cv2.typing.MatLike | None = ..., newPoints2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def correctMatches(F: UMat, points1: UMat, points2: UMat, newPoints1: UMat | None = ..., newPoints2: UMat | None = ...) -> tuple[UMat, UMat]: ...


# countNonZero — scalar count; overloads differ only in input type.
@_typing.overload
def countNonZero(src: cv2.typing.MatLike) -> int: ...
@_typing.overload
def countNonZero(src: UMat) -> int: ...
|
|
# Generated stubs: factory functions. Each returns a configured instance of the
# named algorithm class; all parameters are optional with the C++ defaults.
# No MatLike/UMat overloads are needed because none take array arguments.

def createAlignMTB(max_bits: int = ..., exclude_range: int = ..., cut: bool = ...) -> AlignMTB: ...


def createBackgroundSubtractorKNN(history: int = ..., dist2Threshold: float = ..., detectShadows: bool = ...) -> BackgroundSubtractorKNN: ...


def createBackgroundSubtractorMOG2(history: int = ..., varThreshold: float = ..., detectShadows: bool = ...) -> BackgroundSubtractorMOG2: ...


def createCLAHE(clipLimit: float = ..., tileGridSize: cv2.typing.Size = ...) -> CLAHE: ...


# NOTE: `lambda_` carries a trailing underscore because `lambda` is a Python keyword.
def createCalibrateDebevec(samples: int = ..., lambda_: float = ..., random: bool = ...) -> CalibrateDebevec: ...


def createCalibrateRobertson(max_iter: int = ..., threshold: float = ...) -> CalibrateRobertson: ...


def createGeneralizedHoughBallard() -> GeneralizedHoughBallard: ...


def createGeneralizedHoughGuil() -> GeneralizedHoughGuil: ...


# createHanningWindow — the one factory-adjacent function here that writes an
# array; overloads differ only in the optional `dst` output type.
@_typing.overload
def createHanningWindow(winSize: cv2.typing.Size, type: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def createHanningWindow(winSize: cv2.typing.Size, type: int, dst: UMat | None = ...) -> UMat: ...


def createLineSegmentDetector(refine: int = ..., scale: float = ..., sigma_scale: float = ..., quant: float = ..., ang_th: float = ..., log_eps: float = ..., density_th: float = ..., n_bins: int = ...) -> LineSegmentDetector: ...


def createMergeDebevec() -> MergeDebevec: ...


def createMergeMertens(contrast_weight: float = ..., saturation_weight: float = ..., exposure_weight: float = ...) -> MergeMertens: ...


def createMergeRobertson() -> MergeRobertson: ...


def createTonemap(gamma: float = ...) -> Tonemap: ...


def createTonemapDrago(gamma: float = ..., saturation: float = ..., bias: float = ...) -> TonemapDrago: ...


def createTonemapMantiuk(gamma: float = ..., scale: float = ..., saturation: float = ...) -> TonemapMantiuk: ...


def createTonemapReinhard(gamma: float = ..., intensity: float = ..., light_adapt: float = ..., color_adapt: float = ...) -> TonemapReinhard: ...


# cubeRoot — scalar math helper.
def cubeRoot(val: float) -> float: ...


# currentUIFramework — reports the active highgui backend name.
def currentUIFramework() -> str: ...
|
|
# Generated stubs: color conversion and transform functions;
# MatLike/UMat overload pairs with outputs following the input family.

# cvtColor — `code` selects the conversion; `hint` takes an AlgorithmHint enum value.
@_typing.overload
def cvtColor(src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | None = ..., dstCn: int = ..., hint: AlgorithmHint = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def cvtColor(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ..., hint: AlgorithmHint = ...) -> UMat: ...


# cvtColorTwoPlane — two-plane (e.g. semi-planar) input variant of cvtColor.
@_typing.overload
def cvtColorTwoPlane(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | None = ..., hint: AlgorithmHint = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def cvtColorTwoPlane(src1: UMat, src2: UMat, code: int, dst: UMat | None = ..., hint: AlgorithmHint = ...) -> UMat: ...


# dct — discrete cosine transform; behavior controlled via `flags`.
@_typing.overload
def dct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def dct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ...


# decolor — returns the (grayscale, color_boost) pair.
@_typing.overload
def decolor(src: cv2.typing.MatLike, grayscale: cv2.typing.MatLike | None = ..., color_boost: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def decolor(src: UMat, grayscale: UMat | None = ..., color_boost: UMat | None = ...) -> tuple[UMat, UMat]: ...


# decomposeEssentialMat — returns (R1, R2, t).
@_typing.overload
def decomposeEssentialMat(E: cv2.typing.MatLike, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def decomposeEssentialMat(E: UMat, R1: UMat | None = ..., R2: UMat | None = ..., t: UMat | None = ...) -> tuple[UMat, UMat, UMat]: ...


# decomposeHomographyMat — returns the solution count plus sequences of
# rotations, translations and plane normals.
@_typing.overload
def decomposeHomographyMat(H: cv2.typing.MatLike, K: cv2.typing.MatLike, rotations: _typing.Sequence[cv2.typing.MatLike] | None = ..., translations: _typing.Sequence[cv2.typing.MatLike] | None = ..., normals: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
@_typing.overload
def decomposeHomographyMat(H: UMat, K: UMat, rotations: _typing.Sequence[UMat] | None = ..., translations: _typing.Sequence[UMat] | None = ..., normals: _typing.Sequence[UMat] | None = ...) -> tuple[int, _typing.Sequence[UMat], _typing.Sequence[UMat], _typing.Sequence[UMat]]: ...


# decomposeProjectionMatrix — 7-tuple of decomposition outputs.
@_typing.overload
def decomposeProjectionMatrix(projMatrix: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike | None = ..., rotMatrix: cv2.typing.MatLike | None = ..., transVect: cv2.typing.MatLike | None = ..., rotMatrixX: cv2.typing.MatLike | None = ..., rotMatrixY: cv2.typing.MatLike | None = ..., rotMatrixZ: cv2.typing.MatLike | None = ..., eulerAngles: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def decomposeProjectionMatrix(projMatrix: UMat, cameraMatrix: UMat | None = ..., rotMatrix: UMat | None = ..., transVect: UMat | None = ..., rotMatrixX: UMat | None = ..., rotMatrixY: UMat | None = ..., rotMatrixZ: UMat | None = ..., eulerAngles: UMat | None = ...) -> tuple[UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ...


# demosaicing — Bayer-pattern conversion selected by `code`.
@_typing.overload
def demosaicing(src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | None = ..., dstCn: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def demosaicing(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ...
|
|
# Generated stubs: denoising, window management, and arithmetic/transform functions.

# denoise_TVL1 — writes into `result` and returns None (no UMat overload in the API).
# `lambda_` carries a trailing underscore because `lambda` is a Python keyword.
def denoise_TVL1(observations: _typing.Sequence[cv2.typing.MatLike], result: cv2.typing.MatLike, lambda_: float = ..., niters: int = ...) -> None: ...


# highgui window teardown — side-effect-only, no return values.
def destroyAllWindows() -> None: ...


def destroyWindow(winname: str) -> None: ...


# detailEnhance — edge-preserving enhancement; MatLike/UMat overload pair.
@_typing.overload
def detailEnhance(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def detailEnhance(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ...


# determinant — scalar result; only the matrix input type differs.
@_typing.overload
def determinant(mtx: cv2.typing.MatLike) -> float: ...
@_typing.overload
def determinant(mtx: UMat) -> float: ...


# dft — discrete Fourier transform; `nonzeroRows` is a performance hint.
@_typing.overload
def dft(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ..., nonzeroRows: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def dft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ...


# dilate — morphological operation; kernel must match the src family.
@_typing.overload
def dilate(src: cv2.typing.MatLike, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def dilate(src: UMat, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ...


# highgui overlay/status-bar text — side-effect-only.
def displayOverlay(winname: str, text: str, delayms: int = ...) -> None: ...


def displayStatusBar(winname: str, text: str, delayms: int = ...) -> None: ...


# distanceTransform / distanceTransformWithLabels — the latter also returns a
# label image alongside the distance map.
@_typing.overload
def distanceTransform(src: cv2.typing.MatLike, distanceType: int, maskSize: int, dst: cv2.typing.MatLike | None = ..., dstType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def distanceTransform(src: UMat, distanceType: int, maskSize: int, dst: UMat | None = ..., dstType: int = ...) -> UMat: ...


@_typing.overload
def distanceTransformWithLabels(src: cv2.typing.MatLike, distanceType: int, maskSize: int, dst: cv2.typing.MatLike | None = ..., labels: cv2.typing.MatLike | None = ..., labelType: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def distanceTransformWithLabels(src: UMat, distanceType: int, maskSize: int, dst: UMat | None = ..., labels: UMat | None = ..., labelType: int = ...) -> tuple[UMat, UMat]: ...


# divSpectrums — per-element spectrum division; `conjB` optionally conjugates b.
@_typing.overload
def divSpectrums(a: cv2.typing.MatLike, b: cv2.typing.MatLike, flags: int, c: cv2.typing.MatLike | None = ..., conjB: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def divSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ...


# divide — four overloads: array/array division (with optional scale) and
# scalar/array division, each in MatLike and UMat flavors.
@_typing.overload
def divide(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., scale: float = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def divide(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ...
@_typing.overload
def divide(scale: float, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def divide(scale: float, src2: UMat, dst: UMat | None = ..., dtype: int = ...) -> UMat: ...
|
|
| @_typing.overload |
| def drawChessboardCorners(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike, patternWasFound: bool) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def drawChessboardCorners(image: UMat, patternSize: cv2.typing.Size, corners: UMat, patternWasFound: bool) -> UMat: ... |
|
|
| @_typing.overload |
| def drawContours(image: cv2.typing.MatLike, contours: _typing.Sequence[cv2.typing.MatLike], contourIdx: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., hierarchy: cv2.typing.MatLike | None = ..., maxLevel: int = ..., offset: cv2.typing.Point = ...) -> cv2.typing.MatLike: ... |
| @_typing.overload |
| def drawContours(image: UMat, contours: _typing.Sequence[UMat], contourIdx: int, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., hierarchy: UMat | None = ..., maxLevel: int = ..., offset: cv2.typing.Point = ...) -> UMat: ... |
|
|
# drawFrameAxes(image, cameraMatrix, distCoeffs, rvec, tvec, length[, thickness])
# -> image. MatLike/UMat overloads; pose given as rotation/translation vectors.
@_typing.overload
def drawFrameAxes(image: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, length: float, thickness: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def drawFrameAxes(image: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat, tvec: UMat, length: float, thickness: int = ...) -> UMat: ...
|
|
# drawKeypoints(image, keypoints, outImage[, color[, flags]]) -> outImage.
# MatLike/UMat overloads; `flags` is a DrawMatchesFlags value.
@_typing.overload
def drawKeypoints(image: cv2.typing.MatLike, keypoints: _typing.Sequence[KeyPoint], outImage: cv2.typing.MatLike, color: cv2.typing.Scalar = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def drawKeypoints(image: UMat, keypoints: _typing.Sequence[KeyPoint], outImage: UMat, color: cv2.typing.Scalar = ..., flags: DrawMatchesFlags = ...) -> UMat: ...
|
|
# drawMarker(img, position, color[, markerType[, markerSize[, thickness[, line_type]]]])
# -> img. MatLike/UMat overloads.
@_typing.overload
def drawMarker(img: cv2.typing.MatLike, position: cv2.typing.Point, color: cv2.typing.Scalar, markerType: int = ..., markerSize: int = ..., thickness: int = ..., line_type: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def drawMarker(img: UMat, position: cv2.typing.Point, color: cv2.typing.Scalar, markerType: int = ..., markerSize: int = ..., thickness: int = ..., line_type: int = ...) -> UMat: ...
|
|
# drawMatches: four stub overloads — with and without `matchesThickness`, each
# for MatLike and UMat. Returns the composite `outImg`.
# NOTE(review): `matchesMask` is typed Sequence[str]; this comes from the
# generator's mapping of C++ vector<char> — confirm against the binding.
@_typing.overload
def drawMatches(img1: cv2.typing.MatLike, keypoints1: _typing.Sequence[KeyPoint], img2: cv2.typing.MatLike, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: cv2.typing.MatLike, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def drawMatches(img1: UMat, keypoints1: _typing.Sequence[KeyPoint], img2: UMat, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: UMat, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> UMat: ...
@_typing.overload
def drawMatches(img1: cv2.typing.MatLike, keypoints1: _typing.Sequence[KeyPoint], img2: cv2.typing.MatLike, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: cv2.typing.MatLike, matchesThickness: int, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def drawMatches(img1: UMat, keypoints1: _typing.Sequence[KeyPoint], img2: UMat, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[DMatch], outImg: UMat, matchesThickness: int, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[str] = ..., flags: DrawMatchesFlags = ...) -> UMat: ...
|
|
# drawMatchesKnn: like drawMatches but `matches1to2` and `matchesMask` are
# nested sequences (one inner sequence per query keypoint). MatLike/UMat overloads.
@_typing.overload
def drawMatchesKnn(img1: cv2.typing.MatLike, keypoints1: _typing.Sequence[KeyPoint], img2: cv2.typing.MatLike, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[_typing.Sequence[DMatch]], outImg: cv2.typing.MatLike, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[_typing.Sequence[str]] = ..., flags: DrawMatchesFlags = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def drawMatchesKnn(img1: UMat, keypoints1: _typing.Sequence[KeyPoint], img2: UMat, keypoints2: _typing.Sequence[KeyPoint], matches1to2: _typing.Sequence[_typing.Sequence[DMatch]], outImg: UMat, matchColor: cv2.typing.Scalar = ..., singlePointColor: cv2.typing.Scalar = ..., matchesMask: _typing.Sequence[_typing.Sequence[str]] = ..., flags: DrawMatchesFlags = ...) -> UMat: ...
|
|
# edgePreservingFilter(src[, dst[, flags[, sigma_s[, sigma_r]]]]) -> dst.
# MatLike/UMat overloads; two float smoothing parameters.
@_typing.overload
def edgePreservingFilter(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ..., sigma_s: float = ..., sigma_r: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def edgePreservingFilter(src: UMat, dst: UMat | None = ..., flags: int = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ...
|
|
# eigen(src[, eigenvalues[, eigenvectors]]) -> (retval, eigenvalues, eigenvectors).
# Returns a bool status plus the two output arrays. MatLike/UMat overloads.
@_typing.overload
def eigen(src: cv2.typing.MatLike, eigenvalues: cv2.typing.MatLike | None = ..., eigenvectors: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def eigen(src: UMat, eigenvalues: UMat | None = ..., eigenvectors: UMat | None = ...) -> tuple[bool, UMat, UMat]: ...
|
|
# eigenNonSymmetric(src[, eigenvalues[, eigenvectors]]) -> (eigenvalues, eigenvectors).
# Unlike eigen(), no bool status is returned. MatLike/UMat overloads.
@_typing.overload
def eigenNonSymmetric(src: cv2.typing.MatLike, eigenvalues: cv2.typing.MatLike | None = ..., eigenvectors: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def eigenNonSymmetric(src: UMat, eigenvalues: UMat | None = ..., eigenvectors: UMat | None = ...) -> tuple[UMat, UMat]: ...
|
|
# ellipse: four stub overloads — explicit (center, axes, angle, startAngle,
# endAngle) form and compact RotatedRect `box` form, each for MatLike and UMat.
@_typing.overload
def ellipse(img: cv2.typing.MatLike, center: cv2.typing.Point, axes: cv2.typing.Size, angle: float, startAngle: float, endAngle: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def ellipse(img: UMat, center: cv2.typing.Point, axes: cv2.typing.Size, angle: float, startAngle: float, endAngle: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ...
@_typing.overload
def ellipse(img: cv2.typing.MatLike, box: cv2.typing.RotatedRect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def ellipse(img: UMat, box: cv2.typing.RotatedRect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ...) -> UMat: ...
|
|
# ellipse2Poly: converts ellipse arc parameters (all int, per the C++ binding)
# into a polyline as a sequence of Points.
def ellipse2Poly(center: cv2.typing.Point, axes: cv2.typing.Size, angle: int, arcStart: int, arcEnd: int, delta: int) -> _typing.Sequence[cv2.typing.Point]: ...
|
|
# G-API helper stub: returns an empty GArrayDesc.
def empty_array_desc() -> GArrayDesc: ...
|
|
# G-API helper stub: returns an empty GOpaqueDesc.
def empty_gopaque_desc() -> GOpaqueDesc: ...
|
|
# G-API helper stub: returns an empty GScalarDesc.
def empty_scalar_desc() -> GScalarDesc: ...
|
|
# equalizeHist(src[, dst]) -> dst. MatLike/UMat overloads.
@_typing.overload
def equalizeHist(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def equalizeHist(src: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]])
# -> dst. Morphological erosion stub; MatLike/UMat overloads.
@_typing.overload
def erode(src: cv2.typing.MatLike, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def erode(src: UMat, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ...
|
|
# estimateAffine2D: four stub overloads — RANSAC-parameter form (from_, to, ...)
# and UsacParams form (pts1, pts2, params, ...), each for MatLike and UMat.
# Returns (transform as MatLike, inliers mask); note the transform is MatLike
# even in UMat overloads, matching the generated bindings.
@_typing.overload
def estimateAffine2D(from_: cv2.typing.MatLike, to: cv2.typing.MatLike, inliers: cv2.typing.MatLike | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def estimateAffine2D(from_: UMat, to: UMat, inliers: UMat | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
@_typing.overload
def estimateAffine2D(pts1: cv2.typing.MatLike, pts2: cv2.typing.MatLike, params: UsacParams, inliers: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def estimateAffine2D(pts1: UMat, pts2: UMat, params: UsacParams, inliers: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
|
|
# estimateAffine3D: four stub overloads. The RANSAC form returns
# (status int, transform, inliers); the `force_rotation` form returns
# (transform, scale float). Each form has MatLike and UMat variants.
@_typing.overload
def estimateAffine3D(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, out: cv2.typing.MatLike | None = ..., inliers: cv2.typing.MatLike | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def estimateAffine3D(src: UMat, dst: UMat, out: UMat | None = ..., inliers: UMat | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, UMat, UMat]: ...
@_typing.overload
def estimateAffine3D(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, force_rotation: bool = ...) -> tuple[cv2.typing.MatLike, float]: ...
@_typing.overload
def estimateAffine3D(src: UMat, dst: UMat, force_rotation: bool = ...) -> tuple[cv2.typing.MatLike, float]: ...
|
|
# estimateAffinePartial2D(from_, to[, inliers[, RANSAC params...]])
# -> (transform MatLike, inliers). MatLike/UMat overloads.
@_typing.overload
def estimateAffinePartial2D(from_: cv2.typing.MatLike, to: cv2.typing.MatLike, inliers: cv2.typing.MatLike | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def estimateAffinePartial2D(from_: UMat, to: UMat, inliers: UMat | None = ..., method: int = ..., ransacReprojThreshold: float = ..., maxIters: int = ..., confidence: float = ..., refineIters: int = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
|
|
# estimateChessboardSharpness(image, patternSize, corners[, ...])
# -> (Scalar stats, sharpness array). MatLike/UMat overloads.
@_typing.overload
def estimateChessboardSharpness(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike, rise_distance: float = ..., vertical: bool = ..., sharpness: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Scalar, cv2.typing.MatLike]: ...
@_typing.overload
def estimateChessboardSharpness(image: UMat, patternSize: cv2.typing.Size, corners: UMat, rise_distance: float = ..., vertical: bool = ..., sharpness: UMat | None = ...) -> tuple[cv2.typing.Scalar, UMat]: ...
|
|
# estimateTranslation3D(src, dst[, out[, inliers[, ransacThreshold[, confidence]]]])
# -> (status int, out, inliers). MatLike/UMat overloads.
@_typing.overload
def estimateTranslation3D(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, out: cv2.typing.MatLike | None = ..., inliers: cv2.typing.MatLike | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def estimateTranslation3D(src: UMat, dst: UMat, out: UMat | None = ..., inliers: UMat | None = ..., ransacThreshold: float = ..., confidence: float = ...) -> tuple[int, UMat, UMat]: ...
|
|
# exp(src[, dst]) -> dst. Per-element exponential stub; MatLike/UMat overloads.
@_typing.overload
def exp(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def exp(src: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# extractChannel(src, coi[, dst]) -> dst. `coi` is the channel-of-interest index.
# MatLike/UMat overloads.
@_typing.overload
def extractChannel(src: cv2.typing.MatLike, coi: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def extractChannel(src: UMat, coi: int, dst: UMat | None = ...) -> UMat: ...
|
|
# fastAtan2(y, x) -> float. Scalar stub; note the (y, x) argument order.
def fastAtan2(y: float, x: float) -> float: ...
|
|
# fastNlMeansDenoising: four stub overloads — scalar `h: float` form and
# per-channel `h: Sequence[float]` form (the latter adds `normType`), each for
# MatLike and UMat.
@_typing.overload
def fastNlMeansDenoising(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastNlMeansDenoising(src: UMat, dst: UMat | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ...
@_typing.overload
def fastNlMeansDenoising(src: cv2.typing.MatLike, h: _typing.Sequence[float], dst: cv2.typing.MatLike | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastNlMeansDenoising(src: UMat, h: _typing.Sequence[float], dst: UMat | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> UMat: ...
|
|
# fastNlMeansDenoisingColored(src[, dst[, h[, hColor[, window sizes]]]]) -> dst.
# Separate `h`/`hColor` strengths; MatLike/UMat overloads.
@_typing.overload
def fastNlMeansDenoisingColored(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastNlMeansDenoisingColored(src: UMat, dst: UMat | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ...
|
|
# fastNlMeansDenoisingColoredMulti(srcImgs, imgToDenoiseIndex, temporalWindowSize,
# ...) -> dst. Multi-frame variant taking a sequence of images; MatLike/UMat overloads.
@_typing.overload
def fastNlMeansDenoisingColoredMulti(srcImgs: _typing.Sequence[cv2.typing.MatLike], imgToDenoiseIndex: int, temporalWindowSize: int, dst: cv2.typing.MatLike | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastNlMeansDenoisingColoredMulti(srcImgs: _typing.Sequence[UMat], imgToDenoiseIndex: int, temporalWindowSize: int, dst: UMat | None = ..., h: float = ..., hColor: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ...
|
|
# fastNlMeansDenoisingMulti: four stub overloads mirroring fastNlMeansDenoising
# (scalar `h` vs Sequence[float] `h` + `normType`) over a sequence of frames,
# each for MatLike and UMat.
@_typing.overload
def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[cv2.typing.MatLike], imgToDenoiseIndex: int, temporalWindowSize: int, dst: cv2.typing.MatLike | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[UMat], imgToDenoiseIndex: int, temporalWindowSize: int, dst: UMat | None = ..., h: float = ..., templateWindowSize: int = ..., searchWindowSize: int = ...) -> UMat: ...
@_typing.overload
def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[cv2.typing.MatLike], imgToDenoiseIndex: int, temporalWindowSize: int, h: _typing.Sequence[float], dst: cv2.typing.MatLike | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastNlMeansDenoisingMulti(srcImgs: _typing.Sequence[UMat], imgToDenoiseIndex: int, temporalWindowSize: int, h: _typing.Sequence[float], dst: UMat | None = ..., templateWindowSize: int = ..., searchWindowSize: int = ..., normType: int = ...) -> UMat: ...
|
|
# fillConvexPoly(img, points, color[, lineType[, shift]]) -> img.
# Single polygon given as one array of points; MatLike/UMat overloads.
@_typing.overload
def fillConvexPoly(img: cv2.typing.MatLike, points: cv2.typing.MatLike, color: cv2.typing.Scalar, lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fillConvexPoly(img: UMat, points: UMat, color: cv2.typing.Scalar, lineType: int = ..., shift: int = ...) -> UMat: ...
|
|
# fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> img.
# `pts` is a sequence of polygons (one array each); MatLike/UMat overloads.
@_typing.overload
def fillPoly(img: cv2.typing.MatLike, pts: _typing.Sequence[cv2.typing.MatLike], color: cv2.typing.Scalar, lineType: int = ..., shift: int = ..., offset: cv2.typing.Point = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fillPoly(img: UMat, pts: _typing.Sequence[UMat], color: cv2.typing.Scalar, lineType: int = ..., shift: int = ..., offset: cv2.typing.Point = ...) -> UMat: ...
|
|
# filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst.
# 2-D correlation/convolution stub; MatLike/UMat overloads.
@_typing.overload
def filter2D(src: cv2.typing.MatLike, ddepth: int, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def filter2D(src: UMat, ddepth: int, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> UMat: ...
|
|
# filterHomographyDecompByVisibleRefpoints(rotations, normals, beforePoints,
# afterPoints[, possibleSolutions[, pointsMask]]) -> possibleSolutions.
# MatLike/UMat overloads; rotations/normals are sequences of arrays.
@_typing.overload
def filterHomographyDecompByVisibleRefpoints(rotations: _typing.Sequence[cv2.typing.MatLike], normals: _typing.Sequence[cv2.typing.MatLike], beforePoints: cv2.typing.MatLike, afterPoints: cv2.typing.MatLike, possibleSolutions: cv2.typing.MatLike | None = ..., pointsMask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def filterHomographyDecompByVisibleRefpoints(rotations: _typing.Sequence[UMat], normals: _typing.Sequence[UMat], beforePoints: UMat, afterPoints: UMat, possibleSolutions: UMat | None = ..., pointsMask: UMat | None = ...) -> UMat: ...
|
|
# filterSpeckles(img, newVal, maxSpeckleSize, maxDiff[, buf]) -> (img, buf).
# In-place disparity cleanup stub; MatLike/UMat overloads.
@_typing.overload
def filterSpeckles(img: cv2.typing.MatLike, newVal: float, maxSpeckleSize: int, maxDiff: float, buf: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def filterSpeckles(img: UMat, newVal: float, maxSpeckleSize: int, maxDiff: float, buf: UMat | None = ...) -> tuple[UMat, UMat]: ...
|
|
# find4QuadCornerSubpix(img, corners, region_size) -> (found bool, corners).
# MatLike/UMat overloads.
@_typing.overload
def find4QuadCornerSubpix(img: cv2.typing.MatLike, corners: cv2.typing.MatLike, region_size: cv2.typing.Size) -> tuple[bool, cv2.typing.MatLike]: ...
@_typing.overload
def find4QuadCornerSubpix(img: UMat, corners: UMat, region_size: cv2.typing.Size) -> tuple[bool, UMat]: ...
|
|
# findChessboardCorners(image, patternSize[, corners[, flags]])
# -> (found bool, corners). MatLike/UMat overloads.
@_typing.overload
def findChessboardCorners(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@_typing.overload
def findChessboardCorners(image: UMat, patternSize: cv2.typing.Size, corners: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ...
|
|
# findChessboardCornersSB(image, patternSize[, corners[, flags]])
# -> (found bool, corners). Same shape as findChessboardCorners; MatLike/UMat overloads.
@_typing.overload
def findChessboardCornersSB(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, corners: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@_typing.overload
def findChessboardCornersSB(image: UMat, patternSize: cv2.typing.Size, corners: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ...
|
|
# findChessboardCornersSBWithMeta(image, patternSize, flags[, corners[, meta]])
# -> (found bool, corners, meta). `flags` is required here, unlike the SB variant.
@_typing.overload
def findChessboardCornersSBWithMeta(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, flags: int, corners: cv2.typing.MatLike | None = ..., meta: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findChessboardCornersSBWithMeta(image: UMat, patternSize: cv2.typing.Size, flags: int, corners: UMat | None = ..., meta: UMat | None = ...) -> tuple[bool, UMat, UMat]: ...
|
|
# findCirclesGrid: four stub overloads — explicit (flags, blobDetector,
# parameters) form and defaulted form, each for MatLike and UMat.
# Returns (found bool, centers).
@_typing.overload
def findCirclesGrid(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, flags: int, blobDetector: cv2.typing.FeatureDetector, parameters: CirclesGridFinderParameters, centers: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@_typing.overload
def findCirclesGrid(image: UMat, patternSize: cv2.typing.Size, flags: int, blobDetector: cv2.typing.FeatureDetector, parameters: CirclesGridFinderParameters, centers: UMat | None = ...) -> tuple[bool, UMat]: ...
@_typing.overload
def findCirclesGrid(image: cv2.typing.MatLike, patternSize: cv2.typing.Size, centers: cv2.typing.MatLike | None = ..., flags: int = ..., blobDetector: cv2.typing.FeatureDetector = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@_typing.overload
def findCirclesGrid(image: UMat, patternSize: cv2.typing.Size, centers: UMat | None = ..., flags: int = ..., blobDetector: cv2.typing.FeatureDetector = ...) -> tuple[bool, UMat]: ...
|
|
# findContours(image, mode, method[, contours[, hierarchy[, offset]]])
# -> (contours sequence, hierarchy). MatLike/UMat overloads.
@_typing.overload
def findContours(image: cv2.typing.MatLike, mode: int, method: int, contours: _typing.Sequence[cv2.typing.MatLike] | None = ..., hierarchy: cv2.typing.MatLike | None = ..., offset: cv2.typing.Point = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
@_typing.overload
def findContours(image: UMat, mode: int, method: int, contours: _typing.Sequence[UMat] | None = ..., hierarchy: UMat | None = ..., offset: cv2.typing.Point = ...) -> tuple[_typing.Sequence[UMat], UMat]: ...
|
|
# findContoursLinkRuns: four stub overloads — with hierarchy (returns
# (contours, hierarchy)) and without (returns contours only), each for
# MatLike and UMat.
@_typing.overload
def findContoursLinkRuns(image: cv2.typing.MatLike, contours: _typing.Sequence[cv2.typing.MatLike] | None = ..., hierarchy: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
@_typing.overload
def findContoursLinkRuns(image: UMat, contours: _typing.Sequence[UMat] | None = ..., hierarchy: UMat | None = ...) -> tuple[_typing.Sequence[UMat], UMat]: ...
@_typing.overload
def findContoursLinkRuns(image: cv2.typing.MatLike, contours: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
@_typing.overload
def findContoursLinkRuns(image: UMat, contours: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[UMat]: ...
|
|
# findEssentialMat: eight stub overloads, four call shapes x (MatLike, UMat):
#   1) single cameraMatrix + RANSAC parameters;
#   2) focal/pp pinhole shortcut;
#   3) two camera matrices with distortion coefficients;
#   4) UsacParams form (note dist_coeff args follow both camera matrices here).
# All return (E as MatLike, mask).
@_typing.overload
def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findEssentialMat(points1: UMat, points2: UMat, cameraMatrix: UMat, method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
@_typing.overload
def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, focal: float = ..., pp: cv2.typing.Point2d = ..., method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findEssentialMat(points1: UMat, points2: UMat, focal: float = ..., pp: cv2.typing.Point2d = ..., method: int = ..., prob: float = ..., threshold: float = ..., maxIters: int = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
@_typing.overload
def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, method: int = ..., prob: float = ..., threshold: float = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findEssentialMat(points1: UMat, points2: UMat, cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, method: int = ..., prob: float = ..., threshold: float = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
@_typing.overload
def findEssentialMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, dist_coeff1: cv2.typing.MatLike, dist_coeff2: cv2.typing.MatLike, params: UsacParams, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findEssentialMat(points1: UMat, points2: UMat, cameraMatrix1: UMat, cameraMatrix2: UMat, dist_coeff1: UMat, dist_coeff2: UMat, params: UsacParams, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
|
|
# findFundamentalMat: six stub overloads, three call shapes x (MatLike, UMat):
# required-RANSAC-params form (includes maxIters), defaulted form (no maxIters),
# and UsacParams form. All return (F as MatLike, mask).
@_typing.overload
def findFundamentalMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, method: int, ransacReprojThreshold: float, confidence: float, maxIters: int, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findFundamentalMat(points1: UMat, points2: UMat, method: int, ransacReprojThreshold: float, confidence: float, maxIters: int, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
@_typing.overload
def findFundamentalMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, method: int = ..., ransacReprojThreshold: float = ..., confidence: float = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findFundamentalMat(points1: UMat, points2: UMat, method: int = ..., ransacReprojThreshold: float = ..., confidence: float = ..., mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
@_typing.overload
def findFundamentalMat(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, params: UsacParams, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findFundamentalMat(points1: UMat, points2: UMat, params: UsacParams, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
|
|
# findHomography: four stub overloads — RANSAC-parameter form and UsacParams
# form, each for MatLike and UMat. Returns (H as MatLike, mask).
@_typing.overload
def findHomography(srcPoints: cv2.typing.MatLike, dstPoints: cv2.typing.MatLike, method: int = ..., ransacReprojThreshold: float = ..., mask: cv2.typing.MatLike | None = ..., maxIters: int = ..., confidence: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findHomography(srcPoints: UMat, dstPoints: UMat, method: int = ..., ransacReprojThreshold: float = ..., mask: UMat | None = ..., maxIters: int = ..., confidence: float = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
@_typing.overload
def findHomography(srcPoints: cv2.typing.MatLike, dstPoints: cv2.typing.MatLike, params: UsacParams, mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def findHomography(srcPoints: UMat, dstPoints: UMat, params: UsacParams, mask: UMat | None = ...) -> tuple[cv2.typing.MatLike, UMat]: ...
|
|
# findNonZero(src[, idx]) -> idx. MatLike/UMat overloads.
@_typing.overload
def findNonZero(src: cv2.typing.MatLike, idx: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def findNonZero(src: UMat, idx: UMat | None = ...) -> UMat: ...
|
|
# findTransformECC: four stub overloads — explicit form with required inputMask
# and gaussFiltSize, and defaulted form without gaussFiltSize, each for MatLike
# and UMat. Returns (correlation float, warpMatrix).
@_typing.overload
def findTransformECC(templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike, warpMatrix: cv2.typing.MatLike, motionType: int, criteria: cv2.typing.TermCriteria, inputMask: cv2.typing.MatLike, gaussFiltSize: int) -> tuple[float, cv2.typing.MatLike]: ...
@_typing.overload
def findTransformECC(templateImage: UMat, inputImage: UMat, warpMatrix: UMat, motionType: int, criteria: cv2.typing.TermCriteria, inputMask: UMat, gaussFiltSize: int) -> tuple[float, UMat]: ...
@_typing.overload
def findTransformECC(templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike, warpMatrix: cv2.typing.MatLike, motionType: int = ..., criteria: cv2.typing.TermCriteria = ..., inputMask: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike]: ...
@_typing.overload
def findTransformECC(templateImage: UMat, inputImage: UMat, warpMatrix: UMat, motionType: int = ..., criteria: cv2.typing.TermCriteria = ..., inputMask: UMat | None = ...) -> tuple[float, UMat]: ...
|
|
# fitEllipse(points) -> RotatedRect. MatLike/UMat overloads, same return type.
@_typing.overload
def fitEllipse(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@_typing.overload
def fitEllipse(points: UMat) -> cv2.typing.RotatedRect: ...
|
|
# fitEllipseAMS(points) -> RotatedRect. MatLike/UMat overloads, same return type.
@_typing.overload
def fitEllipseAMS(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@_typing.overload
def fitEllipseAMS(points: UMat) -> cv2.typing.RotatedRect: ...
|
|
# fitEllipseDirect(points) -> RotatedRect. MatLike/UMat overloads, same return type.
@_typing.overload
def fitEllipseDirect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@_typing.overload
def fitEllipseDirect(points: UMat) -> cv2.typing.RotatedRect: ...
|
|
# fitLine(points, distType, param, reps, aeps[, line]) -> line.
# MatLike/UMat overloads.
@_typing.overload
def fitLine(points: cv2.typing.MatLike, distType: int, param: float, reps: float, aeps: float, line: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fitLine(points: UMat, distType: int, param: float, reps: float, aeps: float, line: UMat | None = ...) -> UMat: ...
|
|
# flip(src, flipCode[, dst]) -> dst. MatLike/UMat overloads.
@_typing.overload
def flip(src: cv2.typing.MatLike, flipCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def flip(src: UMat, flipCode: int, dst: UMat | None = ...) -> UMat: ...
|
|
# flipND(src, axis[, dst]) -> dst. N-dimensional flip along `axis`;
# MatLike/UMat overloads.
@_typing.overload
def flipND(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def flipND(src: UMat, axis: int, dst: UMat | None = ...) -> UMat: ...
|
|
# floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]])
# -> (area int, image, mask, bounding Rect). `mask` is positional and may be
# None. MatLike/UMat overloads.
@_typing.overload
def floodFill(image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None, seedPoint: cv2.typing.Point, newVal: cv2.typing.Scalar, loDiff: cv2.typing.Scalar = ..., upDiff: cv2.typing.Scalar = ..., flags: int = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.Rect]: ...
@_typing.overload
def floodFill(image: UMat, mask: UMat | None, seedPoint: cv2.typing.Point, newVal: cv2.typing.Scalar, loDiff: cv2.typing.Scalar = ..., upDiff: cv2.typing.Scalar = ..., flags: int = ...) -> tuple[int, UMat, UMat, cv2.typing.Rect]: ...
|
|
# gemm(src1, src2, alpha, src3, beta[, dst[, flags]]) -> dst.
# Generalized matrix multiply stub; MatLike/UMat overloads.
@_typing.overload
def gemm(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, alpha: float, src3: cv2.typing.MatLike, beta: float, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def gemm(src1: UMat, src2: UMat, alpha: float, src3: UMat, beta: float, dst: UMat | None = ..., flags: int = ...) -> UMat: ...
|
|
# getAffineTransform(src, dst) -> MatLike. Both overloads return MatLike even
# for UMat inputs, matching the generated bindings.
@_typing.overload
def getAffineTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@_typing.overload
def getAffineTransform(src: UMat, dst: UMat) -> cv2.typing.MatLike: ...
|
|
# No-argument query stub returning a str (build configuration text).
def getBuildInformation() -> str: ...
|
|
# No-argument query stub returning a str (CPU features line).
def getCPUFeaturesLine() -> str: ...
|
|
# No-argument query stub returning an int tick count.
def getCPUTickCount() -> int: ...
|
|
# No-argument query stub returning an AlgorithmHint enum value.
def getDefaultAlgorithmHint() -> AlgorithmHint: ...
|
|
# getDefaultNewCameraMatrix(cameraMatrix[, imgsize[, centerPrincipalPoint]])
# -> MatLike. Both overloads return MatLike even for a UMat input.
@_typing.overload
def getDefaultNewCameraMatrix(cameraMatrix: cv2.typing.MatLike, imgsize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def getDefaultNewCameraMatrix(cameraMatrix: UMat, imgsize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> cv2.typing.MatLike: ...
|
|
# getDerivKernels(dx, dy, ksize[, kx[, ky[, normalize[, ktype]]]]) -> (kx, ky).
# MatLike/UMat overloads for the two output kernels.
@_typing.overload
def getDerivKernels(dx: int, dy: int, ksize: int, kx: cv2.typing.MatLike | None = ..., ky: cv2.typing.MatLike | None = ..., normalize: bool = ..., ktype: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def getDerivKernels(dx: int, dy: int, ksize: int, kx: UMat | None = ..., ky: UMat | None = ..., normalize: bool = ..., ktype: int = ...) -> tuple[UMat, UMat]: ...
|
|
# Scalar stub: maps (fontFace, pixelHeight[, thickness]) to a float font scale.
def getFontScaleFromHeight(fontFace: int, pixelHeight: int, thickness: int = ...) -> float: ...
|
|
# Kernel-builder stub: returns a MatLike built from the Gabor parameters.
def getGaborKernel(ksize: cv2.typing.Size, sigma: float, theta: float, lambd: float, gamma: float, psi: float = ..., ktype: int = ...) -> cv2.typing.MatLike: ...
|
|
# Kernel-builder stub: returns a MatLike of `ksize` Gaussian coefficients.
def getGaussianKernel(ksize: int, sigma: float, ktype: int = ...) -> cv2.typing.MatLike: ...
|
|
# Query stub: maps a feature id (int) to its name (str).
def getHardwareFeatureName(feature: int) -> str: ...
|
|
# No-argument query stub returning the current log level as int.
def getLogLevel() -> int: ...
|
|
# No-argument query stub returning the thread count used by OpenCV (int).
def getNumThreads() -> int: ...
|
|
# No-argument query stub returning the detected CPU count (int).
def getNumberOfCPUs() -> int: ...
|
|
# Scalar stub: maps a vector size to an optimal DFT size (int -> int).
def getOptimalDFTSize(vecsize: int) -> int: ...
|
|
# getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, alpha[, ...])
# -> (new camera matrix as MatLike, validPixROI Rect). Both overloads return
# MatLike even for UMat inputs.
@_typing.overload
def getOptimalNewCameraMatrix(cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, imageSize: cv2.typing.Size, alpha: float, newImgSize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.Rect]: ...
@_typing.overload
def getOptimalNewCameraMatrix(cameraMatrix: UMat, distCoeffs: UMat, imageSize: cv2.typing.Size, alpha: float, newImgSize: cv2.typing.Size = ..., centerPrincipalPoint: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.Rect]: ...
|
|
# getPerspectiveTransform(src, dst[, solveMethod]) -> MatLike.
# Both overloads return MatLike even for UMat inputs.
@_typing.overload
def getPerspectiveTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, solveMethod: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def getPerspectiveTransform(src: UMat, dst: UMat, solveMethod: int = ...) -> cv2.typing.MatLike: ...
|
|
# getRectSubPix(image, patchSize, center[, patch[, patchType]]) -> patch.
# MatLike/UMat overloads; `center` is a sub-pixel Point2f.
@_typing.overload
def getRectSubPix(image: cv2.typing.MatLike, patchSize: cv2.typing.Size, center: cv2.typing.Point2f, patch: cv2.typing.MatLike | None = ..., patchType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def getRectSubPix(image: UMat, patchSize: cv2.typing.Size, center: cv2.typing.Point2f, patch: UMat | None = ..., patchType: int = ...) -> UMat: ...
|
|
# Builder stub: (center, angle, scale) -> 2D rotation matrix as MatLike.
def getRotationMatrix2D(center: cv2.typing.Point2f, angle: float, scale: float) -> cv2.typing.MatLike: ...
|
|
# Builder stub: (shape, ksize[, anchor]) -> structuring element as MatLike.
def getStructuringElement(shape: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ...) -> cv2.typing.MatLike: ...
|
|
# Query stub: returns (text Size, baseline int) for the given font settings.
def getTextSize(text: str, fontFace: int, fontScale: float, thickness: int) -> tuple[cv2.typing.Size, int]: ...
|
|
# No-argument query stub returning the current thread index (int).
def getThreadNum() -> int: ...
|
|
# No-argument query stub returning a tick count (int).
def getTickCount() -> int: ...
|
|
# No-argument query stub returning ticks per second (float).
def getTickFrequency() -> float: ...
|
|
# HighGUI query stub: (trackbarname, winname) -> current position (int).
def getTrackbarPos(trackbarname: str, winname: str) -> int: ...
|
|
# Stereo helper stub: combines two ROIs with disparity settings into one Rect.
def getValidDisparityROI(roi1: cv2.typing.Rect, roi2: cv2.typing.Rect, minDisparity: int, numberOfDisparities: int, blockSize: int) -> cv2.typing.Rect: ...
|
|
# Stub: major component of the OpenCV version.
def getVersionMajor() -> int: ...
|
|
# Stub: minor component of the OpenCV version.
def getVersionMinor() -> int: ...
|
|
# Stub: revision component of the OpenCV version.
def getVersionRevision() -> int: ...
|
|
# Stub: full OpenCV version as a string.
def getVersionString() -> str: ...
|
|
# Stub: client-area rectangle of a HighGUI window's image region.
def getWindowImageRect(winname: str) -> cv2.typing.Rect: ...
|
|
# Stub: reads a window property (WND_PROP_* constant) as a float.
def getWindowProperty(winname: str, prop_id: int) -> float: ...
|
|
# Four overloads: (Mat | UMat) x (keyword mask form | positional mask + gradientSize form).
@_typing.overload
def goodFeaturesToTrack(image: cv2.typing.MatLike, maxCorners: int, qualityLevel: float, minDistance: float, corners: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def goodFeaturesToTrack(image: UMat, maxCorners: int, qualityLevel: float, minDistance: float, corners: UMat | None = ..., mask: UMat | None = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> UMat: ...
@_typing.overload
def goodFeaturesToTrack(image: cv2.typing.MatLike, maxCorners: int, qualityLevel: float, minDistance: float, mask: cv2.typing.MatLike, blockSize: int, gradientSize: int, corners: cv2.typing.MatLike | None = ..., useHarrisDetector: bool = ..., k: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def goodFeaturesToTrack(image: UMat, maxCorners: int, qualityLevel: float, minDistance: float, mask: UMat, blockSize: int, gradientSize: int, corners: UMat | None = ..., useHarrisDetector: bool = ..., k: float = ...) -> UMat: ...
|
|
# Mat/UMat overloads; returns (corners, per-corner quality) as a pair.
@_typing.overload
def goodFeaturesToTrackWithQuality(image: cv2.typing.MatLike, maxCorners: int, qualityLevel: float, minDistance: float, mask: cv2.typing.MatLike, corners: cv2.typing.MatLike | None = ..., cornersQuality: cv2.typing.MatLike | None = ..., blockSize: int = ..., gradientSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def goodFeaturesToTrackWithQuality(image: UMat, maxCorners: int, qualityLevel: float, minDistance: float, mask: UMat, corners: UMat | None = ..., cornersQuality: UMat | None = ..., blockSize: int = ..., gradientSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; returns the updated (mask, bgdModel, fgdModel) in/out triple.
@_typing.overload
def grabCut(img: cv2.typing.MatLike, mask: cv2.typing.MatLike, rect: cv2.typing.Rect, bgdModel: cv2.typing.MatLike, fgdModel: cv2.typing.MatLike, iterCount: int, mode: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def grabCut(img: UMat, mask: UMat, rect: cv2.typing.Rect, bgdModel: UMat, fgdModel: UMat, iterCount: int, mode: int = ...) -> tuple[UMat, UMat, UMat]: ...
|
|
# Stub: clusters similar rectangles; returns (grouped rects, per-cluster weights).
def groupRectangles(rectList: _typing.Sequence[cv2.typing.Rect], groupThreshold: int, eps: float = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[int]]: ...
|
|
# Mat/UMat overloads; True if the array contains at least one non-zero element.
@_typing.overload
def hasNonZero(src: cv2.typing.MatLike) -> bool: ...
@_typing.overload
def hasNonZero(src: UMat) -> bool: ...
|
|
# Stub: True if OpenCV can decode the image format implied by `filename`.
def haveImageReader(filename: str) -> bool: ...
|
|
# Stub: True if OpenCV can encode the image format implied by `filename`.
def haveImageWriter(filename: str) -> bool: ...
|
|
# Stub: True when the build has OpenVX acceleration available.
def haveOpenVX() -> bool: ...
|
|
# Mat/UMat overloads; horizontally concatenates a sequence of same-height arrays.
@_typing.overload
def hconcat(src: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def hconcat(src: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; inverse discrete cosine transform.
@_typing.overload
def idct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def idct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; inverse discrete Fourier transform.
@_typing.overload
def idft(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ..., nonzeroRows: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def idft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; seamless-cloning illumination adjustment inside `mask`.
@_typing.overload
def illuminationChange(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., alpha: float = ..., beta: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def illuminationChange(src: UMat, mask: UMat, dst: UMat | None = ..., alpha: float = ..., beta: float = ...) -> UMat: ...
|
|
# Stub: number of images (pages/frames) in a multi-image file.
def imcount(filename: str, flags: int = ...) -> int: ...
|
|
# Buffer may be Mat or UMat; the decoded image is always returned as MatLike.
@_typing.overload
def imdecode(buf: cv2.typing.MatLike, flags: int) -> cv2.typing.MatLike: ...
@_typing.overload
def imdecode(buf: UMat, flags: int) -> cv2.typing.MatLike: ...
|
|
# Decodes a multi-image buffer; returns (success, decoded Mats) in both overloads.
@_typing.overload
def imdecodemulti(buf: cv2.typing.MatLike, flags: int, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., range: cv2.typing.Range = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
@_typing.overload
def imdecodemulti(buf: UMat, flags: int, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., range: cv2.typing.Range = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
|
|
# Mat/UMat overloads; returns (success, encoded bytes as a uint8 ndarray).
@_typing.overload
def imencode(ext: str, img: cv2.typing.MatLike, params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
@_typing.overload
def imencode(ext: str, img: UMat, params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
|
|
# Multi-image variant of imencode; (success, encoded uint8 buffer).
@_typing.overload
def imencodemulti(ext: str, imgs: _typing.Sequence[cv2.typing.MatLike], params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
@_typing.overload
def imencodemulti(ext: str, imgs: _typing.Sequence[UMat], params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
|
|
# Three overloads: classic (filename, flags) plus dst-buffer forms for Mat and UMat.
@_typing.overload
def imread(filename: str, flags: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def imread(filename: str, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def imread(filename: str, dst: UMat | None = ..., flags: int = ...) -> UMat: ...
|
|
# Stub: loads an animated image; returns (success, Animation object).
def imreadanimation(filename: str, start: int = ..., count: int = ...) -> tuple[bool, Animation]: ...
|
|
# Two overloads: full-file read vs. (start, count) page-range read; both return (success, Mats).
@_typing.overload
def imreadmulti(filename: str, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
@_typing.overload
def imreadmulti(filename: str, start: int, count: int, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
|
|
# Displays an image in a HighGUI window; accepts Mat, CUDA GpuMat, or UMat.
@_typing.overload
def imshow(winname: str, mat: cv2.typing.MatLike) -> None: ...
@_typing.overload
def imshow(winname: str, mat: cv2.cuda.GpuMat) -> None: ...
@_typing.overload
def imshow(winname: str, mat: UMat) -> None: ...
|
|
# Mat/UMat overloads; returns True on successful write.
@_typing.overload
def imwrite(filename: str, img: cv2.typing.MatLike, params: _typing.Sequence[int] = ...) -> bool: ...
@_typing.overload
def imwrite(filename: str, img: UMat, params: _typing.Sequence[int] = ...) -> bool: ...
|
|
# Stub: writes an Animation object to file; True on success.
def imwriteanimation(filename: str, animation: Animation, params: _typing.Sequence[int] = ...) -> bool: ...
|
|
# Multi-image write (e.g. multi-page TIFF); Mat/UMat sequence overloads.
@_typing.overload
def imwritemulti(filename: str, img: _typing.Sequence[cv2.typing.MatLike], params: _typing.Sequence[int] = ...) -> bool: ...
@_typing.overload
def imwritemulti(filename: str, img: _typing.Sequence[UMat], params: _typing.Sequence[int] = ...) -> bool: ...
|
|
# Mat/UMat overloads; per-element range test producing a mask array.
@_typing.overload
def inRange(src: cv2.typing.MatLike, lowerb: cv2.typing.MatLike, upperb: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def inRange(src: UMat, lowerb: UMat, upperb: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Sequence-of-views overloads; both return the estimated camera matrix as MatLike.
@_typing.overload
def initCameraMatrix2D(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, aspectRatio: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def initCameraMatrix2D(objectPoints: _typing.Sequence[UMat], imagePoints: _typing.Sequence[UMat], imageSize: cv2.typing.Size, aspectRatio: float = ...) -> cv2.typing.MatLike: ...
|
|
# Mat/UMat overloads; produces the inverse rectification (map1, map2) pair.
@_typing.overload
def initInverseRectificationMap(cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, R: cv2.typing.MatLike, newCameraMatrix: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def initInverseRectificationMap(cameraMatrix: UMat, distCoeffs: UMat, R: UMat, newCameraMatrix: UMat, size: cv2.typing.Size, m1type: int, map1: UMat | None = ..., map2: UMat | None = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; same signature shape as initInverseRectificationMap above.
@_typing.overload
def initUndistortRectifyMap(cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, R: cv2.typing.MatLike, newCameraMatrix: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def initUndistortRectifyMap(cameraMatrix: UMat, distCoeffs: UMat, R: UMat, newCameraMatrix: UMat, size: cv2.typing.Size, m1type: int, map1: UMat | None = ..., map2: UMat | None = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; restores the region selected by `inpaintMask`.
@_typing.overload
def inpaint(src: cv2.typing.MatLike, inpaintMask: cv2.typing.MatLike, inpaintRadius: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def inpaint(src: UMat, inpaintMask: UMat, inpaintRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; inserts `src` as channel `coi` of `dst` and returns `dst`.
@_typing.overload
def insertChannel(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, coi: int) -> cv2.typing.MatLike: ...
@_typing.overload
def insertChannel(src: UMat, dst: UMat, coi: int) -> UMat: ...
|
|
# Mat/UMat overloads; sum-only integral image (see integral2/integral3 for more outputs).
@_typing.overload
def integral(src: cv2.typing.MatLike, sum: cv2.typing.MatLike | None = ..., sdepth: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def integral(src: UMat, sum: UMat | None = ..., sdepth: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; returns (sum, squared-sum) integral images.
@_typing.overload
def integral2(src: cv2.typing.MatLike, sum: cv2.typing.MatLike | None = ..., sqsum: cv2.typing.MatLike | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def integral2(src: UMat, sum: UMat | None = ..., sqsum: UMat | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; returns (sum, squared-sum, tilted-sum) integral images.
@_typing.overload
def integral3(src: cv2.typing.MatLike, sum: cv2.typing.MatLike | None = ..., sqsum: cv2.typing.MatLike | None = ..., tilted: cv2.typing.MatLike | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def integral3(src: UMat, sum: UMat | None = ..., sqsum: UMat | None = ..., tilted: UMat | None = ..., sdepth: int = ..., sqdepth: int = ...) -> tuple[UMat, UMat, UMat]: ...
|
|
# Mat/UMat overloads; returns (intersection area, intersection polygon).
@_typing.overload
def intersectConvexConvex(p1: cv2.typing.MatLike, p2: cv2.typing.MatLike, p12: cv2.typing.MatLike | None = ..., handleNested: bool = ...) -> tuple[float, cv2.typing.MatLike]: ...
@_typing.overload
def intersectConvexConvex(p1: UMat, p2: UMat, p12: UMat | None = ..., handleNested: bool = ...) -> tuple[float, UMat]: ...
|
|
# Mat/UMat overloads; returns (condition/result indicator, inverted matrix).
@_typing.overload
def invert(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[float, cv2.typing.MatLike]: ...
@_typing.overload
def invert(src: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[float, UMat]: ...
|
|
# Mat/UMat overloads; inverts a 2x3 affine transform.
@_typing.overload
def invertAffineTransform(M: cv2.typing.MatLike, iM: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def invertAffineTransform(M: UMat, iM: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; convexity test for a contour.
@_typing.overload
def isContourConvex(contour: cv2.typing.MatLike) -> bool: ...
@_typing.overload
def isContourConvex(contour: UMat) -> bool: ...
|
|
# Mat/UMat overloads; returns (compactness, bestLabels, centers). `flags` takes KmeansFlags.
@_typing.overload
def kmeans(data: cv2.typing.MatLike, K: int, bestLabels: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria, attempts: int, flags: int, centers: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def kmeans(data: UMat, K: int, bestLabels: UMat, criteria: cv2.typing.TermCriteria, attempts: int, flags: int, centers: UMat | None = ...) -> tuple[float, UMat, UMat]: ...
|
|
# Mat/UMat overloads; draws in place and returns the image.
@_typing.overload
def line(img: cv2.typing.MatLike, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def line(img: UMat, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; linear polar remap (deprecated upstream in favor of warpPolar).
@_typing.overload
def linearPolar(src: cv2.typing.MatLike, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def linearPolar(src: UMat, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; per-element natural logarithm.
@_typing.overload
def log(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def log(src: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; log-polar remap (deprecated upstream in favor of warpPolar).
@_typing.overload
def logPolar(src: cv2.typing.MatLike, center: cv2.typing.Point2f, M: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def logPolar(src: UMat, center: cv2.typing.Point2f, M: float, flags: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; note the output parameter shares the function's name (generated stub).
@_typing.overload
def magnitude(x: cv2.typing.MatLike, y: cv2.typing.MatLike, magnitude: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def magnitude(x: UMat, y: UMat, magnitude: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; returns the partial derivatives (dABdA, dABdB) of A*B.
@_typing.overload
def matMulDeriv(A: cv2.typing.MatLike, B: cv2.typing.MatLike, dABdA: cv2.typing.MatLike | None = ..., dABdB: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def matMulDeriv(A: UMat, B: UMat, dABdA: UMat | None = ..., dABdB: UMat | None = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; similarity score between two contours/shapes.
@_typing.overload
def matchShapes(contour1: cv2.typing.MatLike, contour2: cv2.typing.MatLike, method: int, parameter: float) -> float: ...
@_typing.overload
def matchShapes(contour1: UMat, contour2: UMat, method: int, parameter: float) -> float: ...
|
|
# Mat/UMat overloads; returns the comparison response map.
@_typing.overload
def matchTemplate(image: cv2.typing.MatLike, templ: cv2.typing.MatLike, method: int, result: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def matchTemplate(image: UMat, templ: UMat, method: int, result: UMat | None = ..., mask: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; per-element maximum (shadows builtin `max` inside cv2's namespace).
@_typing.overload
def max(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def max(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; both return a per-channel Scalar mean.
@_typing.overload
def mean(src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.Scalar: ...
@_typing.overload
def mean(src: UMat, mask: UMat | None = ...) -> cv2.typing.Scalar: ...
|
|
# Mat/UMat overloads; returns (iteration count, converged window).
@_typing.overload
def meanShift(probImage: cv2.typing.MatLike, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[int, cv2.typing.Rect]: ...
@_typing.overload
def meanShift(probImage: UMat, window: cv2.typing.Rect, criteria: cv2.typing.TermCriteria) -> tuple[int, cv2.typing.Rect]: ...
|
|
# Mat/UMat overloads; returns (mean, stddev) arrays.
@_typing.overload
def meanStdDev(src: cv2.typing.MatLike, mean: cv2.typing.MatLike | None = ..., stddev: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def meanStdDev(src: UMat, mean: UMat | None = ..., stddev: UMat | None = ..., mask: UMat | None = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; `ksize` is the (odd) aperture size per OpenCV docs.
@_typing.overload
def medianBlur(src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def medianBlur(src: UMat, ksize: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; merges single-channel arrays into one multi-channel array.
@_typing.overload
def merge(mv: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def merge(mv: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; per-element minimum (shadows builtin `min` inside cv2's namespace).
@_typing.overload
def min(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def min(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; minimum-area rotated bounding rectangle of a point set.
@_typing.overload
def minAreaRect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ...
@_typing.overload
def minAreaRect(points: UMat) -> cv2.typing.RotatedRect: ...
|
|
# Mat/UMat overloads; returns (center, radius) of the minimal enclosing circle.
@_typing.overload
def minEnclosingCircle(points: cv2.typing.MatLike) -> tuple[cv2.typing.Point2f, float]: ...
@_typing.overload
def minEnclosingCircle(points: UMat) -> tuple[cv2.typing.Point2f, float]: ...
|
|
# Mat/UMat overloads; returns (triangle area, triangle vertices).
@_typing.overload
def minEnclosingTriangle(points: cv2.typing.MatLike, triangle: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike]: ...
@_typing.overload
def minEnclosingTriangle(points: UMat, triangle: UMat | None = ...) -> tuple[float, UMat]: ...
|
|
# Mat/UMat overloads; returns (minVal, maxVal, minLoc, maxLoc).
@_typing.overload
def minMaxLoc(src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> tuple[float, float, cv2.typing.Point, cv2.typing.Point]: ...
@_typing.overload
def minMaxLoc(src: UMat, mask: UMat | None = ...) -> tuple[float, float, cv2.typing.Point, cv2.typing.Point]: ...
|
|
# Mat/UMat overloads; copies channels per `fromTo` index pairs, returns modified dst sequence.
@_typing.overload
def mixChannels(src: _typing.Sequence[cv2.typing.MatLike], dst: _typing.Sequence[cv2.typing.MatLike], fromTo: _typing.Sequence[int]) -> _typing.Sequence[cv2.typing.MatLike]: ...
@_typing.overload
def mixChannels(src: _typing.Sequence[UMat], dst: _typing.Sequence[UMat], fromTo: _typing.Sequence[int]) -> _typing.Sequence[UMat]: ...
|
|
# Mat/UMat overloads; both return a Moments record.
@_typing.overload
def moments(array: cv2.typing.MatLike, binaryImage: bool = ...) -> cv2.typing.Moments: ...
@_typing.overload
def moments(array: UMat, binaryImage: bool = ...) -> cv2.typing.Moments: ...
|
|
# Mat/UMat overloads; `op` is a MorphTypes constant (e.g. MORPH_OPEN).
@_typing.overload
def morphologyEx(src: cv2.typing.MatLike, op: int, kernel: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def morphologyEx(src: UMat, op: int, kernel: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ...
|
|
# Stub: moves a HighGUI window to screen coordinates (x, y).
def moveWindow(winname: str, x: int, y: int) -> None: ...
|
|
# Mat/UMat overloads; per-element product of Fourier spectra (conjB conjugates `b`).
@_typing.overload
def mulSpectrums(a: cv2.typing.MatLike, b: cv2.typing.MatLike, flags: int, c: cv2.typing.MatLike | None = ..., conjB: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def mulSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ...
|
|
# Mat/UMat overloads; computes scale*(src - delta)ᵀ(src - delta) or its transpose per `aTa`.
@_typing.overload
def mulTransposed(src: cv2.typing.MatLike, aTa: bool, dst: cv2.typing.MatLike | None = ..., delta: cv2.typing.MatLike | None = ..., scale: float = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def mulTransposed(src: UMat, aTa: bool, dst: UMat | None = ..., delta: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; per-element product with optional scale and output dtype.
@_typing.overload
def multiply(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., scale: float = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def multiply(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ...
|
|
# Stub: creates a HighGUI window; `flags` takes WindowFlags constants.
def namedWindow(winname: str, flags: int = ...) -> None: ...
|
|
# Four overloads: single-array norm and two-array difference norm, each for Mat and UMat.
@_typing.overload
def norm(src1: cv2.typing.MatLike, normType: int = ..., mask: cv2.typing.MatLike | None = ...) -> float: ...
@_typing.overload
def norm(src1: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ...
@_typing.overload
def norm(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, normType: int = ..., mask: cv2.typing.MatLike | None = ...) -> float: ...
@_typing.overload
def norm(src1: UMat, src2: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ...
|
|
# Mat/UMat overloads; note `dst` is required here (in/out), unlike most siblings.
@_typing.overload
def normalize(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float = ..., beta: float = ..., norm_type: int = ..., dtype: int = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def normalize(src: UMat, dst: UMat, alpha: float = ..., beta: float = ..., norm_type: int = ..., dtype: int = ..., mask: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; replaces NaNs in-place with `val` and returns the array.
@_typing.overload
def patchNaNs(a: cv2.typing.MatLike, val: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def patchNaNs(a: UMat, val: float = ...) -> UMat: ...
|
|
# Mat/UMat overloads; returns (grayscale sketch, color sketch) pair.
@_typing.overload
def pencilSketch(src: cv2.typing.MatLike, dst1: cv2.typing.MatLike | None = ..., dst2: cv2.typing.MatLike | None = ..., sigma_s: float = ..., sigma_r: float = ..., shade_factor: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def pencilSketch(src: UMat, dst1: UMat | None = ..., dst2: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ..., shade_factor: float = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; applies matrix `m` to a set of points/vectors.
@_typing.overload
def perspectiveTransform(src: cv2.typing.MatLike, m: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def perspectiveTransform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; per-element rotation angle of (x, y) vectors.
@_typing.overload
def phase(x: cv2.typing.MatLike, y: cv2.typing.MatLike, angle: cv2.typing.MatLike | None = ..., angleInDegrees: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def phase(x: UMat, y: UMat, angle: UMat | None = ..., angleInDegrees: bool = ...) -> UMat: ...
|
|
# Mat/UMat overloads; returns (sub-pixel shift, response peak value).
@_typing.overload
def phaseCorrelate(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, window: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.Point2d, float]: ...
@_typing.overload
def phaseCorrelate(src1: UMat, src2: UMat, window: UMat | None = ...) -> tuple[cv2.typing.Point2d, float]: ...
|
|
# Mat/UMat overloads; inside/outside test, signed distance when measureDist is True.
@_typing.overload
def pointPolygonTest(contour: cv2.typing.MatLike, pt: cv2.typing.Point2f, measureDist: bool) -> float: ...
@_typing.overload
def pointPolygonTest(contour: UMat, pt: cv2.typing.Point2f, measureDist: bool) -> float: ...
|
|
# Mat/UMat overloads; inverse of cartToPolar — returns (x, y) components.
@_typing.overload
def polarToCart(magnitude: cv2.typing.MatLike, angle: cv2.typing.MatLike, x: cv2.typing.MatLike | None = ..., y: cv2.typing.MatLike | None = ..., angleInDegrees: bool = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def polarToCart(magnitude: UMat, angle: UMat, x: UMat | None = ..., y: UMat | None = ..., angleInDegrees: bool = ...) -> tuple[UMat, UMat]: ...
|
|
# Stub: non-blocking key poll for HighGUI windows; returns pressed key code.
def pollKey() -> int: ...
|
|
# Mat/UMat overloads; draws one or more polylines in place and returns the image.
@_typing.overload
def polylines(img: cv2.typing.MatLike, pts: _typing.Sequence[cv2.typing.MatLike], isClosed: bool, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def polylines(img: UMat, pts: _typing.Sequence[UMat], isClosed: bool, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; per-element power (shadows builtin `pow` inside cv2's namespace).
@_typing.overload
def pow(src: cv2.typing.MatLike, power: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def pow(src: UMat, power: float, dst: UMat | None = ...) -> UMat: ...
|
|
# Mat/UMat overloads; corner-likelihood feature map used before corner extraction.
@_typing.overload
def preCornerDetect(src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def preCornerDetect(src: UMat, ksize: int, dst: UMat | None = ..., borderType: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; returns (projected image points, Jacobian).
@_typing.overload
def projectPoints(objectPoints: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike | None = ..., jacobian: cv2.typing.MatLike | None = ..., aspectRatio: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def projectPoints(objectPoints: UMat, rvec: UMat, tvec: UMat, cameraMatrix: UMat, distCoeffs: UMat, imagePoints: UMat | None = ..., jacobian: UMat | None = ..., aspectRatio: float = ...) -> tuple[UMat, UMat]: ...
|
|
# Mat/UMat overloads; renders text in place and returns the image.
@_typing.overload
def putText(img: cv2.typing.MatLike, text: str, org: cv2.typing.Point, fontFace: int, fontScale: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., bottomLeftOrigin: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def putText(img: UMat, text: str, org: cv2.typing.Point, fontFace: int, fontScale: float, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., bottomLeftOrigin: bool = ...) -> UMat: ...
|
|
# Mat/UMat overloads; one downsampling step of a Gaussian pyramid.
@_typing.overload
def pyrDown(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def pyrDown(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; mean-shift segmentation with spatial (sp) and color (sr) radii.
@_typing.overload
def pyrMeanShiftFiltering(src: cv2.typing.MatLike, sp: float, sr: float, dst: cv2.typing.MatLike | None = ..., maxLevel: int = ..., termcrit: cv2.typing.TermCriteria = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def pyrMeanShiftFiltering(src: UMat, sp: float, sr: float, dst: UMat | None = ..., maxLevel: int = ..., termcrit: cv2.typing.TermCriteria = ...) -> UMat: ...
|
|
# Mat/UMat overloads; one upsampling step of a Gaussian pyramid.
@_typing.overload
def pyrUp(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def pyrUp(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ...
|
|
# Mat/UMat overloads; shuffles `dst` in place and returns it.
@_typing.overload
def randShuffle(dst: cv2.typing.MatLike, iterFactor: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def randShuffle(dst: UMat, iterFactor: float = ...) -> UMat: ...
|
|
# Mat/UMat overloads; fills `dst` with normally distributed values, returns it.
@_typing.overload
def randn(dst: cv2.typing.MatLike, mean: cv2.typing.MatLike, stddev: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@_typing.overload
def randn(dst: UMat, mean: UMat, stddev: UMat) -> UMat: ...
|
|
# Mat/UMat overloads; fills `dst` with uniformly distributed values in [low, high), returns it.
@_typing.overload
def randu(dst: cv2.typing.MatLike, low: cv2.typing.MatLike, high: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@_typing.overload
def randu(dst: UMat, low: UMat, high: UMat) -> UMat: ...
|
|
# Stub: reads a .flo optical-flow file into a flow field matrix.
def readOpticalFlow(path: str) -> cv2.typing.MatLike: ...
|
|
# Eight overloads (four forms x Mat/UMat):
#   1) from matched points + two camera intrinsics (estimates E internally),
#   2) from a precomputed essential matrix E + one camera matrix,
#   3) from E + focal/pp scalar intrinsics,
#   4) from E + camera matrix + distanceThresh, also returning triangulated points.
@_typing.overload
def recoverPose(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, E: cv2.typing.MatLike | None = ..., R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., method: int = ..., prob: float = ..., threshold: float = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def recoverPose(points1: UMat, points2: UMat, cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, E: UMat | None = ..., R: UMat | None = ..., t: UMat | None = ..., method: int = ..., prob: float = ..., threshold: float = ..., mask: UMat | None = ...) -> tuple[int, UMat, UMat, UMat, UMat]: ...
@_typing.overload
def recoverPose(E: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def recoverPose(E: UMat, points1: UMat, points2: UMat, cameraMatrix: UMat, R: UMat | None = ..., t: UMat | None = ..., mask: UMat | None = ...) -> tuple[int, UMat, UMat, UMat]: ...
@_typing.overload
def recoverPose(E: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., focal: float = ..., pp: cv2.typing.Point2d = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def recoverPose(E: UMat, points1: UMat, points2: UMat, R: UMat | None = ..., t: UMat | None = ..., focal: float = ..., pp: cv2.typing.Point2d = ..., mask: UMat | None = ...) -> tuple[int, UMat, UMat, UMat]: ...
@_typing.overload
def recoverPose(E: cv2.typing.MatLike, points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distanceThresh: float, R: cv2.typing.MatLike | None = ..., t: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., triangulatedPoints: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def recoverPose(E: UMat, points1: UMat, points2: UMat, cameraMatrix: UMat, distanceThresh: float, R: UMat | None = ..., t: UMat | None = ..., mask: UMat | None = ..., triangulatedPoints: UMat | None = ...) -> tuple[int, UMat, UMat, UMat, UMat]: ...
|
|
# Four overloads: corner-pair form and Rect form, each for Mat and UMat; draws in place.
@_typing.overload
def rectangle(img: cv2.typing.MatLike, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def rectangle(img: UMat, pt1: cv2.typing.Point, pt2: cv2.typing.Point, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ...
@_typing.overload
def rectangle(img: cv2.typing.MatLike, rec: cv2.typing.Rect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def rectangle(img: UMat, rec: cv2.typing.Rect, color: cv2.typing.Scalar, thickness: int = ..., lineType: int = ..., shift: int = ...) -> UMat: ...
|
|
# Stub: area of the intersection of two Rect2d rectangles.
def rectangleIntersectionArea(a: cv2.typing.Rect2d, b: cv2.typing.Rect2d) -> float: ...
|
|
# Mat/UMat overloads; three-camera rectification returning
# (disparity ratio, R1..R3, P1..P3, Q, and two valid-pixel ROIs).
@_typing.overload
def rectify3Collinear(cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, cameraMatrix3: cv2.typing.MatLike, distCoeffs3: cv2.typing.MatLike, imgpt1: _typing.Sequence[cv2.typing.MatLike], imgpt3: _typing.Sequence[cv2.typing.MatLike], imageSize: cv2.typing.Size, R12: cv2.typing.MatLike, T12: cv2.typing.MatLike, R13: cv2.typing.MatLike, T13: cv2.typing.MatLike, alpha: float, newImgSize: cv2.typing.Size, flags: int, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., R3: cv2.typing.MatLike | None = ..., P1: cv2.typing.MatLike | None = ..., P2: cv2.typing.MatLike | None = ..., P3: cv2.typing.MatLike | None = ..., Q: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.Rect, cv2.typing.Rect]: ...
@_typing.overload
def rectify3Collinear(cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, cameraMatrix3: UMat, distCoeffs3: UMat, imgpt1: _typing.Sequence[UMat], imgpt3: _typing.Sequence[UMat], imageSize: cv2.typing.Size, R12: UMat, T12: UMat, R13: UMat, T13: UMat, alpha: float, newImgSize: cv2.typing.Size, flags: int, R1: UMat | None = ..., R2: UMat | None = ..., R3: UMat | None = ..., P1: UMat | None = ..., P2: UMat | None = ..., P3: UMat | None = ..., Q: UMat | None = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, cv2.typing.Rect, cv2.typing.Rect]: ...
|
|
# Reduce a matrix along one dimension; rtype is one of the ReduceTypes constants
# (REDUCE_SUM/AVG/MAX/MIN/SUM2 declared at file top). MatLike and UMat overloads.
@_typing.overload
def reduce(src: cv2.typing.MatLike, dim: int, rtype: int, dst: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def reduce(src: UMat, dim: int, rtype: int, dst: UMat | None = ..., dtype: int = ...) -> UMat: ...
|
|
# Index of the max element along `axis`; lastIndex selects which tie to report.
@_typing.overload
def reduceArgMax(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ..., lastIndex: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def reduceArgMax(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ...
|
|
# Index of the min element along `axis`; mirror of reduceArgMax.
@_typing.overload
def reduceArgMin(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ..., lastIndex: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def reduceArgMin(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ...
|
|
# Geometric remapping of src through the (map1, map2) lookup maps.
@_typing.overload
def remap(src: cv2.typing.MatLike, map1: cv2.typing.MatLike, map2: cv2.typing.MatLike, interpolation: int, dst: cv2.typing.MatLike | None = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def remap(src: UMat, map1: UMat, map2: UMat, interpolation: int, dst: UMat | None = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ...
|
|
# Tile src ny times vertically and nx times horizontally.
@_typing.overload
def repeat(src: cv2.typing.MatLike, ny: int, nx: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def repeat(src: UMat, ny: int, nx: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Disparity map -> 3-channel 3D image using reprojection matrix Q.
@_typing.overload
def reprojectImageTo3D(disparity: cv2.typing.MatLike, Q: cv2.typing.MatLike, _3dImage: cv2.typing.MatLike | None = ..., handleMissingValues: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def reprojectImageTo3D(disparity: UMat, Q: UMat, _3dImage: UMat | None = ..., handleMissingValues: bool = ..., ddepth: int = ...) -> UMat: ...
|
|
# Resize to an explicit dsize, or pass dsize=None and size via fx/fy scale factors.
@_typing.overload
def resize(src: cv2.typing.MatLike, dsize: cv2.typing.Size | None, dst: cv2.typing.MatLike | None = ..., fx: float = ..., fy: float = ..., interpolation: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def resize(src: UMat, dsize: cv2.typing.Size | None, dst: UMat | None = ..., fx: float = ..., fy: float = ..., interpolation: int = ...) -> UMat: ...
|
|
# HighGUI: resize a named window, given either (width, height) or a Size.
@_typing.overload
def resizeWindow(winname: str, width: int, height: int) -> None: ...
@_typing.overload
def resizeWindow(winname: str, size: cv2.typing.Size) -> None: ...
|
|
# Rotate by a multiple of 90 degrees; rotateCode is one of the RotateFlags
# constants (ROTATE_90_CLOCKWISE / ROTATE_180 / ROTATE_90_COUNTERCLOCKWISE).
@_typing.overload
def rotate(src: cv2.typing.MatLike, rotateCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def rotate(src: UMat, rotateCode: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Intersection of two RotatedRects -> (status int, vertices of intersection region).
@_typing.overload
def rotatedRectangleIntersection(rect1: cv2.typing.RotatedRect, rect2: cv2.typing.RotatedRect, intersectingRegion: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
@_typing.overload
def rotatedRectangleIntersection(rect1: cv2.typing.RotatedRect, rect2: cv2.typing.RotatedRect, intersectingRegion: UMat | None = ...) -> tuple[int, UMat]: ...
|
|
# Sampson distance between two points under fundamental matrix F.
@_typing.overload
def sampsonDistance(pt1: cv2.typing.MatLike, pt2: cv2.typing.MatLike, F: cv2.typing.MatLike) -> float: ...
@_typing.overload
def sampsonDistance(pt1: UMat, pt2: UMat, F: UMat) -> float: ...
|
|
# Computes src1 * alpha + src2 (scaled-add / AXPY-style operation).
@_typing.overload
def scaleAdd(src1: cv2.typing.MatLike, alpha: float, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def scaleAdd(src1: UMat, alpha: float, src2: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# Photo module: blend masked region of src into dst centered at p; returns blend.
@_typing.overload
def seamlessClone(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike, p: cv2.typing.Point, flags: int, blend: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def seamlessClone(src: UMat, dst: UMat, mask: UMat, p: cv2.typing.Point, flags: int, blend: UMat | None = ...) -> UMat: ...
|
|
# Interactive ROI selection in a HighGUI window. 4 overloads: with or without an
# explicit window name, for MatLike and UMat images; always returns the chosen Rect.
@_typing.overload
def selectROI(windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ...
@_typing.overload
def selectROI(windowName: str, img: UMat, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ...
@_typing.overload
def selectROI(img: cv2.typing.MatLike, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ...
@_typing.overload
def selectROI(img: UMat, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> cv2.typing.Rect: ...
|
|
# Like selectROI but returns multiple rectangles; window name is required here.
@_typing.overload
def selectROIs(windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> _typing.Sequence[cv2.typing.Rect]: ...
@_typing.overload
def selectROIs(windowName: str, img: UMat, showCrosshair: bool = ..., fromCenter: bool = ..., printNotice: bool = ...) -> _typing.Sequence[cv2.typing.Rect]: ...
|
|
# Separable 2D filtering with independent row (kernelX) and column (kernelY) kernels.
@_typing.overload
def sepFilter2D(src: cv2.typing.MatLike, ddepth: int, kernelX: cv2.typing.MatLike, kernelY: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def sepFilter2D(src: UMat, ddepth: int, kernelX: UMat, kernelY: UMat, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., delta: float = ..., borderType: int = ...) -> UMat: ...
|
|
# Initialize mtx as a scaled identity matrix (diagonal value s); returns mtx.
@_typing.overload
def setIdentity(mtx: cv2.typing.MatLike, s: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def setIdentity(mtx: UMat, s: cv2.typing.Scalar = ...) -> UMat: ...
|
|
# --- Global/state setters (non-overloaded, side-effecting, mostly return None) ---

# Set the library log level; returns the previous level.
def setLogLevel(level: int) -> int: ...


# Set the number of threads OpenCV uses for parallel regions.
def setNumThreads(nthreads: int) -> None: ...


# Seed the library's internal random number generator.
def setRNGSeed(seed: int) -> None: ...


# HighGUI trackbar bounds/position setters, addressed by (trackbar, window) name.
def setTrackbarMax(trackbarname: str, winname: str, maxval: int) -> None: ...


def setTrackbarMin(trackbarname: str, winname: str, minval: int) -> None: ...


def setTrackbarPos(trackbarname: str, winname: str, pos: int) -> None: ...


# Toggle OpenVX-backed / optimized code paths (queried via useOpenVX/useOptimized).
def setUseOpenVX(flag: bool) -> None: ...


def setUseOptimized(onoff: bool) -> None: ...


# HighGUI window property/title setters.
def setWindowProperty(winname: str, prop_id: int, prop_value: float) -> None: ...


def setWindowTitle(winname: str, title: str) -> None: ...
|
|
# Solve the linear system src1 * dst = src2 -> (success flag, solution).
@_typing.overload
def solve(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike]: ...
@_typing.overload
def solve(src1: UMat, src2: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ...
|
|
# Roots of a cubic equation -> (number of real roots, roots array).
@_typing.overload
def solveCubic(coeffs: cv2.typing.MatLike, roots: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
@_typing.overload
def solveCubic(coeffs: UMat, roots: UMat | None = ...) -> tuple[int, UMat]: ...
|
|
# Linear programming solver -> (solver status int, solution z). 4 overloads:
# with and without an explicit constr_eps tolerance, for MatLike and UMat.
@_typing.overload
def solveLP(Func: cv2.typing.MatLike, Constr: cv2.typing.MatLike, constr_eps: float, z: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
@_typing.overload
def solveLP(Func: UMat, Constr: UMat, constr_eps: float, z: UMat | None = ...) -> tuple[int, UMat]: ...
@_typing.overload
def solveLP(Func: cv2.typing.MatLike, Constr: cv2.typing.MatLike, z: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
@_typing.overload
def solveLP(Func: UMat, Constr: UMat, z: UMat | None = ...) -> tuple[int, UMat]: ...
|
|
# P3P pose estimation -> (number of solutions, rvecs, tvecs).
@_typing.overload
def solveP3P(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, flags: int, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
@_typing.overload
def solveP3P(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, flags: int, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ...) -> tuple[int, _typing.Sequence[UMat], _typing.Sequence[UMat]]: ...
|
|
# PnP pose estimation -> (success flag, rvec, tvec).
@_typing.overload
def solvePnP(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def solvePnP(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat | None = ..., tvec: UMat | None = ..., useExtrinsicGuess: bool = ..., flags: int = ...) -> tuple[bool, UMat, UMat]: ...
|
|
# Generic PnP returning all solutions -> (count, rvecs, tvecs, reprojectionError).
# NOTE: `flags` is typed as SolvePnPMethod (an enum declared elsewhere in this stub),
# unlike the plain-int flags of solvePnP above.
@_typing.overload
def solvePnPGeneric(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., useExtrinsicGuess: bool = ..., flags: SolvePnPMethod = ..., rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., reprojectionError: cv2.typing.MatLike | None = ...) -> tuple[int, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
@_typing.overload
def solvePnPGeneric(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., useExtrinsicGuess: bool = ..., flags: SolvePnPMethod = ..., rvec: UMat | None = ..., tvec: UMat | None = ..., reprojectionError: UMat | None = ...) -> tuple[int, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat]: ...
|
|
# RANSAC-robust PnP. Two signature families: the classic one returning
# (ok, rvec, tvec, inliers), and a UsacParams-based one that additionally
# returns the refined cameraMatrix in a 5-tuple.
@_typing.overload
def solvePnPRansac(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.typing.MatLike | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def solvePnPRansac(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat | None = ..., tvec: UMat | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat, UMat, UMat]: ...
@_typing.overload
def solvePnPRansac(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., inliers: cv2.typing.MatLike | None = ..., params: UsacParams = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def solvePnPRansac(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat | None = ..., tvec: UMat | None = ..., inliers: UMat | None = ..., params: UsacParams = ...) -> tuple[bool, UMat, UMat, UMat, UMat]: ...
|
|
# Levenberg-Marquardt refinement of an existing (rvec, tvec) pose.
@_typing.overload
def solvePnPRefineLM(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def solvePnPRefineLM(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat, tvec: UMat, criteria: cv2.typing.TermCriteria = ...) -> tuple[UMat, UMat]: ...
|
|
# Virtual-visual-servoing refinement of a pose; VVSlambda is the VVS gain.
@_typing.overload
def solvePnPRefineVVS(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria = ..., VVSlambda: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def solvePnPRefineVVS(objectPoints: UMat, imagePoints: UMat, cameraMatrix: UMat, distCoeffs: UMat, rvec: UMat, tvec: UMat, criteria: cv2.typing.TermCriteria = ..., VVSlambda: float = ...) -> tuple[UMat, UMat]: ...
|
|
# Polynomial root finding -> (max error, roots).
@_typing.overload
def solvePoly(coeffs: cv2.typing.MatLike, roots: cv2.typing.MatLike | None = ..., maxIters: int = ...) -> tuple[float, cv2.typing.MatLike]: ...
@_typing.overload
def solvePoly(coeffs: UMat, roots: UMat | None = ..., maxIters: int = ...) -> tuple[float, UMat]: ...
|
|
# Sort matrix rows/columns; flags combine the SortFlags constants at file top.
@_typing.overload
def sort(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def sort(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Like sort, but returns the permutation indices instead of sorted values.
@_typing.overload
def sortIdx(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def sortIdx(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ...
|
|
# First-order image derivatives -> (dx, dy) pair.
@_typing.overload
def spatialGradient(src: cv2.typing.MatLike, dx: cv2.typing.MatLike | None = ..., dy: cv2.typing.MatLike | None = ..., ksize: int = ..., borderType: int = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def spatialGradient(src: UMat, dx: UMat | None = ..., dy: UMat | None = ..., ksize: int = ..., borderType: int = ...) -> tuple[UMat, UMat]: ...
|
|
# Split a multi-channel array into a sequence of single-channel arrays.
@_typing.overload
def split(m: cv2.typing.MatLike, mv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
@_typing.overload
def split(m: UMat, mv: _typing.Sequence[UMat] | None = ...) -> _typing.Sequence[UMat]: ...
|
|
# Box filter over squared pixel values (normalized when normalize=True).
@_typing.overload
def sqrBoxFilter(src: cv2.typing.MatLike, ddepth: int, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def sqrBoxFilter(src: UMat, ddepth: int, ksize: cv2.typing.Size, dst: UMat | None = ..., anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ...) -> UMat: ...
|
|
# Element-wise square root.
@_typing.overload
def sqrt(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def sqrt(src: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# StackBlur smoothing with the given kernel size.
@_typing.overload
def stackBlur(src: cv2.typing.MatLike, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def stackBlur(src: UMat, ksize: cv2.typing.Size, dst: UMat | None = ...) -> UMat: ...
|
|
# HighGUI: start the window event-handling thread; returns a status int.
def startWindowThread() -> int: ...
|
|
# Stereo camera calibration. Two signature families (each with MatLike and UMat
# variants): the first takes R/T as optional outputs and returns a 9-tuple; the
# second takes R/T as required in-out arrays, adds perViewErrors, and returns a
# 10-tuple. First element of each tuple is the RMS reprojection error (float).
@_typing.overload
def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., E: cv2.typing.MatLike | None = ..., F: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def stereoCalibrate(objectPoints: _typing.Sequence[UMat], imagePoints1: _typing.Sequence[UMat], imagePoints2: _typing.Sequence[UMat], cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat | None = ..., T: UMat | None = ..., E: UMat | None = ..., F: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ...
@_typing.overload
def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, T: cv2.typing.MatLike, E: cv2.typing.MatLike | None = ..., F: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def stereoCalibrate(objectPoints: _typing.Sequence[UMat], imagePoints1: _typing.Sequence[UMat], imagePoints2: _typing.Sequence[UMat], cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat, T: UMat, E: UMat | None = ..., F: UMat | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat]: ...
|
|
# Extended stereo calibration: additionally returns per-view rvecs/tvecs and
# perViewErrors (12-tuple led by the RMS reprojection error).
@_typing.overload
def stereoCalibrateExtended(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, T: cv2.typing.MatLike, E: cv2.typing.MatLike | None = ..., F: cv2.typing.MatLike | None = ..., rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
@_typing.overload
def stereoCalibrateExtended(objectPoints: _typing.Sequence[UMat], imagePoints1: _typing.Sequence[UMat], imagePoints2: _typing.Sequence[UMat], cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat, T: UMat, E: UMat | None = ..., F: UMat | None = ..., rvecs: _typing.Sequence[UMat] | None = ..., tvecs: _typing.Sequence[UMat] | None = ..., perViewErrors: UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, UMat, UMat, UMat, UMat, UMat, UMat, UMat, UMat, _typing.Sequence[UMat], _typing.Sequence[UMat], UMat]: ...
|
|
# Stereo rectification -> (R1, R2, P1, P2, Q, validPixROI1, validPixROI2).
@_typing.overload
def stereoRectify(cameraMatrix1: cv2.typing.MatLike, distCoeffs1: cv2.typing.MatLike, cameraMatrix2: cv2.typing.MatLike, distCoeffs2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, T: cv2.typing.MatLike, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., P1: cv2.typing.MatLike | None = ..., P2: cv2.typing.MatLike | None = ..., Q: cv2.typing.MatLike | None = ..., flags: int = ..., alpha: float = ..., newImageSize: cv2.typing.Size = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.Rect, cv2.typing.Rect]: ...
@_typing.overload
def stereoRectify(cameraMatrix1: UMat, distCoeffs1: UMat, cameraMatrix2: UMat, distCoeffs2: UMat, imageSize: cv2.typing.Size, R: UMat, T: UMat, R1: UMat | None = ..., R2: UMat | None = ..., P1: UMat | None = ..., P2: UMat | None = ..., Q: UMat | None = ..., flags: int = ..., alpha: float = ..., newImageSize: cv2.typing.Size = ...) -> tuple[UMat, UMat, UMat, UMat, UMat, cv2.typing.Rect, cv2.typing.Rect]: ...
|
|
# Uncalibrated rectification from point matches + F -> (ok, H1, H2) homographies.
@_typing.overload
def stereoRectifyUncalibrated(points1: cv2.typing.MatLike, points2: cv2.typing.MatLike, F: cv2.typing.MatLike, imgSize: cv2.typing.Size, H1: cv2.typing.MatLike | None = ..., H2: cv2.typing.MatLike | None = ..., threshold: float = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def stereoRectifyUncalibrated(points1: UMat, points2: UMat, F: UMat, imgSize: cv2.typing.Size, H1: UMat | None = ..., H2: UMat | None = ..., threshold: float = ...) -> tuple[bool, UMat, UMat]: ...
|
|
# Photo module: non-photorealistic stylization filter (sigma_s/sigma_r control).
@_typing.overload
def stylization(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def stylization(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ...
|
|
# Element-wise src1 - src2, optionally masked, with selectable output dtype.
@_typing.overload
def subtract(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., dtype: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def subtract(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ...
|
|
# Per-channel sum of all elements, returned as a Scalar.
@_typing.overload
def sumElems(src: cv2.typing.MatLike) -> cv2.typing.Scalar: ...
@_typing.overload
def sumElems(src: UMat) -> cv2.typing.Scalar: ...
|
|
# Photo module: edge-preserving texture flattening within the masked region.
@_typing.overload
def textureFlattening(src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., low_threshold: float = ..., high_threshold: float = ..., kernel_size: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def textureFlattening(src: UMat, mask: UMat, dst: UMat | None = ..., low_threshold: float = ..., high_threshold: float = ..., kernel_size: int = ...) -> UMat: ...
|
|
# Fixed-level thresholding -> (threshold actually used, thresholded image).
@_typing.overload
def threshold(src: cv2.typing.MatLike, thresh: float, maxval: float, type: int, dst: cv2.typing.MatLike | None = ...) -> tuple[float, cv2.typing.MatLike]: ...
@_typing.overload
def threshold(src: UMat, thresh: float, maxval: float, type: int, dst: UMat | None = ...) -> tuple[float, UMat]: ...
|
|
# Matrix trace (sum of diagonal elements) as a per-channel Scalar.
@_typing.overload
def trace(mtx: cv2.typing.MatLike) -> cv2.typing.Scalar: ...
@_typing.overload
def trace(mtx: UMat) -> cv2.typing.Scalar: ...
|
|
# Apply the matrix transformation m to every element of src.
@_typing.overload
def transform(src: cv2.typing.MatLike, m: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def transform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# 2D matrix transposition.
@_typing.overload
def transpose(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def transpose(src: UMat, dst: UMat | None = ...) -> UMat: ...
|
|
# N-dimensional transpose with an explicit axis permutation `order`.
@_typing.overload
def transposeND(src: cv2.typing.MatLike, order: _typing.Sequence[int], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def transposeND(src: UMat, order: _typing.Sequence[int], dst: UMat | None = ...) -> UMat: ...
|
|
# Triangulate matched points from two projection matrices -> homogeneous points4D.
@_typing.overload
def triangulatePoints(projMatr1: cv2.typing.MatLike, projMatr2: cv2.typing.MatLike, projPoints1: cv2.typing.MatLike, projPoints2: cv2.typing.MatLike, points4D: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def triangulatePoints(projMatr1: UMat, projMatr2: UMat, projPoints1: UMat, projPoints2: UMat, points4D: UMat | None = ...) -> UMat: ...
|
|
# Undistort an image given intrinsics and distortion coefficients.
@_typing.overload
def undistort(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., newCameraMatrix: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def undistort(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | None = ..., newCameraMatrix: UMat | None = ...) -> UMat: ...
|
|
# Undistort 2D image points. NOTE: the positional TermCriteria parameter is named
# `arg1` in the generated binding — keyword use of it is awkward by design here.
@_typing.overload
def undistortImagePoints(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., arg1: cv2.typing.TermCriteria = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def undistortImagePoints(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | None = ..., arg1: cv2.typing.TermCriteria = ...) -> UMat: ...
|
|
# Undistort sparse points, with optional rectification R and new projection P.
@_typing.overload
def undistortPoints(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., R: cv2.typing.MatLike | None = ..., P: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def undistortPoints(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | None = ..., R: UMat | None = ..., P: UMat | None = ...) -> UMat: ...
|
|
# Iterative variant of undistortPoints; R, P and the TermCriteria are required.
@_typing.overload
def undistortPointsIter(src: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, R: cv2.typing.MatLike, P: cv2.typing.MatLike, criteria: cv2.typing.TermCriteria, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def undistortPointsIter(src: UMat, cameraMatrix: UMat, distCoeffs: UMat, R: UMat, P: UMat, criteria: cv2.typing.TermCriteria, dst: UMat | None = ...) -> UMat: ...
|
|
# Query the toggles set by setUseOpenVX / setUseOptimized above.
def useOpenVX() -> bool: ...


def useOptimized() -> bool: ...
|
|
# Validate a disparity map against its matching cost using a left-right check.
@_typing.overload
def validateDisparity(disparity: cv2.typing.MatLike, cost: cv2.typing.MatLike, minDisparity: int, numberOfDisparities: int, disp12MaxDisp: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def validateDisparity(disparity: UMat, cost: UMat, minDisparity: int, numberOfDisparities: int, disp12MaxDisp: int = ...) -> UMat: ...
|
|
# Vertically concatenate a sequence of arrays into one.
@_typing.overload
def vconcat(src: _typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def vconcat(src: _typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ...
|
|
# HighGUI key polling; waitKeyEx returns the full (extended) key code.
def waitKey(delay: int = ...) -> int: ...


def waitKeyEx(delay: int = ...) -> int: ...
|
|
# Affine warp with 2x3 matrix M to output size dsize.
@_typing.overload
def warpAffine(src: cv2.typing.MatLike, M: cv2.typing.MatLike, dsize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def warpAffine(src: UMat, M: UMat, dsize: cv2.typing.Size, dst: UMat | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ...
|
|
# Perspective warp with 3x3 matrix M; same parameter shape as warpAffine.
@_typing.overload
def warpPerspective(src: cv2.typing.MatLike, M: cv2.typing.MatLike, dsize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def warpPerspective(src: UMat, M: UMat, dsize: cv2.typing.Size, dst: UMat | None = ..., flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> UMat: ...
|
|
# Polar/semilog-polar remapping around `center` up to `maxRadius`.
@_typing.overload
def warpPolar(src: cv2.typing.MatLike, dsize: cv2.typing.Size, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def warpPolar(src: UMat, dsize: cv2.typing.Size, center: cv2.typing.Point2f, maxRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ...
|
|
# Marker-based watershed segmentation; markers is modified in place and returned.
@_typing.overload
def watershed(image: cv2.typing.MatLike, markers: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@_typing.overload
def watershed(image: UMat, markers: UMat) -> UMat: ...
|
|
# Write an optical-flow field to `path`; returns success.
@_typing.overload
def writeOpticalFlow(path: str, flow: cv2.typing.MatLike) -> bool: ...
@_typing.overload
def writeOpticalFlow(path: str, flow: UMat) -> bool: ...
|
|
# --- HighGUI callback registration ---

# onChange receives the current trackbar position.
def createTrackbar(trackbarName: str, windowName: str, value: int, count: int, onChange: _typing.Callable[[int], None]) -> None: ...


# onChange receives the button state, optionally paired with userData.
def createButton(buttonName: str, onChange: _typing.Callable[[tuple[int] | tuple[int, _typing.Any]], None], userData: _typing.Any | None = ..., buttonType: int = ..., initialButtonState: int = ...) -> None: ...


# onMouse receives (event, x, y, flags, param).
def setMouseCallback(windowName: str, onMouse: _typing.Callable[[int, int, int, int, _typing.Any | None], None], param: _typing.Any | None = ...) -> None: ...
|
|
# --- Pixel-type constructors: build a type code from depth + channel count.
# CV_MAKETYPE is the general form; the CV_<depth>C helpers fix the depth. ---

def CV_8UC(channels: int) -> int: ...


def CV_8SC(channels: int) -> int: ...


def CV_16UC(channels: int) -> int: ...


def CV_16SC(channels: int) -> int: ...


def CV_32SC(channels: int) -> int: ...


def CV_32FC(channels: int) -> int: ...


def CV_64FC(channels: int) -> int: ...


def CV_16FC(channels: int) -> int: ...


def CV_MAKETYPE(depth: int, channels: int) -> int: ...
|
|
# Register/unregister a custom DNN layer implementation under a type name.
def dnn_registerLayer(layerTypeName: str, layerClass: _typing.Type[cv2.dnn.LayerProtocol]) -> None: ...


def dnn_unregisterLayer(layerTypeName: str) -> None: ...


# Install a custom error handler receiving (status, func_name, err_msg, file_name,
# line); pass None to restore the default.
def redirectError(onError: _typing.Callable[[int, str, str, str, int], None] | None) -> None: ...
|
|
|
|
|
|