ZTWHHH commited on
Commit
2a7efd1
·
verified ·
1 Parent(s): 1590f68

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_tf446/lib/python3.10/site-packages/cv2/Error/__init__.pyi +118 -0
  2. evalkit_tf446/lib/python3.10/site-packages/cv2/LICENSE-3RD-PARTY.txt +0 -0
  3. evalkit_tf446/lib/python3.10/site-packages/cv2/LICENSE.txt +21 -0
  4. evalkit_tf446/lib/python3.10/site-packages/cv2/__init__.py +181 -0
  5. evalkit_tf446/lib/python3.10/site-packages/cv2/__init__.pyi +0 -0
  6. evalkit_tf446/lib/python3.10/site-packages/cv2/aruco/__init__.pyi +303 -0
  7. evalkit_tf446/lib/python3.10/site-packages/cv2/config-3.py +24 -0
  8. evalkit_tf446/lib/python3.10/site-packages/cv2/config.py +5 -0
  9. evalkit_tf446/lib/python3.10/site-packages/cv2/detail/__init__.pyi +600 -0
  10. evalkit_tf446/lib/python3.10/site-packages/cv2/flann/__init__.pyi +64 -0
  11. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/__init__.py +323 -0
  12. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/__init__.pyi +349 -0
  13. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/__pycache__/__init__.cpython-310.pyc +0 -0
  14. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/__init__.pyi +7 -0
  15. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/cpu/__init__.pyi +9 -0
  16. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/fluid/__init__.pyi +9 -0
  17. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/ocl/__init__.pyi +9 -0
  18. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ie/__init__.pyi +51 -0
  19. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ie/detail/__init__.pyi +12 -0
  20. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/imgproc/__init__.pyi +5 -0
  21. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
  22. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/oak/__init__.pyi +37 -0
  23. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/onnx/__init__.pyi +51 -0
  24. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/onnx/ep/__init__.pyi +63 -0
  25. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ot/__init__.pyi +32 -0
  26. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ot/cpu/__init__.pyi +9 -0
  27. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ov/__init__.pyi +74 -0
  28. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/own/__init__.pyi +5 -0
  29. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/own/detail/__init__.pyi +10 -0
  30. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/render/__init__.pyi +5 -0
  31. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/render/ocv/__init__.pyi +9 -0
  32. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/streaming/__init__.pyi +42 -0
  33. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/video/__init__.pyi +10 -0
  34. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/__init__.pyi +41 -0
  35. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/draw/__init__.pyi +119 -0
  36. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/gst/__init__.pyi +17 -0
  37. evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/onevpl/__init__.pyi +16 -0
  38. evalkit_tf446/lib/python3.10/site-packages/cv2/ipp/__init__.pyi +14 -0
  39. evalkit_tf446/lib/python3.10/site-packages/cv2/load_config_py2.py +6 -0
  40. evalkit_tf446/lib/python3.10/site-packages/cv2/load_config_py3.py +9 -0
  41. evalkit_tf446/lib/python3.10/site-packages/cv2/ogl/__init__.pyi +51 -0
  42. evalkit_tf446/lib/python3.10/site-packages/cv2/py.typed +0 -0
  43. evalkit_tf446/lib/python3.10/site-packages/cv2/qt/plugins/platforms/libqxcb.so +0 -0
  44. evalkit_tf446/lib/python3.10/site-packages/cv2/samples/__init__.pyi +12 -0
  45. evalkit_tf446/lib/python3.10/site-packages/cv2/segmentation/__init__.pyi +39 -0
  46. evalkit_tf446/lib/python3.10/site-packages/cv2/typing/__init__.py +178 -0
  47. evalkit_tf446/lib/python3.10/site-packages/cv2/typing/__pycache__/__init__.cpython-310.pyc +0 -0
  48. evalkit_tf446/lib/python3.10/site-packages/cv2/version.py +5 -0
  49. evalkit_tf446/lib/python3.10/site-packages/cycler/__init__.py +573 -0
  50. evalkit_tf446/lib/python3.10/site-packages/cycler/__pycache__/__init__.cpython-310.pyc +0 -0
evalkit_tf446/lib/python3.10/site-packages/cv2/Error/__init__.pyi ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ # Enumerations
4
+ StsOk: int
5
+ STS_OK: int
6
+ StsBackTrace: int
7
+ STS_BACK_TRACE: int
8
+ StsError: int
9
+ STS_ERROR: int
10
+ StsInternal: int
11
+ STS_INTERNAL: int
12
+ StsNoMem: int
13
+ STS_NO_MEM: int
14
+ StsBadArg: int
15
+ STS_BAD_ARG: int
16
+ StsBadFunc: int
17
+ STS_BAD_FUNC: int
18
+ StsNoConv: int
19
+ STS_NO_CONV: int
20
+ StsAutoTrace: int
21
+ STS_AUTO_TRACE: int
22
+ HeaderIsNull: int
23
+ HEADER_IS_NULL: int
24
+ BadImageSize: int
25
+ BAD_IMAGE_SIZE: int
26
+ BadOffset: int
27
+ BAD_OFFSET: int
28
+ BadDataPtr: int
29
+ BAD_DATA_PTR: int
30
+ BadStep: int
31
+ BAD_STEP: int
32
+ BadModelOrChSeq: int
33
+ BAD_MODEL_OR_CH_SEQ: int
34
+ BadNumChannels: int
35
+ BAD_NUM_CHANNELS: int
36
+ BadNumChannel1U: int
37
+ BAD_NUM_CHANNEL1U: int
38
+ BadDepth: int
39
+ BAD_DEPTH: int
40
+ BadAlphaChannel: int
41
+ BAD_ALPHA_CHANNEL: int
42
+ BadOrder: int
43
+ BAD_ORDER: int
44
+ BadOrigin: int
45
+ BAD_ORIGIN: int
46
+ BadAlign: int
47
+ BAD_ALIGN: int
48
+ BadCallBack: int
49
+ BAD_CALL_BACK: int
50
+ BadTileSize: int
51
+ BAD_TILE_SIZE: int
52
+ BadCOI: int
53
+ BAD_COI: int
54
+ BadROISize: int
55
+ BAD_ROISIZE: int
56
+ MaskIsTiled: int
57
+ MASK_IS_TILED: int
58
+ StsNullPtr: int
59
+ STS_NULL_PTR: int
60
+ StsVecLengthErr: int
61
+ STS_VEC_LENGTH_ERR: int
62
+ StsFilterStructContentErr: int
63
+ STS_FILTER_STRUCT_CONTENT_ERR: int
64
+ StsKernelStructContentErr: int
65
+ STS_KERNEL_STRUCT_CONTENT_ERR: int
66
+ StsFilterOffsetErr: int
67
+ STS_FILTER_OFFSET_ERR: int
68
+ StsBadSize: int
69
+ STS_BAD_SIZE: int
70
+ StsDivByZero: int
71
+ STS_DIV_BY_ZERO: int
72
+ StsInplaceNotSupported: int
73
+ STS_INPLACE_NOT_SUPPORTED: int
74
+ StsObjectNotFound: int
75
+ STS_OBJECT_NOT_FOUND: int
76
+ StsUnmatchedFormats: int
77
+ STS_UNMATCHED_FORMATS: int
78
+ StsBadFlag: int
79
+ STS_BAD_FLAG: int
80
+ StsBadPoint: int
81
+ STS_BAD_POINT: int
82
+ StsBadMask: int
83
+ STS_BAD_MASK: int
84
+ StsUnmatchedSizes: int
85
+ STS_UNMATCHED_SIZES: int
86
+ StsUnsupportedFormat: int
87
+ STS_UNSUPPORTED_FORMAT: int
88
+ StsOutOfRange: int
89
+ STS_OUT_OF_RANGE: int
90
+ StsParseError: int
91
+ STS_PARSE_ERROR: int
92
+ StsNotImplemented: int
93
+ STS_NOT_IMPLEMENTED: int
94
+ StsBadMemBlock: int
95
+ STS_BAD_MEM_BLOCK: int
96
+ StsAssert: int
97
+ STS_ASSERT: int
98
+ GpuNotSupported: int
99
+ GPU_NOT_SUPPORTED: int
100
+ GpuApiCallError: int
101
+ GPU_API_CALL_ERROR: int
102
+ OpenGlNotSupported: int
103
+ OPEN_GL_NOT_SUPPORTED: int
104
+ OpenGlApiCallError: int
105
+ OPEN_GL_API_CALL_ERROR: int
106
+ OpenCLApiCallError: int
107
+ OPEN_CLAPI_CALL_ERROR: int
108
+ OpenCLDoubleNotSupported: int
109
+ OPEN_CLDOUBLE_NOT_SUPPORTED: int
110
+ OpenCLInitError: int
111
+ OPEN_CLINIT_ERROR: int
112
+ OpenCLNoAMDBlasFft: int
113
+ OPEN_CLNO_AMDBLAS_FFT: int
114
+ Code = int
115
+ """One of [StsOk, STS_OK, StsBackTrace, STS_BACK_TRACE, StsError, STS_ERROR, StsInternal, STS_INTERNAL, StsNoMem, STS_NO_MEM, StsBadArg, STS_BAD_ARG, StsBadFunc, STS_BAD_FUNC, StsNoConv, STS_NO_CONV, StsAutoTrace, STS_AUTO_TRACE, HeaderIsNull, HEADER_IS_NULL, BadImageSize, BAD_IMAGE_SIZE, BadOffset, BAD_OFFSET, BadDataPtr, BAD_DATA_PTR, BadStep, BAD_STEP, BadModelOrChSeq, BAD_MODEL_OR_CH_SEQ, BadNumChannels, BAD_NUM_CHANNELS, BadNumChannel1U, BAD_NUM_CHANNEL1U, BadDepth, BAD_DEPTH, BadAlphaChannel, BAD_ALPHA_CHANNEL, BadOrder, BAD_ORDER, BadOrigin, BAD_ORIGIN, BadAlign, BAD_ALIGN, BadCallBack, BAD_CALL_BACK, BadTileSize, BAD_TILE_SIZE, BadCOI, BAD_COI, BadROISize, BAD_ROISIZE, MaskIsTiled, MASK_IS_TILED, StsNullPtr, STS_NULL_PTR, StsVecLengthErr, STS_VEC_LENGTH_ERR, StsFilterStructContentErr, STS_FILTER_STRUCT_CONTENT_ERR, StsKernelStructContentErr, STS_KERNEL_STRUCT_CONTENT_ERR, StsFilterOffsetErr, STS_FILTER_OFFSET_ERR, StsBadSize, STS_BAD_SIZE, StsDivByZero, STS_DIV_BY_ZERO, StsInplaceNotSupported, STS_INPLACE_NOT_SUPPORTED, StsObjectNotFound, STS_OBJECT_NOT_FOUND, StsUnmatchedFormats, STS_UNMATCHED_FORMATS, StsBadFlag, STS_BAD_FLAG, StsBadPoint, STS_BAD_POINT, StsBadMask, STS_BAD_MASK, StsUnmatchedSizes, STS_UNMATCHED_SIZES, StsUnsupportedFormat, STS_UNSUPPORTED_FORMAT, StsOutOfRange, STS_OUT_OF_RANGE, StsParseError, STS_PARSE_ERROR, StsNotImplemented, STS_NOT_IMPLEMENTED, StsBadMemBlock, STS_BAD_MEM_BLOCK, StsAssert, STS_ASSERT, GpuNotSupported, GPU_NOT_SUPPORTED, GpuApiCallError, GPU_API_CALL_ERROR, OpenGlNotSupported, OPEN_GL_NOT_SUPPORTED, OpenGlApiCallError, OPEN_GL_API_CALL_ERROR, OpenCLApiCallError, OPEN_CLAPI_CALL_ERROR, OpenCLDoubleNotSupported, OPEN_CLDOUBLE_NOT_SUPPORTED, OpenCLInitError, OPEN_CLINIT_ERROR, OpenCLNoAMDBlasFft, OPEN_CLNO_AMDBLAS_FFT]"""
116
+
117
+
118
+
evalkit_tf446/lib/python3.10/site-packages/cv2/LICENSE-3RD-PARTY.txt ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf446/lib/python3.10/site-packages/cv2/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) Olli-Pekka Heinisuo
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
evalkit_tf446/lib/python3.10/site-packages/cv2/__init__.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ OpenCV Python binary extension loader
3
+ '''
4
+ import os
5
+ import importlib
6
+ import sys
7
+
8
+ __all__ = []
9
+
10
+ try:
11
+ import numpy
12
+ import numpy.core.multiarray
13
+ except ImportError:
14
+ print('OpenCV bindings requires "numpy" package.')
15
+ print('Install it via command:')
16
+ print(' pip install numpy')
17
+ raise
18
+
19
+ # TODO
20
+ # is_x64 = sys.maxsize > 2**32
21
+
22
+
23
+ def __load_extra_py_code_for_module(base, name, enable_debug_print=False):
24
+ module_name = "{}.{}".format(__name__, name)
25
+ export_module_name = "{}.{}".format(base, name)
26
+ native_module = sys.modules.pop(module_name, None)
27
+ try:
28
+ py_module = importlib.import_module(module_name)
29
+ except ImportError as err:
30
+ if enable_debug_print:
31
+ print("Can't load Python code for module:", module_name,
32
+ ". Reason:", err)
33
+ # Extension doesn't contain extra py code
34
+ return False
35
+
36
+ if base in sys.modules and not hasattr(sys.modules[base], name):
37
+ setattr(sys.modules[base], name, py_module)
38
+ sys.modules[export_module_name] = py_module
39
+ # If it is C extension module it is already loaded by cv2 package
40
+ if native_module:
41
+ setattr(py_module, "_native", native_module)
42
+ for k, v in filter(lambda kv: not hasattr(py_module, kv[0]),
43
+ native_module.__dict__.items()):
44
+ if enable_debug_print: print(' symbol({}): {} = {}'.format(name, k, v))
45
+ setattr(py_module, k, v)
46
+ return True
47
+
48
+
49
+ def __collect_extra_submodules(enable_debug_print=False):
50
+ def modules_filter(module):
51
+ return all((
52
+ # module is not internal
53
+ not module.startswith("_"),
54
+ not module.startswith("python-"),
55
+ # it is not a file
56
+ os.path.isdir(os.path.join(_extra_submodules_init_path, module))
57
+ ))
58
+ if sys.version_info[0] < 3:
59
+ if enable_debug_print:
60
+ print("Extra submodules is loaded only for Python 3")
61
+ return []
62
+
63
+ __INIT_FILE_PATH = os.path.abspath(__file__)
64
+ _extra_submodules_init_path = os.path.dirname(__INIT_FILE_PATH)
65
+ return filter(modules_filter, os.listdir(_extra_submodules_init_path))
66
+
67
+
68
+ def bootstrap():
69
+ import sys
70
+
71
+ import copy
72
+ save_sys_path = copy.copy(sys.path)
73
+
74
+ if hasattr(sys, 'OpenCV_LOADER'):
75
+ print(sys.path)
76
+ raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.')
77
+ sys.OpenCV_LOADER = True
78
+
79
+ DEBUG = False
80
+ if hasattr(sys, 'OpenCV_LOADER_DEBUG'):
81
+ DEBUG = True
82
+
83
+ import platform
84
+ if DEBUG: print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system())))
85
+
86
+ LOADER_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
87
+
88
+ PYTHON_EXTENSIONS_PATHS = []
89
+ BINARIES_PATHS = []
90
+
91
+ g_vars = globals()
92
+ l_vars = locals().copy()
93
+
94
+ if sys.version_info[:2] < (3, 0):
95
+ from . load_config_py2 import exec_file_wrapper
96
+ else:
97
+ from . load_config_py3 import exec_file_wrapper
98
+
99
+ def load_first_config(fnames, required=True):
100
+ for fname in fnames:
101
+ fpath = os.path.join(LOADER_DIR, fname)
102
+ if not os.path.exists(fpath):
103
+ if DEBUG: print('OpenCV loader: config not found, skip: {}'.format(fpath))
104
+ continue
105
+ if DEBUG: print('OpenCV loader: loading config: {}'.format(fpath))
106
+ exec_file_wrapper(fpath, g_vars, l_vars)
107
+ return True
108
+ if required:
109
+ raise ImportError('OpenCV loader: missing configuration file: {}. Check OpenCV installation.'.format(fnames))
110
+
111
+ load_first_config(['config.py'], True)
112
+ load_first_config([
113
+ 'config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]),
114
+ 'config-{}.py'.format(sys.version_info[0])
115
+ ], True)
116
+
117
+ if DEBUG: print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS'])))
118
+ if DEBUG: print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS'])))
119
+
120
+ applySysPathWorkaround = False
121
+ if hasattr(sys, 'OpenCV_REPLACE_SYS_PATH_0'):
122
+ applySysPathWorkaround = True
123
+ else:
124
+ try:
125
+ BASE_DIR = os.path.dirname(LOADER_DIR)
126
+ if sys.path[0] == BASE_DIR or os.path.realpath(sys.path[0]) == BASE_DIR:
127
+ applySysPathWorkaround = True
128
+ except:
129
+ if DEBUG: print('OpenCV loader: exception during checking workaround for sys.path[0]')
130
+ pass # applySysPathWorkaround is False
131
+
132
+ for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']):
133
+ sys.path.insert(1 if not applySysPathWorkaround else 0, p)
134
+
135
+ if os.name == 'nt':
136
+ if sys.version_info[:2] >= (3, 8): # https://github.com/python/cpython/pull/12302
137
+ for p in l_vars['BINARIES_PATHS']:
138
+ try:
139
+ os.add_dll_directory(p)
140
+ except Exception as e:
141
+ if DEBUG: print('Failed os.add_dll_directory(): '+ str(e))
142
+ pass
143
+ os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '')
144
+ if DEBUG: print('OpenCV loader: PATH={}'.format(str(os.environ['PATH'])))
145
+ else:
146
+ # amending of LD_LIBRARY_PATH works for sub-processes only
147
+ os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '')
148
+
149
+ if DEBUG: print("Relink everything from native cv2 module to cv2 package")
150
+
151
+ py_module = sys.modules.pop("cv2")
152
+
153
+ native_module = importlib.import_module("cv2")
154
+
155
+ sys.modules["cv2"] = py_module
156
+ setattr(py_module, "_native", native_module)
157
+
158
+ for item_name, item in filter(lambda kv: kv[0] not in ("__file__", "__loader__", "__spec__",
159
+ "__name__", "__package__"),
160
+ native_module.__dict__.items()):
161
+ if item_name not in g_vars:
162
+ g_vars[item_name] = item
163
+
164
+ sys.path = save_sys_path # multiprocessing should start from bootstrap code (https://github.com/opencv/opencv/issues/18502)
165
+
166
+ try:
167
+ del sys.OpenCV_LOADER
168
+ except Exception as e:
169
+ if DEBUG:
170
+ print("Exception during delete OpenCV_LOADER:", e)
171
+
172
+ if DEBUG: print('OpenCV loader: binary extension... OK')
173
+
174
+ for submodule in __collect_extra_submodules(DEBUG):
175
+ if __load_extra_py_code_for_module("cv2", submodule, DEBUG):
176
+ if DEBUG: print("Extra Python code for", submodule, "is loaded")
177
+
178
+ if DEBUG: print('OpenCV loader: DONE')
179
+
180
+
181
+ bootstrap()
evalkit_tf446/lib/python3.10/site-packages/cv2/__init__.pyi ADDED
The diff for this file is too large to render. See raw diff
 
evalkit_tf446/lib/python3.10/site-packages/cv2/aruco/__init__.pyi ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import cv2.typing
5
+ import typing as _typing
6
+
7
+
8
+ # Enumerations
9
+ CORNER_REFINE_NONE: int
10
+ CORNER_REFINE_SUBPIX: int
11
+ CORNER_REFINE_CONTOUR: int
12
+ CORNER_REFINE_APRILTAG: int
13
+ CornerRefineMethod = int
14
+ """One of [CORNER_REFINE_NONE, CORNER_REFINE_SUBPIX, CORNER_REFINE_CONTOUR, CORNER_REFINE_APRILTAG]"""
15
+
16
+ DICT_4X4_50: int
17
+ DICT_4X4_100: int
18
+ DICT_4X4_250: int
19
+ DICT_4X4_1000: int
20
+ DICT_5X5_50: int
21
+ DICT_5X5_100: int
22
+ DICT_5X5_250: int
23
+ DICT_5X5_1000: int
24
+ DICT_6X6_50: int
25
+ DICT_6X6_100: int
26
+ DICT_6X6_250: int
27
+ DICT_6X6_1000: int
28
+ DICT_7X7_50: int
29
+ DICT_7X7_100: int
30
+ DICT_7X7_250: int
31
+ DICT_7X7_1000: int
32
+ DICT_ARUCO_ORIGINAL: int
33
+ DICT_APRILTAG_16h5: int
34
+ DICT_APRILTAG_16H5: int
35
+ DICT_APRILTAG_25h9: int
36
+ DICT_APRILTAG_25H9: int
37
+ DICT_APRILTAG_36h10: int
38
+ DICT_APRILTAG_36H10: int
39
+ DICT_APRILTAG_36h11: int
40
+ DICT_APRILTAG_36H11: int
41
+ DICT_ARUCO_MIP_36h12: int
42
+ DICT_ARUCO_MIP_36H12: int
43
+ PredefinedDictionaryType = int
44
+ """One of [DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_16H5, DICT_APRILTAG_25h9, DICT_APRILTAG_25H9, DICT_APRILTAG_36h10, DICT_APRILTAG_36H10, DICT_APRILTAG_36h11, DICT_APRILTAG_36H11, DICT_ARUCO_MIP_36h12, DICT_ARUCO_MIP_36H12]"""
45
+
46
+
47
+
48
+ # Classes
49
+ class Board:
50
+ # Functions
51
+ @_typing.overload
52
+ def __init__(self, objPoints: _typing.Sequence[cv2.typing.MatLike], dictionary: Dictionary, ids: cv2.typing.MatLike) -> None: ...
53
+ @_typing.overload
54
+ def __init__(self, objPoints: _typing.Sequence[cv2.UMat], dictionary: Dictionary, ids: cv2.UMat) -> None: ...
55
+
56
+ def getDictionary(self) -> Dictionary: ...
57
+
58
+ def getObjPoints(self) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point3f]]: ...
59
+
60
+ def getIds(self) -> _typing.Sequence[int]: ...
61
+
62
+ def getRightBottomCorner(self) -> cv2.typing.Point3f: ...
63
+
64
+ @_typing.overload
65
+ def matchImagePoints(self, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, objPoints: cv2.typing.MatLike | None = ..., imgPoints: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
66
+ @_typing.overload
67
+ def matchImagePoints(self, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, objPoints: cv2.UMat | None = ..., imgPoints: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
68
+
69
+ @_typing.overload
70
+ def generateImage(self, outSize: cv2.typing.Size, img: cv2.typing.MatLike | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.typing.MatLike: ...
71
+ @_typing.overload
72
+ def generateImage(self, outSize: cv2.typing.Size, img: cv2.UMat | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.UMat: ...
73
+
74
+
75
+ class GridBoard(Board):
76
+ # Functions
77
+ @_typing.overload
78
+ def __init__(self, size: cv2.typing.Size, markerLength: float, markerSeparation: float, dictionary: Dictionary, ids: cv2.typing.MatLike | None = ...) -> None: ...
79
+ @_typing.overload
80
+ def __init__(self, size: cv2.typing.Size, markerLength: float, markerSeparation: float, dictionary: Dictionary, ids: cv2.UMat | None = ...) -> None: ...
81
+
82
+ def getGridSize(self) -> cv2.typing.Size: ...
83
+
84
+ def getMarkerLength(self) -> float: ...
85
+
86
+ def getMarkerSeparation(self) -> float: ...
87
+
88
+
89
+ class CharucoBoard(Board):
90
+ # Functions
91
+ @_typing.overload
92
+ def __init__(self, size: cv2.typing.Size, squareLength: float, markerLength: float, dictionary: Dictionary, ids: cv2.typing.MatLike | None = ...) -> None: ...
93
+ @_typing.overload
94
+ def __init__(self, size: cv2.typing.Size, squareLength: float, markerLength: float, dictionary: Dictionary, ids: cv2.UMat | None = ...) -> None: ...
95
+
96
+ def setLegacyPattern(self, legacyPattern: bool) -> None: ...
97
+
98
+ def getLegacyPattern(self) -> bool: ...
99
+
100
+ def getChessboardSize(self) -> cv2.typing.Size: ...
101
+
102
+ def getSquareLength(self) -> float: ...
103
+
104
+ def getMarkerLength(self) -> float: ...
105
+
106
+ def getChessboardCorners(self) -> _typing.Sequence[cv2.typing.Point3f]: ...
107
+
108
+ @_typing.overload
109
+ def checkCharucoCornersCollinear(self, charucoIds: cv2.typing.MatLike) -> bool: ...
110
+ @_typing.overload
111
+ def checkCharucoCornersCollinear(self, charucoIds: cv2.UMat) -> bool: ...
112
+
113
+
114
+ class DetectorParameters:
115
+ adaptiveThreshWinSizeMin: int
116
+ adaptiveThreshWinSizeMax: int
117
+ adaptiveThreshWinSizeStep: int
118
+ adaptiveThreshConstant: float
119
+ minMarkerPerimeterRate: float
120
+ maxMarkerPerimeterRate: float
121
+ polygonalApproxAccuracyRate: float
122
+ minCornerDistanceRate: float
123
+ minDistanceToBorder: int
124
+ minMarkerDistanceRate: float
125
+ minGroupDistance: float
126
+ cornerRefinementMethod: int
127
+ cornerRefinementWinSize: int
128
+ relativeCornerRefinmentWinSize: float
129
+ cornerRefinementMaxIterations: int
130
+ cornerRefinementMinAccuracy: float
131
+ markerBorderBits: int
132
+ perspectiveRemovePixelPerCell: int
133
+ perspectiveRemoveIgnoredMarginPerCell: float
134
+ maxErroneousBitsInBorderRate: float
135
+ minOtsuStdDev: float
136
+ errorCorrectionRate: float
137
+ aprilTagQuadDecimate: float
138
+ aprilTagQuadSigma: float
139
+ aprilTagMinClusterPixels: int
140
+ aprilTagMaxNmaxima: int
141
+ aprilTagCriticalRad: float
142
+ aprilTagMaxLineFitMse: float
143
+ aprilTagMinWhiteBlackDiff: int
144
+ aprilTagDeglitch: int
145
+ detectInvertedMarker: bool
146
+ useAruco3Detection: bool
147
+ minSideLengthCanonicalImg: int
148
+ minMarkerLengthRatioOriginalImg: float
149
+
150
+ # Functions
151
+ def __init__(self) -> None: ...
152
+
153
+ def readDetectorParameters(self, fn: cv2.FileNode) -> bool: ...
154
+
155
+ def writeDetectorParameters(self, fs: cv2.FileStorage, name: str = ...) -> bool: ...
156
+
157
+
158
+ class RefineParameters:
159
+ minRepDistance: float
160
+ errorCorrectionRate: float
161
+ checkAllOrders: bool
162
+
163
+ # Functions
164
+ def __init__(self, minRepDistance: float = ..., errorCorrectionRate: float = ..., checkAllOrders: bool = ...) -> None: ...
165
+
166
+ def readRefineParameters(self, fn: cv2.FileNode) -> bool: ...
167
+
168
+ def writeRefineParameters(self, fs: cv2.FileStorage, name: str = ...) -> bool: ...
169
+
170
+
171
+ class ArucoDetector(cv2.Algorithm):
172
+ # Functions
173
+ def __init__(self, dictionary: Dictionary = ..., detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ...
174
+
175
+ @_typing.overload
176
+ def detectMarkers(self, image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike] | None = ..., ids: cv2.typing.MatLike | None = ..., rejectedImgPoints: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ...
177
+ @_typing.overload
178
+ def detectMarkers(self, image: cv2.UMat, corners: _typing.Sequence[cv2.UMat] | None = ..., ids: cv2.UMat | None = ..., rejectedImgPoints: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat]]: ...
179
+
180
+ @_typing.overload
181
+ def refineDetectedMarkers(self, image: cv2.typing.MatLike, board: Board, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, rejectedCorners: _typing.Sequence[cv2.typing.MatLike], cameraMatrix: cv2.typing.MatLike | None = ..., distCoeffs: cv2.typing.MatLike | None = ..., recoveredIdxs: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
182
+ @_typing.overload
183
+ def refineDetectedMarkers(self, image: cv2.UMat, board: Board, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, rejectedCorners: _typing.Sequence[cv2.UMat], cameraMatrix: cv2.UMat | None = ..., distCoeffs: cv2.UMat | None = ..., recoveredIdxs: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
184
+
185
+ def getDictionary(self) -> Dictionary: ...
186
+
187
+ def setDictionary(self, dictionary: Dictionary) -> None: ...
188
+
189
+ def getDetectorParameters(self) -> DetectorParameters: ...
190
+
191
+ def setDetectorParameters(self, detectorParameters: DetectorParameters) -> None: ...
192
+
193
+ def getRefineParameters(self) -> RefineParameters: ...
194
+
195
+ def setRefineParameters(self, refineParameters: RefineParameters) -> None: ...
196
+
197
+ def write(self, fs: cv2.FileStorage, name: str) -> None: ...
198
+
199
+ def read(self, fn: cv2.FileNode) -> None: ...
200
+
201
+
202
+ class Dictionary:
203
+ bytesList: cv2.typing.MatLike
204
+ markerSize: int
205
+ maxCorrectionBits: int
206
+
207
+ # Functions
208
+ @_typing.overload
209
+ def __init__(self) -> None: ...
210
+ @_typing.overload
211
+ def __init__(self, bytesList: cv2.typing.MatLike, _markerSize: int, maxcorr: int = ...) -> None: ...
212
+
213
+ def readDictionary(self, fn: cv2.FileNode) -> bool: ...
214
+
215
+ def writeDictionary(self, fs: cv2.FileStorage, name: str = ...) -> None: ...
216
+
217
+ def identify(self, onlyBits: cv2.typing.MatLike, maxCorrectionRate: float) -> tuple[bool, int, int]: ...
218
+
219
+ @_typing.overload
220
+ def getDistanceToId(self, bits: cv2.typing.MatLike, id: int, allRotations: bool = ...) -> int: ...
221
+ @_typing.overload
222
+ def getDistanceToId(self, bits: cv2.UMat, id: int, allRotations: bool = ...) -> int: ...
223
+
224
+ @_typing.overload
225
+ def generateImageMarker(self, id: int, sidePixels: int, _img: cv2.typing.MatLike | None = ..., borderBits: int = ...) -> cv2.typing.MatLike: ...
226
+ @_typing.overload
227
+ def generateImageMarker(self, id: int, sidePixels: int, _img: cv2.UMat | None = ..., borderBits: int = ...) -> cv2.UMat: ...
228
+
229
+ @staticmethod
230
+ def getByteListFromBits(bits: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
231
+
232
+ @staticmethod
233
+ def getBitsFromByteList(byteList: cv2.typing.MatLike, markerSize: int) -> cv2.typing.MatLike: ...
234
+
235
+
236
+ class CharucoParameters:
237
+ cameraMatrix: cv2.typing.MatLike
238
+ distCoeffs: cv2.typing.MatLike
239
+ minMarkers: int
240
+ tryRefineMarkers: bool
241
+
242
+ # Functions
243
+ def __init__(self) -> None: ...
244
+
245
+
246
+ class CharucoDetector(cv2.Algorithm):
247
+ # Functions
248
+ def __init__(self, board: CharucoBoard, charucoParams: CharucoParameters = ..., detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ...
249
+
250
+ def getBoard(self) -> CharucoBoard: ...
251
+
252
+ def setBoard(self, board: CharucoBoard) -> None: ...
253
+
254
+ def getCharucoParameters(self) -> CharucoParameters: ...
255
+
256
+ def setCharucoParameters(self, charucoParameters: CharucoParameters) -> None: ...
257
+
258
+ def getDetectorParameters(self) -> DetectorParameters: ...
259
+
260
+ def setDetectorParameters(self, detectorParameters: DetectorParameters) -> None: ...
261
+
262
+ def getRefineParameters(self) -> RefineParameters: ...
263
+
264
+ def setRefineParameters(self, refineParameters: RefineParameters) -> None: ...
265
+
266
+ @_typing.overload
267
+ def detectBoard(self, image: cv2.typing.MatLike, charucoCorners: cv2.typing.MatLike | None = ..., charucoIds: cv2.typing.MatLike | None = ..., markerCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., markerIds: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
268
+ @_typing.overload
269
+ def detectBoard(self, image: cv2.UMat, charucoCorners: cv2.UMat | None = ..., charucoIds: cv2.UMat | None = ..., markerCorners: _typing.Sequence[cv2.UMat] | None = ..., markerIds: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
270
+
271
+ @_typing.overload
272
+ def detectDiamonds(self, image: cv2.typing.MatLike, diamondCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., diamondIds: cv2.typing.MatLike | None = ..., markerCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., markerIds: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
273
+ @_typing.overload
274
+ def detectDiamonds(self, image: cv2.UMat, diamondCorners: _typing.Sequence[cv2.UMat] | None = ..., diamondIds: cv2.UMat | None = ..., markerCorners: _typing.Sequence[cv2.UMat] | None = ..., markerIds: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
275
+
276
+
277
+
278
+ # Functions
279
+ @_typing.overload
280
+ def drawDetectedCornersCharuco(image: cv2.typing.MatLike, charucoCorners: cv2.typing.MatLike, charucoIds: cv2.typing.MatLike | None = ..., cornerColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
281
+ @_typing.overload
282
+ def drawDetectedCornersCharuco(image: cv2.UMat, charucoCorners: cv2.UMat, charucoIds: cv2.UMat | None = ..., cornerColor: cv2.typing.Scalar = ...) -> cv2.UMat: ...
283
+
284
+ @_typing.overload
285
+ def drawDetectedDiamonds(image: cv2.typing.MatLike, diamondCorners: _typing.Sequence[cv2.typing.MatLike], diamondIds: cv2.typing.MatLike | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
286
+ @_typing.overload
287
+ def drawDetectedDiamonds(image: cv2.UMat, diamondCorners: _typing.Sequence[cv2.UMat], diamondIds: cv2.UMat | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.UMat: ...
288
+
289
+ @_typing.overload
290
+ def drawDetectedMarkers(image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike], ids: cv2.typing.MatLike | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
291
+ @_typing.overload
292
+ def drawDetectedMarkers(image: cv2.UMat, corners: _typing.Sequence[cv2.UMat], ids: cv2.UMat | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.UMat: ...
293
+
294
+ def extendDictionary(nMarkers: int, markerSize: int, baseDictionary: Dictionary = ..., randomSeed: int = ...) -> Dictionary: ...
295
+
296
+ @_typing.overload
297
+ def generateImageMarker(dictionary: Dictionary, id: int, sidePixels: int, img: cv2.typing.MatLike | None = ..., borderBits: int = ...) -> cv2.typing.MatLike: ...
298
+ @_typing.overload
299
+ def generateImageMarker(dictionary: Dictionary, id: int, sidePixels: int, img: cv2.UMat | None = ..., borderBits: int = ...) -> cv2.UMat: ...
300
+
301
+ def getPredefinedDictionary(dict: int) -> Dictionary: ...
302
+
303
+
evalkit_tf446/lib/python3.10/site-packages/cv2/config-3.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ PYTHON_EXTENSIONS_PATHS = [
2
+ LOADER_DIR
3
+ ] + PYTHON_EXTENSIONS_PATHS
4
+
5
+ ci_and_not_headless = False
6
+
7
+ try:
8
+ from .version import ci_build, headless
9
+
10
+ ci_and_not_headless = ci_build and not headless
11
+ except:
12
+ pass
13
+
14
+ # the Qt plugin is included currently only in the pre-built wheels
15
+ if sys.platform.startswith("linux") and ci_and_not_headless:
16
+ os.environ["QT_QPA_PLATFORM_PLUGIN_PATH"] = os.path.join(
17
+ os.path.dirname(os.path.abspath(__file__)), "qt", "plugins"
18
+ )
19
+
20
+ # Qt will throw warning on Linux if fonts are not found
21
+ if sys.platform.startswith("linux") and ci_and_not_headless:
22
+ os.environ["QT_QPA_FONTDIR"] = os.path.join(
23
+ os.path.dirname(os.path.abspath(__file__)), "qt", "fonts"
24
+ )
evalkit_tf446/lib/python3.10/site-packages/cv2/config.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ import os
2
+
3
+ BINARIES_PATHS = [
4
+ os.path.join(os.path.join(LOADER_DIR, '../../'), 'lib64')
5
+ ] + BINARIES_PATHS
evalkit_tf446/lib/python3.10/site-packages/cv2/detail/__init__.pyi ADDED
@@ -0,0 +1,600 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import cv2.gapi
5
+ import cv2.gapi.ie
6
+ import cv2.gapi.onnx
7
+ import cv2.gapi.ov
8
+ import cv2.typing
9
+ import numpy
10
+ import typing as _typing
11
+
12
+
13
+ # Enumerations
14
+ TEST_CUSTOM: int
15
+ TEST_EQ: int
16
+ TEST_NE: int
17
+ TEST_LE: int
18
+ TEST_LT: int
19
+ TEST_GE: int
20
+ TEST_GT: int
21
+ TestOp = int
22
+ """One of [TEST_CUSTOM, TEST_EQ, TEST_NE, TEST_LE, TEST_LT, TEST_GE, TEST_GT]"""
23
+
24
+ WAVE_CORRECT_HORIZ: int
25
+ WAVE_CORRECT_VERT: int
26
+ WAVE_CORRECT_AUTO: int
27
+ WaveCorrectKind = int
28
+ """One of [WAVE_CORRECT_HORIZ, WAVE_CORRECT_VERT, WAVE_CORRECT_AUTO]"""
29
+
30
+ OpaqueKind_CV_UNKNOWN: int
31
+ OPAQUE_KIND_CV_UNKNOWN: int
32
+ OpaqueKind_CV_BOOL: int
33
+ OPAQUE_KIND_CV_BOOL: int
34
+ OpaqueKind_CV_INT: int
35
+ OPAQUE_KIND_CV_INT: int
36
+ OpaqueKind_CV_INT64: int
37
+ OPAQUE_KIND_CV_INT64: int
38
+ OpaqueKind_CV_DOUBLE: int
39
+ OPAQUE_KIND_CV_DOUBLE: int
40
+ OpaqueKind_CV_FLOAT: int
41
+ OPAQUE_KIND_CV_FLOAT: int
42
+ OpaqueKind_CV_UINT64: int
43
+ OPAQUE_KIND_CV_UINT64: int
44
+ OpaqueKind_CV_STRING: int
45
+ OPAQUE_KIND_CV_STRING: int
46
+ OpaqueKind_CV_POINT: int
47
+ OPAQUE_KIND_CV_POINT: int
48
+ OpaqueKind_CV_POINT2F: int
49
+ OPAQUE_KIND_CV_POINT2F: int
50
+ OpaqueKind_CV_POINT3F: int
51
+ OPAQUE_KIND_CV_POINT3F: int
52
+ OpaqueKind_CV_SIZE: int
53
+ OPAQUE_KIND_CV_SIZE: int
54
+ OpaqueKind_CV_RECT: int
55
+ OPAQUE_KIND_CV_RECT: int
56
+ OpaqueKind_CV_SCALAR: int
57
+ OPAQUE_KIND_CV_SCALAR: int
58
+ OpaqueKind_CV_MAT: int
59
+ OPAQUE_KIND_CV_MAT: int
60
+ OpaqueKind_CV_DRAW_PRIM: int
61
+ OPAQUE_KIND_CV_DRAW_PRIM: int
62
+ OpaqueKind = int
63
+ """One of [OpaqueKind_CV_UNKNOWN, OPAQUE_KIND_CV_UNKNOWN, OpaqueKind_CV_BOOL, OPAQUE_KIND_CV_BOOL, OpaqueKind_CV_INT, OPAQUE_KIND_CV_INT, OpaqueKind_CV_INT64, OPAQUE_KIND_CV_INT64, OpaqueKind_CV_DOUBLE, OPAQUE_KIND_CV_DOUBLE, OpaqueKind_CV_FLOAT, OPAQUE_KIND_CV_FLOAT, OpaqueKind_CV_UINT64, OPAQUE_KIND_CV_UINT64, OpaqueKind_CV_STRING, OPAQUE_KIND_CV_STRING, OpaqueKind_CV_POINT, OPAQUE_KIND_CV_POINT, OpaqueKind_CV_POINT2F, OPAQUE_KIND_CV_POINT2F, OpaqueKind_CV_POINT3F, OPAQUE_KIND_CV_POINT3F, OpaqueKind_CV_SIZE, OPAQUE_KIND_CV_SIZE, OpaqueKind_CV_RECT, OPAQUE_KIND_CV_RECT, OpaqueKind_CV_SCALAR, OPAQUE_KIND_CV_SCALAR, OpaqueKind_CV_MAT, OPAQUE_KIND_CV_MAT, OpaqueKind_CV_DRAW_PRIM, OPAQUE_KIND_CV_DRAW_PRIM]"""
64
+
65
+ ArgKind_OPAQUE_VAL: int
66
+ ARG_KIND_OPAQUE_VAL: int
67
+ ArgKind_OPAQUE: int
68
+ ARG_KIND_OPAQUE: int
69
+ ArgKind_GOBJREF: int
70
+ ARG_KIND_GOBJREF: int
71
+ ArgKind_GMAT: int
72
+ ARG_KIND_GMAT: int
73
+ ArgKind_GMATP: int
74
+ ARG_KIND_GMATP: int
75
+ ArgKind_GFRAME: int
76
+ ARG_KIND_GFRAME: int
77
+ ArgKind_GSCALAR: int
78
+ ARG_KIND_GSCALAR: int
79
+ ArgKind_GARRAY: int
80
+ ARG_KIND_GARRAY: int
81
+ ArgKind_GOPAQUE: int
82
+ ARG_KIND_GOPAQUE: int
83
+ ArgKind = int
84
+ """One of [ArgKind_OPAQUE_VAL, ARG_KIND_OPAQUE_VAL, ArgKind_OPAQUE, ARG_KIND_OPAQUE, ArgKind_GOBJREF, ARG_KIND_GOBJREF, ArgKind_GMAT, ARG_KIND_GMAT, ArgKind_GMATP, ARG_KIND_GMATP, ArgKind_GFRAME, ARG_KIND_GFRAME, ArgKind_GSCALAR, ARG_KIND_GSCALAR, ArgKind_GARRAY, ARG_KIND_GARRAY, ArgKind_GOPAQUE, ARG_KIND_GOPAQUE]"""
85
+
86
+
87
+ Blender_NO: int
88
+ BLENDER_NO: int
89
+ Blender_FEATHER: int
90
+ BLENDER_FEATHER: int
91
+ Blender_MULTI_BAND: int
92
+ BLENDER_MULTI_BAND: int
93
+
94
+ ExposureCompensator_NO: int
95
+ EXPOSURE_COMPENSATOR_NO: int
96
+ ExposureCompensator_GAIN: int
97
+ EXPOSURE_COMPENSATOR_GAIN: int
98
+ ExposureCompensator_GAIN_BLOCKS: int
99
+ EXPOSURE_COMPENSATOR_GAIN_BLOCKS: int
100
+ ExposureCompensator_CHANNELS: int
101
+ EXPOSURE_COMPENSATOR_CHANNELS: int
102
+ ExposureCompensator_CHANNELS_BLOCKS: int
103
+ EXPOSURE_COMPENSATOR_CHANNELS_BLOCKS: int
104
+
105
+ SeamFinder_NO: int
106
+ SEAM_FINDER_NO: int
107
+ SeamFinder_VORONOI_SEAM: int
108
+ SEAM_FINDER_VORONOI_SEAM: int
109
+ SeamFinder_DP_SEAM: int
110
+ SEAM_FINDER_DP_SEAM: int
111
+
112
+ DpSeamFinder_COLOR: int
113
+ DP_SEAM_FINDER_COLOR: int
114
+ DpSeamFinder_COLOR_GRAD: int
115
+ DP_SEAM_FINDER_COLOR_GRAD: int
116
+ DpSeamFinder_CostFunction = int
117
+ """One of [DpSeamFinder_COLOR, DP_SEAM_FINDER_COLOR, DpSeamFinder_COLOR_GRAD, DP_SEAM_FINDER_COLOR_GRAD]"""
118
+
119
+ Timelapser_AS_IS: int
120
+ TIMELAPSER_AS_IS: int
121
+ Timelapser_CROP: int
122
+ TIMELAPSER_CROP: int
123
+
124
+ GraphCutSeamFinderBase_COST_COLOR: int
125
+ GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR: int
126
+ GraphCutSeamFinderBase_COST_COLOR_GRAD: int
127
+ GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR_GRAD: int
128
+ GraphCutSeamFinderBase_CostType = int
129
+ """One of [GraphCutSeamFinderBase_COST_COLOR, GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR, GraphCutSeamFinderBase_COST_COLOR_GRAD, GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR_GRAD]"""
130
+
131
+ TrackerSamplerCSC_MODE_INIT_POS: int
132
+ TRACKER_SAMPLER_CSC_MODE_INIT_POS: int
133
+ TrackerSamplerCSC_MODE_INIT_NEG: int
134
+ TRACKER_SAMPLER_CSC_MODE_INIT_NEG: int
135
+ TrackerSamplerCSC_MODE_TRACK_POS: int
136
+ TRACKER_SAMPLER_CSC_MODE_TRACK_POS: int
137
+ TrackerSamplerCSC_MODE_TRACK_NEG: int
138
+ TRACKER_SAMPLER_CSC_MODE_TRACK_NEG: int
139
+ TrackerSamplerCSC_MODE_DETECT: int
140
+ TRACKER_SAMPLER_CSC_MODE_DETECT: int
141
+ TrackerSamplerCSC_MODE = int
142
+ """One of [TrackerSamplerCSC_MODE_INIT_POS, TRACKER_SAMPLER_CSC_MODE_INIT_POS, TrackerSamplerCSC_MODE_INIT_NEG, TRACKER_SAMPLER_CSC_MODE_INIT_NEG, TrackerSamplerCSC_MODE_TRACK_POS, TRACKER_SAMPLER_CSC_MODE_TRACK_POS, TrackerSamplerCSC_MODE_TRACK_NEG, TRACKER_SAMPLER_CSC_MODE_TRACK_NEG, TrackerSamplerCSC_MODE_DETECT, TRACKER_SAMPLER_CSC_MODE_DETECT]"""
143
+
144
+
145
+ # Classes
146
+ class Blender:
147
+ # Functions
148
+ @classmethod
149
+ def createDefault(cls, type: int, try_gpu: bool = ...) -> Blender: ...
150
+
151
+ @_typing.overload
152
+ def prepare(self, corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> None: ...
153
+ @_typing.overload
154
+ def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
155
+
156
+ @_typing.overload
157
+ def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
158
+ @_typing.overload
159
+ def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
160
+
161
+ @_typing.overload
162
+ def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
163
+ @_typing.overload
164
+ def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
165
+
166
+
167
+ class FeatherBlender(Blender):
168
+ # Functions
169
+ def __init__(self, sharpness: float = ...) -> None: ...
170
+
171
+ def sharpness(self) -> float: ...
172
+
173
+ def setSharpness(self, val: float) -> None: ...
174
+
175
+ def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
176
+
177
+ @_typing.overload
178
+ def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
179
+ @_typing.overload
180
+ def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
181
+
182
+ @_typing.overload
183
+ def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
184
+ @_typing.overload
185
+ def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
186
+
187
+ def createWeightMaps(self, masks: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], weight_maps: _typing.Sequence[cv2.UMat]) -> tuple[cv2.typing.Rect, _typing.Sequence[cv2.UMat]]: ...
188
+
189
+
190
+ class MultiBandBlender(Blender):
191
+ # Functions
192
+ def __init__(self, try_gpu: int = ..., num_bands: int = ..., weight_type: int = ...) -> None: ...
193
+
194
+ def numBands(self) -> int: ...
195
+
196
+ def setNumBands(self, val: int) -> None: ...
197
+
198
+ def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
199
+
200
+ @_typing.overload
201
+ def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
202
+ @_typing.overload
203
+ def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
204
+
205
+ @_typing.overload
206
+ def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
207
+ @_typing.overload
208
+ def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
209
+
210
+
211
+ class CameraParams:
212
+ focal: float
213
+ aspect: float
214
+ ppx: float
215
+ ppy: float
216
+ R: cv2.typing.MatLike
217
+ t: cv2.typing.MatLike
218
+
219
+ # Functions
220
+ def K(self) -> cv2.typing.MatLike: ...
221
+
222
+
223
+ class ExposureCompensator:
224
+ # Functions
225
+ @classmethod
226
+ def createDefault(cls, type: int) -> ExposureCompensator: ...
227
+
228
+ def feed(self, corners: _typing.Sequence[cv2.typing.Point], images: _typing.Sequence[cv2.UMat], masks: _typing.Sequence[cv2.UMat]) -> None: ...
229
+
230
+ @_typing.overload
231
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
232
+ @_typing.overload
233
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
234
+
235
+ def getMatGains(self, arg1: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
236
+
237
+ def setMatGains(self, arg1: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
238
+
239
+ def setUpdateGain(self, b: bool) -> None: ...
240
+
241
+ def getUpdateGain(self) -> bool: ...
242
+
243
+
244
+ class NoExposureCompensator(ExposureCompensator):
245
+ # Functions
246
+ @_typing.overload
247
+ def apply(self, arg1: int, arg2: cv2.typing.Point, arg3: cv2.typing.MatLike, arg4: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
248
+ @_typing.overload
249
+ def apply(self, arg1: int, arg2: cv2.typing.Point, arg3: cv2.UMat, arg4: cv2.UMat) -> cv2.UMat: ...
250
+
251
+ def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
252
+
253
+ def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
254
+
255
+
256
+ class GainCompensator(ExposureCompensator):
257
+ # Functions
258
+ @_typing.overload
259
+ def __init__(self) -> None: ...
260
+ @_typing.overload
261
+ def __init__(self, nr_feeds: int) -> None: ...
262
+
263
+ @_typing.overload
264
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
265
+ @_typing.overload
266
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
267
+
268
+ def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
269
+
270
+ def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
271
+
272
+ def setNrFeeds(self, nr_feeds: int) -> None: ...
273
+
274
+ def getNrFeeds(self) -> int: ...
275
+
276
+ def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
277
+
278
+ def getSimilarityThreshold(self) -> float: ...
279
+
280
+
281
+ class ChannelsCompensator(ExposureCompensator):
282
+ # Functions
283
+ def __init__(self, nr_feeds: int = ...) -> None: ...
284
+
285
+ @_typing.overload
286
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
287
+ @_typing.overload
288
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
289
+
290
+ def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
291
+
292
+ def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
293
+
294
+ def setNrFeeds(self, nr_feeds: int) -> None: ...
295
+
296
+ def getNrFeeds(self) -> int: ...
297
+
298
+ def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
299
+
300
+ def getSimilarityThreshold(self) -> float: ...
301
+
302
+
303
+ class BlocksCompensator(ExposureCompensator):
304
+ # Functions
305
+ @_typing.overload
306
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
307
+ @_typing.overload
308
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
309
+
310
+ def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
311
+
312
+ def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
313
+
314
+ def setNrFeeds(self, nr_feeds: int) -> None: ...
315
+
316
+ def getNrFeeds(self) -> int: ...
317
+
318
+ def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
319
+
320
+ def getSimilarityThreshold(self) -> float: ...
321
+
322
+ @_typing.overload
323
+ def setBlockSize(self, width: int, height: int) -> None: ...
324
+ @_typing.overload
325
+ def setBlockSize(self, size: cv2.typing.Size) -> None: ...
326
+
327
+ def getBlockSize(self) -> cv2.typing.Size: ...
328
+
329
+ def setNrGainsFilteringIterations(self, nr_iterations: int) -> None: ...
330
+
331
+ def getNrGainsFilteringIterations(self) -> int: ...
332
+
333
+
334
+ class BlocksGainCompensator(BlocksCompensator):
335
+ # Functions
336
+ @_typing.overload
337
+ def __init__(self, bl_width: int = ..., bl_height: int = ...) -> None: ...
338
+ @_typing.overload
339
+ def __init__(self, bl_width: int, bl_height: int, nr_feeds: int) -> None: ...
340
+
341
+ @_typing.overload
342
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
343
+ @_typing.overload
344
+ def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
345
+
346
+ def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
347
+
348
+ def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
349
+
350
+
351
+ class BlocksChannelsCompensator(BlocksCompensator):
352
+ # Functions
353
+ def __init__(self, bl_width: int = ..., bl_height: int = ..., nr_feeds: int = ...) -> None: ...
354
+
355
+
356
+ class ImageFeatures:
357
+ img_idx: int
358
+ img_size: cv2.typing.Size
359
+ keypoints: _typing.Sequence[cv2.KeyPoint]
360
+ descriptors: cv2.UMat
361
+
362
+ # Functions
363
+ def getKeypoints(self) -> _typing.Sequence[cv2.KeyPoint]: ...
364
+
365
+
366
+ class MatchesInfo:
367
+ src_img_idx: int
368
+ dst_img_idx: int
369
+ matches: _typing.Sequence[cv2.DMatch]
370
+ inliers_mask: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]
371
+ num_inliers: int
372
+ H: cv2.typing.MatLike
373
+ confidence: float
374
+
375
+ # Functions
376
+ def getMatches(self) -> _typing.Sequence[cv2.DMatch]: ...
377
+
378
+ def getInliers(self) -> numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]: ...
379
+
380
+
381
+ class FeaturesMatcher:
382
+ # Functions
383
+ def apply(self, features1: ImageFeatures, features2: ImageFeatures) -> MatchesInfo: ...
384
+
385
+ def apply2(self, features: _typing.Sequence[ImageFeatures], mask: cv2.UMat | None = ...) -> _typing.Sequence[MatchesInfo]: ...
386
+
387
+ def isThreadSafe(self) -> bool: ...
388
+
389
+ def collectGarbage(self) -> None: ...
390
+
391
+
392
+ class BestOf2NearestMatcher(FeaturesMatcher):
393
+ # Functions
394
+ def __init__(self, try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ..., matches_confindece_thresh: float = ...) -> None: ...
395
+
396
+ def collectGarbage(self) -> None: ...
397
+
398
+ @classmethod
399
+ def create(cls, try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ..., matches_confindece_thresh: float = ...) -> BestOf2NearestMatcher: ...
400
+
401
+
402
+ class BestOf2NearestRangeMatcher(BestOf2NearestMatcher):
403
+ # Functions
404
+ def __init__(self, range_width: int = ..., try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ...) -> None: ...
405
+
406
+
407
+ class AffineBestOf2NearestMatcher(BestOf2NearestMatcher):
408
+ # Functions
409
+ def __init__(self, full_affine: bool = ..., try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ...) -> None: ...
410
+
411
+
412
+ class Estimator:
413
+ # Functions
414
+ def apply(self, features: _typing.Sequence[ImageFeatures], pairwise_matches: _typing.Sequence[MatchesInfo], cameras: _typing.Sequence[CameraParams]) -> tuple[bool, _typing.Sequence[CameraParams]]: ...
415
+
416
+
417
+ class HomographyBasedEstimator(Estimator):
418
+ # Functions
419
+ def __init__(self, is_focals_estimated: bool = ...) -> None: ...
420
+
421
+
422
+ class AffineBasedEstimator(Estimator):
423
+ # Functions
424
+ def __init__(self) -> None: ...
425
+
426
+
427
+ class BundleAdjusterBase(Estimator):
428
+ # Functions
429
+ def refinementMask(self) -> cv2.typing.MatLike: ...
430
+
431
+ def setRefinementMask(self, mask: cv2.typing.MatLike) -> None: ...
432
+
433
+ def confThresh(self) -> float: ...
434
+
435
+ def setConfThresh(self, conf_thresh: float) -> None: ...
436
+
437
+ def termCriteria(self) -> cv2.typing.TermCriteria: ...
438
+
439
+ def setTermCriteria(self, term_criteria: cv2.typing.TermCriteria) -> None: ...
440
+
441
+
442
+ class NoBundleAdjuster(BundleAdjusterBase):
443
+ # Functions
444
+ def __init__(self) -> None: ...
445
+
446
+
447
+ class BundleAdjusterReproj(BundleAdjusterBase):
448
+ # Functions
449
+ def __init__(self) -> None: ...
450
+
451
+
452
+ class BundleAdjusterRay(BundleAdjusterBase):
453
+ # Functions
454
+ def __init__(self) -> None: ...
455
+
456
+
457
+ class BundleAdjusterAffine(BundleAdjusterBase):
458
+ # Functions
459
+ def __init__(self) -> None: ...
460
+
461
+
462
+ class BundleAdjusterAffinePartial(BundleAdjusterBase):
463
+ # Functions
464
+ def __init__(self) -> None: ...
465
+
466
+
467
+ class SeamFinder:
468
+ # Functions
469
+ def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
470
+
471
+ @classmethod
472
+ def createDefault(cls, type: int) -> SeamFinder: ...
473
+
474
+
475
+ class NoSeamFinder(SeamFinder):
476
+ # Functions
477
+ def find(self, arg1: _typing.Sequence[cv2.UMat], arg2: _typing.Sequence[cv2.typing.Point], arg3: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
478
+
479
+
480
+ class PairwiseSeamFinder(SeamFinder):
481
+ # Functions
482
+ def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
483
+
484
+
485
+ class VoronoiSeamFinder(PairwiseSeamFinder):
486
+ # Functions
487
+ def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
488
+
489
+
490
+ class DpSeamFinder(SeamFinder):
491
+ # Functions
492
+ def __init__(self, costFunc: str) -> None: ...
493
+
494
+ def setCostFunction(self, val: str) -> None: ...
495
+
496
+
497
+ class GraphCutSeamFinder:
498
+ # Functions
499
+ def __init__(self, cost_type: str, terminal_cost: float = ..., bad_region_penalty: float = ...) -> None: ...
500
+
501
+ def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
502
+
503
+
504
+ class Timelapser:
505
+ # Functions
506
+ @classmethod
507
+ def createDefault(cls, type: int) -> Timelapser: ...
508
+
509
+ def initialize(self, corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> None: ...
510
+
511
+ @_typing.overload
512
+ def process(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
513
+ @_typing.overload
514
+ def process(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
515
+
516
+ def getDst(self) -> cv2.UMat: ...
517
+
518
+
519
+ class TimelapserCrop(Timelapser):
520
+ ...
521
+
522
+ class ProjectorBase:
523
+ ...
524
+
525
+ class SphericalProjector(ProjectorBase):
526
+ # Functions
527
+ def mapForward(self, x: float, y: float, u: float, v: float) -> None: ...
528
+
529
+ def mapBackward(self, u: float, v: float, x: float, y: float) -> None: ...
530
+
531
+
532
+
533
+ # Functions
534
+ def calibrateRotatingCamera(Hs: _typing.Sequence[cv2.typing.MatLike], K: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
535
+
536
+ @_typing.overload
537
+ def computeImageFeatures(featuresFinder: cv2.Feature2D, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[ImageFeatures]: ...
538
+ @_typing.overload
539
+ def computeImageFeatures(featuresFinder: cv2.Feature2D, images: _typing.Sequence[cv2.UMat], masks: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[ImageFeatures]: ...
540
+
541
+ @_typing.overload
542
+ def computeImageFeatures2(featuresFinder: cv2.Feature2D, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> ImageFeatures: ...
543
+ @_typing.overload
544
+ def computeImageFeatures2(featuresFinder: cv2.Feature2D, image: cv2.UMat, mask: cv2.UMat | None = ...) -> ImageFeatures: ...
545
+
546
+ @_typing.overload
547
+ def createLaplacePyr(img: cv2.typing.MatLike, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
548
+ @_typing.overload
549
+ def createLaplacePyr(img: cv2.UMat, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
550
+
551
+ @_typing.overload
552
+ def createLaplacePyrGpu(img: cv2.typing.MatLike, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
553
+ @_typing.overload
554
+ def createLaplacePyrGpu(img: cv2.UMat, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
555
+
556
+ @_typing.overload
557
+ def createWeightMap(mask: cv2.typing.MatLike, sharpness: float, weight: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
558
+ @_typing.overload
559
+ def createWeightMap(mask: cv2.UMat, sharpness: float, weight: cv2.UMat) -> cv2.UMat: ...
560
+
561
+ def focalsFromHomography(H: cv2.typing.MatLike, f0: float, f1: float, f0_ok: bool, f1_ok: bool) -> None: ...
562
+
563
+ def leaveBiggestComponent(features: _typing.Sequence[ImageFeatures], pairwise_matches: _typing.Sequence[MatchesInfo], conf_threshold: float) -> _typing.Sequence[int]: ...
564
+
565
+ def matchesGraphAsString(paths: _typing.Sequence[str], pairwise_matches: _typing.Sequence[MatchesInfo], conf_threshold: float) -> str: ...
566
+
567
+ @_typing.overload
568
+ def normalizeUsingWeightMap(weight: cv2.typing.MatLike, src: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
569
+ @_typing.overload
570
+ def normalizeUsingWeightMap(weight: cv2.UMat, src: cv2.UMat) -> cv2.UMat: ...
571
+
572
+ def overlapRoi(tl1: cv2.typing.Point, tl2: cv2.typing.Point, sz1: cv2.typing.Size, sz2: cv2.typing.Size, roi: cv2.typing.Rect) -> bool: ...
573
+
574
+ def restoreImageFromLaplacePyr(pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
575
+
576
+ def restoreImageFromLaplacePyrGpu(pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
577
+
578
+ @_typing.overload
579
+ def resultRoi(corners: _typing.Sequence[cv2.typing.Point], images: _typing.Sequence[cv2.UMat]) -> cv2.typing.Rect: ...
580
+ @_typing.overload
581
+ def resultRoi(corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> cv2.typing.Rect: ...
582
+
583
+ def resultRoiIntersection(corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> cv2.typing.Rect: ...
584
+
585
+ def resultTl(corners: _typing.Sequence[cv2.typing.Point]) -> cv2.typing.Point: ...
586
+
587
+ def selectRandomSubset(count: int, size: int, subset: _typing.Sequence[int]) -> None: ...
588
+
589
+ def stitchingLogLevel() -> int: ...
590
+
591
+ @_typing.overload
592
+ def strip(params: cv2.gapi.ie.PyParams) -> cv2.gapi.GNetParam: ...
593
+ @_typing.overload
594
+ def strip(params: cv2.gapi.onnx.PyParams) -> cv2.gapi.GNetParam: ...
595
+ @_typing.overload
596
+ def strip(params: cv2.gapi.ov.PyParams) -> cv2.gapi.GNetParam: ...
597
+
598
+ def waveCorrect(rmats: _typing.Sequence[cv2.typing.MatLike], kind: WaveCorrectKind) -> _typing.Sequence[cv2.typing.MatLike]: ...
599
+
600
+
evalkit_tf446/lib/python3.10/site-packages/cv2/flann/__init__.pyi ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import cv2.typing
5
+ import typing as _typing
6
+
7
+
8
+ # Enumerations
9
+ FLANN_INDEX_TYPE_8U: int
10
+ FLANN_INDEX_TYPE_8S: int
11
+ FLANN_INDEX_TYPE_16U: int
12
+ FLANN_INDEX_TYPE_16S: int
13
+ FLANN_INDEX_TYPE_32S: int
14
+ FLANN_INDEX_TYPE_32F: int
15
+ FLANN_INDEX_TYPE_64F: int
16
+ FLANN_INDEX_TYPE_STRING: int
17
+ FLANN_INDEX_TYPE_BOOL: int
18
+ FLANN_INDEX_TYPE_ALGORITHM: int
19
+ LAST_VALUE_FLANN_INDEX_TYPE: int
20
+ FlannIndexType = int
21
+ """One of [FLANN_INDEX_TYPE_8U, FLANN_INDEX_TYPE_8S, FLANN_INDEX_TYPE_16U, FLANN_INDEX_TYPE_16S, FLANN_INDEX_TYPE_32S, FLANN_INDEX_TYPE_32F, FLANN_INDEX_TYPE_64F, FLANN_INDEX_TYPE_STRING, FLANN_INDEX_TYPE_BOOL, FLANN_INDEX_TYPE_ALGORITHM, LAST_VALUE_FLANN_INDEX_TYPE]"""
22
+
23
+
24
+
25
+ # Classes
26
+ class Index:
27
+ # Functions
28
+ @_typing.overload
29
+ def __init__(self) -> None: ...
30
+ @_typing.overload
31
+ def __init__(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
32
+ @_typing.overload
33
+ def __init__(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
34
+
35
+ @_typing.overload
36
+ def build(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
37
+ @_typing.overload
38
+ def build(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
39
+
40
+ @_typing.overload
41
+ def knnSearch(self, query: cv2.typing.MatLike, knn: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
42
+ @_typing.overload
43
+ def knnSearch(self, query: cv2.UMat, knn: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
44
+
45
+ @_typing.overload
46
+ def radiusSearch(self, query: cv2.typing.MatLike, radius: float, maxResults: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
47
+ @_typing.overload
48
+ def radiusSearch(self, query: cv2.UMat, radius: float, maxResults: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
49
+
50
+ def save(self, filename: str) -> None: ...
51
+
52
+ @_typing.overload
53
+ def load(self, features: cv2.typing.MatLike, filename: str) -> bool: ...
54
+ @_typing.overload
55
+ def load(self, features: cv2.UMat, filename: str) -> bool: ...
56
+
57
+ def release(self) -> None: ...
58
+
59
+ def getDistance(self) -> int: ...
60
+
61
+ def getAlgorithm(self) -> int: ...
62
+
63
+
64
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/__init__.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__ = ['op', 'kernel']
2
+
3
+ import sys
4
+ import cv2 as cv
5
+
6
# NB: Register function in specific module
def register(mname):
    """Decorator factory: publish the decorated callable in module *mname*.

    The target module must already be imported (looked up via sys.modules).
    The callable itself is returned unchanged, so it stays usable in its
    defining module as well.
    """
    def _publish(func):
        setattr(sys.modules[mname], func.__name__, func)
        return func
    return _publish
12
+
13
+
14
@register('cv2.gapi')
def networks(*args):
    """Bundle the given network params into a single cv.gapi_GNetPackage.

    Each argument is first unwrapped with cv.detail.strip.
    """
    stripped = [cv.detail.strip(net) for net in args]
    return cv.gapi_GNetPackage(stripped)
17
+
18
+
19
@register('cv2.gapi')
def compile_args(*args):
    """Wrap every argument in a cv.GCompileArg and return them as a list."""
    return [cv.GCompileArg(arg) for arg in args]
22
+
23
+
24
@register('cv2')
def GIn(*args):
    """Collect graph input protocol arguments into a plain list."""
    return list(args)
27
+
28
+
29
@register('cv2')
def GOut(*args):
    """Collect graph output protocol arguments into a plain list."""
    return list(args)
32
+
33
+
34
@register('cv2')
def gin(*args):
    """Collect runtime graph input values into a plain list."""
    return list(args)
37
+
38
+
39
@register('cv2.gapi')
def descr_of(*args):
    """Pack the given arguments into a plain list of metadata descriptors."""
    return list(args)
42
+
43
+
44
@register('cv2')
class GOpaque():
    # NB: Inheritance from c++ class cause segfault.
    # So just aggregate cv.GOpaqueT instead of inheritance
    def __new__(cls, argtype):
        return cv.GOpaqueT(argtype)

    # Typed shortcuts: each nested "constructor" class yields a cv.GOpaqueT
    # tagged with the matching cv.gapi.CV_* type id.
    class Bool():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_BOOL)

    class Int():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_INT)

    class Int64():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_INT64)

    class UInt64():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_UINT64)

    class Double():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_DOUBLE)

    class Float():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_FLOAT)

    class String():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_STRING)

    class Point():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_POINT)

    class Point2f():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_POINT2F)

    class Point3f():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_POINT3F)

    class Size():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_SIZE)

    class Rect():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_RECT)

    class Prim():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_DRAW_PRIM)

    class Any():
        def __new__(cls):
            return cv.GOpaqueT(cv.gapi.CV_ANY)
106
+
107
@register('cv2')
class GArray():
    # NB: Inheritance from c++ class cause segfault.
    # So just aggregate cv.GArrayT instead of inheritance
    def __new__(cls, argtype):
        return cv.GArrayT(argtype)

    # Typed shortcuts: each nested "constructor" class yields a cv.GArrayT
    # tagged with the matching cv.gapi.CV_* type id.
    class Bool():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_BOOL)

    class Int():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_INT)

    class Int64():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_INT64)

    class UInt64():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_UINT64)

    class Double():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_DOUBLE)

    class Float():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_FLOAT)

    class String():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_STRING)

    class Point():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_POINT)

    class Point2f():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_POINT2F)

    class Point3f():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_POINT3F)

    class Size():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_SIZE)

    class Rect():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_RECT)

    class Scalar():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_SCALAR)

    class Mat():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_MAT)

    class GMat():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_GMAT)

    # FIX: Prim and Any previously called cv.GArray(...), which only worked
    # indirectly because GArray.__new__ delegates to cv.GArrayT. Construct
    # cv.GArrayT directly, consistent with every other shortcut above
    # (behaviorally identical).
    class Prim():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_DRAW_PRIM)

    class Any():
        def __new__(self):
            return cv.GArrayT(cv.gapi.CV_ANY)
181
+
182
+
183
# NB: Top lvl decorator takes arguments
def op(op_id, in_types, out_types):
    """Class decorator declaring a G-API operation.

    Parameters:
        op_id: unique string identifier of the operation.
        in_types: expected input types, positionally matched against the
            arguments of ``on(...)`` (cv.GMat, cv.GScalar, cv.GArray.*,
            cv.GOpaque.* shortcuts).
        out_types: produced output types, same vocabulary.

    The decorated class gains two members:
        * ``id``  -- the operation identifier;
        * ``on``  -- a static method that validates its arguments against
          ``in_types`` and instantiates the operation in the graph.
    ``on`` raises Exception on any arity or type mismatch.
    """

    # Map from GArray.* shortcut classes to cv.gapi.CV_* runtime type tags.
    garray_types = {
        cv.GArray.Bool:    cv.gapi.CV_BOOL,
        cv.GArray.Int:     cv.gapi.CV_INT,
        cv.GArray.Int64:   cv.gapi.CV_INT64,
        cv.GArray.UInt64:  cv.gapi.CV_UINT64,
        cv.GArray.Double:  cv.gapi.CV_DOUBLE,
        cv.GArray.Float:   cv.gapi.CV_FLOAT,
        cv.GArray.String:  cv.gapi.CV_STRING,
        cv.GArray.Point:   cv.gapi.CV_POINT,
        cv.GArray.Point2f: cv.gapi.CV_POINT2F,
        cv.GArray.Point3f: cv.gapi.CV_POINT3F,
        cv.GArray.Size:    cv.gapi.CV_SIZE,
        cv.GArray.Rect:    cv.gapi.CV_RECT,
        cv.GArray.Scalar:  cv.gapi.CV_SCALAR,
        cv.GArray.Mat:     cv.gapi.CV_MAT,
        cv.GArray.GMat:    cv.gapi.CV_GMAT,
        cv.GArray.Prim:    cv.gapi.CV_DRAW_PRIM,
        cv.GArray.Any:     cv.gapi.CV_ANY
    }

    # Map from GOpaque.* shortcut classes to cv.gapi.CV_* runtime type tags.
    # FIX: the original literal listed Size and Rect twice; duplicate dict
    # keys silently collapse, so each key is written exactly once here.
    gopaque_types = {
        cv.GOpaque.Bool:    cv.gapi.CV_BOOL,
        cv.GOpaque.Int:     cv.gapi.CV_INT,
        cv.GOpaque.Int64:   cv.gapi.CV_INT64,
        cv.GOpaque.UInt64:  cv.gapi.CV_UINT64,
        cv.GOpaque.Double:  cv.gapi.CV_DOUBLE,
        cv.GOpaque.Float:   cv.gapi.CV_FLOAT,
        cv.GOpaque.String:  cv.gapi.CV_STRING,
        cv.GOpaque.Point:   cv.gapi.CV_POINT,
        cv.GOpaque.Point2f: cv.gapi.CV_POINT2F,
        cv.GOpaque.Point3f: cv.gapi.CV_POINT3F,
        cv.GOpaque.Size:    cv.gapi.CV_SIZE,
        cv.GOpaque.Rect:    cv.gapi.CV_RECT,
        cv.GOpaque.Prim:    cv.gapi.CV_DRAW_PRIM,
        cv.GOpaque.Any:     cv.gapi.CV_ANY
    }

    # Human-readable names used in error messages.
    # FIX: CV_ANY was missing, which made the mismatch diagnostics below
    # raise KeyError instead of the intended Exception whenever an
    # Any-typed container was involved.
    type2str = {
        cv.gapi.CV_BOOL:      'cv.gapi.CV_BOOL',
        cv.gapi.CV_INT:       'cv.gapi.CV_INT',
        cv.gapi.CV_INT64:     'cv.gapi.CV_INT64',
        cv.gapi.CV_UINT64:    'cv.gapi.CV_UINT64',
        cv.gapi.CV_DOUBLE:    'cv.gapi.CV_DOUBLE',
        cv.gapi.CV_FLOAT:     'cv.gapi.CV_FLOAT',
        cv.gapi.CV_STRING:    'cv.gapi.CV_STRING',
        cv.gapi.CV_POINT:     'cv.gapi.CV_POINT',
        cv.gapi.CV_POINT2F:   'cv.gapi.CV_POINT2F',
        cv.gapi.CV_POINT3F:   'cv.gapi.CV_POINT3F',
        cv.gapi.CV_SIZE:      'cv.gapi.CV_SIZE',
        cv.gapi.CV_RECT:      'cv.gapi.CV_RECT',
        cv.gapi.CV_SCALAR:    'cv.gapi.CV_SCALAR',
        cv.gapi.CV_MAT:       'cv.gapi.CV_MAT',
        cv.gapi.CV_GMAT:      'cv.gapi.CV_GMAT',
        cv.gapi.CV_DRAW_PRIM: 'cv.gapi.CV_DRAW_PRIM',
        cv.gapi.CV_ANY:       'cv.gapi.CV_ANY'
    }

    # NB: Second lvl decorator takes class to decorate
    def op_with_params(cls):
        if not in_types:
            raise Exception('{} operation should have at least one input!'.format(cls.__name__))

        if not out_types:
            raise Exception('{} operation should have at least one output!'.format(cls.__name__))

        for i, t in enumerate(out_types):
            if t not in [cv.GMat, cv.GScalar, *garray_types, *gopaque_types]:
                raise Exception('{} unsupported output type: {} in position: {}'
                                .format(cls.__name__, t.__name__, i))

        def on(*args):
            # Arity check first, then per-argument type validation.
            if len(in_types) != len(args):
                raise Exception('Invalid number of input elements!\nExpected: {}, Actual: {}'
                                .format(len(in_types), len(args)))

            for i, (t, a) in enumerate(zip(in_types, args)):
                if t in garray_types:
                    if not isinstance(a, cv.GArrayT):
                        raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, cv.GArrayT.__name__, type(a).__name__))

                    elif a.type() != garray_types[t]:
                        raise Exception("{} invalid GArrayT type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, type2str[garray_types[t]], type2str[a.type()]))

                elif t in gopaque_types:
                    if not isinstance(a, cv.GOpaqueT):
                        raise Exception("{} invalid type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, cv.GOpaqueT.__name__, type(a).__name__))

                    elif a.type() != gopaque_types[t]:
                        raise Exception("{} invalid GOpaque type for argument {}.\nExpected: {}, Actual: {}"
                                        .format(cls.__name__, i, type2str[gopaque_types[t]], type2str[a.type()]))

                else:
                    if t != type(a):
                        raise Exception('{} invalid input type for argument {}.\nExpected: {}, Actual: {}'
                                        .format(cls.__name__, i, t.__name__, type(a).__name__))

            # Instantiate the operation node in the graph.
            op_handle = cv.gapi.__op(op_id, cls.outMeta, *args)

            # Extract the output protocol objects in the declared order.
            out_protos = []
            for i, out_type in enumerate(out_types):
                if out_type == cv.GMat:
                    out_protos.append(op_handle.getGMat())
                elif out_type == cv.GScalar:
                    out_protos.append(op_handle.getGScalar())
                elif out_type in gopaque_types:
                    out_protos.append(op_handle.getGOpaque(gopaque_types[out_type]))
                elif out_type in garray_types:
                    out_protos.append(op_handle.getGArray(garray_types[out_type]))
                else:
                    raise Exception("""In {}: G-API operation can't produce the output with type: {} in position: {}"""
                                    .format(cls.__name__, out_type.__name__, i))

            # Single output is returned bare, multiple outputs as a tuple.
            return tuple(out_protos) if len(out_protos) != 1 else out_protos[0]

        # NB: Extend operation class
        cls.id = op_id
        cls.on = staticmethod(on)
        return cls

    return op_with_params
310
+
311
+
312
def kernel(op_cls):
    """Class decorator binding a kernel implementation to operation *op_cls*.

    Copies the operation's ``id`` and ``outMeta`` onto the implementation
    class so the G-API runtime can match kernel and operation.
    """
    # NB: Second lvl decorator takes class to decorate
    def _attach(impl_cls):
        # NB: Add new members to kernel class
        impl_cls.id = op_cls.id
        impl_cls.outMeta = op_cls.outMeta
        return impl_cls
    return _attach
321
+
322
+
323
# Publish the GStreamer pipeline source under its intended public name:
# cv.gapi.wip.GStreamerPipeline is an alias for the native binding class
# cv.gapi_wip_gst_GStreamerPipeline.
cv.gapi.wip.GStreamerPipeline = cv.gapi_wip_gst_GStreamerPipeline
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/__init__.pyi ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for the cv2.gapi module (G-API graph operations).
# FIX: the stub generator emitted two exact duplicate overloads
# (boundingRect, kmeans); the duplicates are removed below.
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


from cv2.gapi import core as core
from cv2.gapi import ie as ie
from cv2.gapi import imgproc as imgproc
from cv2.gapi import oak as oak
from cv2.gapi import onnx as onnx
from cv2.gapi import ot as ot
from cv2.gapi import ov as ov
from cv2.gapi import own as own
from cv2.gapi import render as render
from cv2.gapi import streaming as streaming
from cv2.gapi import video as video
from cv2.gapi import wip as wip


# Enumerations
StereoOutputFormat_DEPTH_FLOAT16: int
STEREO_OUTPUT_FORMAT_DEPTH_FLOAT16: int
StereoOutputFormat_DEPTH_FLOAT32: int
STEREO_OUTPUT_FORMAT_DEPTH_FLOAT32: int
StereoOutputFormat_DISPARITY_FIXED16_11_5: int
STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_11_5: int
StereoOutputFormat_DISPARITY_FIXED16_12_4: int
STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_12_4: int
StereoOutputFormat_DEPTH_16F: int
STEREO_OUTPUT_FORMAT_DEPTH_16F: int
StereoOutputFormat_DEPTH_32F: int
STEREO_OUTPUT_FORMAT_DEPTH_32F: int
StereoOutputFormat_DISPARITY_16Q_10_5: int
STEREO_OUTPUT_FORMAT_DISPARITY_16Q_10_5: int
StereoOutputFormat_DISPARITY_16Q_11_4: int
STEREO_OUTPUT_FORMAT_DISPARITY_16Q_11_4: int
StereoOutputFormat = int
"""One of [StereoOutputFormat_DEPTH_FLOAT16, STEREO_OUTPUT_FORMAT_DEPTH_FLOAT16, StereoOutputFormat_DEPTH_FLOAT32, STEREO_OUTPUT_FORMAT_DEPTH_FLOAT32, StereoOutputFormat_DISPARITY_FIXED16_11_5, STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_11_5, StereoOutputFormat_DISPARITY_FIXED16_12_4, STEREO_OUTPUT_FORMAT_DISPARITY_FIXED16_12_4, StereoOutputFormat_DEPTH_16F, STEREO_OUTPUT_FORMAT_DEPTH_16F, StereoOutputFormat_DEPTH_32F, STEREO_OUTPUT_FORMAT_DEPTH_32F, StereoOutputFormat_DISPARITY_16Q_10_5, STEREO_OUTPUT_FORMAT_DISPARITY_16Q_10_5, StereoOutputFormat_DISPARITY_16Q_11_4, STEREO_OUTPUT_FORMAT_DISPARITY_16Q_11_4]"""

CV_BOOL: int
CV_INT: int
CV_INT64: int
CV_UINT64: int
CV_DOUBLE: int
CV_FLOAT: int
CV_STRING: int
CV_POINT: int
CV_POINT2F: int
CV_POINT3F: int
CV_SIZE: int
CV_RECT: int
CV_SCALAR: int
CV_MAT: int
CV_GMAT: int
CV_DRAW_PRIM: int
CV_ANY: int
ArgType = int
"""One of [CV_BOOL, CV_INT, CV_INT64, CV_UINT64, CV_DOUBLE, CV_FLOAT, CV_STRING, CV_POINT, CV_POINT2F, CV_POINT3F, CV_SIZE, CV_RECT, CV_SCALAR, CV_MAT, CV_GMAT, CV_DRAW_PRIM, CV_ANY]"""



# Classes
class GNetParam:
    ...

class GNetPackage:
    # Functions
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, nets: _typing.Sequence[GNetParam]) -> None: ...



# Functions
def BGR2Gray(src: cv2.GMat) -> cv2.GMat: ...

def BGR2I420(src: cv2.GMat) -> cv2.GMat: ...

def BGR2LUV(src: cv2.GMat) -> cv2.GMat: ...

def BGR2RGB(src: cv2.GMat) -> cv2.GMat: ...

def BGR2YUV(src: cv2.GMat) -> cv2.GMat: ...

def BayerGR2RGB(src_gr: cv2.GMat) -> cv2.GMat: ...

def Canny(image: cv2.GMat, threshold1: float, threshold2: float, apertureSize: int = ..., L2gradient: bool = ...) -> cv2.GMat: ...

def I4202BGR(src: cv2.GMat) -> cv2.GMat: ...

def I4202RGB(src: cv2.GMat) -> cv2.GMat: ...

def LUT(src: cv2.GMat, lut: cv2.typing.MatLike) -> cv2.GMat: ...

def LUV2BGR(src: cv2.GMat) -> cv2.GMat: ...

def Laplacian(src: cv2.GMat, ddepth: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> cv2.GMat: ...

def NV12toBGR(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ...

def NV12toGray(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ...

def NV12toRGB(src_y: cv2.GMat, src_uv: cv2.GMat) -> cv2.GMat: ...

@_typing.overload
def RGB2Gray(src: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def RGB2Gray(src: cv2.GMat, rY: float, gY: float, bY: float) -> cv2.GMat: ...

def RGB2HSV(src: cv2.GMat) -> cv2.GMat: ...

def RGB2I420(src: cv2.GMat) -> cv2.GMat: ...

def RGB2Lab(src: cv2.GMat) -> cv2.GMat: ...

def RGB2YUV(src: cv2.GMat) -> cv2.GMat: ...

def RGB2YUV422(src: cv2.GMat) -> cv2.GMat: ...

def Sobel(src: cv2.GMat, ddepth: int, dx: int, dy: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def SobelXY(src: cv2.GMat, ddepth: int, order: int, ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

def YUV2BGR(src: cv2.GMat) -> cv2.GMat: ...

def YUV2RGB(src: cv2.GMat) -> cv2.GMat: ...

def absDiff(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...

def absDiffC(src: cv2.GMat, c: cv2.GScalar) -> cv2.GMat: ...

def add(src1: cv2.GMat, src2: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

@_typing.overload
def addC(src1: cv2.GMat, c: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ...
@_typing.overload
def addC(c: cv2.GScalar, src1: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def addWeighted(src1: cv2.GMat, alpha: float, src2: cv2.GMat, beta: float, gamma: float, ddepth: int = ...) -> cv2.GMat: ...

def bilateralFilter(src: cv2.GMat, d: int, sigmaColor: float, sigmaSpace: float, borderType: int = ...) -> cv2.GMat: ...

@_typing.overload
def bitwise_and(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def bitwise_and(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

def bitwise_not(src: cv2.GMat) -> cv2.GMat: ...

@_typing.overload
def bitwise_or(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def bitwise_or(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@_typing.overload
def bitwise_xor(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def bitwise_xor(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

def blur(src: cv2.GMat, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

# NOTE: the generator emitted the GArrayT overload twice; deduplicated.
@_typing.overload
def boundingRect(src: cv2.GMat) -> cv2.GOpaqueT: ...
@_typing.overload
def boundingRect(src: cv2.GArrayT) -> cv2.GOpaqueT: ...

def boxFilter(src: cv2.GMat, dtype: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ..., normalize: bool = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def cartToPolar(x: cv2.GMat, y: cv2.GMat, angleInDegrees: bool = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

@_typing.overload
def cmpEQ(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def cmpEQ(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@_typing.overload
def cmpGE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def cmpGE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@_typing.overload
def cmpGT(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def cmpGT(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@_typing.overload
def cmpLE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def cmpLE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@_typing.overload
def cmpLT(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def cmpLT(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

@_typing.overload
def cmpNE(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def cmpNE(src1: cv2.GMat, src2: cv2.GScalar) -> cv2.GMat: ...

def combine(lhs: cv2.GKernelPackage, rhs: cv2.GKernelPackage) -> cv2.GKernelPackage: ...

@_typing.overload
def concatHor(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def concatHor(v: _typing.Sequence[cv2.GMat]) -> cv2.GMat: ...

@_typing.overload
def concatVert(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...
@_typing.overload
def concatVert(v: _typing.Sequence[cv2.GMat]) -> cv2.GMat: ...

def convertTo(src: cv2.GMat, rdepth: int, alpha: float = ..., beta: float = ...) -> cv2.GMat: ...

def copy(in_: cv2.GMat) -> cv2.GMat: ...

def countNonZero(src: cv2.GMat) -> cv2.GOpaqueT: ...

def crop(src: cv2.GMat, rect: cv2.typing.Rect) -> cv2.GMat: ...

def dilate(src: cv2.GMat, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def dilate3x3(src: cv2.GMat, iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def div(src1: cv2.GMat, src2: cv2.GMat, scale: float, ddepth: int = ...) -> cv2.GMat: ...

def divC(src: cv2.GMat, divisor: cv2.GScalar, scale: float, ddepth: int = ...) -> cv2.GMat: ...

def divRC(divident: cv2.GScalar, src: cv2.GMat, scale: float, ddepth: int = ...) -> cv2.GMat: ...

def equalizeHist(src: cv2.GMat) -> cv2.GMat: ...

def erode(src: cv2.GMat, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def erode3x3(src: cv2.GMat, iterations: int = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def filter2D(src: cv2.GMat, ddepth: int, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., delta: cv2.typing.Scalar = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def flip(src: cv2.GMat, flipCode: int) -> cv2.GMat: ...

def gaussianBlur(src: cv2.GMat, ksize: cv2.typing.Size, sigmaX: float, sigmaY: float = ..., borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def goodFeaturesToTrack(image: cv2.GMat, maxCorners: int, qualityLevel: float, minDistance: float, mask: cv2.typing.MatLike | None = ..., blockSize: int = ..., useHarrisDetector: bool = ..., k: float = ...) -> cv2.GArrayT: ...

def inRange(src: cv2.GMat, threshLow: cv2.GScalar, threshUp: cv2.GScalar) -> cv2.GMat: ...

@_typing.overload
def infer(name: str, inputs: cv2.GInferInputs) -> cv2.GInferOutputs: ...
@_typing.overload
def infer(name: str, roi: cv2.GOpaqueT, inputs: cv2.GInferInputs) -> cv2.GInferOutputs: ...
@_typing.overload
def infer(name: str, rois: cv2.GArrayT, inputs: cv2.GInferInputs) -> cv2.GInferListOutputs: ...

def infer2(name: str, in_: cv2.GMat, inputs: cv2.GInferListInputs) -> cv2.GInferListOutputs: ...

def integral(src: cv2.GMat, sdepth: int = ..., sqdepth: int = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

# NOTE: the generator emitted the last GArrayT overload twice; deduplicated.
@_typing.overload
def kmeans(data: cv2.GMat, K: int, bestLabels: cv2.GMat, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GMat, cv2.GMat]: ...
@_typing.overload
def kmeans(data: cv2.GMat, K: int, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GMat, cv2.GMat]: ...
@_typing.overload
def kmeans(data: cv2.GArrayT, K: int, bestLabels: cv2.GArrayT, criteria: cv2.typing.TermCriteria, attempts: int, flags: cv2.KmeansFlags) -> tuple[cv2.GOpaqueT, cv2.GArrayT, cv2.GArrayT]: ...

def mask(src: cv2.GMat, mask: cv2.GMat) -> cv2.GMat: ...

def max(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...

def mean(src: cv2.GMat) -> cv2.GScalar: ...

def medianBlur(src: cv2.GMat, ksize: int) -> cv2.GMat: ...

def merge3(src1: cv2.GMat, src2: cv2.GMat, src3: cv2.GMat) -> cv2.GMat: ...

def merge4(src1: cv2.GMat, src2: cv2.GMat, src3: cv2.GMat, src4: cv2.GMat) -> cv2.GMat: ...

def min(src1: cv2.GMat, src2: cv2.GMat) -> cv2.GMat: ...

def morphologyEx(src: cv2.GMat, op: cv2.MorphTypes, kernel: cv2.typing.MatLike, anchor: cv2.typing.Point = ..., iterations: int = ..., borderType: cv2.BorderTypes = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def mul(src1: cv2.GMat, src2: cv2.GMat, scale: float = ..., ddepth: int = ...) -> cv2.GMat: ...

@_typing.overload
def mulC(src: cv2.GMat, multiplier: float, ddepth: int = ...) -> cv2.GMat: ...
@_typing.overload
def mulC(src: cv2.GMat, multiplier: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ...
@_typing.overload
def mulC(multiplier: cv2.GScalar, src: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def normInf(src: cv2.GMat) -> cv2.GScalar: ...

def normL1(src: cv2.GMat) -> cv2.GScalar: ...

def normL2(src: cv2.GMat) -> cv2.GScalar: ...

def normalize(src: cv2.GMat, alpha: float, beta: float, norm_type: int, ddepth: int = ...) -> cv2.GMat: ...

@_typing.overload
def parseSSD(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float = ..., filterLabel: int = ...) -> tuple[cv2.GArrayT, cv2.GArrayT]: ...
@_typing.overload
def parseSSD(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float, alignmentToSquare: bool, filterOutOfBounds: bool) -> cv2.GArrayT: ...

def parseYolo(in_: cv2.GMat, inSz: cv2.GOpaqueT, confidenceThreshold: float = ..., nmsThreshold: float = ..., anchors: _typing.Sequence[float] = ...) -> tuple[cv2.GArrayT, cv2.GArrayT]: ...

def phase(x: cv2.GMat, y: cv2.GMat, angleInDegrees: bool = ...) -> cv2.GMat: ...

def polarToCart(magnitude: cv2.GMat, angle: cv2.GMat, angleInDegrees: bool = ...) -> tuple[cv2.GMat, cv2.GMat]: ...

def remap(src: cv2.GMat, map1: cv2.typing.MatLike, map2: cv2.typing.MatLike, interpolation: int, borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def resize(src: cv2.GMat, dsize: cv2.typing.Size, fx: float = ..., fy: float = ..., interpolation: int = ...) -> cv2.GMat: ...

def select(src1: cv2.GMat, src2: cv2.GMat, mask: cv2.GMat) -> cv2.GMat: ...

def sepFilter(src: cv2.GMat, ddepth: int, kernelX: cv2.typing.MatLike, kernelY: cv2.typing.MatLike, anchor: cv2.typing.Point, delta: cv2.typing.Scalar, borderType: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def split3(src: cv2.GMat) -> tuple[cv2.GMat, cv2.GMat, cv2.GMat]: ...

def split4(src: cv2.GMat) -> tuple[cv2.GMat, cv2.GMat, cv2.GMat, cv2.GMat]: ...

def sqrt(src: cv2.GMat) -> cv2.GMat: ...

def sub(src1: cv2.GMat, src2: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def subC(src: cv2.GMat, c: cv2.GScalar, ddepth: int = ...) -> cv2.GMat: ...

def subRC(c: cv2.GScalar, src: cv2.GMat, ddepth: int = ...) -> cv2.GMat: ...

def sum(src: cv2.GMat) -> cv2.GScalar: ...

@_typing.overload
def threshold(src: cv2.GMat, thresh: cv2.GScalar, maxval: cv2.GScalar, type: int) -> cv2.GMat: ...
@_typing.overload
def threshold(src: cv2.GMat, maxval: cv2.GScalar, type: int) -> tuple[cv2.GMat, cv2.GScalar]: ...

def transpose(src: cv2.GMat) -> cv2.GMat: ...

def warpAffine(src: cv2.GMat, M: cv2.typing.MatLike, dsize: cv2.typing.Size, flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...

def warpPerspective(src: cv2.GMat, M: cv2.typing.MatLike, dsize: cv2.typing.Size, flags: int = ..., borderMode: int = ..., borderValue: cv2.typing.Scalar = ...) -> cv2.GMat: ...
348
+
349
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/__init__.pyi ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ from cv2.gapi.core import cpu as cpu
4
+ from cv2.gapi.core import fluid as fluid
5
+ from cv2.gapi.core import ocl as ocl
6
+
7
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/cpu/__init__.pyi ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for cv2.gapi.core.cpu.
__all__: list[str] = []

import cv2


# Functions
# Kernel package of the CPU (OpenCV) backend implementations of core operations.
def kernels() -> cv2.GKernelPackage: ...
8
+
9
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/fluid/__init__.pyi ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for cv2.gapi.core.fluid.
__all__: list[str] = []

import cv2


# Functions
# Kernel package of the Fluid backend implementations of core operations.
def kernels() -> cv2.GKernelPackage: ...
8
+
9
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/core/ocl/__init__.pyi ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for cv2.gapi.core.ocl.
__all__: list[str] = []

import cv2


# Functions
# Kernel package of the OpenCL backend implementations of core operations.
def kernels() -> cv2.GKernelPackage: ...
8
+
9
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ie/__init__.pyi ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for cv2.gapi.ie (Inference Engine backend).
__all__: list[str] = []

import cv2.typing
import typing as _typing


from cv2.gapi.ie import detail as detail


# Enumerations
TraitAs_TENSOR: int
TRAIT_AS_TENSOR: int
TraitAs_IMAGE: int
TRAIT_AS_IMAGE: int
TraitAs = int
"""One of [TraitAs_TENSOR, TRAIT_AS_TENSOR, TraitAs_IMAGE, TRAIT_AS_IMAGE]"""

Sync: int
SYNC: int
Async: int
ASYNC: int
InferMode = int
"""One of [Sync, SYNC, Async, ASYNC]"""



# Classes
class PyParams:
    # Per-network parameter holder; every cfg* method returns PyParams,
    # so configuration calls can be chained.
    # Functions
    @_typing.overload
    def __init__(self) -> None: ...
    @_typing.overload
    def __init__(self, tag: str, model: str, weights: str, device: str) -> None: ...
    @_typing.overload
    def __init__(self, tag: str, model: str, device: str) -> None: ...

    def constInput(self, layer_name: str, data: cv2.typing.MatLike, hint: TraitAs = ...) -> PyParams: ...

    def cfgNumRequests(self, nireq: int) -> PyParams: ...

    def cfgBatchSize(self, size: int) -> PyParams: ...



# Functions
# Factory shortcuts producing a PyParams for a tagged network.
@_typing.overload
def params(tag: str, model: str, weights: str, device: str) -> PyParams: ...
@_typing.overload
def params(tag: str, model: str, device: str) -> PyParams: ...
50
+
51
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ie/detail/__init__.pyi ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for cv2.gapi.ie.detail.
__all__: list[str] = []

ParamDesc_Kind_Load: int
PARAM_DESC_KIND_LOAD: int
ParamDesc_Kind_Import: int
PARAM_DESC_KIND_IMPORT: int
# Integer alias; stands for one of the ParamDesc_Kind_* constants above.
ParamDesc_Kind = int
"""One of [ParamDesc_Kind_Load, PARAM_DESC_KIND_LOAD, ParamDesc_Kind_Import, PARAM_DESC_KIND_IMPORT]"""


# Classes
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/imgproc/__init__.pyi ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ from cv2.gapi.imgproc import fluid as fluid
4
+
5
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/imgproc/fluid/__init__.pyi ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for cv2.gapi.imgproc.fluid.
__all__: list[str] = []

import cv2


# Functions
# Kernel package of the Fluid backend implementations of imgproc operations.
def kernels() -> cv2.GKernelPackage: ...
8
+
9
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/oak/__init__.pyi ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Auto-generated type stubs for cv2.gapi.oak (OAK camera backend constants).
__all__: list[str] = []

EncoderConfig_RateControlMode_CBR: int
ENCODER_CONFIG_RATE_CONTROL_MODE_CBR: int
EncoderConfig_RateControlMode_VBR: int
ENCODER_CONFIG_RATE_CONTROL_MODE_VBR: int
# Integer alias; stands for one of the rate-control-mode constants above.
EncoderConfig_RateControlMode = int
"""One of [EncoderConfig_RateControlMode_CBR, ENCODER_CONFIG_RATE_CONTROL_MODE_CBR, EncoderConfig_RateControlMode_VBR, ENCODER_CONFIG_RATE_CONTROL_MODE_VBR]"""

EncoderConfig_Profile_H264_BASELINE: int
ENCODER_CONFIG_PROFILE_H264_BASELINE: int
EncoderConfig_Profile_H264_HIGH: int
ENCODER_CONFIG_PROFILE_H264_HIGH: int
EncoderConfig_Profile_H264_MAIN: int
ENCODER_CONFIG_PROFILE_H264_MAIN: int
EncoderConfig_Profile_H265_MAIN: int
ENCODER_CONFIG_PROFILE_H265_MAIN: int
EncoderConfig_Profile_MJPEG: int
ENCODER_CONFIG_PROFILE_MJPEG: int
# Integer alias; stands for one of the encoder-profile constants above.
EncoderConfig_Profile = int
"""One of [EncoderConfig_Profile_H264_BASELINE, ENCODER_CONFIG_PROFILE_H264_BASELINE, EncoderConfig_Profile_H264_HIGH, ENCODER_CONFIG_PROFILE_H264_HIGH, EncoderConfig_Profile_H264_MAIN, ENCODER_CONFIG_PROFILE_H264_MAIN, EncoderConfig_Profile_H265_MAIN, ENCODER_CONFIG_PROFILE_H265_MAIN, EncoderConfig_Profile_MJPEG, ENCODER_CONFIG_PROFILE_MJPEG]"""

ColorCameraParams_BoardSocket_RGB: int
COLOR_CAMERA_PARAMS_BOARD_SOCKET_RGB: int
ColorCameraParams_BoardSocket_BGR: int
COLOR_CAMERA_PARAMS_BOARD_SOCKET_BGR: int
# Integer alias; stands for one of the board-socket constants above.
ColorCameraParams_BoardSocket = int
"""One of [ColorCameraParams_BoardSocket_RGB, COLOR_CAMERA_PARAMS_BOARD_SOCKET_RGB, ColorCameraParams_BoardSocket_BGR, COLOR_CAMERA_PARAMS_BOARD_SOCKET_BGR]"""

ColorCameraParams_Resolution_THE_1080_P: int
COLOR_CAMERA_PARAMS_RESOLUTION_THE_1080_P: int
# Integer alias; stands for one of the resolution constants above.
ColorCameraParams_Resolution = int
"""One of [ColorCameraParams_Resolution_THE_1080_P, COLOR_CAMERA_PARAMS_RESOLUTION_THE_1080_P]"""


# Classes
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/onnx/__init__.pyi ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2.gapi.onnx.ep
4
+ import cv2.typing
5
+ import typing as _typing
6
+
7
+
8
+ from cv2.gapi.onnx import ep as ep
9
+
10
+
11
+ # Enumerations
12
+ TraitAs_TENSOR: int
13
+ TRAIT_AS_TENSOR: int
14
+ TraitAs_IMAGE: int
15
+ TRAIT_AS_IMAGE: int
16
+ TraitAs = int
17
+ """One of [TraitAs_TENSOR, TRAIT_AS_TENSOR, TraitAs_IMAGE, TRAIT_AS_IMAGE]"""
18
+
19
+
20
+
21
+ # Classes
22
+ class PyParams:
23
+ # Functions
24
+ @_typing.overload
25
+ def __init__(self) -> None: ...
26
+ @_typing.overload
27
+ def __init__(self, tag: str, model_path: str) -> None: ...
28
+
29
+ def cfgMeanStd(self, layer_name: str, m: cv2.typing.Scalar, s: cv2.typing.Scalar) -> PyParams: ...
30
+
31
+ def cfgNormalize(self, layer_name: str, flag: bool) -> PyParams: ...
32
+
33
+ @_typing.overload
34
+ def cfgAddExecutionProvider(self, ep: cv2.gapi.onnx.ep.OpenVINO) -> PyParams: ...
35
+ @_typing.overload
36
+ def cfgAddExecutionProvider(self, ep: cv2.gapi.onnx.ep.DirectML) -> PyParams: ...
37
+ @_typing.overload
38
+ def cfgAddExecutionProvider(self, ep: cv2.gapi.onnx.ep.CoreML) -> PyParams: ...
39
+ @_typing.overload
40
+ def cfgAddExecutionProvider(self, ep: cv2.gapi.onnx.ep.CUDA) -> PyParams: ...
41
+ @_typing.overload
42
+ def cfgAddExecutionProvider(self, ep: cv2.gapi.onnx.ep.TensorRT) -> PyParams: ...
43
+
44
+ def cfgDisableMemPattern(self) -> PyParams: ...
45
+
46
+
47
+
48
+ # Functions
49
+ def params(tag: str, model_path: str) -> PyParams: ...
50
+
51
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/onnx/ep/__init__.pyi ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2.typing
4
+ import typing as _typing
5
+
6
+
7
+ # Classes
8
+ class CoreML:
9
+ # Functions
10
+ def __init__(self) -> None: ...
11
+
12
+ def cfgUseCPUOnly(self) -> CoreML: ...
13
+
14
+ def cfgEnableOnSubgraph(self) -> CoreML: ...
15
+
16
+ def cfgEnableOnlyNeuralEngine(self) -> CoreML: ...
17
+
18
+
19
+ class CUDA:
20
+ # Functions
21
+ @_typing.overload
22
+ def __init__(self) -> None: ...
23
+ @_typing.overload
24
+ def __init__(self, dev_id: int) -> None: ...
25
+
26
+
27
+ class TensorRT:
28
+ # Functions
29
+ @_typing.overload
30
+ def __init__(self) -> None: ...
31
+ @_typing.overload
32
+ def __init__(self, dev_id: int) -> None: ...
33
+
34
+
35
+ class OpenVINO:
36
+ # Functions
37
+ @_typing.overload
38
+ def __init__(self) -> None: ...
39
+ @_typing.overload
40
+ def __init__(self, dev_type: str) -> None: ...
41
+ @_typing.overload
42
+ def __init__(self, params: cv2.typing.map_string_and_string) -> None: ...
43
+
44
+ def cfgCacheDir(self, dir: str) -> OpenVINO: ...
45
+
46
+ def cfgNumThreads(self, nthreads: int) -> OpenVINO: ...
47
+
48
+ def cfgEnableOpenCLThrottling(self) -> OpenVINO: ...
49
+
50
+ def cfgEnableDynamicShapes(self) -> OpenVINO: ...
51
+
52
+
53
+ class DirectML:
54
+ # Functions
55
+ @_typing.overload
56
+ def __init__(self) -> None: ...
57
+ @_typing.overload
58
+ def __init__(self, device_id: int) -> None: ...
59
+ @_typing.overload
60
+ def __init__(self, adapter_name: str) -> None: ...
61
+
62
+
63
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ot/__init__.pyi ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import typing as _typing
5
+
6
+
7
+ from cv2.gapi.ot import cpu as cpu
8
+
9
+
10
+ # Enumerations
11
+ NEW: int
12
+ TRACKED: int
13
+ LOST: int
14
+ TrackingStatus = int
15
+ """One of [NEW, TRACKED, LOST]"""
16
+
17
+
18
+
19
+ # Classes
20
+ class ObjectTrackerParams:
21
+ max_num_objects: int
22
+ input_image_format: int
23
+ tracking_per_class: bool
24
+
25
+
26
+ # Functions
27
+ @_typing.overload
28
+ def track(mat: cv2.GMat, detected_rects: cv2.GArrayT, detected_class_labels: cv2.GArrayT, delta: float) -> tuple[cv2.GArrayT, cv2.GArrayT, cv2.GArrayT, cv2.GArrayT]: ...
29
+ @_typing.overload
30
+ def track(frame: cv2.GFrame, detected_rects: cv2.GArrayT, detected_class_labels: cv2.GArrayT, delta: float) -> tuple[cv2.GArrayT, cv2.GArrayT, cv2.GArrayT, cv2.GArrayT]: ...
31
+
32
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ot/cpu/__init__.pyi ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+
5
+
6
+ # Functions
7
+ def kernels() -> cv2.GKernelPackage: ...
8
+
9
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/ov/__init__.pyi ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2.typing
4
+ import typing as _typing
5
+
6
+
7
+ # Classes
8
+ class PyParams:
9
+ # Functions
10
+ @_typing.overload
11
+ def __init__(self) -> None: ...
12
+ @_typing.overload
13
+ def __init__(self, tag: str, model_path: str, bin_path: str, device: str) -> None: ...
14
+ @_typing.overload
15
+ def __init__(self, tag: str, blob_path: str, device: str) -> None: ...
16
+
17
+ def cfgPluginConfig(self, config: cv2.typing.map_string_and_string) -> PyParams: ...
18
+
19
+ @_typing.overload
20
+ def cfgInputTensorLayout(self, tensor_layout: str) -> PyParams: ...
21
+ @_typing.overload
22
+ def cfgInputTensorLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
23
+
24
+ @_typing.overload
25
+ def cfgInputModelLayout(self, tensor_layout: str) -> PyParams: ...
26
+ @_typing.overload
27
+ def cfgInputModelLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
28
+
29
+ @_typing.overload
30
+ def cfgOutputTensorLayout(self, tensor_layout: str) -> PyParams: ...
31
+ @_typing.overload
32
+ def cfgOutputTensorLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
33
+
34
+ @_typing.overload
35
+ def cfgOutputModelLayout(self, tensor_layout: str) -> PyParams: ...
36
+ @_typing.overload
37
+ def cfgOutputModelLayout(self, layout_map: cv2.typing.map_string_and_string) -> PyParams: ...
38
+
39
+ @_typing.overload
40
+ def cfgOutputTensorPrecision(self, precision: int) -> PyParams: ...
41
+ @_typing.overload
42
+ def cfgOutputTensorPrecision(self, precision_map: cv2.typing.map_string_and_int) -> PyParams: ...
43
+
44
+ @_typing.overload
45
+ def cfgReshape(self, new_shape: _typing.Sequence[int]) -> PyParams: ...
46
+ @_typing.overload
47
+ def cfgReshape(self, new_shape_map: cv2.typing.map_string_and_vector_size_t) -> PyParams: ...
48
+
49
+ def cfgNumRequests(self, nireq: int) -> PyParams: ...
50
+
51
+ @_typing.overload
52
+ def cfgMean(self, mean_values: _typing.Sequence[float]) -> PyParams: ...
53
+ @_typing.overload
54
+ def cfgMean(self, mean_map: cv2.typing.map_string_and_vector_float) -> PyParams: ...
55
+
56
+ @_typing.overload
57
+ def cfgScale(self, scale_values: _typing.Sequence[float]) -> PyParams: ...
58
+ @_typing.overload
59
+ def cfgScale(self, scale_map: cv2.typing.map_string_and_vector_float) -> PyParams: ...
60
+
61
+ @_typing.overload
62
+ def cfgResize(self, interpolation: int) -> PyParams: ...
63
+ @_typing.overload
64
+ def cfgResize(self, interpolation: cv2.typing.map_string_and_int) -> PyParams: ...
65
+
66
+
67
+
68
+ # Functions
69
+ @_typing.overload
70
+ def params(tag: str, model_path: str, weights: str, device: str) -> PyParams: ...
71
+ @_typing.overload
72
+ def params(tag: str, bin_path: str, device: str) -> PyParams: ...
73
+
74
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/own/__init__.pyi ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ from cv2.gapi.own import detail as detail
4
+
5
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/own/detail/__init__.pyi ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ MatHeader_AUTO_STEP: int
4
+ MAT_HEADER_AUTO_STEP: int
5
+ MatHeader_TYPE_MASK: int
6
+ MAT_HEADER_TYPE_MASK: int
7
+
8
+
9
+ # Classes
10
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/render/__init__.pyi ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ from cv2.gapi.render import ocv as ocv
4
+
5
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/render/ocv/__init__.pyi ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+
5
+
6
+ # Functions
7
+ def kernels() -> cv2.GKernelPackage: ...
8
+
9
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/streaming/__init__.pyi ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import typing as _typing
5
+
6
+
7
+ # Enumerations
8
+ sync_policy_dont_sync: int
9
+ SYNC_POLICY_DONT_SYNC: int
10
+ sync_policy_drop: int
11
+ SYNC_POLICY_DROP: int
12
+ sync_policy = int
13
+ """One of [sync_policy_dont_sync, SYNC_POLICY_DONT_SYNC, sync_policy_drop, SYNC_POLICY_DROP]"""
14
+
15
+
16
+
17
+ # Classes
18
+ class queue_capacity:
19
+ capacity: int
20
+
21
+ # Functions
22
+ def __init__(self, cap: int = ...) -> None: ...
23
+
24
+
25
+
26
+ # Functions
27
+ def desync(g: cv2.GMat) -> cv2.GMat: ...
28
+
29
+ def seqNo(arg1: cv2.GMat) -> cv2.GOpaqueT: ...
30
+
31
+ def seq_id(arg1: cv2.GMat) -> cv2.GOpaqueT: ...
32
+
33
+ @_typing.overload
34
+ def size(src: cv2.GMat) -> cv2.GOpaqueT: ...
35
+ @_typing.overload
36
+ def size(r: cv2.GOpaqueT) -> cv2.GOpaqueT: ...
37
+ @_typing.overload
38
+ def size(src: cv2.GFrame) -> cv2.GOpaqueT: ...
39
+
40
+ def timestamp(arg1: cv2.GMat) -> cv2.GOpaqueT: ...
41
+
42
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/video/__init__.pyi ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ # Enumerations
4
+ TYPE_BS_MOG2: int
5
+ TYPE_BS_KNN: int
6
+ BackgroundSubtractorType = int
7
+ """One of [TYPE_BS_MOG2, TYPE_BS_KNN]"""
8
+
9
+
10
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/__init__.pyi ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import cv2.gapi
5
+ import cv2.gapi.wip.gst
6
+ import cv2.typing
7
+ import typing as _typing
8
+
9
+
10
+ from cv2.gapi.wip import draw as draw
11
+ from cv2.gapi.wip import gst as gst
12
+ from cv2.gapi.wip import onevpl as onevpl
13
+
14
+
15
+ # Classes
16
+ class GOutputs:
17
+ # Functions
18
+ def getGMat(self) -> cv2.GMat: ...
19
+
20
+ def getGScalar(self) -> cv2.GScalar: ...
21
+
22
+ def getGArray(self, type: cv2.gapi.ArgType) -> cv2.GArrayT: ...
23
+
24
+ def getGOpaque(self, type: cv2.gapi.ArgType) -> cv2.GOpaqueT: ...
25
+
26
+
27
+ class IStreamSource:
28
+ ...
29
+
30
+
31
+ # Functions
32
+ def get_streaming_source(pipeline: cv2.gapi.wip.gst.GStreamerPipeline, appsinkName: str, outputType: cv2.gapi.wip.gst.GStreamerSource_OutputType = ...) -> IStreamSource: ...
33
+
34
+ @_typing.overload
35
+ def make_capture_src(path: str, properties: cv2.typing.map_int_and_double = ...) -> IStreamSource: ...
36
+ @_typing.overload
37
+ def make_capture_src(id: int, properties: cv2.typing.map_int_and_double = ...) -> IStreamSource: ...
38
+
39
+ def make_gst_src(pipeline: str, outputType: cv2.gapi.wip.gst.GStreamerSource_OutputType = ...) -> IStreamSource: ...
40
+
41
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/draw/__init__.pyi ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import cv2.typing
5
+ import typing as _typing
6
+
7
+
8
+ # Classes
9
+ class Text:
10
+ text: str
11
+ org: cv2.typing.Point
12
+ ff: int
13
+ fs: float
14
+ color: cv2.typing.Scalar
15
+ thick: int
16
+ lt: int
17
+ bottom_left_origin: bool
18
+
19
+ # Functions
20
+ @_typing.overload
21
+ def __init__(self, text_: str, org_: cv2.typing.Point, ff_: int, fs_: float, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., bottom_left_origin_: bool = ...) -> None: ...
22
+ @_typing.overload
23
+ def __init__(self) -> None: ...
24
+
25
+
26
+ class Rect:
27
+ rect: cv2.typing.Rect
28
+ color: cv2.typing.Scalar
29
+ thick: int
30
+ lt: int
31
+ shift: int
32
+
33
+ # Functions
34
+ @_typing.overload
35
+ def __init__(self) -> None: ...
36
+ @_typing.overload
37
+ def __init__(self, rect_: cv2.typing.Rect2i, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...
38
+
39
+
40
+ class Circle:
41
+ center: cv2.typing.Point
42
+ radius: int
43
+ color: cv2.typing.Scalar
44
+ thick: int
45
+ lt: int
46
+ shift: int
47
+
48
+ # Functions
49
+ @_typing.overload
50
+ def __init__(self, center_: cv2.typing.Point, radius_: int, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...
51
+ @_typing.overload
52
+ def __init__(self) -> None: ...
53
+
54
+
55
+ class Line:
56
+ pt1: cv2.typing.Point
57
+ pt2: cv2.typing.Point
58
+ color: cv2.typing.Scalar
59
+ thick: int
60
+ lt: int
61
+ shift: int
62
+
63
+ # Functions
64
+ @_typing.overload
65
+ def __init__(self, pt1_: cv2.typing.Point, pt2_: cv2.typing.Point, color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...
66
+ @_typing.overload
67
+ def __init__(self) -> None: ...
68
+
69
+
70
+ class Mosaic:
71
+ mos: cv2.typing.Rect
72
+ cellSz: int
73
+ decim: int
74
+
75
+ # Functions
76
+ @_typing.overload
77
+ def __init__(self) -> None: ...
78
+ @_typing.overload
79
+ def __init__(self, mos_: cv2.typing.Rect2i, cellSz_: int, decim_: int) -> None: ...
80
+
81
+
82
+ class Image:
83
+ org: cv2.typing.Point
84
+ img: cv2.typing.MatLike
85
+ alpha: cv2.typing.MatLike
86
+
87
+ # Functions
88
+ @_typing.overload
89
+ def __init__(self, org_: cv2.typing.Point, img_: cv2.typing.MatLike, alpha_: cv2.typing.MatLike) -> None: ...
90
+ @_typing.overload
91
+ def __init__(self) -> None: ...
92
+
93
+
94
+ class Poly:
95
+ points: _typing.Sequence[cv2.typing.Point]
96
+ color: cv2.typing.Scalar
97
+ thick: int
98
+ lt: int
99
+ shift: int
100
+
101
+ # Functions
102
+ @_typing.overload
103
+ def __init__(self, points_: _typing.Sequence[cv2.typing.Point], color_: cv2.typing.Scalar, thick_: int = ..., lt_: int = ..., shift_: int = ...) -> None: ...
104
+ @_typing.overload
105
+ def __init__(self) -> None: ...
106
+
107
+
108
+
109
+ # Functions
110
+ @_typing.overload
111
+ def render(bgr: cv2.typing.MatLike, prims: _typing.Sequence[cv2.typing.Prim], args: _typing.Sequence[cv2.GCompileArg] = ...) -> None: ...
112
+ @_typing.overload
113
+ def render(y_plane: cv2.typing.MatLike, uv_plane: cv2.typing.MatLike, prims: _typing.Sequence[cv2.typing.Prim], args: _typing.Sequence[cv2.GCompileArg] = ...) -> None: ...
114
+
115
+ def render3ch(src: cv2.GMat, prims: cv2.GArrayT) -> cv2.GMat: ...
116
+
117
+ def renderNV12(y: cv2.GMat, uv: cv2.GMat, prims: cv2.GArrayT) -> tuple[cv2.GMat, cv2.GMat]: ...
118
+
119
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/gst/__init__.pyi ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ GStreamerSource_OutputType_FRAME: int
4
+ GSTREAMER_SOURCE_OUTPUT_TYPE_FRAME: int
5
+ GStreamerSource_OutputType_MAT: int
6
+ GSTREAMER_SOURCE_OUTPUT_TYPE_MAT: int
7
+ GStreamerSource_OutputType = int
8
+ """One of [GStreamerSource_OutputType_FRAME, GSTREAMER_SOURCE_OUTPUT_TYPE_FRAME, GStreamerSource_OutputType_MAT, GSTREAMER_SOURCE_OUTPUT_TYPE_MAT]"""
9
+
10
+
11
+ # Classes
12
+ class GStreamerPipeline:
13
+ # Functions
14
+ def __init__(self, pipeline: str) -> None: ...
15
+
16
+
17
+
evalkit_tf446/lib/python3.10/site-packages/cv2/gapi/wip/onevpl/__init__.pyi ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ # Enumerations
4
+ AccelType_HOST: int
5
+ ACCEL_TYPE_HOST: int
6
+ AccelType_DX11: int
7
+ ACCEL_TYPE_DX11: int
8
+ AccelType_VAAPI: int
9
+ ACCEL_TYPE_VAAPI: int
10
+ AccelType_LAST_VALUE: int
11
+ ACCEL_TYPE_LAST_VALUE: int
12
+ AccelType = int
13
+ """One of [AccelType_HOST, ACCEL_TYPE_HOST, AccelType_DX11, ACCEL_TYPE_DX11, AccelType_VAAPI, ACCEL_TYPE_VAAPI, AccelType_LAST_VALUE, ACCEL_TYPE_LAST_VALUE]"""
14
+
15
+
16
+
evalkit_tf446/lib/python3.10/site-packages/cv2/ipp/__init__.pyi ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ # Functions
4
+ def getIppVersion() -> str: ...
5
+
6
+ def setUseIPP(flag: bool) -> None: ...
7
+
8
+ def setUseIPP_NotExact(flag: bool) -> None: ...
9
+
10
+ def useIPP() -> bool: ...
11
+
12
+ def useIPP_NotExact() -> bool: ...
13
+
14
+
evalkit_tf446/lib/python3.10/site-packages/cv2/load_config_py2.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ import sys
3
+
4
+ if sys.version_info[:2] < (3, 0):
5
+ def exec_file_wrapper(fpath, g_vars, l_vars):
6
+ execfile(fpath, g_vars, l_vars)
evalkit_tf446/lib/python3.10/site-packages/cv2/load_config_py3.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ import os
3
+ import sys
4
+
5
+ if sys.version_info[:2] >= (3, 0):
6
+ def exec_file_wrapper(fpath, g_vars, l_vars):
7
+ with open(fpath) as f:
8
+ code = compile(f.read(), os.path.basename(fpath), 'exec')
9
+ exec(code, g_vars, l_vars)
evalkit_tf446/lib/python3.10/site-packages/cv2/ogl/__init__.pyi ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ # Enumerations
4
+ POINTS: int
5
+ LINES: int
6
+ LINE_LOOP: int
7
+ LINE_STRIP: int
8
+ TRIANGLES: int
9
+ TRIANGLE_STRIP: int
10
+ TRIANGLE_FAN: int
11
+ QUADS: int
12
+ QUAD_STRIP: int
13
+ POLYGON: int
14
+ RenderModes = int
15
+ """One of [POINTS, LINES, LINE_LOOP, LINE_STRIP, TRIANGLES, TRIANGLE_STRIP, TRIANGLE_FAN, QUADS, QUAD_STRIP, POLYGON]"""
16
+
17
+
18
+ Buffer_ARRAY_BUFFER: int
19
+ BUFFER_ARRAY_BUFFER: int
20
+ Buffer_ELEMENT_ARRAY_BUFFER: int
21
+ BUFFER_ELEMENT_ARRAY_BUFFER: int
22
+ Buffer_PIXEL_PACK_BUFFER: int
23
+ BUFFER_PIXEL_PACK_BUFFER: int
24
+ Buffer_PIXEL_UNPACK_BUFFER: int
25
+ BUFFER_PIXEL_UNPACK_BUFFER: int
26
+ Buffer_Target = int
27
+ """One of [Buffer_ARRAY_BUFFER, BUFFER_ARRAY_BUFFER, Buffer_ELEMENT_ARRAY_BUFFER, BUFFER_ELEMENT_ARRAY_BUFFER, Buffer_PIXEL_PACK_BUFFER, BUFFER_PIXEL_PACK_BUFFER, Buffer_PIXEL_UNPACK_BUFFER, BUFFER_PIXEL_UNPACK_BUFFER]"""
28
+
29
+ Buffer_READ_ONLY: int
30
+ BUFFER_READ_ONLY: int
31
+ Buffer_WRITE_ONLY: int
32
+ BUFFER_WRITE_ONLY: int
33
+ Buffer_READ_WRITE: int
34
+ BUFFER_READ_WRITE: int
35
+ Buffer_Access = int
36
+ """One of [Buffer_READ_ONLY, BUFFER_READ_ONLY, Buffer_WRITE_ONLY, BUFFER_WRITE_ONLY, Buffer_READ_WRITE, BUFFER_READ_WRITE]"""
37
+
38
+ Texture2D_NONE: int
39
+ TEXTURE2D_NONE: int
40
+ Texture2D_DEPTH_COMPONENT: int
41
+ TEXTURE2D_DEPTH_COMPONENT: int
42
+ Texture2D_RGB: int
43
+ TEXTURE2D_RGB: int
44
+ Texture2D_RGBA: int
45
+ TEXTURE2D_RGBA: int
46
+ Texture2D_Format = int
47
+ """One of [Texture2D_NONE, TEXTURE2D_NONE, Texture2D_DEPTH_COMPONENT, TEXTURE2D_DEPTH_COMPONENT, Texture2D_RGB, TEXTURE2D_RGB, Texture2D_RGBA, TEXTURE2D_RGBA]"""
48
+
49
+
50
+ # Classes
51
+
evalkit_tf446/lib/python3.10/site-packages/cv2/py.typed ADDED
File without changes
evalkit_tf446/lib/python3.10/site-packages/cv2/qt/plugins/platforms/libqxcb.so ADDED
Binary file (29.3 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/cv2/samples/__init__.pyi ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ # Functions
4
+ def addSamplesDataSearchPath(path: str) -> None: ...
5
+
6
+ def addSamplesDataSearchSubDirectory(subdir: str) -> None: ...
7
+
8
+ def findFile(relative_path: str, required: bool = ..., silentMode: bool = ...) -> str: ...
9
+
10
+ def findFileOrKeep(relative_path: str, silentMode: bool = ...) -> str: ...
11
+
12
+
evalkit_tf446/lib/python3.10/site-packages/cv2/segmentation/__init__.pyi ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import cv2.typing
5
+ import typing as _typing
6
+
7
+
8
+ # Classes
9
+ class IntelligentScissorsMB:
10
+ # Functions
11
+ def __init__(self) -> None: ...
12
+
13
+ def setWeights(self, weight_non_edge: float, weight_gradient_direction: float, weight_gradient_magnitude: float) -> IntelligentScissorsMB: ...
14
+
15
+ def setGradientMagnitudeMaxLimit(self, gradient_magnitude_threshold_max: float = ...) -> IntelligentScissorsMB: ...
16
+
17
+ def setEdgeFeatureZeroCrossingParameters(self, gradient_magnitude_min_value: float = ...) -> IntelligentScissorsMB: ...
18
+
19
+ def setEdgeFeatureCannyParameters(self, threshold1: float, threshold2: float, apertureSize: int = ..., L2gradient: bool = ...) -> IntelligentScissorsMB: ...
20
+
21
+ @_typing.overload
22
+ def applyImage(self, image: cv2.typing.MatLike) -> IntelligentScissorsMB: ...
23
+ @_typing.overload
24
+ def applyImage(self, image: cv2.UMat) -> IntelligentScissorsMB: ...
25
+
26
+ @_typing.overload
27
+ def applyImageFeatures(self, non_edge: cv2.typing.MatLike, gradient_direction: cv2.typing.MatLike, gradient_magnitude: cv2.typing.MatLike, image: cv2.typing.MatLike | None = ...) -> IntelligentScissorsMB: ...
28
+ @_typing.overload
29
+ def applyImageFeatures(self, non_edge: cv2.UMat, gradient_direction: cv2.UMat, gradient_magnitude: cv2.UMat, image: cv2.UMat | None = ...) -> IntelligentScissorsMB: ...
30
+
31
+ def buildMap(self, sourcePt: cv2.typing.Point) -> None: ...
32
+
33
+ @_typing.overload
34
+ def getContour(self, targetPt: cv2.typing.Point, contour: cv2.typing.MatLike | None = ..., backward: bool = ...) -> cv2.typing.MatLike: ...
35
+ @_typing.overload
36
+ def getContour(self, targetPt: cv2.typing.Point, contour: cv2.UMat | None = ..., backward: bool = ...) -> cv2.UMat: ...
37
+
38
+
39
+
evalkit_tf446/lib/python3.10/site-packages/cv2/typing/__init__.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__ = [
2
+ "IntPointer",
3
+ "MatLike",
4
+ "MatShape",
5
+ "Size",
6
+ "Size2f",
7
+ "Scalar",
8
+ "Point",
9
+ "Point2i",
10
+ "Point2f",
11
+ "Point2d",
12
+ "Point3i",
13
+ "Point3f",
14
+ "Point3d",
15
+ "Range",
16
+ "Rect",
17
+ "Rect2i",
18
+ "Rect2f",
19
+ "Rect2d",
20
+ "Moments",
21
+ "RotatedRect",
22
+ "TermCriteria",
23
+ "Vec2i",
24
+ "Vec2f",
25
+ "Vec2d",
26
+ "Vec3i",
27
+ "Vec3f",
28
+ "Vec3d",
29
+ "Vec4i",
30
+ "Vec4f",
31
+ "Vec4d",
32
+ "Vec6f",
33
+ "FeatureDetector",
34
+ "DescriptorExtractor",
35
+ "FeatureExtractor",
36
+ "GProtoArg",
37
+ "GProtoInputArgs",
38
+ "GProtoOutputArgs",
39
+ "GRunArg",
40
+ "GOptRunArg",
41
+ "GMetaArg",
42
+ "Prim",
43
+ "Matx33f",
44
+ "Matx33d",
45
+ "Matx44f",
46
+ "Matx44d",
47
+ "GTypeInfo",
48
+ "ExtractArgsCallback",
49
+ "ExtractMetaCallback",
50
+ "LayerId",
51
+ "IndexParams",
52
+ "SearchParams",
53
+ "map_string_and_string",
54
+ "map_string_and_int",
55
+ "map_string_and_vector_size_t",
56
+ "map_string_and_vector_float",
57
+ "map_int_and_double",
58
+ ]
59
+
60
+ import cv2.dnn
61
+ import cv2.mat_wrapper
62
+ import typing as _typing
63
+ import cv2.gapi.wip.draw
64
+ import numpy
65
+ import cv2
66
+
67
+
68
+ if _typing.TYPE_CHECKING:
69
+ NumPyArrayNumeric = numpy.ndarray[_typing.Any, numpy.dtype[numpy.integer[_typing.Any] | numpy.floating[_typing.Any]]]
70
+ else:
71
+ NumPyArrayNumeric = numpy.ndarray
72
+
73
+
74
+ if _typing.TYPE_CHECKING:
75
+ NumPyArrayFloat32 = numpy.ndarray[_typing.Any, numpy.dtype[numpy.float32]]
76
+ else:
77
+ NumPyArrayFloat32 = numpy.ndarray
78
+
79
+
80
+ if _typing.TYPE_CHECKING:
81
+ NumPyArrayFloat64 = numpy.ndarray[_typing.Any, numpy.dtype[numpy.float64]]
82
+ else:
83
+ NumPyArrayFloat64 = numpy.ndarray
84
+
85
+
86
+ if _typing.TYPE_CHECKING:
87
+ TermCriteria_Type = cv2.TermCriteria_Type
88
+ else:
89
+ TermCriteria_Type = int
90
+
91
+
92
+ IntPointer = int
93
+ """Represents an arbitrary pointer"""
94
+ MatLike = _typing.Union[cv2.mat_wrapper.Mat, NumPyArrayNumeric]
95
+ MatShape = _typing.Sequence[int]
96
+ Size = _typing.Sequence[int]
97
+ """Required length is 2"""
98
+ Size2f = _typing.Sequence[float]
99
+ """Required length is 2"""
100
+ Scalar = _typing.Sequence[float]
101
+ """Required length is at most 4"""
102
+ Point = _typing.Sequence[int]
103
+ """Required length is 2"""
104
+ Point2i = Point
105
+ Point2f = _typing.Sequence[float]
106
+ """Required length is 2"""
107
+ Point2d = _typing.Sequence[float]
108
+ """Required length is 2"""
109
+ Point3i = _typing.Sequence[int]
110
+ """Required length is 3"""
111
+ Point3f = _typing.Sequence[float]
112
+ """Required length is 3"""
113
+ Point3d = _typing.Sequence[float]
114
+ """Required length is 3"""
115
+ Range = _typing.Sequence[int]
116
+ """Required length is 2"""
117
+ Rect = _typing.Sequence[int]
118
+ """Required length is 4"""
119
+ Rect2i = _typing.Sequence[int]
120
+ """Required length is 4"""
121
+ Rect2f = _typing.Sequence[float]
122
+ """Required length is 4"""
123
+ Rect2d = _typing.Sequence[float]
124
+ """Required length is 4"""
125
+ Moments = _typing.Dict[str, float]
126
+ RotatedRect = _typing.Tuple[Point2f, Size2f, float]
127
+ """Any type providing sequence protocol is supported"""
128
+ TermCriteria = _typing.Tuple[TermCriteria_Type, int, float]
129
+ """Any type providing sequence protocol is supported"""
130
+ Vec2i = _typing.Sequence[int]
131
+ """Required length is 2"""
132
+ Vec2f = _typing.Sequence[float]
133
+ """Required length is 2"""
134
+ Vec2d = _typing.Sequence[float]
135
+ """Required length is 2"""
136
+ Vec3i = _typing.Sequence[int]
137
+ """Required length is 3"""
138
+ Vec3f = _typing.Sequence[float]
139
+ """Required length is 3"""
140
+ Vec3d = _typing.Sequence[float]
141
+ """Required length is 3"""
142
+ Vec4i = _typing.Sequence[int]
143
+ """Required length is 4"""
144
+ Vec4f = _typing.Sequence[float]
145
+ """Required length is 4"""
146
+ Vec4d = _typing.Sequence[float]
147
+ """Required length is 4"""
148
+ Vec6f = _typing.Sequence[float]
149
+ """Required length is 6"""
150
+ FeatureDetector = cv2.Feature2D
151
+ DescriptorExtractor = cv2.Feature2D
152
+ FeatureExtractor = cv2.Feature2D
153
+ GProtoArg = _typing.Union[Scalar, cv2.GMat, cv2.GOpaqueT, cv2.GArrayT]
154
+ GProtoInputArgs = _typing.Sequence[GProtoArg]
155
+ GProtoOutputArgs = _typing.Sequence[GProtoArg]
156
+ GRunArg = _typing.Union[MatLike, Scalar, cv2.GOpaqueT, cv2.GArrayT, _typing.Sequence[_typing.Any], None]
157
+ GOptRunArg = _typing.Optional[GRunArg]
158
+ GMetaArg = _typing.Union[cv2.GMat, Scalar, cv2.GOpaqueT, cv2.GArrayT]
159
+ Prim = _typing.Union[cv2.gapi.wip.draw.Text, cv2.gapi.wip.draw.Circle, cv2.gapi.wip.draw.Image, cv2.gapi.wip.draw.Line, cv2.gapi.wip.draw.Rect, cv2.gapi.wip.draw.Mosaic, cv2.gapi.wip.draw.Poly]
160
+ Matx33f = NumPyArrayFloat32
161
+ """NDArray(shape=(3, 3), dtype=numpy.float32)"""
162
+ Matx33d = NumPyArrayFloat64
163
+ """NDArray(shape=(3, 3), dtype=numpy.float64)"""
164
+ Matx44f = NumPyArrayFloat32
165
+ """NDArray(shape=(4, 4), dtype=numpy.float32)"""
166
+ Matx44d = NumPyArrayFloat64
167
+ """NDArray(shape=(4, 4), dtype=numpy.float64)"""
168
+ GTypeInfo = _typing.Union[cv2.GMat, Scalar, cv2.GOpaqueT, cv2.GArrayT]
169
+ ExtractArgsCallback = _typing.Callable[[_typing.Sequence[GTypeInfo]], _typing.Sequence[GRunArg]]
170
+ ExtractMetaCallback = _typing.Callable[[_typing.Sequence[GTypeInfo]], _typing.Sequence[GMetaArg]]
171
+ LayerId = cv2.dnn.DictValue
172
+ IndexParams = _typing.Dict[str, _typing.Union[bool, int, float, str]]
173
+ SearchParams = _typing.Dict[str, _typing.Union[bool, int, float, str]]
174
+ map_string_and_string = _typing.Dict[str, str]
175
+ map_string_and_int = _typing.Dict[str, int]
176
+ map_string_and_vector_size_t = _typing.Dict[str, _typing.Sequence[int]]
177
+ map_string_and_vector_float = _typing.Dict[str, _typing.Sequence[float]]
178
+ map_int_and_double = _typing.Dict[int, float]
evalkit_tf446/lib/python3.10/site-packages/cv2/typing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.78 kB). View file
 
evalkit_tf446/lib/python3.10/site-packages/cv2/version.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ opencv_version = "4.10.0.84"
2
+ contrib = False
3
+ headless = False
4
+ rolling = False
5
+ ci_build = True
evalkit_tf446/lib/python3.10/site-packages/cycler/__init__.py ADDED
@@ -0,0 +1,573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Cycler
3
+ ======
4
+
5
+ Cycling through combinations of values, producing dictionaries.
6
+
7
+ You can add cyclers::
8
+
9
+ from cycler import cycler
10
+ cc = (cycler(color=list('rgb')) +
11
+ cycler(linestyle=['-', '--', '-.']))
12
+ for d in cc:
13
+ print(d)
14
+
15
+ Results in::
16
+
17
+ {'color': 'r', 'linestyle': '-'}
18
+ {'color': 'g', 'linestyle': '--'}
19
+ {'color': 'b', 'linestyle': '-.'}
20
+
21
+
22
+ You can multiply cyclers::
23
+
24
+ from cycler import cycler
25
+ cc = (cycler(color=list('rgb')) *
26
+ cycler(linestyle=['-', '--', '-.']))
27
+ for d in cc:
28
+ print(d)
29
+
30
+ Results in::
31
+
32
+ {'color': 'r', 'linestyle': '-'}
33
+ {'color': 'r', 'linestyle': '--'}
34
+ {'color': 'r', 'linestyle': '-.'}
35
+ {'color': 'g', 'linestyle': '-'}
36
+ {'color': 'g', 'linestyle': '--'}
37
+ {'color': 'g', 'linestyle': '-.'}
38
+ {'color': 'b', 'linestyle': '-'}
39
+ {'color': 'b', 'linestyle': '--'}
40
+ {'color': 'b', 'linestyle': '-.'}
41
+ """
42
+
43
+
44
+ from __future__ import annotations
45
+
46
+ from collections.abc import Hashable, Iterable, Generator
47
+ import copy
48
+ from functools import reduce
49
+ from itertools import product, cycle
50
+ from operator import mul, add
51
+ # Dict, List, Union required for runtime cast calls
52
+ from typing import TypeVar, Generic, Callable, Union, Dict, List, Any, overload, cast
53
+
54
+ __version__ = "0.12.1"
55
+
56
+ K = TypeVar("K", bound=Hashable)
57
+ L = TypeVar("L", bound=Hashable)
58
+ V = TypeVar("V")
59
+ U = TypeVar("U")
60
+
61
+
62
+ def _process_keys(
63
+ left: Cycler[K, V] | Iterable[dict[K, V]],
64
+ right: Cycler[K, V] | Iterable[dict[K, V]] | None,
65
+ ) -> set[K]:
66
+ """
67
+ Helper function to compose cycler keys.
68
+
69
+ Parameters
70
+ ----------
71
+ left, right : iterable of dictionaries or None
72
+ The cyclers to be composed.
73
+
74
+ Returns
75
+ -------
76
+ keys : set
77
+ The keys in the composition of the two cyclers.
78
+ """
79
+ l_peek: dict[K, V] = next(iter(left)) if left != [] else {}
80
+ r_peek: dict[K, V] = next(iter(right)) if right is not None else {}
81
+ l_key: set[K] = set(l_peek.keys())
82
+ r_key: set[K] = set(r_peek.keys())
83
+ if l_key & r_key:
84
+ raise ValueError("Can not compose overlapping cycles")
85
+ return l_key | r_key
86
+
87
+
88
def concat(left: Cycler[K, V], right: Cycler[K, U]) -> Cycler[K, V | U]:
    r"""
    Concatenate `Cycler`\s, as if chained using `itertools.chain`.

    The keys of *left* and *right* must match exactly.

    Examples
    --------
    >>> num = cycler('a', range(3))
    >>> let = cycler('a', 'abc')
    >>> num.concat(let)
    cycler('a', [0, 1, 2, 'a', 'b', 'c'])

    Returns
    -------
    `Cycler`
        The concatenated cycler.
    """
    if left.keys != right.keys:
        raise ValueError(
            "Keys do not match:\n"
            "\tIntersection: {both!r}\n"
            "\tDisjoint: {just_one!r}".format(
                both=left.keys & right.keys, just_one=left.keys ^ right.keys
            )
        )
    # Transpose each operand into {key: [values]} form, splice the value
    # lists together per key, and fold the per-key cyclers back up with '+'.
    left_table = cast(Dict[K, List[Union[V, U]]], left.by_key())
    right_table = cast(Dict[K, List[Union[V, U]]], right.by_key())
    glued = (_cycler(key, left_table[key] + right_table[key]) for key in left.keys)
    return reduce(add, glued)
117
+
118
+
119
class Cycler(Generic[K, V]):
    """
    Composable cycles.

    This class has composition methods:

    ``+``
      for 'inner' products (zip)

    ``+=``
      in-place ``+``

    ``*``
      for outer products (`itertools.product`) and integer multiplication

    ``*=``
      in-place ``*``

    and supports basic slicing via ``[]``.

    Parameters
    ----------
    left, right : Cycler or None
        The 'left' and 'right' cyclers.
    op : func or None
        Function which composes the 'left' and 'right' cyclers.
    """

    def __call__(self):
        """Return an infinite iterator cycling over this Cycler's entries."""
        return cycle(self)

    def __init__(
        self,
        left: Cycler[K, V] | Iterable[dict[K, V]] | None,
        right: Cycler[K, V] | None = None,
        op: Any = None,
    ):
        """
        Semi-private init.

        Do not use this directly, use `cycler` function instead.
        """
        if isinstance(left, Cycler):
            # Defensive copy so mutating the operand later (e.g. change_key)
            # cannot affect this instance.
            self._left: Cycler[K, V] | list[dict[K, V]] = Cycler(
                left._left, left._right, left._op
            )
        elif left is not None:
            # Need to copy the dictionary or else that will be a residual
            # mutable that could lead to strange errors
            self._left = [copy.copy(v) for v in left]
        else:
            self._left = []

        if isinstance(right, Cycler):
            # Same defensive copy on the right-hand operand.
            self._right: Cycler[K, V] | None = Cycler(
                right._left, right._right, right._op
            )
        else:
            self._right = None

        # _process_keys also validates that left and right share no keys.
        self._keys: set[K] = _process_keys(self._left, self._right)
        self._op: Any = op

    def __contains__(self, k):
        """Return whether *k* is one of this Cycler's property keys."""
        return k in self._keys

    @property
    def keys(self) -> set[K]:
        """The keys this Cycler knows about (returned as a fresh set)."""
        return set(self._keys)

    def change_key(self, old: K, new: K) -> None:
        """
        Change a key in this cycler to a new name.
        Modification is performed in-place.

        Does nothing if the old key is the same as the new key.
        Raises a ValueError if the new key is already a key.
        Raises a KeyError if the old key isn't a key.
        """
        if old == new:
            return
        if new in self._keys:
            raise ValueError(
                f"Can't replace {old} with {new}, {new} is already a key"
            )
        if old not in self._keys:
            raise KeyError(
                f"Can't replace {old} with {new}, {old} is not a key"
            )

        self._keys.remove(old)
        self._keys.add(new)

        # The key lives on exactly one side; recurse into whichever side
        # carries it.
        if self._right is not None and old in self._right.keys:
            self._right.change_key(old, new)

        # self._left should always be non-None
        # if self._keys is non-empty.
        elif isinstance(self._left, Cycler):
            self._left.change_key(old, new)
        else:
            # It should be completely safe at this point to
            # assume that the old key can be found in each
            # iteration.
            self._left = [{new: entry[old]} for entry in self._left]

    @classmethod
    def _from_iter(cls, label: K, itr: Iterable[V]) -> Cycler[K, V]:
        """
        Class method to create 'base' Cycler objects
        that do not have a 'right' or 'op' and for which
        the 'left' object is not another Cycler.

        Parameters
        ----------
        label : hashable
            The property key.

        itr : iterable
            Finite length iterable of the property values.

        Returns
        -------
        `Cycler`
            New 'base' cycler.
        """
        ret: Cycler[K, V] = cls(None)
        # Bypass __init__'s key processing: install the entries and the
        # single key directly.
        ret._left = list({label: v} for v in itr)
        ret._keys = {label}
        return ret

    def __getitem__(self, key: slice) -> Cycler[K, V]:
        """Slice every property's value list; only slice keys are supported."""
        # TODO : maybe add numpy style fancy slicing
        if isinstance(key, slice):
            trans = self.by_key()
            return reduce(add, (_cycler(k, v[key]) for k, v in trans.items()))
        else:
            raise ValueError("Can only use slices with Cycler.__getitem__")

    def __iter__(self) -> Generator[dict[K, V], None, None]:
        """Yield one dict per entry, merging left and right via self._op."""
        if self._right is None:
            # Base cycler: each entry is already a single dict; copy it so
            # callers cannot mutate our internal state.
            for left in self._left:
                yield dict(left)
        else:
            if self._op is None:
                raise TypeError(
                    "Operation cannot be None when both left and right are defined"
                )
            # self._op is zip (inner product) or itertools.product (outer
            # product); each yields (left_dict, right_dict) pairs to merge.
            for a, b in self._op(self._left, self._right):
                out = {}
                out.update(a)
                out.update(b)
                yield out

    def __add__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]:
        """
        Pair-wise combine two equal length cyclers (zip).

        Parameters
        ----------
        other : Cycler
        """
        if len(self) != len(other):
            raise ValueError(
                f"Can only add equal length cycles, not {len(self)} and {len(other)}"
            )
        return Cycler(
            cast(Cycler[Union[K, L], Union[V, U]], self),
            cast(Cycler[Union[K, L], Union[V, U]], other),
            zip
        )

    @overload
    def __mul__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]:
        ...

    @overload
    def __mul__(self, other: int) -> Cycler[K, V]:
        ...

    def __mul__(self, other):
        """
        Outer product of two cyclers (`itertools.product`) or integer
        multiplication.

        Parameters
        ----------
        other : Cycler or int
        """
        if isinstance(other, Cycler):
            return Cycler(
                cast(Cycler[Union[K, L], Union[V, U]], self),
                cast(Cycler[Union[K, L], Union[V, U]], other),
                product
            )
        elif isinstance(other, int):
            # n * cycler repeats every property's value list n times.
            trans = self.by_key()
            return reduce(
                add, (_cycler(k, v * other) for k, v in trans.items())
            )
        else:
            return NotImplemented

    @overload
    def __rmul__(self, other: Cycler[L, U]) -> Cycler[K | L, V | U]:
        ...

    @overload
    def __rmul__(self, other: int) -> Cycler[K, V]:
        ...

    def __rmul__(self, other):
        """Right multiplication; delegates to __mul__ (operation commutes)."""
        return self * other

    def __len__(self) -> int:
        """Number of entries in a full cycle: min for zip, product for *."""
        op_dict: dict[Callable, Callable[[int, int], int]] = {zip: min, product: mul}
        if self._right is None:
            return len(self._left)
        l_len = len(self._left)
        r_len = len(self._right)
        return op_dict[self._op](l_len, r_len)

    # iadd and imul do not expand the type as the returns must be consistent
    # with self, thus they flag as inconsistent with add/mul
    def __iadd__(self, other: Cycler[K, V]) -> Cycler[K, V]:  # type: ignore[misc]
        """
        In-place pair-wise combine two equal length cyclers (zip).

        Parameters
        ----------
        other : Cycler
        """
        if not isinstance(other, Cycler):
            raise TypeError("Cannot += with a non-Cycler object")
        # True shallow copy of self is fine since this is in-place
        old_self = copy.copy(self)
        self._keys = _process_keys(old_self, other)
        self._left = old_self
        self._op = zip
        self._right = Cycler(other._left, other._right, other._op)
        return self

    def __imul__(self, other: Cycler[K, V] | int) -> Cycler[K, V]:  # type: ignore[misc]
        """
        In-place outer product of two cyclers (`itertools.product`).

        Parameters
        ----------
        other : Cycler
        """
        if not isinstance(other, Cycler):
            raise TypeError("Cannot *= with a non-Cycler object")
        # True shallow copy of self is fine since this is in-place
        old_self = copy.copy(self)
        self._keys = _process_keys(old_self, other)
        self._left = old_self
        self._op = product
        self._right = Cycler(other._left, other._right, other._op)
        return self

    def __eq__(self, other: object) -> bool:
        """Equality: same length, same key set, and entry-wise equal dicts."""
        if not isinstance(other, Cycler):
            return False
        if len(self) != len(other):
            return False
        # Symmetric difference is non-empty iff the key sets differ.
        if self.keys ^ other.keys:
            return False
        return all(a == b for a, b in zip(self, other))

    # Mutable (change_key mutates in place), hence explicitly unhashable.
    __hash__ = None  # type: ignore

    def __repr__(self) -> str:
        """Base cyclers render as cycler(...); composites as (left op right)."""
        op_map = {zip: "+", product: "*"}
        if self._right is None:
            lab = self.keys.pop()
            itr = list(v[lab] for v in self)
            return f"cycler({lab!r}, {itr!r})"
        else:
            op = op_map.get(self._op, "?")
            msg = "({left!r} {op} {right!r})"
            return msg.format(left=self._left, op=op, right=self._right)

    def _repr_html_(self) -> str:
        """Jupyter rich display hook: render a full cycle as an HTML table."""
        # a table showing the value of each key through a full cycle
        output = "<table>"
        # Sort by repr so keys of mixed (unorderable) types still sort.
        sorted_keys = sorted(self.keys, key=repr)
        for key in sorted_keys:
            output += f"<th>{key!r}</th>"
        for d in iter(self):
            output += "<tr>"
            for k in sorted_keys:
                output += f"<td>{d[k]!r}</td>"
            output += "</tr>"
        output += "</table>"
        return output

    def by_key(self) -> dict[K, list[V]]:
        """
        Values by key.

        This returns the transposed values of the cycler. Iterating
        over a `Cycler` yields dicts with a single value for each key,
        this method returns a `dict` of `list` which are the values
        for the given key.

        The returned value can be used to create an equivalent `Cycler`
        using only `+`.

        Returns
        -------
        transpose : dict
            dict of lists of the values for each key.
        """

        # TODO : sort out if this is a bottle neck, if there is a better way
        # and if we care.

        keys = self.keys
        out: dict[K, list[V]] = {k: list() for k in keys}

        for d in self:
            for k in keys:
                out[k].append(d[k])
        return out

    # for back compatibility
    _transpose = by_key

    def simplify(self) -> Cycler[K, V]:
        """
        Simplify the cycler into a sum (but no products) of cyclers.

        Returns
        -------
        simple : Cycler
        """
        # TODO: sort out if it is worth the effort to make sure this is
        # balanced. Currently it is
        # (((a + b) + c) + d) vs
        # ((a + b) + (c + d))
        # I would believe that there is some performance implications
        trans = self.by_key()
        return reduce(add, (_cycler(k, v) for k, v in trans.items()))

    concat = concat
465
+
466
+
467
@overload
def cycler(arg: Cycler[K, V]) -> Cycler[K, V]:
    ...


@overload
def cycler(**kwargs: Iterable[V]) -> Cycler[str, V]:
    ...


@overload
def cycler(label: K, itr: Iterable[V]) -> Cycler[K, V]:
    ...


def cycler(*args, **kwargs):
    """
    Public factory for `Cycler` objects.

    Accepts exactly one of three call shapes:

    cycler(arg)
        Shallow-copy an existing `Cycler`.
    cycler(label, itr)
        Build a single-property cycler; *label* may be any hashable
        (useful when it is not a valid keyword, e.g. an int).
    cycler(label1=itr1[, label2=itr2[, ...]])
        Inner product (zip) of one single-property cycler per keyword.

    Parameters
    ----------
    arg : Cycler
        Copy constructor for Cycler (does a shallow copy of iterables).
    label : name
        The property key; any hashable in the 2-arg form, a valid Python
        identifier in the keyword form.
    itr : iterable
        Finite length iterable of the property values.  Can be a
        single-property `Cycler`, acting as a key change via shallow copy.

    Returns
    -------
    cycler : Cycler
        New `Cycler` for the given property

    Raises
    ------
    TypeError
        If positional and keyword arguments are mixed, if more than two
        positional arguments are given, if a lone positional argument is
        not a `Cycler`, or if no arguments are given at all.
    """
    if args and kwargs:
        raise TypeError(
            "cycler() can only accept positional OR keyword arguments -- not both."
        )

    n_positional = len(args)
    if n_positional > 2:
        raise TypeError(
            "Only a single Cycler can be accepted as the lone "
            "positional argument. Use keyword arguments instead."
        )
    if n_positional == 2:
        return _cycler(*args)
    if n_positional == 1:
        (source,) = args
        if not isinstance(source, Cycler):
            raise TypeError(
                "If only one positional argument given, it must "
                "be a Cycler instance."
            )
        return Cycler(source)

    if kwargs:
        # Fold the per-keyword base cyclers together with '+' (inner product).
        return reduce(add, (_cycler(name, values) for name, values in kwargs.items()))

    raise TypeError("Must have at least a positional OR keyword arguments")
544
+
545
+
546
def _cycler(label: K, itr: Iterable[V]) -> Cycler[K, V]:
    """
    Build a single-property `Cycler` from a key and an iterable of values.

    Parameters
    ----------
    label : hashable
        The property key.
    itr : iterable
        Finite length iterable of the property values.  May itself be a
        single-property `Cycler`, in which case its values are re-keyed
        under *label*.

    Returns
    -------
    cycler : Cycler
        New `Cycler` for the given property

    Raises
    ------
    ValueError
        If *itr* is a `Cycler` carrying more than one property.
    """
    if isinstance(itr, Cycler):
        source_keys = itr.keys
        if len(source_keys) != 1:
            msg = "Can not create Cycler from a multi-property Cycler"
            raise ValueError(msg)

        (only_key,) = source_keys
        # No defensive copy needed: _from_iter() materializes its own list
        # from whatever iterable it receives.
        itr = (entry[only_key] for entry in itr)

    return Cycler._from_iter(label, itr)
evalkit_tf446/lib/python3.10/site-packages/cycler/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (16.6 kB). View file