prasb commited on
Commit
cae1119
·
verified ·
1 Parent(s): cbd752c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cv2/__init__.py +181 -0
  3. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cv2/load_config_py3.py +9 -0
  4. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/LICENSE +202 -0
  5. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/METADATA +62 -0
  6. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/RECORD +19 -0
  7. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/REQUESTED +0 -0
  8. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/WHEEL +6 -0
  9. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/top_level.txt +1 -0
  10. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/INSTALLER +1 -0
  11. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_api.py +323 -0
  12. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_unix.py +65 -0
  13. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_util.py +47 -0
  14. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/py.typed +0 -0
  15. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5.cpython-38-x86_64-linux-gnu.so +3 -0
  16. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5f.cpython-38-x86_64-linux-gnu.so +3 -0
  17. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/INSTALLER +1 -0
  18. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/METADATA +264 -0
  19. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/RECORD +70 -0
  20. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/REQUESTED +0 -0
  21. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/WHEEL +5 -0
  22. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/top_level.txt +1 -0
  23. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/__init__.py +8 -0
  24. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_definitions.py +50 -0
  25. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_io.py +683 -0
  26. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_parsing.py +201 -0
  27. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_utils.py +105 -0
  28. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/INSTALLER +1 -0
  29. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/LICENSE-3RD-PARTY.txt +0 -0
  30. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/LICENSE.txt +21 -0
  31. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/METADATA +305 -0
  32. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/RECORD +144 -0
  33. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/REQUESTED +0 -0
  34. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/WHEEL +6 -0
  35. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/top_level.txt +1 -0
  36. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1/__init__.py +7 -0
  37. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1/debug.py +157 -0
  38. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1/error.py +75 -0
  39. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/__init__.py +40 -0
  40. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_c99_config.py +3 -0
  41. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_cwt.py +203 -0
  42. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_doc_utils.py +187 -0
  43. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_dwt.py +517 -0
  44. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_functions.py +240 -0
  45. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_mra.py +427 -0
  46. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_multidim.py +314 -0
  47. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_multilevel.py +1561 -0
  48. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_pytest.py +68 -0
  49. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_pytesttester.py +164 -0
  50. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_swt.py +824 -0
.gitattributes CHANGED
@@ -294,3 +294,6 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/utils.
294
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5fd.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
295
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/defs.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
296
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5d.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
294
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5fd.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
295
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/defs.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
296
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5d.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
297
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5f.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
298
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wrapt/_wrappers.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
299
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cv2/__init__.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''
2
+ OpenCV Python binary extension loader
3
+ '''
4
+ import os
5
+ import importlib
6
+ import sys
7
+
8
+ __all__ = []
9
+
10
+ try:
11
+ import numpy
12
+ import numpy.core.multiarray
13
+ except ImportError:
14
+ print('OpenCV bindings requires "numpy" package.')
15
+ print('Install it via command:')
16
+ print(' pip install numpy')
17
+ raise
18
+
19
+ # TODO
20
+ # is_x64 = sys.maxsize > 2**32
21
+
22
+
23
+ def __load_extra_py_code_for_module(base, name, enable_debug_print=False):
24
+ module_name = "{}.{}".format(__name__, name)
25
+ export_module_name = "{}.{}".format(base, name)
26
+ native_module = sys.modules.pop(module_name, None)
27
+ try:
28
+ py_module = importlib.import_module(module_name)
29
+ except ImportError as err:
30
+ if enable_debug_print:
31
+ print("Can't load Python code for module:", module_name,
32
+ ". Reason:", err)
33
+ # Extension doesn't contain extra py code
34
+ return False
35
+
36
+ if not hasattr(base, name):
37
+ setattr(sys.modules[base], name, py_module)
38
+ sys.modules[export_module_name] = py_module
39
+ # If it is C extension module it is already loaded by cv2 package
40
+ if native_module:
41
+ setattr(py_module, "_native", native_module)
42
+ for k, v in filter(lambda kv: not hasattr(py_module, kv[0]),
43
+ native_module.__dict__.items()):
44
+ if enable_debug_print: print(' symbol({}): {} = {}'.format(name, k, v))
45
+ setattr(py_module, k, v)
46
+ return True
47
+
48
+
49
+ def __collect_extra_submodules(enable_debug_print=False):
50
+ def modules_filter(module):
51
+ return all((
52
+ # module is not internal
53
+ not module.startswith("_"),
54
+ not module.startswith("python-"),
55
+ # it is not a file
56
+ os.path.isdir(os.path.join(_extra_submodules_init_path, module))
57
+ ))
58
+ if sys.version_info[0] < 3:
59
+ if enable_debug_print:
60
+ print("Extra submodules is loaded only for Python 3")
61
+ return []
62
+
63
+ __INIT_FILE_PATH = os.path.abspath(__file__)
64
+ _extra_submodules_init_path = os.path.dirname(__INIT_FILE_PATH)
65
+ return filter(modules_filter, os.listdir(_extra_submodules_init_path))
66
+
67
+
68
+ def bootstrap():
69
+ import sys
70
+
71
+ import copy
72
+ save_sys_path = copy.copy(sys.path)
73
+
74
+ if hasattr(sys, 'OpenCV_LOADER'):
75
+ print(sys.path)
76
+ raise ImportError('ERROR: recursion is detected during loading of "cv2" binary extensions. Check OpenCV installation.')
77
+ sys.OpenCV_LOADER = True
78
+
79
+ DEBUG = False
80
+ if hasattr(sys, 'OpenCV_LOADER_DEBUG'):
81
+ DEBUG = True
82
+
83
+ import platform
84
+ if DEBUG: print('OpenCV loader: os.name="{}" platform.system()="{}"'.format(os.name, str(platform.system())))
85
+
86
+ LOADER_DIR = os.path.dirname(os.path.abspath(os.path.realpath(__file__)))
87
+
88
+ PYTHON_EXTENSIONS_PATHS = []
89
+ BINARIES_PATHS = []
90
+
91
+ g_vars = globals()
92
+ l_vars = locals().copy()
93
+
94
+ if sys.version_info[:2] < (3, 0):
95
+ from . load_config_py2 import exec_file_wrapper
96
+ else:
97
+ from . load_config_py3 import exec_file_wrapper
98
+
99
+ def load_first_config(fnames, required=True):
100
+ for fname in fnames:
101
+ fpath = os.path.join(LOADER_DIR, fname)
102
+ if not os.path.exists(fpath):
103
+ if DEBUG: print('OpenCV loader: config not found, skip: {}'.format(fpath))
104
+ continue
105
+ if DEBUG: print('OpenCV loader: loading config: {}'.format(fpath))
106
+ exec_file_wrapper(fpath, g_vars, l_vars)
107
+ return True
108
+ if required:
109
+ raise ImportError('OpenCV loader: missing configuration file: {}. Check OpenCV installation.'.format(fnames))
110
+
111
+ load_first_config(['config.py'], True)
112
+ load_first_config([
113
+ 'config-{}.{}.py'.format(sys.version_info[0], sys.version_info[1]),
114
+ 'config-{}.py'.format(sys.version_info[0])
115
+ ], True)
116
+
117
+ if DEBUG: print('OpenCV loader: PYTHON_EXTENSIONS_PATHS={}'.format(str(l_vars['PYTHON_EXTENSIONS_PATHS'])))
118
+ if DEBUG: print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS'])))
119
+
120
+ applySysPathWorkaround = False
121
+ if hasattr(sys, 'OpenCV_REPLACE_SYS_PATH_0'):
122
+ applySysPathWorkaround = True
123
+ else:
124
+ try:
125
+ BASE_DIR = os.path.dirname(LOADER_DIR)
126
+ if sys.path[0] == BASE_DIR or os.path.realpath(sys.path[0]) == BASE_DIR:
127
+ applySysPathWorkaround = True
128
+ except:
129
+ if DEBUG: print('OpenCV loader: exception during checking workaround for sys.path[0]')
130
+ pass # applySysPathWorkaround is False
131
+
132
+ for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']):
133
+ sys.path.insert(1 if not applySysPathWorkaround else 0, p)
134
+
135
+ if os.name == 'nt':
136
+ if sys.version_info[:2] >= (3, 8): # https://github.com/python/cpython/pull/12302
137
+ for p in l_vars['BINARIES_PATHS']:
138
+ try:
139
+ os.add_dll_directory(p)
140
+ except Exception as e:
141
+ if DEBUG: print('Failed os.add_dll_directory(): '+ str(e))
142
+ pass
143
+ os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '')
144
+ if DEBUG: print('OpenCV loader: PATH={}'.format(str(os.environ['PATH'])))
145
+ else:
146
+ # amending of LD_LIBRARY_PATH works for sub-processes only
147
+ os.environ['LD_LIBRARY_PATH'] = ':'.join(l_vars['BINARIES_PATHS']) + ':' + os.environ.get('LD_LIBRARY_PATH', '')
148
+
149
+ if DEBUG: print("Relink everything from native cv2 module to cv2 package")
150
+
151
+ py_module = sys.modules.pop("cv2")
152
+
153
+ native_module = importlib.import_module("cv2")
154
+
155
+ sys.modules["cv2"] = py_module
156
+ setattr(py_module, "_native", native_module)
157
+
158
+ for item_name, item in filter(lambda kv: kv[0] not in ("__file__", "__loader__", "__spec__",
159
+ "__name__", "__package__"),
160
+ native_module.__dict__.items()):
161
+ if item_name not in g_vars:
162
+ g_vars[item_name] = item
163
+
164
+ sys.path = save_sys_path # multiprocessing should start from bootstrap code (https://github.com/opencv/opencv/issues/18502)
165
+
166
+ try:
167
+ del sys.OpenCV_LOADER
168
+ except Exception as e:
169
+ if DEBUG:
170
+ print("Exception during delete OpenCV_LOADER:", e)
171
+
172
+ if DEBUG: print('OpenCV loader: binary extension... OK')
173
+
174
+ for submodule in __collect_extra_submodules(DEBUG):
175
+ if __load_extra_py_code_for_module("cv2", submodule, DEBUG):
176
+ if DEBUG: print("Extra Python code for", submodule, "is loaded")
177
+
178
+ if DEBUG: print('OpenCV loader: DONE')
179
+
180
+
181
+ bootstrap()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cv2/load_config_py3.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+ import os
3
+ import sys
4
+
5
+ if sys.version_info[:2] >= (3, 0):
6
+ def exec_file_wrapper(fpath, g_vars, l_vars):
7
+ with open(fpath) as f:
8
+ code = compile(f.read(), os.path.basename(fpath), 'exec')
9
+ exec(code, g_vars, l_vars)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/LICENSE ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ http://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ http://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/METADATA ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: docker-pycreds
3
+ Version: 0.4.0
4
+ Summary: Python bindings for the docker credentials store API
5
+ Home-page: https://github.com/shin-/dockerpy-creds
6
+ Author: UNKNOWN
7
+ Author-email: UNKNOWN
8
+ License: Apache License 2.0
9
+ Platform: UNKNOWN
10
+ Classifier: Development Status :: 4 - Beta
11
+ Classifier: Environment :: Other Environment
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: Operating System :: OS Independent
14
+ Classifier: Programming Language :: Python
15
+ Classifier: Programming Language :: Python :: 2
16
+ Classifier: Programming Language :: Python :: 2.6
17
+ Classifier: Programming Language :: Python :: 2.7
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.3
20
+ Classifier: Programming Language :: Python :: 3.4
21
+ Classifier: Programming Language :: Python :: 3.5
22
+ Classifier: Programming Language :: Python :: 3.6
23
+ Classifier: Topic :: Utilities
24
+ Classifier: License :: OSI Approved :: Apache Software License
25
+ Requires-Dist: six (>=1.4.0)
26
+
27
+ # docker-pycreds
28
+
29
+ [![CircleCI](https://circleci.com/gh/shin-/dockerpy-creds/tree/master.svg?style=svg)](https://circleci.com/gh/shin-/dockerpy-creds/tree/master)
30
+
31
+ Python bindings for the docker credentials store API
32
+
33
+ ## Credentials store info
34
+
35
+ [Docker documentation page](https://docs.docker.com/engine/reference/commandline/login/#/credentials-store)
36
+
37
+ ## Requirements
38
+
39
+ On top of the dependencies in `requirements.txt`, the `docker-credential`
40
+ executable for the platform must be installed on the user's system.
41
+
42
+ ## API usage
43
+
44
+ ```python
45
+
46
+ import dockerpycreds
47
+
48
+ store = dockerpycreds.Store('secretservice')
49
+ store.store(
50
+ server='https://index.docker.io/v1/', username='johndoe',
51
+ secret='hunter2'
52
+ )
53
+
54
+ print(store.list())
55
+
56
+ print(store.get('https://index.docker.io/v1/'))
57
+
58
+
59
+ store.erase('https://index.docker.io/v1/')
60
+ ```
61
+
62
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/RECORD ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ docker_pycreds-0.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ docker_pycreds-0.4.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
3
+ docker_pycreds-0.4.0.dist-info/METADATA,sha256=FOmAKPL9hzMoIGVedpB45WzPyJmvEqUPoWIWElf5S18,1821
4
+ docker_pycreds-0.4.0.dist-info/RECORD,,
5
+ docker_pycreds-0.4.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ docker_pycreds-0.4.0.dist-info/WHEEL,sha256=CihQvCnsGZQBGAHLEUMf0IdA4fRduS_NBUTMgCTtvPM,110
7
+ docker_pycreds-0.4.0.dist-info/top_level.txt,sha256=iHN9Ul5VnNovfI4c7fel4nGnEtE4BezC_IQ8_3meRVw,14
8
+ dockerpycreds/__init__.py,sha256=vdD-zY6geCywLcpf8Naplshb5a5dc8czaWtbL7VdEDE,116
9
+ dockerpycreds/__pycache__/__init__.cpython-38.pyc,,
10
+ dockerpycreds/__pycache__/constants.cpython-38.pyc,,
11
+ dockerpycreds/__pycache__/errors.cpython-38.pyc,,
12
+ dockerpycreds/__pycache__/store.cpython-38.pyc,,
13
+ dockerpycreds/__pycache__/utils.cpython-38.pyc,,
14
+ dockerpycreds/__pycache__/version.cpython-38.pyc,,
15
+ dockerpycreds/constants.py,sha256=5HjRGorpVvkamvLBI9yLPvur1E0glaq_ZhuRfbVA_bE,142
16
+ dockerpycreds/errors.py,sha256=ghtvHruuU_wK_f90fnoep-yoVNx4t-OpYrB84v2yHjg,576
17
+ dockerpycreds/store.py,sha256=uTjPxLKzqWanbCWe7fGVJyZX9PKv6QRE5omRSz6J4qE,3799
18
+ dockerpycreds/utils.py,sha256=_ErlaAETnJEvPbBRdrLKBzKCB37eH0uncCdBySPIIKo,1020
19
+ dockerpycreds/version.py,sha256=G-VpNfv2NAp81lTH0IeEq1qv9H2RSJW_Bn2UuwqxF_M,91
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.32.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ dockerpycreds
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_api.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import logging
5
+ import os
6
+ import time
7
+ import warnings
8
+ from abc import ABC, abstractmethod
9
+ from dataclasses import dataclass
10
+ from threading import local
11
+ from typing import TYPE_CHECKING, Any, ClassVar
12
+ from weakref import WeakValueDictionary
13
+
14
+ from ._error import Timeout
15
+
16
+ if TYPE_CHECKING:
17
+ import sys
18
+ from types import TracebackType
19
+
20
+ if sys.version_info >= (3, 11): # pragma: no cover (py311+)
21
+ from typing import Self
22
+ else: # pragma: no cover (<py311)
23
+ from typing_extensions import Self
24
+
25
+
26
+ _LOGGER = logging.getLogger("filelock")
27
+
28
+
29
+ # This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__
30
+ # is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired
31
+ # again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak)
32
+ class AcquireReturnProxy:
33
+ """A context aware object that will release the lock file when exiting."""
34
+
35
+ def __init__(self, lock: BaseFileLock) -> None:
36
+ self.lock = lock
37
+
38
+ def __enter__(self) -> BaseFileLock:
39
+ return self.lock
40
+
41
+ def __exit__(
42
+ self,
43
+ exc_type: type[BaseException] | None,
44
+ exc_value: BaseException | None,
45
+ traceback: TracebackType | None,
46
+ ) -> None:
47
+ self.lock.release()
48
+
49
+
50
+ @dataclass
51
+ class FileLockContext:
52
+ """A dataclass which holds the context for a ``BaseFileLock`` object."""
53
+
54
+ # The context is held in a separate class to allow optional use of thread local storage via the
55
+ # ThreadLocalFileContext class.
56
+
57
+ #: The path to the lock file.
58
+ lock_file: str
59
+
60
+ #: The default timeout value.
61
+ timeout: float
62
+
63
+ #: The mode for the lock files
64
+ mode: int
65
+
66
+ #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held
67
+ lock_file_fd: int | None = None
68
+
69
+ #: The lock counter is used for implementing the nested locking mechanism.
70
+ lock_counter: int = 0 # When the lock is acquired is increased and the lock is only released, when this value is 0
71
+
72
+
73
+ class ThreadLocalFileContext(FileLockContext, local):
74
+ """A thread local version of the ``FileLockContext`` class."""
75
+
76
+
77
+ class BaseFileLock(ABC, contextlib.ContextDecorator):
78
+ """Abstract base class for a file lock object."""
79
+
80
+ _instances: ClassVar[WeakValueDictionary[str, BaseFileLock]] = WeakValueDictionary()
81
+
82
+ def __new__( # noqa: PLR0913
83
+ cls,
84
+ lock_file: str | os.PathLike[str],
85
+ timeout: float = -1, # noqa: ARG003
86
+ mode: int = 0o644, # noqa: ARG003
87
+ thread_local: bool = True, # noqa: ARG003, FBT001, FBT002
88
+ *,
89
+ is_singleton: bool = False,
90
+ **kwargs: dict[str, Any], # capture remaining kwargs for subclasses # noqa: ARG003
91
+ ) -> Self:
92
+ """Create a new lock object or if specified return the singleton instance for the lock file."""
93
+ if not is_singleton:
94
+ return super().__new__(cls)
95
+
96
+ instance = cls._instances.get(str(lock_file))
97
+ if not instance:
98
+ instance = super().__new__(cls)
99
+ cls._instances[str(lock_file)] = instance
100
+
101
+ return instance # type: ignore[return-value] # https://github.com/python/mypy/issues/15322
102
+
103
+ def __init__( # noqa: PLR0913
104
+ self,
105
+ lock_file: str | os.PathLike[str],
106
+ timeout: float = -1,
107
+ mode: int = 0o644,
108
+ thread_local: bool = True, # noqa: FBT001, FBT002
109
+ *,
110
+ is_singleton: bool = False,
111
+ ) -> None:
112
+ """
113
+ Create a new lock object.
114
+
115
+ :param lock_file: path to the file
116
+ :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in \
117
+ the acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it \
118
+ to a negative value. A timeout of 0 means, that there is exactly one attempt to acquire the file lock.
119
+ :param mode: file permissions for the lockfile
120
+ :param thread_local: Whether this object's internal context should be thread local or not. If this is set to \
121
+ ``False`` then the lock will be reentrant across threads.
122
+ :param is_singleton: If this is set to ``True`` then only one instance of this class will be created \
123
+ per lock file. This is useful if you want to use the lock object for reentrant locking without needing \
124
+ to pass the same object around.
125
+ """
126
+ self._is_thread_local = thread_local
127
+ self._is_singleton = is_singleton
128
+
129
+ # Create the context. Note that external code should not work with the context directly and should instead use
130
+ # properties of this class.
131
+ kwargs: dict[str, Any] = {
132
+ "lock_file": os.fspath(lock_file),
133
+ "timeout": timeout,
134
+ "mode": mode,
135
+ }
136
+ self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs)
137
+
138
+ def is_thread_local(self) -> bool:
139
+ """:return: a flag indicating if this lock is thread local or not"""
140
+ return self._is_thread_local
141
+
142
+ @property
143
+ def is_singleton(self) -> bool:
144
+ """:return: a flag indicating if this lock is singleton or not"""
145
+ return self._is_singleton
146
+
147
+ @property
148
+ def lock_file(self) -> str:
149
+ """:return: path to the lock file"""
150
+ return self._context.lock_file
151
+
152
+ @property
153
+ def timeout(self) -> float:
154
+ """
155
+ :return: the default timeout value, in seconds
156
+
157
+ .. versionadded:: 2.0.0
158
+ """
159
+ return self._context.timeout
160
+
161
+ @timeout.setter
162
+ def timeout(self, value: float | str) -> None:
163
+ """
164
+ Change the default timeout value.
165
+
166
+ :param value: the new value, in seconds
167
+ """
168
+ self._context.timeout = float(value)
169
+
170
+ @abstractmethod
171
+ def _acquire(self) -> None:
172
+ """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file."""
173
+ raise NotImplementedError
174
+
175
+ @abstractmethod
176
+ def _release(self) -> None:
177
+ """Releases the lock and sets self._context.lock_file_fd to None."""
178
+ raise NotImplementedError
179
+
180
+ @property
181
+ def is_locked(self) -> bool:
182
+ """
183
+
184
+ :return: A boolean indicating if the lock file is holding the lock currently.
185
+
186
+ .. versionchanged:: 2.0.0
187
+
188
+ This was previously a method and is now a property.
189
+ """
190
+ return self._context.lock_file_fd is not None
191
+
192
+ @property
193
+ def lock_counter(self) -> int:
194
+ """:return: The number of times this lock has been acquired (but not yet released)."""
195
+ return self._context.lock_counter
196
+
197
+ def acquire(
198
+ self,
199
+ timeout: float | None = None,
200
+ poll_interval: float = 0.05,
201
+ *,
202
+ poll_intervall: float | None = None,
203
+ blocking: bool = True,
204
+ ) -> AcquireReturnProxy:
205
+ """
206
+ Try to acquire the file lock.
207
+
208
+ :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and
209
+ if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired
210
+ :param poll_interval: interval of trying to acquire the lock file
211
+ :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead
212
+ :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the
213
+ first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired.
214
+ :raises Timeout: if fails to acquire lock within the timeout period
215
+ :return: a context object that will unlock the file when the context is exited
216
+
217
+ .. code-block:: python
218
+
219
+ # You can use this method in the context manager (recommended)
220
+ with lock.acquire():
221
+ pass
222
+
223
+ # Or use an equivalent try-finally construct:
224
+ lock.acquire()
225
+ try:
226
+ pass
227
+ finally:
228
+ lock.release()
229
+
230
+ .. versionchanged:: 2.0.0
231
+
232
+ This method returns now a *proxy* object instead of *self*,
233
+ so that it can be used in a with statement without side effects.
234
+
235
+ """
236
+ # Use the default timeout, if no timeout is provided.
237
+ if timeout is None:
238
+ timeout = self._context.timeout
239
+
240
+ if poll_intervall is not None:
241
+ msg = "use poll_interval instead of poll_intervall"
242
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
243
+ poll_interval = poll_intervall
244
+
245
+ # Increment the number right at the beginning. We can still undo it, if something fails.
246
+ self._context.lock_counter += 1
247
+
248
+ lock_id = id(self)
249
+ lock_filename = self.lock_file
250
+ start_time = time.perf_counter()
251
+ try:
252
+ while True:
253
+ if not self.is_locked:
254
+ _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename)
255
+ self._acquire()
256
+ if self.is_locked:
257
+ _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename)
258
+ break
259
+ if blocking is False:
260
+ _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename)
261
+ raise Timeout(lock_filename) # noqa: TRY301
262
+ if 0 <= timeout < time.perf_counter() - start_time:
263
+ _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename)
264
+ raise Timeout(lock_filename) # noqa: TRY301
265
+ msg = "Lock %s not acquired on %s, waiting %s seconds ..."
266
+ _LOGGER.debug(msg, lock_id, lock_filename, poll_interval)
267
+ time.sleep(poll_interval)
268
+ except BaseException: # Something did go wrong, so decrement the counter.
269
+ self._context.lock_counter = max(0, self._context.lock_counter - 1)
270
+ raise
271
+ return AcquireReturnProxy(lock=self)
272
+
273
+ def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002
274
+ """
275
+ Releases the file lock. Please note, that the lock is only completely released, if the lock counter is 0. Also
276
+ note, that the lock file itself is not automatically deleted.
277
+
278
+ :param force: If true, the lock counter is ignored and the lock is released in every case/
279
+ """
280
+ if self.is_locked:
281
+ self._context.lock_counter -= 1
282
+
283
+ if self._context.lock_counter == 0 or force:
284
+ lock_id, lock_filename = id(self), self.lock_file
285
+
286
+ _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename)
287
+ self._release()
288
+ self._context.lock_counter = 0
289
+ _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename)
290
+
291
+ def __enter__(self) -> Self:
292
+ """
293
+ Acquire the lock.
294
+
295
+ :return: the lock object
296
+ """
297
+ self.acquire()
298
+ return self
299
+
300
+ def __exit__(
301
+ self,
302
+ exc_type: type[BaseException] | None,
303
+ exc_value: BaseException | None,
304
+ traceback: TracebackType | None,
305
+ ) -> None:
306
+ """
307
+ Release the lock.
308
+
309
+ :param exc_type: the exception type if raised
310
+ :param exc_value: the exception value if raised
311
+ :param traceback: the exception traceback if raised
312
+ """
313
+ self.release()
314
+
315
+ def __del__(self) -> None:
316
+ """Called when the lock object is deleted."""
317
+ self.release(force=True)
318
+
319
+
320
+ __all__ = [
321
+ "BaseFileLock",
322
+ "AcquireReturnProxy",
323
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_unix.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import sys
5
+ from contextlib import suppress
6
+ from errno import ENOSYS
7
+ from typing import cast
8
+
9
+ from ._api import BaseFileLock
10
+ from ._util import ensure_directory_exists
11
+
12
+ #: a flag to indicate if the fcntl API is available
13
+ has_fcntl = False
14
+ if sys.platform == "win32": # pragma: win32 cover
15
+
16
+ class UnixFileLock(BaseFileLock):
17
+ """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
18
+
19
+ def _acquire(self) -> None:
20
+ raise NotImplementedError
21
+
22
+ def _release(self) -> None:
23
+ raise NotImplementedError
24
+
25
+ else: # pragma: win32 no cover
26
+ try:
27
+ import fcntl
28
+ except ImportError:
29
+ pass
30
+ else:
31
+ has_fcntl = True
32
+
33
+ class UnixFileLock(BaseFileLock):
34
+ """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
35
+
36
+ def _acquire(self) -> None:
37
+ ensure_directory_exists(self.lock_file)
38
+ open_flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
39
+ fd = os.open(self.lock_file, open_flags, self._context.mode)
40
+ with suppress(PermissionError): # This locked is not owned by this UID
41
+ os.fchmod(fd, self._context.mode)
42
+ try:
43
+ fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
44
+ except OSError as exception:
45
+ os.close(fd)
46
+ if exception.errno == ENOSYS: # NotImplemented error
47
+ msg = "FileSystem does not appear to support flock; user SoftFileLock instead"
48
+ raise NotImplementedError(msg) from exception
49
+ else:
50
+ self._context.lock_file_fd = fd
51
+
52
+ def _release(self) -> None:
53
+ # Do not remove the lockfile:
54
+ # https://github.com/tox-dev/py-filelock/issues/31
55
+ # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
56
+ fd = cast(int, self._context.lock_file_fd)
57
+ self._context.lock_file_fd = None
58
+ fcntl.flock(fd, fcntl.LOCK_UN)
59
+ os.close(fd)
60
+
61
+
62
+ __all__ = [
63
+ "has_fcntl",
64
+ "UnixFileLock",
65
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_util.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import stat
5
+ import sys
6
+ from errno import EACCES, EISDIR
7
+ from pathlib import Path
8
+
9
+
10
+ def raise_on_not_writable_file(filename: str) -> None:
11
+ """
12
+ Raise an exception if attempting to open the file for writing would fail.
13
+ This is done so files that will never be writable can be separated from
14
+ files that are writable but currently locked
15
+ :param filename: file to check
16
+ :raises OSError: as if the file was opened for writing.
17
+ """
18
+ try: # use stat to do exists + can write to check without race condition
19
+ file_stat = os.stat(filename) # noqa: PTH116
20
+ except OSError:
21
+ return # swallow does not exist or other errors
22
+
23
+ if file_stat.st_mtime != 0: # if os.stat returns but modification is zero that's an invalid os.stat - ignore it
24
+ if not (file_stat.st_mode & stat.S_IWUSR):
25
+ raise PermissionError(EACCES, "Permission denied", filename)
26
+
27
+ if stat.S_ISDIR(file_stat.st_mode):
28
+ if sys.platform == "win32": # pragma: win32 cover
29
+ # On Windows, this is PermissionError
30
+ raise PermissionError(EACCES, "Permission denied", filename)
31
+ else: # pragma: win32 no cover # noqa: RET506
32
+ # On linux / macOS, this is IsADirectoryError
33
+ raise IsADirectoryError(EISDIR, "Is a directory", filename)
34
+
35
+
36
+ def ensure_directory_exists(filename: Path | str) -> None:
37
+ """
38
+ Ensure the directory containing the file exists (create it if necessary)
39
+ :param filename: file.
40
+ """
41
+ Path(filename).parent.mkdir(parents=True, exist_ok=True)
42
+
43
+
44
+ __all__ = [
45
+ "raise_on_not_writable_file",
46
+ "ensure_directory_exists",
47
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/py.typed ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5.cpython-38-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9bd8b2c9d5f9c812bde2780c63ad38c986ecf939d0dfd2b701102f0d0ba5829e
3
+ size 265208
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5f.cpython-38-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:926a102964bde03e17f0809cd42688b579e9f8faf9065a625d9c1a106039695d
3
+ size 289768
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/METADATA ADDED
@@ -0,0 +1,264 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: humanize
3
+ Version: 4.2.2
4
+ Summary: Python humanize utilities
5
+ Home-page: https://github.com/python-humanize/humanize
6
+ Author: Jason Moiron
7
+ Author-email: jmoiron@jmoiron.net
8
+ Maintainer: Hugo van Kemenade
9
+ License: MIT
10
+ Project-URL: Source, https://github.com/python-humanize/humanize
11
+ Project-URL: Issue tracker, https://github.com/python-humanize/humanize/issues
12
+ Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-humanize?utm_source=pypi-humanize&utm_medium=pypi
13
+ Project-URL: Documentation, https://python-humanize.readthedocs.io/
14
+ Project-URL: Release notes, https://github.com/python-humanize/humanize/releases
15
+ Keywords: humanize time size
16
+ Classifier: Development Status :: 5 - Production/Stable
17
+ Classifier: Intended Audience :: Developers
18
+ Classifier: License :: OSI Approved :: MIT License
19
+ Classifier: Operating System :: OS Independent
20
+ Classifier: Programming Language :: Python
21
+ Classifier: Programming Language :: Python :: 3
22
+ Classifier: Programming Language :: Python :: 3 :: Only
23
+ Classifier: Programming Language :: Python :: 3.7
24
+ Classifier: Programming Language :: Python :: 3.8
25
+ Classifier: Programming Language :: Python :: 3.9
26
+ Classifier: Programming Language :: Python :: 3.10
27
+ Classifier: Programming Language :: Python :: 3.11
28
+ Classifier: Programming Language :: Python :: Implementation :: CPython
29
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
30
+ Classifier: Topic :: Text Processing
31
+ Classifier: Topic :: Text Processing :: General
32
+ Requires-Python: >=3.7
33
+ Description-Content-Type: text/markdown
34
+ License-File: LICENCE
35
+ Requires-Dist: importlib-metadata ; python_version < "3.8"
36
+ Provides-Extra: tests
37
+ Requires-Dist: freezegun ; extra == 'tests'
38
+ Requires-Dist: pytest ; extra == 'tests'
39
+ Requires-Dist: pytest-cov ; extra == 'tests'
40
+
41
+ # humanize
42
+
43
+ [![PyPI version](https://img.shields.io/pypi/v/humanize.svg?logo=pypi&logoColor=FFE873)](https://pypi.org/project/humanize/)
44
+ [![Supported Python versions](https://img.shields.io/pypi/pyversions/humanize.svg?logo=python&logoColor=FFE873)](https://pypi.org/project/humanize/)
45
+ [![Documentation Status](https://readthedocs.org/projects/python-humanize/badge/?version=latest)](https://python-humanize.readthedocs.io/en/latest/?badge=latest)
46
+ [![PyPI downloads](https://img.shields.io/pypi/dm/humanize.svg)](https://pypistats.org/packages/humanize)
47
+ [![GitHub Actions status](https://github.com/python-humanize/humanize/workflows/Test/badge.svg)](https://github.com/python-humanize/humanize/actions)
48
+ [![codecov](https://codecov.io/gh/python-humanize/humanize/branch/main/graph/badge.svg)](https://codecov.io/gh/python-humanize/humanize)
49
+ [![MIT License](https://img.shields.io/github/license/python-humanize/humanize.svg)](LICENCE)
50
+ [![Tidelift](https://tidelift.com/badges/package/pypi/humanize)](https://tidelift.com/subscription/pkg/pypi-humanize?utm_source=pypi-humanize&utm_medium=badge)
51
+
52
+ This modest package contains various common humanization utilities, like turning
53
+ a number into a fuzzy human-readable duration ("3 minutes ago") or into a
54
+ human-readable size or throughput. It is localized to:
55
+
56
+ - Arabic
57
+ - Bengali
58
+ - Brazilian Portuguese
59
+ - Catalan
60
+ - Danish
61
+ - Dutch
62
+ - European Portuguese
63
+ - Finnish
64
+ - French
65
+ - German
66
+ - Indonesian
67
+ - Italian
68
+ - Japanese
69
+ - Korean
70
+ - Persian
71
+ - Polish
72
+ - Russian
73
+ - Simplified Chinese
74
+ - Slovak
75
+ - Slovenian
76
+ - Spanish
77
+ - Swedish
78
+ - Turkish
79
+ - Ukrainian
80
+ - Vietnamese
81
+
82
+ ## API reference
83
+
84
+ [https://python-humanize.readthedocs.io](https://python-humanize.readthedocs.io)
85
+
86
+ <!-- usage-start -->
87
+
88
+ ## Usage
89
+
90
+ ### Integer humanization
91
+
92
+ ```pycon
93
+ >>> import humanize
94
+ >>> humanize.intcomma(12345)
95
+ '12,345'
96
+ >>> humanize.intword(123455913)
97
+ '123.5 million'
98
+ >>> humanize.intword(12345591313)
99
+ '12.3 billion'
100
+ >>> humanize.apnumber(4)
101
+ 'four'
102
+ >>> humanize.apnumber(41)
103
+ '41'
104
+ ```
105
+
106
+ ### Date & time humanization
107
+
108
+ ```pycon
109
+ >>> import humanize
110
+ >>> import datetime as dt
111
+ >>> humanize.naturalday(dt.datetime.now())
112
+ 'today'
113
+ >>> humanize.naturaldelta(dt.timedelta(seconds=1001))
114
+ '16 minutes'
115
+ >>> humanize.naturalday(dt.datetime.now() - dt.timedelta(days=1))
116
+ 'yesterday'
117
+ >>> humanize.naturalday(dt.date(2007, 6, 5))
118
+ 'Jun 05'
119
+ >>> humanize.naturaldate(dt.date(2007, 6, 5))
120
+ 'Jun 05 2007'
121
+ >>> humanize.naturaltime(dt.datetime.now() - dt.timedelta(seconds=1))
122
+ 'a second ago'
123
+ >>> humanize.naturaltime(dt.datetime.now() - dt.timedelta(seconds=3600))
124
+ 'an hour ago'
125
+ ```
126
+
127
+ ### Precise time delta
128
+
129
+ ```pycon
130
+ >>> import humanize
131
+ >>> import datetime as dt
132
+ >>> delta = dt.timedelta(seconds=3633, days=2, microseconds=123000)
133
+ >>> humanize.precisedelta(delta)
134
+ '2 days, 1 hour and 33.12 seconds'
135
+ >>> humanize.precisedelta(delta, minimum_unit="microseconds")
136
+ '2 days, 1 hour, 33 seconds and 123 milliseconds'
137
+ >>> humanize.precisedelta(delta, suppress=["days"], format="%0.4f")
138
+ '49 hours and 33.1230 seconds'
139
+ ```
140
+
141
+ #### Smaller units
142
+
143
+ If seconds are too large, set `minimum_unit` to milliseconds or microseconds:
144
+
145
+ ```pycon
146
+ >>> import humanize
147
+ >>> import datetime as dt
148
+ >>> humanize.naturaldelta(dt.timedelta(seconds=2))
149
+ '2 seconds'
150
+ ```
151
+
152
+ ```pycon
153
+ >>> delta = dt.timedelta(milliseconds=4)
154
+ >>> humanize.naturaldelta(delta)
155
+ 'a moment'
156
+ >>> humanize.naturaldelta(delta, minimum_unit="milliseconds")
157
+ '4 milliseconds'
158
+ >>> humanize.naturaldelta(delta, minimum_unit="microseconds")
159
+ '4 milliseconds'
160
+ ```
161
+
162
+ ```pycon
163
+ >>> humanize.naturaltime(delta)
164
+ 'now'
165
+ >>> humanize.naturaltime(delta, minimum_unit="milliseconds")
166
+ '4 milliseconds ago'
167
+ >>> humanize.naturaltime(delta, minimum_unit="microseconds")
168
+ '4 milliseconds ago'
169
+ ```
170
+
171
+ ### File size humanization
172
+
173
+ ```pycon
174
+ >>> import humanize
175
+ >>> humanize.naturalsize(1_000_000)
176
+ '1.0 MB'
177
+ >>> humanize.naturalsize(1_000_000, binary=True)
178
+ '976.6 KiB'
179
+ >>> humanize.naturalsize(1_000_000, gnu=True)
180
+ '976.6K'
181
+ ```
182
+
183
+ ### Human-readable floating point numbers
184
+
185
+ ```pycon
186
+ >>> import humanize
187
+ >>> humanize.fractional(1/3)
188
+ '1/3'
189
+ >>> humanize.fractional(1.5)
190
+ '1 1/2'
191
+ >>> humanize.fractional(0.3)
192
+ '3/10'
193
+ >>> humanize.fractional(0.333)
194
+ '333/1000'
195
+ >>> humanize.fractional(1)
196
+ '1'
197
+ ```
198
+
199
+ ### Scientific notation
200
+
201
+ ```pycon
202
+ >>> import humanize
203
+ >>> humanize.scientific(0.3)
204
+ '3.00 x 10⁻¹'
205
+ >>> humanize.scientific(500)
206
+ '5.00 x 10²'
207
+ >>> humanize.scientific("20000")
208
+ '2.00 x 10⁴'
209
+ >>> humanize.scientific(1**10)
210
+ '1.00 x 10⁰'
211
+ >>> humanize.scientific(1**10, precision=1)
212
+ '1.0 x 10⁰'
213
+ >>> humanize.scientific(1**10, precision=0)
214
+ '1 x 10⁰'
215
+ ```
216
+
217
+ ## Localization
218
+
219
+ How to change locale at runtime:
220
+
221
+ ```pycon
222
+ >>> import humanize
223
+ >>> import datetime as dt
224
+ >>> humanize.naturaltime(dt.timedelta(seconds=3))
225
+ '3 seconds ago'
226
+ >>> _t = humanize.i18n.activate("ru_RU")
227
+ >>> humanize.naturaltime(dt.timedelta(seconds=3))
228
+ '3 секунды назад'
229
+ >>> humanize.i18n.deactivate()
230
+ >>> humanize.naturaltime(dt.timedelta(seconds=3))
231
+ '3 seconds ago'
232
+ ```
233
+
234
+ You can pass additional parameter `path` to `activate` to specify a path to search
235
+ locales in.
236
+
237
+ ```pycon
238
+ >>> import humanize
239
+ >>> humanize.i18n.activate("xx_XX")
240
+ <...>
241
+ FileNotFoundError: [Errno 2] No translation file found for domain: 'humanize'
242
+ >>> humanize.i18n.activate("pt_BR", path="path/to/my/own/translation/")
243
+ <gettext.GNUTranslations instance ...>
244
+ ```
245
+
246
+ <!-- usage-end -->
247
+
248
+ How to add new phrases to existing locale files:
249
+
250
+ ```console
251
+ $ xgettext --from-code=UTF-8 -o humanize.pot -k'_' -k'N_' -k'P_:1c,2' -l python src/humanize/*.py # extract new phrases
252
+ $ msgmerge -U src/humanize/locale/ru_RU/LC_MESSAGES/humanize.po humanize.pot # add them to locale files
253
+ ```
254
+
255
+ How to add a new locale:
256
+
257
+ ```console
258
+ $ msginit -i humanize.pot -o humanize/locale/<locale name>/LC_MESSAGES/humanize.po --locale <locale name>
259
+ ```
260
+
261
+ Where `<locale name>` is a locale abbreviation, eg. `en_GB`, `pt_BR` or just `ru`, `fr`
262
+ etc.
263
+
264
+ List the language at the top of this README.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/RECORD ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ humanize-4.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ humanize-4.2.2.dist-info/LICENCE,sha256=i6bBgRKkMUAK08dD9wZwB5swJUXZiIT8LyipHDg6A4A,1078
3
+ humanize-4.2.2.dist-info/METADATA,sha256=RceLVGyIQlQIoZ6XiIxa6J4Gn6WTbFoOD5dGgKlZusA,7672
4
+ humanize-4.2.2.dist-info/RECORD,,
5
+ humanize-4.2.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ humanize-4.2.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
7
+ humanize-4.2.2.dist-info/top_level.txt,sha256=1B-bfyhHo8hE-z_Kn_iBhAmKipLlcRema3YIwAD3u5A,9
8
+ humanize/__init__.py,sha256=eHXLDwlS7FrYU_GU0bCoUeYN6SPwz-UH0f2y1aBbTzo,959
9
+ humanize/__pycache__/__init__.cpython-38.pyc,,
10
+ humanize/__pycache__/filesize.cpython-38.pyc,,
11
+ humanize/__pycache__/i18n.cpython-38.pyc,,
12
+ humanize/__pycache__/number.cpython-38.pyc,,
13
+ humanize/__pycache__/time.cpython-38.pyc,,
14
+ humanize/filesize.py,sha256=-SZ1l1-SquKegx6kky5BM4S5HDslieExwXaiAdctvNA,2273
15
+ humanize/i18n.py,sha256=i7WgiA02FSv2jpPCYOjmp6AQrFTQvhSbdBkbnGRimDQ,4625
16
+ humanize/locale/ar/LC_MESSAGES/humanize.mo,sha256=eFh_sqyVD47W3cHxJcHwJBM4qbdP2nBI9QXjZnw3BBk,3856
17
+ humanize/locale/ar/LC_MESSAGES/humanize.po,sha256=2QWYaZhxAQukPl5RmBFNsC4yE-1DTP5JunSmf4RlDXg,6933
18
+ humanize/locale/bn_BD/LC_MESSAGES/humanize.mo,sha256=6NB9epGOIUwSsUY7lUD-aW9IFAdLw1z_TGIZen1Fb9w,2399
19
+ humanize/locale/bn_BD/LC_MESSAGES/humanize.po,sha256=sM2dzgpJ6n8unSxSwA0jpBd4ZbUphm2k96vim1aTIgo,5554
20
+ humanize/locale/ca_ES/LC_MESSAGES/humanize.mo,sha256=veqJwGHEzymT-ott8nPopAm2SZ4wto3yJRzU8JGXIG8,3345
21
+ humanize/locale/ca_ES/LC_MESSAGES/humanize.po,sha256=bjUvQssMTmw2rjYoeam2r13j4CP_Efu8txHS_XzTsjA,6489
22
+ humanize/locale/da_DK/LC_MESSAGES/humanize.mo,sha256=m_IEC2ajKBYYw9Sh-_CwYIX0-vrwIHQ6nacChzInfOI,3524
23
+ humanize/locale/da_DK/LC_MESSAGES/humanize.po,sha256=C693HAYkm-1kMZVDul5pWPD0i93yu-xNGgKHRnDT6Zs,6619
24
+ humanize/locale/de_DE/LC_MESSAGES/humanize.mo,sha256=Evtv5Sz2GCNTW7S413xfqRC-hmPiz-uKOgh5m2Cyf78,3291
25
+ humanize/locale/de_DE/LC_MESSAGES/humanize.po,sha256=rg0H3I7F7Bbto35yEATC1CL54vviXaRhJ_I60XG4-0I,6647
26
+ humanize/locale/es_ES/LC_MESSAGES/humanize.mo,sha256=WRam8vl8OU2O76EkwQmhMVeliuZKO3W2_6zP9yp63W0,3466
27
+ humanize/locale/es_ES/LC_MESSAGES/humanize.po,sha256=TT3ln5Dbf4ykXY-G5JffiBt96vOFhc2NbR1WTtgOpxU,6594
28
+ humanize/locale/fa_IR/LC_MESSAGES/humanize.mo,sha256=42B1yDpYHdrnMVev01aEr_BaLlq-gMvh17CfjTM1HBI,4010
29
+ humanize/locale/fa_IR/LC_MESSAGES/humanize.po,sha256=XW0FDUzPO3WbDwiaCusFe3t68SNkU3JIIB6JB1TmfVQ,7124
30
+ humanize/locale/fi_FI/LC_MESSAGES/humanize.mo,sha256=5lleMFhHuP2dkg01BewxrLcPWBE-W8CdCNWSme1E1kI,3369
31
+ humanize/locale/fi_FI/LC_MESSAGES/humanize.po,sha256=yMiEAY0Wjzum2MoXbGIqLc_jK7DvZrasg5Wxk7_R-uo,6697
32
+ humanize/locale/fr_FR/LC_MESSAGES/humanize.mo,sha256=dPGlVRVq25z1w8AZwgZQUuAGlnwTr1fsHr0V3Ou-5QM,2645
33
+ humanize/locale/fr_FR/LC_MESSAGES/humanize.po,sha256=9ykifaVg2d9KXBqimfyOfPkyrAUUXG9XqdkZKZA98H4,6905
34
+ humanize/locale/id_ID/LC_MESSAGES/humanize.mo,sha256=6kl6Wh4ROJODFTkIbhV6UQ2JKT1inQn9lDxUeEepUTw,3005
35
+ humanize/locale/id_ID/LC_MESSAGES/humanize.po,sha256=HXhSoHca2AXAvz4k6pNTzyfvoTr22AmYgoUsosmeE8U,6057
36
+ humanize/locale/it_IT/LC_MESSAGES/humanize.mo,sha256=2l-5qYLpZXG4bn--WH_eCKStUaVHwQ8owWpU626fv_E,3188
37
+ humanize/locale/it_IT/LC_MESSAGES/humanize.po,sha256=hPp_0Li-IIERDMasXr7BRWQixeDR2R5RUvZTHLFURhU,6565
38
+ humanize/locale/ja_JP/LC_MESSAGES/humanize.mo,sha256=QaWCy6xeudjHEhJZmrStFghg-Lax9KC55JbhiG8AFE8,2457
39
+ humanize/locale/ja_JP/LC_MESSAGES/humanize.po,sha256=KEJS3VWc0BupyLag-SVRXv4H7Y6lk8_Kx7eghJJQVUU,6107
40
+ humanize/locale/ko_KR/LC_MESSAGES/humanize.mo,sha256=yDaJCpAY79DonnPJQcx0E8nqjuHJvmlv523Zgw-pjpg,2016
41
+ humanize/locale/ko_KR/LC_MESSAGES/humanize.po,sha256=KyilkPrVkuDWkerBoTcQDkEzetatFoYYe9Q2wGjlp2Q,7019
42
+ humanize/locale/nl_NL/LC_MESSAGES/humanize.mo,sha256=pUzwmGiXcgEgepNqS4KrRLbJnCG2uNdZdfzCu_HFSJw,3258
43
+ humanize/locale/nl_NL/LC_MESSAGES/humanize.po,sha256=OMANrojN8tR5zdwEZywhRwdgo5JtSO3k9507CRhQ5CQ,6604
44
+ humanize/locale/pl_PL/LC_MESSAGES/humanize.mo,sha256=bNeJCcc2gsI3K4_usa_imigYISzGofwsrZ7X7iiC6Pw,3621
45
+ humanize/locale/pl_PL/LC_MESSAGES/humanize.po,sha256=hcGIIqwX9xIuuEW7zwZSjJIXSDxmnG2bG6ABD3w1HKY,7148
46
+ humanize/locale/pt_BR/LC_MESSAGES/humanize.mo,sha256=lElhHaH6FWRwZWit5vs3kQmh5b0bQXYfRY6zqdVXm5U,3184
47
+ humanize/locale/pt_BR/LC_MESSAGES/humanize.po,sha256=WJDyBf3REPuI49q-u6-Hf5KsIdQJELY000HfbtS4eEo,6524
48
+ humanize/locale/pt_PT/LC_MESSAGES/humanize.mo,sha256=AFoTysVc89o2EMopIngkO7vdvYmQE5BmczaWN1sjQI0,3403
49
+ humanize/locale/pt_PT/LC_MESSAGES/humanize.po,sha256=OMgvxGlwlR_xr0-1Hj-tNFKnSpPzihXN7Es1TWCoIyQ,6567
50
+ humanize/locale/ru_RU/LC_MESSAGES/humanize.mo,sha256=i-go1a1XjSR-PpWHBlpOst7DhoY-XlLTDP-paFJ68yY,4538
51
+ humanize/locale/ru_RU/LC_MESSAGES/humanize.po,sha256=38CGjLgWEe8Ci287EZVQbUynVd2nInFvSfBmUBlJ1AA,7906
52
+ humanize/locale/sk_SK/LC_MESSAGES/humanize.mo,sha256=0_mdWYPZhFsPpx5hWEUweV5_MCuIgXJIhE0gBI7qw1Y,3540
53
+ humanize/locale/sk_SK/LC_MESSAGES/humanize.po,sha256=lEKJqzVEMjOeQrE-U_x-g2oOvVPMBq36FVTLxH8Xe5w,7180
54
+ humanize/locale/sl_SI/LC_MESSAGES/humanize.mo,sha256=scf8JQb2OCHRuZLDjaaX4Sta1E1p7ff4XGqXE5GuJP8,3979
55
+ humanize/locale/sl_SI/LC_MESSAGES/humanize.po,sha256=suLp80dryZpMUDWqtm9M6pXFmaojKQKpoPv89UAbMzo,7618
56
+ humanize/locale/sv_SE/LC_MESSAGES/humanize.mo,sha256=Z8Guy6d2Cf2PGW7LFB5FOe4fy3uX5zQCITchn-3t7Ss,3498
57
+ humanize/locale/sv_SE/LC_MESSAGES/humanize.po,sha256=ZB9DAMEWT8MbpC_i-dTSZVZ-n3V67xTft1SDCi3P4Lc,6596
58
+ humanize/locale/tr_TR/LC_MESSAGES/humanize.mo,sha256=fgdMjVbGMQpV1FuMs366OCh4oGAPzdHHWfVGynbQ00E,3151
59
+ humanize/locale/tr_TR/LC_MESSAGES/humanize.po,sha256=hWmHZ1b65Sw0f89JJncRpqPo6IXVD7yXA4lrA37hrg0,6479
60
+ humanize/locale/uk_UA/LC_MESSAGES/humanize.mo,sha256=1Dgq35RkIlF_fUFvV9kayXEZZoD7ceRsViCweUsrZSM,4188
61
+ humanize/locale/uk_UA/LC_MESSAGES/humanize.po,sha256=4bHPUlV0Kc5hFK12wrlMl2PCkW9FWIzgSkxUv0OEXEM,7661
62
+ humanize/locale/vi_VN/LC_MESSAGES/humanize.mo,sha256=qN3hRa5kdPw49HwB4W8aonaiuUDrjyj-CbLswcEUbFQ,3164
63
+ humanize/locale/vi_VN/LC_MESSAGES/humanize.po,sha256=9vvoquJBsPT1Il9W_a6Nh2LL8dvxn_JaBJCM7hwLQeg,6920
64
+ humanize/locale/zh_CN/LC_MESSAGES/humanize.mo,sha256=l5MDM8l9QHK0Q2Y_cpDYZmyklnQAqy-dPSbt8Od5MxM,3011
65
+ humanize/locale/zh_CN/LC_MESSAGES/humanize.po,sha256=nhwY-JtHt-QZctgy_HU-W56qfaTkv6brObpEJu8M9vc,6359
66
+ humanize/locale/zh_HK/LC_MESSAGES/humanize.mo,sha256=nNSSvh4RQ6WXm6aVInM4cIk1zY3PKuUx6r5f7smTW6w,3323
67
+ humanize/locale/zh_HK/LC_MESSAGES/humanize.po,sha256=s7eEhwtRjjRds8LskIzmVqxTAEm7-7aUbk04s3Vp0eE,6495
68
+ humanize/number.py,sha256=E9FdEOrcZ2f5IHVEeZKaFE5NnMm9KT_4TAuZl69zWZE,14489
69
+ humanize/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
70
+ humanize/time.py,sha256=8damCbq1kXF-VojBfwPPRZB9JjX0oG4-25y8nTOiS4Y,18278
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/humanize-4.2.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ humanize
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """ imageio_ffmpeg, FFMPEG wrapper for Python.
2
+ """
3
+
4
+ # flake8: noqa
5
+
6
+ from ._definitions import __version__
7
+ from ._utils import get_ffmpeg_exe, get_ffmpeg_version
8
+ from ._io import count_frames_and_secs, read_frames, write_frames
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_definitions.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import struct
3
+
4
__version__ = "0.4.7"  # version of the imageio_ffmpeg package
5
+
6
+
7
def get_platform():
    """Return a short platform string such as "linux64", "win32" or "osx64".

    The numeric suffix is the pointer size in bits (32 or 64), taken from
    struct.calcsize("P"). Cygwin is reported as Windows. Returns None for
    platforms that are not recognized.
    """
    nbits = 8 * struct.calcsize("P")
    # Checked in the same order as the original if/elif chain.
    prefix_to_name = (
        ("linux", "linux"),
        ("freebsd", "freebsd"),
        ("win", "win"),
        ("cygwin", "win"),
        ("darwin", "osx"),
    )
    for prefix, name in prefix_to_name:
        if sys.platform.startswith(prefix):
            return "{}{}".format(name, nbits)
    return None  # pragma: no cover - unknown platform
21
+
22
+
23
# The Linux static builds (https://johnvansickle.com/ffmpeg/) are built
# for Linux kernels 2.6.32 and up (at the time of writing, ffmpeg v4.1).
# This corresponds to CentOS 6. This means we should use manylinux2010 and not
# manylinux1.
# manylinux1: https://www.python.org/dev/peps/pep-0513
# manylinux2010: https://www.python.org/dev/peps/pep-0571


# Platform string (as returned by get_platform()) -> bundled ffmpeg filename
FNAME_PER_PLATFORM = {
    "osx64": "ffmpeg-osx64-v4.2.2",  # 10.10+
    "win32": "ffmpeg-win32-v4.2.2.exe",  # Windows 7+
    "win64": "ffmpeg-win64-v4.2.2.exe",
    # "linux32": "ffmpeg-linux32-v4.2.2",
    "linux64": "ffmpeg-linux64-v4.2.2",  # Kernel 3.2.0+
    "linuxaarch64": "ffmpeg-linuxaarch64-v4.2.2",
}

# Dotted macOS wheel-platform tags used when building the osx wheel.
osxplats = "macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64"

# Wheel tag -> platform string
WHEEL_BUILDS = {
    "py3-none-manylinux2010_x86_64": "linux64",
    "py3-none-manylinux2014_aarch64": "linuxaarch64",
    "py3-none-" + osxplats: "osx64",
    "py3-none-win32": "win32",
    "py3-none-win_amd64": "win64",
}
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_io.py ADDED
@@ -0,0 +1,683 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import time
3
+ import pathlib
4
+ import subprocess
5
+ from functools import lru_cache
6
+ from collections import defaultdict
7
+
8
+ from ._utils import get_ffmpeg_exe, _popen_kwargs, logger
9
+ from ._parsing import LogCatcher, parse_ffmpeg_header, cvsecs
10
+
11
+
12
ISWIN = sys.platform.startswith("win")  # True on Windows (32 or 64 bit)

# Relative preference per H.264 encoder name; higher is better. Names not
# listed here map to -1 via the defaultdict factory, so any known encoder
# outranks an unknown one when sorting.
h264_encoder_preference = defaultdict(lambda: -1)
# The libx264 was the default encoder for a long time with imageio
h264_encoder_preference["libx264"] = 100

# Encoder with the nvidia graphics card dedicated hardware
h264_encoder_preference["h264_nvenc"] = 90
# Deprecated names for the same encoder
h264_encoder_preference["nvenc_h264"] = 90
h264_encoder_preference["nvenc"] = 90

# vaapi provides hardware encoding with intel integrated graphics chipsets
h264_encoder_preference["h264_vaapi"] = 80

# openh264 is cisco's open source encoder
h264_encoder_preference["libopenh264"] = 70

h264_encoder_preference["libx264rgb"] = 50
31
+
32
+
33
def ffmpeg_test_encoder(encoder):
    """Return True if the ffmpeg executable can actually use *encoder*.

    Encodes an 8 second null video source to the null muxer, so nothing is
    read from or written to disk (https://trac.ffmpeg.org/wiki/Null). A
    zero exit code means the encoder works on this machine.
    """
    args = [_get_exe(), "-hide_banner"]
    args += ["-f", "lavfi", "-i", "nullsrc=s=256x256:d=8"]
    args += ["-vcodec", encoder, "-f", "null", "-"]
    completed = subprocess.run(
        args,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return completed.returncode == 0
56
+
57
+
58
@lru_cache()
def get_compiled_h264_encoders():
    """Return the H.264 encoder names compiled into the ffmpeg executable.

    Returns a tuple of encoder names sorted by preference (see
    ``h264_encoder_preference``), most preferred first. Being compiled in
    does not guarantee the encoder works on this machine — use
    ``ffmpeg_test_encoder`` for that. The result is cached per process
    (the closing comment about "the lru_cache" implied this decorator,
    which was missing).
    """
    cmd = [_get_exe(), "-hide_banner", "-encoders"]
    p = subprocess.run(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout = p.stdout.decode().replace("\r", "")
    # 2022/04/08: hmaarrfk
    # I couldn't find a good way to get the list of available encoders from
    # the ffmpeg command. It returns a flag/name/description table such as
    # (leading spaces included):
    #
    #  Encoders:
    #   V..... = Video
    #   ...
    #   ------
    #   V..... libx264       libx264 H.264 / ... (codec h264)
    #   V....D h264_nvenc    NVIDIA NVENC H.264 encoder (codec h264)
    #   V..... nvenc         NVIDIA NVENC H.264 encoder (codec h264)
    #
    # However, just because ffmpeg was compiled with an encoder enabled
    # it doesn't mean that encoding will be successful.
    header_footer = stdout.split("------")
    footer = header_footer[1].strip("\n")
    encoders = []
    for line in footer.split("\n"):
        # Strip to remove any leading spaces
        line = line.strip()
        parts = line.split()
        if len(parts) < 2:
            # Blank or malformed line; the original indexing would raise here
            continue
        encoder = parts[1]

        if encoder in h264_encoder_preference:
            # These encoders are known to support H.264
            # We forcibly include them in case their description changes to
            # not include the string "H.264"
            encoders.append(encoder)
        elif (line[0] == "V") and ("H.264" in line):
            encoders.append(encoder)

    encoders.sort(reverse=True, key=lambda x: h264_encoder_preference[x])
    if "h264_nvenc" in encoders:
        # Remove deprecated names for the same encoder
        for encoder in ["nvenc", "nvenc_h264"]:
            if encoder in encoders:
                encoders.remove(encoder)
    # Return an immutable tuple to avoid users corrupting the lru_cache
    return tuple(encoders)
121
+
122
+
123
@lru_cache()
def get_first_available_h264_encoder():
    """Return the most-preferred H.264 encoder that actually works here.

    Walks the compiled-in encoders in preference order and returns the
    first that passes a test encode. The answer is cached for the process
    lifetime. Raises RuntimeError when no encoder works.
    """
    for candidate in get_compiled_h264_encoders():
        if ffmpeg_test_encoder(candidate):
            return candidate
    raise RuntimeError(
        "No valid H.264 encoder was found with the ffmpeg installation"
    )
133
+
134
+
135
@lru_cache()
def _get_exe():
    """Locate the ffmpeg executable once and cache the resulting path."""
    return get_ffmpeg_exe()
138
+
139
+
140
def count_frames_and_secs(path):
    """
    Get the number of frames and number of seconds for the given video
    file. Note that this operation can be quite slow for large files.

    Disclaimer: I've seen this produce different results from actually reading
    the frames with older versions of ffmpeg (2.x). Therefore I cannot say
    with 100% certainty that the returned values are always exact.

    Parameters:
        path (str or pathlib.Path): the video file to inspect.

    Returns:
        (nframes, nsecs) tuple; either element can be None if it was not
        found in ffmpeg's progress output.

    Raises:
        TypeError: if path is not a str or pathlib.Path.
        RuntimeError: if ffmpeg fails or no frame count could be parsed.
    """
    # https://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg

    if isinstance(path, pathlib.PurePath):
        path = str(path)
    if not isinstance(path, str):
        raise TypeError("Video path must be a string or pathlib.Path.")

    # Stream-copy the video to the null muxer; ffmpeg's final progress line
    # then reports the true frame count and duration without re-encoding.
    cmd = [_get_exe(), "-i", path, "-map", "0:v:0", "-c", "copy", "-f", "null", "-"]
    try:
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, **_popen_kwargs())
    except subprocess.CalledProcessError as err:
        out = err.output.decode(errors="ignore")
        # Message used to read "FFMEG"; fixed the typo.
        raise RuntimeError(
            "FFMPEG call failed with {}:\n{}".format(err.returncode, out)
        )

    # Note that other than with the subprocess calls below, ffmpeg wont hang here.
    # Worst case Python will stop/crash and ffmpeg will continue running until done.

    nframes = nsecs = None
    # Search backwards: the last "frame=..." progress line holds the totals.
    for line in reversed(out.splitlines()):
        if line.startswith(b"frame="):
            line = line.decode(errors="ignore")
            i = line.find("frame=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nframes = int(s)
            i = line.find("time=")
            if i >= 0:
                s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip()
                nsecs = cvsecs(*s.split(":"))
            return nframes, nsecs

    raise RuntimeError("Could not get number of frames")  # pragma: no cover
181
+
182
+
183
def read_frames(
    path,
    pix_fmt="rgb24",
    bpp=None,
    input_params=None,
    output_params=None,
    bits_per_pixel=None,
):
    """
    Create a generator to iterate over the frames in a video file.

    It first yields a small metadata dictionary that contains:

    * ffmpeg_version: the ffmpeg version in use (as a string).
    * codec: a hint about the codec used to encode the video, e.g. "h264".
    * source_size: the width and height of the encoded video frames.
    * size: the width and height of the frames that will be produced.
    * fps: the frames per second. Can be zero if it could not be detected.
    * duration: duration in seconds. Can be zero if it could not be detected.

    After that, it yields frames until the end of the video is reached. Each
    frame is a bytes object.

    This function makes no assumptions about the number of frames in
    the data. For one because this is hard to predict exactly, but also
    because it may depend on the provided output_params. If you want
    to know the number of frames in a video file, use count_frames_and_secs().
    It is also possible to estimate the number of frames from the fps and
    duration, but note that even if both numbers are present, the resulting
    value is not always correct.

    Example:

        gen = read_frames(path)
        meta = gen.__next__()
        for frame in gen:
            print(len(frame))

    Parameters:
        path (str): the filename of the file to read from.
        pix_fmt (str): the pixel format of the frames to be read.
            The default is "rgb24" (frames are uint8 RGB images).
        input_params (list): Additional ffmpeg input command line parameters.
        output_params (list): Additional ffmpeg output command line parameters.
        bits_per_pixel (int): The number of bits per pixel in the output frames.
            This depends on the given pix_fmt. Default is 24 (RGB)
        bpp (int): DEPRECATED, USE bits_per_pixel INSTEAD. The number of bytes per pixel in the output frames.
            This depends on the given pix_fmt. Some pixel formats like yuv420p have 12 bits per pixel
            and cannot be set in bytes as integer. For this reason the bpp argument is deprecated.
    """

    # ----- Input args

    if isinstance(path, pathlib.PurePath):
        path = str(path)
    if not isinstance(path, str):
        raise TypeError("Video path must be a string or pathlib.Path.")
    # Note: Dont check whether it exists. The source could be e.g. a camera.

    # Normalize falsy arguments to their defaults. Note: bits_per_pixel wins
    # over the deprecated bpp when both are given.
    pix_fmt = pix_fmt or "rgb24"
    bpp = bpp or 3
    bits_per_pixel = bits_per_pixel or bpp * 8
    input_params = input_params or []
    output_params = output_params or []

    assert isinstance(pix_fmt, str), "pix_fmt must be a string"
    assert isinstance(bits_per_pixel, int), "bpp and bits_per_pixel must be an int"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"

    # ----- Prepare

    # Decode to raw video frames on stdout; the image2pipe muxer streams them.
    pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"]

    cmd = [_get_exe()]
    cmd += input_params + ["-i", path]
    cmd += pre_output_params + output_params + ["-"]

    p = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        **_popen_kwargs(prevent_sigint=True)
    )

    # Drain stderr on a background thread so ffmpeg cannot stall on a full pipe.
    log_catcher = LogCatcher(p.stderr)

    # Init policy by which to terminate ffmpeg. May be set to "kill" later.
    stop_policy = "timeout"  # not wait; ffmpeg should be able to quit quickly

    # Enter try block directly after opening the process.
    # We terminate ffmpeg in the finally clause.
    # Generators are automatically closed when they get deleted,
    # so the finally block is guaranteed to run.
    try:

        # ----- Load meta data

        # Wait for the log catcher to get the meta information
        etime = time.time() + 10.0
        while log_catcher.is_alive() and not log_catcher.header and time.time() < etime:
            time.sleep(0.01)

        # Check whether we have the information
        if not log_catcher.header:
            err2 = log_catcher.get_text(0.2)
            fmt = "Could not load meta information\n=== stderr ===\n{}"
            raise IOError(fmt.format(err2))
        elif "No such file or directory" in log_catcher.header:
            raise IOError("{} not found! Wrong path?".format(path))

        meta = parse_ffmpeg_header(log_catcher.header)
        yield meta

        # ----- Read frames

        # Frame size is computed in bits first because some pixel formats
        # (e.g. yuv420p at 12 bits/pixel) are not a whole number of bytes.
        w, h = meta["size"]
        framesize_bits = w * h * bits_per_pixel
        framesize_bytes = framesize_bits / 8
        assert (
            framesize_bytes.is_integer()
        ), "incorrect bits_per_pixel, framesize in bytes must be an int"
        framesize_bytes = int(framesize_bytes)
        framenr = 0

        while True:
            framenr += 1
            try:
                # stdout.read may return fewer bytes than asked; accumulate
                # until a full frame is available.
                bb = bytes()
                while len(bb) < framesize_bytes:
                    extra_bytes = p.stdout.read(framesize_bytes - len(bb))
                    if not extra_bytes:
                        if len(bb) == 0:
                            return  # clean EOF on a frame boundary
                        else:
                            raise RuntimeError(
                                "End of file reached before full frame could be read."
                            )
                    bb += extra_bytes
                yield bb
            except Exception as err:
                err1 = str(err)
                err2 = log_catcher.get_text(0.4)
                fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}"
                raise RuntimeError(fmt.format(framenr, err1, err2))

    except GeneratorExit:
        # Note that GeneratorExit does not inherit from Exception but BaseException
        pass

    except Exception:
        # Normal exceptions fall through
        raise

    except BaseException:
        # Detect KeyboardInterrupt / SystemExit: don't wait for ffmpeg to quit
        stop_policy = "kill"
        raise

    finally:
        # Stop the LogCatcher thread, which reads from stderr.
        log_catcher.stop_me()

        # Make sure that ffmpeg is terminated.
        if p.poll() is None:

            # Ask ffmpeg to quit
            try:
                # I read somewhere that modern ffmpeg on Linux prefers a
                # "ctrl-c", but tests so far suggests sending q is more robust.
                # > p.send_signal(signal.SIGINT)
                # Sending q via communicate works, but can hang (see #17)
                # > p.communicate(b"q")
                # So let's do similar to what communicate does, but without
                # reading stdout (which may block). It looks like only closing
                # stdout is enough (tried Windows+Linux), but let's play safe.
                # Found that writing to stdin can cause "Invalid argument" on
                # Windows # and "Broken Pipe" on Unix.
                # p.stdin.write(b"q")  # commented out in v0.4.1
                p.stdout.close()
                p.stdin.close()
                # p.stderr.close() -> not here, the log_catcher closes it
            except Exception as err:  # pragma: no cover
                logger.warning("Error while attempting stop ffmpeg (r): " + str(err))

            if stop_policy == "timeout":
                # Wait until timeout, produce a warning and kill if it still exists
                try:
                    etime = time.time() + 1.5
                    while time.time() < etime and p.poll() is None:
                        time.sleep(0.01)
                finally:
                    if p.poll() is None:  # pragma: no cover
                        logger.warning("We had to kill ffmpeg to stop it.")
                        p.kill()

            else:  # stop_policy == "kill"
                # Just kill it
                p.kill()
385
def write_frames(
    path,
    size,
    pix_fmt_in="rgb24",
    pix_fmt_out="yuv420p",
    fps=16,
    quality=5,
    bitrate=None,
    codec=None,
    macro_block_size=16,
    ffmpeg_log_level="warning",
    ffmpeg_timeout=None,
    input_params=None,
    output_params=None,
    audio_path=None,
    audio_codec=None,
):
    """
    Create a generator to write frames (bytes objects) into a video file.

    The frames are written by using the generator's `send()` method. Frames
    can be anything that can be written to a file. Typically these are
    bytes objects, but c-contiguous Numpy arrays also work.

    Example:

        gen = write_frames(path, size)
        gen.send(None)  # seed the generator
        for frame in frames:
            gen.send(frame)
        gen.close()  # don't forget this

    Parameters:
        path (str): the filename to write to.
        size (tuple): the width and height of the frames.
        pix_fmt_in (str): the pixel format of incoming frames.
            E.g. "gray", "gray8a", "rgb24", or "rgba". Default "rgb24".
        pix_fmt_out (str): the pixel format to store frames. Default yuv420p".
        fps (float): The frames per second. Default 16.
        quality (float): A measure for quality between 1 and 10. Default 5.
            Ignored if bitrate is given.
        bitrate (str): The bitrate, e.g. "192k". The defaults are pretty good.
        codec (str): The codec. Default "libx264" for .mp4 (if available from
            the ffmpeg executable) or "msmpeg4" for .wmv.
        macro_block_size (int): You probably want to align the size of frames
            to this value to avoid image resizing. Default 16. Can be set
            to 1 to avoid block alignment, though this is not recommended.
        ffmpeg_log_level (str): The ffmpeg logging level. Default "warning".
        ffmpeg_timeout (float): Timeout in seconds to wait for ffmpeg process
            to finish. Value of 0 or None will wait forever (default). The time that
            ffmpeg needs depends on CPU speed, compression, and frame size.
        input_params (list): Additional ffmpeg input command line parameters.
        output_params (list): Additional ffmpeg output command line parameters.
        audio_path (str): A input file path for encoding with an audio stream.
            Default None, no audio.
        audio_codec (str): The audio codec to use if audio_path is provided.
            "copy" will try to use audio_path's audio codec without re-encoding.
            Default None, but some formats must have certain codecs specified.
    """

    # ----- Input args

    if isinstance(path, pathlib.PurePath):
        path = str(path)
    if not isinstance(path, str):
        raise TypeError("Video path must be a string or pathlib.Path.")

    # The pix_fmt_out yuv420p is the best for the output to work in
    # QuickTime and most other players. These players only support
    # the YUV planar color space with 4:2:0 chroma subsampling for
    # H.264 video. Otherwise, depending on the source, ffmpeg may
    # output to a pixel format that may be incompatible with these
    # players. See https://trac.ffmpeg.org/wiki/Encode/H.264#Encodingfordumbplayers

    # Normalize falsy arguments to their defaults.
    pix_fmt_in = pix_fmt_in or "rgb24"
    pix_fmt_out = pix_fmt_out or "yuv420p"
    fps = fps or 16
    # bitrate, codec, macro_block_size can all be None or ...
    macro_block_size = macro_block_size or 16
    ffmpeg_log_level = ffmpeg_log_level or "warning"
    input_params = input_params or []
    output_params = output_params or []
    ffmpeg_timeout = ffmpeg_timeout or 0

    floatish = float, int
    if isinstance(size, (tuple, list)):
        assert len(size) == 2, "size must be a 2-tuple"
        assert isinstance(size[0], int) and isinstance(
            size[1], int
        ), "size must be ints"
        sizestr = "{:d}x{:d}".format(*size)
    # elif isinstance(size, str):
    #     assert "x" in size, "size as string must have format NxM"
    #     sizestr = size
    else:
        assert False, "size must be str or tuple"
    assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str"
    assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str"
    assert isinstance(fps, floatish), "fps must be float"
    if quality is not None:
        assert isinstance(quality, floatish), "quality must be float"
        assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive"
    assert isinstance(macro_block_size, int), "macro_block_size must be int"
    assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str"
    assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float"
    assert isinstance(input_params, list), "input_params must be a list"
    assert isinstance(output_params, list), "output_params must be a list"

    # ----- Prepare

    # Get parameters
    if not codec:
        if path.lower().endswith(".wmv"):
            # This is a safer default codec on windows to get videos that
            # will play in powerpoint and other apps. H264 is not always
            # available on windows.
            codec = "msmpeg4"
        else:
            codec = get_first_available_h264_encoder()

    # "-an" disables audio; replaced by a second input when audio_path given.
    audio_params = ["-an"]
    if audio_path is not None and not path.lower().endswith(".gif"):
        audio_params = ["-i", audio_path]
        if audio_codec is not None:
            output_params += ["-acodec", audio_codec]
        output_params += ["-map", "0:v:0", "-map", "1:a:0"]

    # Get command
    cmd = [_get_exe(), "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", sizestr]
    cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params
    cmd += ["-i", "-"] + audio_params
    cmd += ["-vcodec", codec, "-pix_fmt", pix_fmt_out]

    # Add fixed bitrate or variable bitrate compression flags
    if bitrate is not None:
        cmd += ["-b:v", str(bitrate)]
    elif quality is not None:  # If None, then we don't add anything
        # Map quality 1..10 (10 best) onto the codec's scale (lower = better).
        quality = 1 - quality / 10.0
        if codec == "libx264":
            # crf ranges 0 to 51, 51 being worst.
            quality = int(quality * 51)
            cmd += ["-crf", str(quality)]  # for h264
        else:  # Many codecs accept q:v
            # q:v range can vary, 1-31, 31 being worst
            # But q:v does not always have the same range.
            # May need a way to find range for any codec.
            quality = int(quality * 30) + 1
            cmd += ["-qscale:v", str(quality)]  # for others

    # Note, for most codecs, the image dimensions must be divisible by
    # 16 the default for the macro_block_size is 16. Check if image is
    # divisible, if not have ffmpeg upsize to nearest size and warn
    # user they should correct input image if this is not desired.
    if macro_block_size > 1:
        if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0:
            out_w = size[0]
            out_h = size[1]
            if size[0] % macro_block_size > 0:
                out_w += macro_block_size - (size[0] % macro_block_size)
            if size[1] % macro_block_size > 0:
                out_h += macro_block_size - (size[1] % macro_block_size)
            cmd += ["-vf", "scale={}:{}".format(out_w, out_h)]
            logger.warning(
                "IMAGEIO FFMPEG_WRITER WARNING: input image is not"
                " divisible by macro_block_size={}, resizing from {} "
                "to {} to ensure video compatibility with most codecs "
                "and players. To prevent resizing, make your input "
                "image divisible by the macro_block_size or set the "
                "macro_block_size to 1 (risking incompatibility).".format(
                    macro_block_size, size[:2], (out_w, out_h)
                )
            )

    # Rather than redirect stderr to a pipe, just set minimal
    # output from ffmpeg by default. That way if there are warnings
    # the user will see them.
    cmd += ["-v", ffmpeg_log_level]
    cmd += output_params
    cmd.append(path)
    cmd_str = " ".join(cmd)
    if any(
        [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")]
    ):
        logger.info("RUNNING FFMPEG COMMAND: " + cmd_str)

    # Launch process
    p = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=None,
        **_popen_kwargs(prevent_sigint=True)
    )

    # Note that directing stderr to a pipe on windows will cause ffmpeg
    # to hang if the buffer is not periodically cleared using
    # StreamCatcher or other means.
    # Setting bufsize to 0 or a small value does not seem to have much effect
    # (tried on Windows and Linux). I suspect that ffmpeg buffers
    # multiple frames (before encoding in a batch).

    # Init policy by which to terminate ffmpeg. May be set to "kill" later.
    stop_policy = "timeout"
    if not ffmpeg_timeout:
        stop_policy = "wait"

    # ----- Write frames

    # Enter try block directly after opening the process.
    # We terminate ffmpeg in the finally clause.
    # Generators are automatically closed when they get deleted,
    # so the finally block is guaranteed to run.
    try:

        # Just keep going until the generator.close() is called (raises GeneratorExit).
        # This could also happen when the generator is deleted somehow.
        nframes = 0
        while True:

            # Get frame
            bb = yield

            # framesize = size[0] * size[1] * depth * bpp
            # assert isinstance(bb, bytes), "Frame must be send as bytes"
            # assert len(bb) == framesize, "Frame must have width*height*depth*bpp bytes"
            # Actually, we accept anything that can be written to file.
            # This e.g. allows writing numpy arrays without having to make a copy ...

            # Write
            try:
                p.stdin.write(bb)
            except Exception as err:
                # Show the command and stderr from pipe
                msg = (
                    "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR "
                    "OUTPUT:\n".format(err, cmd_str)
                )
                raise IOError(msg)

            nframes += 1

    except GeneratorExit:
        # Note that GeneratorExit does not inherit from Exception but BaseException
        # Detect premature closing
        if nframes == 0:
            logger.warning("No frames have been written; the written video is invalid.")

    except Exception:
        # Normal exceptions fall through
        raise

    except BaseException:
        # Detect KeyboardInterrupt / SystemExit: don't wait for ffmpeg to quit
        stop_policy = "kill"
        raise

    finally:

        # Make sure that ffmpeg is terminated.
        if p.poll() is None:

            # Tell ffmpeg that we're done
            try:
                p.stdin.close()
            except Exception as err:  # pragma: no cover
                logger.warning("Error while attempting stop ffmpeg (w): " + str(err))

            if stop_policy == "timeout":
                # Wait until timeout, produce a warning and kill if it still exists
                try:
                    etime = time.time() + ffmpeg_timeout
                    while (time.time() < etime) and p.poll() is None:
                        time.sleep(0.01)
                finally:
                    if p.poll() is None:  # pragma: no cover
                        logger.warning(
                            "We had to kill ffmpeg to stop it. "
                            + "Consider increasing ffmpeg_timeout, "
                            + "or setting it to zero (no timeout)."
                        )
                        p.kill()

            elif stop_policy == "wait":
                # Wait forever, kill if it if we're interrupted
                try:
                    while p.poll() is None:
                        time.sleep(0.01)
                finally:  # the above can raise e.g. by ctrl-c or systemexit
                    if p.poll() is None:  # pragma: no cover
                        p.kill()

            else:  # stop_policy == "kill":
                # Just kill it
                p.kill()

        # Just to be safe, wrap in try/except
        try:
            p.stdout.close()
        except Exception:
            pass
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_parsing.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import time
3
+ import threading
4
+
5
+ from ._utils import logger
6
+
7
+
8
class LogCatcher(threading.Thread):
    """Thread to keep reading from stderr so that the buffer does not
    fill up and stalls the ffmpeg process. On stderr a message is sent
    on every few frames with some meta information. We only keep the
    last ones.
    """

    def __init__(self, file):
        # file: the (binary) stderr pipe of the ffmpeg process
        self._file = file
        self._header = ""  # decoded header text, set once detected
        self._lines = []  # raw stderr lines (bytes); only recent ones kept
        self._remainder = b""  # partial line carried over between reads
        threading.Thread.__init__(self)
        self.daemon = True  # do not let this thread hold up Python shutdown
        self._should_stop = False
        self.start()

    def stop_me(self):
        # Ask the reader loop in run() to exit at its next iteration.
        self._should_stop = True

    @property
    def header(self):
        """Get header text. Empty string if the header is not yet parsed."""
        return self._header

    def get_text(self, timeout=0):
        """Get the whole text written to stderr so far. To preserve
        memory, only the last 50 to 100 frames are kept.

        If a timeout is given, wait for this thread to finish. When
        something goes wrong, we stop ffmpeg and want a full report of
        stderr, but this thread might need a tiny bit more time.
        """

        # Wait?
        if timeout > 0:
            etime = time.time() + timeout
            while self.is_alive() and time.time() < etime:  # pragma: no cover
                time.sleep(0.01)
        # Return str
        lines = b"\n".join(self._lines)
        return self._header + "\n" + lines.decode("utf-8", "ignore")

    def run(self):
        # Read stderr in small chunks until EOF or until stop_me() is
        # called, splitting into lines and capturing the ffmpeg header
        # once the output video stream line has appeared.

        # Create ref here so it still exists even if Py is shutting down
        limit_lines_local = limit_lines

        while not self._should_stop:
            time.sleep(0)  # yield to other threads
            # Read one line. Detect when closed, and exit
            try:
                line = self._file.read(20)
            except ValueError:  # pragma: no cover
                break
            if not line:
                break
            # Process to divide in lines
            line = line.replace(b"\r", b"\n").replace(b"\n\n", b"\n")
            lines = line.split(b"\n")
            lines[0] = self._remainder + lines[0]
            self._remainder = lines.pop(-1)
            # Process each line
            self._lines.extend(lines)
            if not self._header:
                if get_output_video_line(self._lines):
                    header = b"\n".join(self._lines)
                    self._header += header.decode("utf-8", "ignore")
            elif self._lines:
                self._lines = limit_lines_local(self._lines)

        # Close the file when we're done
        # See #61 and #69
        try:
            self._file.close()
        except Exception:
            pass
85
+
86
+
87
+ def get_output_video_line(lines):
88
+ """Get the line that defines the video stream that ffmpeg outputs,
89
+ and which we read.
90
+ """
91
+ in_output = False
92
+ for line in lines:
93
+ sline = line.lstrip()
94
+ if sline.startswith(b"Output "):
95
+ in_output = True
96
+ elif in_output:
97
+ if sline.startswith(b"Stream ") and b" Video:" in sline:
98
+ return line
99
+
100
+
101
+ def limit_lines(lines, N=32):
102
+ """When number of lines > 2*N, reduce to N."""
103
+ if len(lines) > 2 * N:
104
+ lines = [b"... showing only last few lines ..."] + lines[-N:]
105
+ return lines
106
+
107
+
108
+ def cvsecs(*args):
109
+ """converts a time to second. Either cvsecs(min, secs) or
110
+ cvsecs(hours, mins, secs).
111
+ """
112
+ if len(args) == 1:
113
+ return float(args[0])
114
+ elif len(args) == 2:
115
+ return 60 * float(args[0]) + float(args[1])
116
+ elif len(args) == 3:
117
+ return 3600 * float(args[0]) + 60 * float(args[1]) + float(args[2])
118
+
119
+
120
+ def parse_ffmpeg_header(text):
121
+ lines = text.splitlines()
122
+ meta = {}
123
+
124
+ # meta["header"] = text # Can enable this for debugging
125
+
126
+ # Get version
127
+ ver = lines[0].split("version", 1)[-1].split("Copyright")[0]
128
+ meta["ffmpeg_version"] = ver.strip() + " " + lines[1].strip()
129
+
130
+ # get the output line that speaks about video
131
+ videolines = [
132
+ l for l in lines if l.lstrip().startswith("Stream ") and " Video: " in l
133
+ ]
134
+
135
+ # Codec and pix_fmt hint
136
+ line = videolines[0]
137
+ meta["codec"] = line.split("Video: ", 1)[-1].lstrip().split(" ", 1)[0].strip()
138
+ meta["pix_fmt"] = line.split("Video: ", 1)[-1].split(",")[1].strip()
139
+
140
+ # get the output line that speaks about audio
141
+ audiolines = [
142
+ l for l in lines if l.lstrip().startswith("Stream ") and " Audio: " in l
143
+ ]
144
+
145
+ if len(audiolines) > 0:
146
+ audio_line = audiolines[0]
147
+ meta["audio_codec"] = (
148
+ audio_line.split("Audio: ", 1)[-1].lstrip().split(" ", 1)[0].strip()
149
+ )
150
+
151
+ # get the frame rate.
152
+ # matches can be empty, see #171, assume nframes = inf
153
+ # the regexp omits values of "1k tbr" which seems a specific edge-case #262
154
+ # it seems that tbr is generally to be preferred #262
155
+ fps = 0
156
+ for line in (videolines[0], videolines[-1]):
157
+ matches = re.findall(r" ([0-9]+\.?[0-9]*) (tbr|fps)", line)
158
+ matches.sort(key=lambda x: x[1] == "tbr", reverse=True)
159
+ if matches:
160
+ fps = float(matches[0][0].strip())
161
+ meta["fps"] = fps
162
+
163
+ # get the size of the original stream, of the form 460x320 (w x h)
164
+ line = videolines[0]
165
+ match = re.search(" [0-9]*x[0-9]*(,| )", line)
166
+ parts = line[match.start() : match.end() - 1].split("x")
167
+ meta["source_size"] = tuple(map(int, parts))
168
+
169
+ # get the size of what we receive, of the form 460x320 (w x h)
170
+ line = videolines[-1] # Pipe output
171
+ match = re.search(" [0-9]*x[0-9]*(,| )", line)
172
+ parts = line[match.start() : match.end() - 1].split("x")
173
+ meta["size"] = tuple(map(int, parts))
174
+
175
+ # Check the two sizes
176
+ if meta["source_size"] != meta["size"]:
177
+ logger.warning(
178
+ "The frame size for reading {} is "
179
+ "different from the source frame size {}.".format(
180
+ meta["size"], meta["source_size"]
181
+ )
182
+ )
183
+
184
+ # get the rotate metadata
185
+ reo_rotate = re.compile("rotate\s+:\s([0-9]+)")
186
+ match = reo_rotate.search(text)
187
+ rotate = 0
188
+ if match is not None:
189
+ rotate = match.groups()[0]
190
+ meta["rotate"] = int(rotate)
191
+
192
+ # get duration (in seconds)
193
+ line = [l for l in lines if "Duration: " in l][0]
194
+ match = re.search(" [0-9][0-9]:[0-9][0-9]:[0-9][0-9].[0-9][0-9]", line)
195
+ duration = 0
196
+ if match is not None:
197
+ hms = line[match.start() + 1 : match.end()].split(":")
198
+ duration = cvsecs(*hms)
199
+ meta["duration"] = duration
200
+
201
+ return meta
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio_ffmpeg/_utils.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import logging
4
+ import subprocess
5
+ from pkg_resources import resource_filename
6
+
7
+ from ._definitions import get_platform, FNAME_PER_PLATFORM
8
+
9
+ logger = logging.getLogger("imageio_ffmpeg")
10
+
11
+
12
+ def get_ffmpeg_exe():
13
+ """
14
+ Get the ffmpeg executable file. This can be the binary defined by
15
+ the IMAGEIO_FFMPEG_EXE environment variable, the binary distributed
16
+ with imageio-ffmpeg, an ffmpeg binary installed with conda, or the
17
+ system ffmpeg (in that order). A RuntimeError is raised if no valid
18
+ ffmpeg could be found.
19
+ """
20
+
21
+ # 1. Try environment variable. - Dont test it: the user is explicit here!
22
+ exe = os.getenv("IMAGEIO_FFMPEG_EXE", None)
23
+ if exe:
24
+ return exe
25
+
26
+ plat = get_platform()
27
+
28
+ # 2. Try from here
29
+ bin_dir = resource_filename("imageio_ffmpeg", "binaries")
30
+ exe = os.path.join(bin_dir, FNAME_PER_PLATFORM.get(plat, ""))
31
+ if exe and os.path.isfile(exe) and _is_valid_exe(exe):
32
+ return exe
33
+
34
+ # 3. Try binary from conda package
35
+ # (installed e.g. via `conda install ffmpeg -c conda-forge`)
36
+ if plat.startswith("win"):
37
+ exe = os.path.join(sys.prefix, "Library", "bin", "ffmpeg.exe")
38
+ else:
39
+ exe = os.path.join(sys.prefix, "bin", "ffmpeg")
40
+ if exe and os.path.isfile(exe) and _is_valid_exe(exe):
41
+ return exe
42
+
43
+ # 4. Try system ffmpeg command
44
+ exe = "ffmpeg"
45
+ if _is_valid_exe(exe):
46
+ return exe
47
+
48
+ # Nothing was found
49
+ raise RuntimeError(
50
+ "No ffmpeg exe could be found. Install ffmpeg on your system, "
51
+ "or set the IMAGEIO_FFMPEG_EXE environment variable."
52
+ )
53
+
54
+
55
+ def _popen_kwargs(prevent_sigint=False):
56
+ startupinfo = None
57
+ preexec_fn = None
58
+ creationflags = 0
59
+ if sys.platform.startswith("win"):
60
+ # Stops executable from flashing on Windows (see #22)
61
+ startupinfo = subprocess.STARTUPINFO()
62
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
63
+ if prevent_sigint:
64
+ # Prevent propagation of sigint (see #4)
65
+ # https://stackoverflow.com/questions/5045771
66
+ if sys.platform.startswith("win"):
67
+ creationflags = 0x00000200
68
+ else:
69
+ preexec_fn = os.setpgrp # the _pre_exec does not seem to work
70
+
71
+ falsy = ("", "0", "false", "no")
72
+ if os.getenv("IMAGEIO_FFMPEG_NO_PREVENT_SIGINT", "").lower() not in falsy:
73
+ # Unset preexec_fn to work around a strange hang on fork() (see #58)
74
+ preexec_fn = None
75
+
76
+ return {
77
+ "startupinfo": startupinfo,
78
+ "creationflags": creationflags,
79
+ "preexec_fn": preexec_fn,
80
+ }
81
+
82
+
83
+ def _is_valid_exe(exe):
84
+ cmd = [exe, "-version"]
85
+ try:
86
+ with open(os.devnull, "w") as null:
87
+ subprocess.check_call(
88
+ cmd, stdout=null, stderr=subprocess.STDOUT, **_popen_kwargs()
89
+ )
90
+ return True
91
+ except (OSError, ValueError, subprocess.CalledProcessError):
92
+ return False
93
+
94
+
95
+ def get_ffmpeg_version():
96
+ """
97
+ Get the version of the used ffmpeg executable (as a string).
98
+ """
99
+ exe = get_ffmpeg_exe()
100
+ line = subprocess.check_output([exe, "-version"], **_popen_kwargs()).split(
101
+ b"\n", 1
102
+ )[0]
103
+ line = line.decode(errors="ignore").strip()
104
+ version = line.split("version", 1)[-1].lstrip().split(" ", 1)[0].strip()
105
+ return version
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/LICENSE-3RD-PARTY.txt ADDED
The diff for this file is too large to render. See raw diff
 
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) Olli-Pekka Heinisuo
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/METADATA ADDED
@@ -0,0 +1,305 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: opencv-python
3
+ Version: 4.9.0.80
4
+ Summary: Wrapper package for OpenCV python bindings.
5
+ Home-page: https://github.com/opencv/opencv-python
6
+ Maintainer: OpenCV Team
7
+ License: Apache 2.0
8
+ Platform: UNKNOWN
9
+ Classifier: Development Status :: 5 - Production/Stable
10
+ Classifier: Environment :: Console
11
+ Classifier: Intended Audience :: Developers
12
+ Classifier: Intended Audience :: Education
13
+ Classifier: Intended Audience :: Information Technology
14
+ Classifier: Intended Audience :: Science/Research
15
+ Classifier: License :: OSI Approved :: Apache Software License
16
+ Classifier: Operating System :: MacOS
17
+ Classifier: Operating System :: Microsoft :: Windows
18
+ Classifier: Operating System :: POSIX
19
+ Classifier: Operating System :: Unix
20
+ Classifier: Programming Language :: Python
21
+ Classifier: Programming Language :: Python :: 3
22
+ Classifier: Programming Language :: Python :: 3 :: Only
23
+ Classifier: Programming Language :: Python :: 3.6
24
+ Classifier: Programming Language :: Python :: 3.7
25
+ Classifier: Programming Language :: Python :: 3.8
26
+ Classifier: Programming Language :: Python :: 3.9
27
+ Classifier: Programming Language :: Python :: 3.10
28
+ Classifier: Programming Language :: Python :: 3.11
29
+ Classifier: Programming Language :: Python :: 3.12
30
+ Classifier: Programming Language :: C++
31
+ Classifier: Programming Language :: Python :: Implementation :: CPython
32
+ Classifier: Topic :: Scientific/Engineering
33
+ Classifier: Topic :: Scientific/Engineering :: Image Recognition
34
+ Classifier: Topic :: Software Development
35
+ Requires-Python: >=3.6
36
+ Description-Content-Type: text/markdown
37
+ License-File: LICENSE-3RD-PARTY.txt
38
+ License-File: LICENSE.txt
39
+ Requires-Dist: numpy >=1.13.3 ; python_version < "3.7"
40
+ Requires-Dist: numpy >=1.21.0 ; python_version <= "3.9" and platform_system == "Darwin" and platform_machine == "arm64"
41
+ Requires-Dist: numpy >=1.21.2 ; python_version >= "3.10"
42
+ Requires-Dist: numpy >=1.21.4 ; python_version >= "3.10" and platform_system == "Darwin"
43
+ Requires-Dist: numpy >=1.23.5 ; python_version >= "3.11"
44
+ Requires-Dist: numpy >=1.26.0 ; python_version >= "3.12"
45
+ Requires-Dist: numpy >=1.19.3 ; python_version >= "3.6" and platform_system == "Linux" and platform_machine == "aarch64"
46
+ Requires-Dist: numpy >=1.17.0 ; python_version >= "3.7"
47
+ Requires-Dist: numpy >=1.17.3 ; python_version >= "3.8"
48
+ Requires-Dist: numpy >=1.19.3 ; python_version >= "3.9"
49
+
50
+ [![Downloads](https://static.pepy.tech/badge/opencv-python)](http://pepy.tech/project/opencv-python)
51
+
52
+ ### Keep OpenCV Free
53
+
54
+ OpenCV is raising funds to keep the library free for everyone, and we need the support of the entire community to do it. [Donate to OpenCV on IndieGoGo](http://igg.me/at/opencv5) to show your support.
55
+
56
+ - [OpenCV on Wheels](#opencv-on-wheels)
57
+ - [Installation and Usage](#installation-and-usage)
58
+ - [Frequently Asked Questions](#frequently-asked-questions)
59
+ - [Documentation for opencv-python](#documentation-for-opencv-python)
60
+ - [CI build process](#ci-build-process)
61
+ - [Manual builds](#manual-builds)
62
+ - [Manual debug builds](#manual-debug-builds)
63
+ - [Source distributions](#source-distributions)
64
+ - [Licensing](#licensing)
65
+ - [Versioning](#versioning)
66
+ - [Releases](#releases)
67
+ - [Development builds](#development-builds)
68
+ - [Manylinux wheels](#manylinux-wheels)
69
+ - [Supported Python versions](#supported-python-versions)
70
+ - [Backward compatibility](#backward-compatibility)
71
+
72
+ ## OpenCV on Wheels
73
+
74
+ Pre-built CPU-only OpenCV packages for Python.
75
+
76
+ Check the manual build section if you wish to compile the bindings from source to enable additional modules such as CUDA.
77
+
78
+ ### Installation and Usage
79
+
80
+ 1. If you have previous/other manually installed (= not installed via ``pip``) version of OpenCV installed (e.g. cv2 module in the root of Python's site-packages), remove it before installation to avoid conflicts.
81
+ 2. Make sure that your `pip` version is up-to-date (19.3 is the minimum supported version): `pip install --upgrade pip`. Check version with `pip -V`. For example Linux distributions ship usually with very old `pip` versions which cause a lot of unexpected problems especially with the `manylinux` format.
82
+ 3. Select the correct package for your environment:
83
+
84
+ There are four different packages (see options 1, 2, 3 and 4 below) and you should **SELECT ONLY ONE OF THEM**. Do not install multiple different packages in the same environment. There is no plugin architecture: all the packages use the same namespace (`cv2`). If you installed multiple different packages in the same environment, uninstall them all with ``pip uninstall`` and reinstall only one package.
85
+
86
+ **a.** Packages for standard desktop environments (Windows, macOS, almost any GNU/Linux distribution)
87
+
88
+ - Option 1 - Main modules package: ``pip install opencv-python``
89
+ - Option 2 - Full package (contains both main modules and contrib/extra modules): ``pip install opencv-contrib-python`` (check contrib/extra modules listing from [OpenCV documentation](https://docs.opencv.org/master/))
90
+
91
+ **b.** Packages for server (headless) environments (such as Docker, cloud environments etc.), no GUI library dependencies
92
+
93
+ These packages are smaller than the two other packages above because they do not contain any GUI functionality (not compiled with Qt / other GUI components). This means that the packages avoid a heavy dependency chain to X11 libraries and you will have for example smaller Docker images as a result. You should always use these packages if you do not use `cv2.imshow` et al. or you are using some other package (such as PyQt) than OpenCV to create your GUI.
94
+
95
+ - Option 3 - Headless main modules package: ``pip install opencv-python-headless``
96
+ - Option 4 - Headless full package (contains both main modules and contrib/extra modules): ``pip install opencv-contrib-python-headless`` (check contrib/extra modules listing from [OpenCV documentation](https://docs.opencv.org/master/))
97
+
98
+ 4. Import the package:
99
+
100
+ ``import cv2``
101
+
102
+ All packages contain Haar cascade files. ``cv2.data.haarcascades`` can be used as a shortcut to the data folder. For example:
103
+
104
+ ``cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")``
105
+
106
+ 5. Read [OpenCV documentation](https://docs.opencv.org/master/)
107
+
108
+ 6. Before opening a new issue, read the FAQ below and have a look at the other issues which are already open.
109
+
110
+ Frequently Asked Questions
111
+ --------------------------
112
+
113
+ **Q: Do I need to install also OpenCV separately?**
114
+
115
+ A: No, the packages are special wheel binary packages and they already contain statically built OpenCV binaries.
116
+
117
+ **Q: Pip install fails with ``ModuleNotFoundError: No module named 'skbuild'``?**
118
+
119
+ Since ``opencv-python`` version 4.3.0.\*, ``manylinux1`` wheels were replaced by ``manylinux2014`` wheels. If your pip is too old, it will try to use the new source distribution introduced in 4.3.0.38 to manually build OpenCV because it does not know how to install ``manylinux2014`` wheels. However, source build will also fail because of too old ``pip`` because it does not understand build dependencies in ``pyproject.toml``. To use the new ``manylinux2014`` pre-built wheels (or to build from source), your ``pip`` version must be >= 19.3. Please upgrade ``pip`` with ``pip install --upgrade pip``.
120
+
121
+ **Q: Import fails on Windows: ``ImportError: DLL load failed: The specified module could not be found.``?**
122
+
123
+ A: If the import fails on Windows, make sure you have [Visual C++ redistributable 2015](https://www.microsoft.com/en-us/download/details.aspx?id=48145) installed. If you are using older Windows version than Windows 10 and latest system updates are not installed, [Universal C Runtime](https://support.microsoft.com/en-us/help/2999226/update-for-universal-c-runtime-in-windows) might be also required.
124
+
125
+ Windows N and KN editions do not include Media Feature Pack which is required by OpenCV. If you are using Windows N or KN edition, please install also [Windows Media Feature Pack](https://support.microsoft.com/en-us/help/3145500/media-feature-pack-list-for-windows-n-editions).
126
+
127
+ If you have Windows Server 2012+, media DLLs are probably missing too; please install the Feature called "Media Foundation" in the Server Manager. Beware, some posts advise to install "Windows Server Essentials Media Pack", but this one requires the "Windows Server Essentials Experience" role, and this role will deeply affect your Windows Server configuration (by enforcing active directory integration etc.); so just installing the "Media Foundation" should be a safer choice.
128
+
129
+ If the above does not help, check if you are using Anaconda. Old Anaconda versions have a bug which causes the error, see [this issue](https://github.com/opencv/opencv-python/issues/36) for a manual fix.
130
+
131
+ If you still encounter the error after you have checked all the previous solutions, download [Dependencies](https://github.com/lucasg/Dependencies) and open the ``cv2.pyd`` (located usually at ``C:\Users\username\AppData\Local\Programs\Python\PythonXX\Lib\site-packages\cv2``) file with it to debug missing DLL issues.
132
+
133
+ **Q: I have some other import errors?**
134
+
135
+ A: Make sure you have removed old manual installations of OpenCV Python bindings (cv2.so or cv2.pyd in site-packages).
136
+
137
+ **Q: Function foo() or method bar() returns wrong result, throws exception or crashes interpreter. What should I do?**
138
+
139
+ A: The repository contains only OpenCV-Python package build scripts, but not OpenCV itself. Python bindings for OpenCV are developed in the official OpenCV repository and it's the best place to report issues. Also please check [OpenCV wiki](https://github.com/opencv/opencv/wiki) and [the official OpenCV forum](https://forum.opencv.org/) before filing new bugs.
140
+
141
+ **Q: Why the packages do not include non-free algorithms?**
142
+
143
+ A: Non-free algorithms such as SURF are not included in these packages because they are patented / non-free and therefore cannot be distributed as built binaries. Note that SIFT is included in the builds due to patent expiration since OpenCV versions 4.3.0 and 3.4.10. See this issue for more info: https://github.com/skvark/opencv-python/issues/126
144
+
145
+ **Q: Why the package and import are different (opencv-python vs. cv2)?**
146
+
147
+ A: It's easier for users to understand ``opencv-python`` than ``cv2`` and it makes it easier to find the package with search engines. `cv2` (old interface in old OpenCV versions was named as `cv`) is the name that OpenCV developers chose when they created the binding generators. This is kept as the import name to be consistent with different kind of tutorials around the internet. Changing the import name or behaviour would be also confusing to experienced users who are accustomed to the ``import cv2``.
148
+
149
+ ## Documentation for opencv-python
150
+
151
+ [![Windows Build Status](https://github.com/opencv/opencv-python/actions/workflows/build_wheels_windows.yml/badge.svg)](https://github.com/opencv/opencv-python/actions/workflows/build_wheels_windows.yml)
152
+ [![(Linux Build status)](https://github.com/opencv/opencv-python/actions/workflows/build_wheels_linux.yml/badge.svg)](https://github.com/opencv/opencv-python/actions/workflows/build_wheels_linux.yml)
153
+ [![(Mac OS Build status)](https://github.com/opencv/opencv-python/actions/workflows/build_wheels_macos.yml/badge.svg)](https://github.com/opencv/opencv-python/actions/workflows/build_wheels_macos.yml)
154
+
155
+ The aim of this repository is to provide means to package each new [OpenCV release](https://github.com/opencv/opencv/releases) for the most used Python versions and platforms.
156
+
157
+ ### CI build process
158
+
159
+ The project is structured like a normal Python package with a standard ``setup.py`` file.
160
+ The build process for a single entry in the build matrices is as follows (see for example `.github/workflows/build_wheels_linux.yml` file):
161
+
162
+ 0. In Linux and MacOS build: get OpenCV's optional C dependencies that we compile against
163
+
164
+ 1. Checkout repository and submodules
165
+
166
+ - OpenCV is included as submodule and the version is updated
167
+ manually by maintainers when a new OpenCV release has been made
168
+ - Contrib modules are also included as a submodule
169
+
170
+ 2. Find OpenCV version from the sources
171
+
172
+ 3. Build OpenCV
173
+
174
+ - tests are disabled, otherwise build time increases too much
175
+ - there are 4 build matrix entries for each build combination: with and without contrib modules, with and without GUI (headless)
176
+ - Linux builds run in manylinux Docker containers (CentOS 5)
177
+ - source distributions are separate entries in the build matrix
178
+
179
+ 4. Rearrange OpenCV's build result, add our custom files and generate wheel
180
+
181
+ 5. Linux and macOS wheels are transformed with auditwheel and delocate, correspondingly
182
+
183
+ 6. Install the generated wheel
184
+ 7. Test that Python can import the library and run some sanity checks
185
+ 8. Use twine to upload the generated wheel to PyPI (only in release builds)
186
+
187
+ Steps 1--4 are handled by ``pip wheel``.
188
+
189
+ The build can be customized with environment variables. In addition to any variables that OpenCV's build accepts, we recognize:
190
+
191
+ - ``CI_BUILD``. Set to ``1`` to emulate the CI environment build behaviour. Used only in CI builds to force certain build flags on in ``setup.py``. Do not use this unless you know what you are doing.
192
+ - ``ENABLE_CONTRIB`` and ``ENABLE_HEADLESS``. Set to ``1`` to build the contrib and/or headless version
193
+ - ``ENABLE_JAVA``, Set to ``1`` to enable the Java client build. This is disabled by default.
194
+ - ``CMAKE_ARGS``. Additional arguments for OpenCV's CMake invocation. You can use this to make a custom build.
195
+
196
+ See the next section for more info about manual builds outside the CI environment.
197
+
198
+ ### Manual builds
199
+
200
+ If some dependency is not enabled in the pre-built wheels, you can also run the build locally to create a custom wheel.
201
+
202
+ 1. Clone this repository: `git clone --recursive https://github.com/opencv/opencv-python.git`
203
+ 2. ``cd opencv-python``
204
+ - you can use `git` to checkout some other version of OpenCV in the `opencv` and `opencv_contrib` submodules if needed
205
+ 3. Add custom Cmake flags if needed, for example: `export CMAKE_ARGS="-DSOME_FLAG=ON -DSOME_OTHER_FLAG=OFF"` (in Windows you need to set environment variables differently depending on Command Line or PowerShell)
206
+ 4. Select the package flavor which you wish to build with `ENABLE_CONTRIB` and `ENABLE_HEADLESS`: i.e. `export ENABLE_CONTRIB=1` if you wish to build `opencv-contrib-python`
207
+ 5. Run ``pip wheel . --verbose``. NOTE: make sure you have the latest ``pip`` version, the ``pip wheel`` command replaces the old ``python setup.py bdist_wheel`` command which does not support ``pyproject.toml``.
208
+ - this might take anything from 5 minutes to over 2 hours depending on your hardware
209
+ 6. Pip will print the fresh wheel location at the end of the build procedure. If you use the old approach with the `setup.py` file, the wheel package will be placed in the `dist` folder. The package is then ready and you can do with it whatever you wish.
210
+ - Optional: on Linux use some of the `manylinux` images as a build hosts if maximum portability is needed and run `auditwheel` for the wheel after build
211
+ - Optional: on macOS use ``delocate`` (same as ``auditwheel`` but for macOS) for better portability
212
+
213
+ #### Manual debug builds
214
+
215
+ In order to build `opencv-python` in an unoptimized debug build, you need to side-step the normal process a bit.
216
+
217
+ 1. Install the packages `scikit-build` and `numpy` via pip.
218
+ 2. Run the command `python setup.py bdist_wheel --build-type=Debug`.
219
+ 3. Install the generated wheel file in the `dist/` folder with `pip install dist/wheelname.whl`.
220
+
221
+ If you would like the build produce all compiler commands, then the following combination of flags and environment variables has been tested to work on Linux:
222
+ ```
223
+ export CMAKE_ARGS='-DCMAKE_VERBOSE_MAKEFILE=ON'
224
+ export VERBOSE=1
225
+
226
+ python3 setup.py bdist_wheel --build-type=Debug
227
+ ```
228
+
229
+ See this issue for more discussion: https://github.com/opencv/opencv-python/issues/424
230
+
231
+ #### Source distributions
232
+
233
+ Since OpenCV version 4.3.0, also source distributions are provided in PyPI. This means that if your system is not compatible with any of the wheels in PyPI, ``pip`` will attempt to build OpenCV from sources. If you need a OpenCV version which is not available in PyPI as a source distribution, please follow the manual build guidance above instead of this one.
234
+
235
+ You can also force ``pip`` to build the wheels from the source distribution. Some examples:
236
+
237
+ - ``pip install --no-binary opencv-python opencv-python``
238
+ - ``pip install --no-binary :all: opencv-python``
239
+
240
+ If you need contrib modules or headless version, just change the package name (step 4 in the previous section is not needed). However, any additional CMake flags can be provided via environment variables as described in step 3 of the manual build section. If none are provided, OpenCV's CMake scripts will attempt to find and enable any suitable dependencies. Headless distributions have hard coded CMake flags which disable all possible GUI dependencies.
241
+
242
+ On slow systems such as Raspberry Pi the full build may take several hours. On a 8-core Ryzen 7 3700X the build takes about 6 minutes.
243
+
244
+ ### Licensing
245
+
246
+ Opencv-python package (scripts in this repository) is available under MIT license.
247
+
248
+ OpenCV itself is available under [Apache 2](https://github.com/opencv/opencv/blob/master/LICENSE) license.
249
+
250
+ Third party package licenses are at [LICENSE-3RD-PARTY.txt](https://github.com/opencv/opencv-python/blob/master/LICENSE-3RD-PARTY.txt).
251
+
252
+ All wheels ship with [FFmpeg](http://ffmpeg.org) licensed under the [LGPLv2.1](http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html).
253
+
254
+ Non-headless Linux wheels ship with [Qt 5](http://doc.qt.io/qt-5/lgpl.html) licensed under the [LGPLv3](http://www.gnu.org/licenses/lgpl-3.0.html).
255
+
256
+ The packages include also other binaries. Full list of licenses can be found from [LICENSE-3RD-PARTY.txt](https://github.com/opencv/opencv-python/blob/master/LICENSE-3RD-PARTY.txt).
257
+
258
+ ### Versioning
259
+
260
+ ``find_version.py`` script searches for the version information from OpenCV sources and appends also a revision number specific to this repository to the version string. It saves the version information to ``version.py`` file under ``cv2`` in addition to some other flags.
261
+
262
+ ### Releases
263
+
264
+ A release is made and uploaded to PyPI when a new tag is pushed to master branch. These tags differentiate packages (this repo might have modifications but OpenCV version stays same) and should be incremented sequentially. In practice, release version numbers look like this:
265
+
266
+ ``cv_major.cv_minor.cv_revision.package_revision`` e.g. ``3.1.0.0``
267
+
268
+ The master branch follows OpenCV master branch releases. 3.4 branch follows OpenCV 3.4 bugfix releases.
269
+
270
+ ### Development builds
271
+
272
+ Every commit to the master branch of this repo will be built. Possible build artifacts use local version identifiers:
273
+
274
+ ``cv_major.cv_minor.cv_revision+git_hash_of_this_repo`` e.g. ``3.1.0+14a8d39``
275
+
276
+ These artifacts can't be and will not be uploaded to PyPI.
277
+
278
+ ### Manylinux wheels
279
+
280
+ Linux wheels are built using [manylinux2014](https://github.com/pypa/manylinux). These wheels should work out of the box for most of the distros (which use GNU C standard library) out there since they are built against an old version of glibc.
281
+
282
+ The default ``manylinux2014`` images have been extended with some OpenCV dependencies. See [Docker folder](https://github.com/skvark/opencv-python/tree/master/docker) for more info.
283
+
284
+ ### Supported Python versions
285
+
286
+ Python 3.x compatible pre-built wheels are provided for the officially supported Python versions (not in EOL):
287
+
288
+ - 3.7
289
+ - 3.8
290
+ - 3.9
291
+ - 3.10
292
+ - 3.11
293
+ - 3.12
294
+
295
+ ### Backward compatibility
296
+
297
+ Starting from 4.2.0 and 3.4.9 builds the macOS Travis build environment was updated to XCode 9.4. The change effectively dropped support for older than 10.13 macOS versions.
298
+
299
+ Starting from 4.3.0 and 3.4.10 builds the Linux build environment was updated from `manylinux1` to `manylinux2014`. This dropped support for old Linux distributions.
300
+
301
+ Starting from version 4.7.0 the Mac OS GitHub Actions build environment was updated to version 11. Mac OS 10.x support is deprecated. See https://github.com/actions/runner-images/issues/5583
302
+
303
+ Starting from version 4.9.0 the Mac OS GitHub Actions build environment was updated to version 12. Mac OS 10.x support is deprecated by Brew and most of the used packages.
304
+
305
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/RECORD ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cv2/Error/__init__.pyi,sha256=A6NKtoMeZAvZWHC6DrJiwMVChY7LLxFfvuZ2dW4KSm8,4076
2
+ cv2/LICENSE-3RD-PARTY.txt,sha256=ZKuaw_WkYDDW2Lvnl7aukYqNLQGG5HyiBLc0MbHrHMw,151157
3
+ cv2/LICENSE.txt,sha256=CdcZBY54Kse8cbohyUThE2zeK7lXwOiIEh8CGNa18Cw,1070
4
+ cv2/__init__.py,sha256=1tRL4PD5gcwFerwMFSN3tJbttHjR-mNVzgNfcR3JSyU,6575
5
+ cv2/__init__.pyi,sha256=evrJ7NbPFLgLGRiXV2F72kL9oerfKLFBEqD-P5wcfxg,293784
6
+ cv2/__pycache__/__init__.cpython-38.pyc,,
7
+ cv2/__pycache__/config-3.cpython-38.pyc,,
8
+ cv2/__pycache__/config.cpython-38.pyc,,
9
+ cv2/__pycache__/load_config_py2.cpython-38.pyc,,
10
+ cv2/__pycache__/load_config_py3.cpython-38.pyc,,
11
+ cv2/__pycache__/version.cpython-38.pyc,,
12
+ cv2/aruco/__init__.pyi,sha256=XOaNz4SbfQ0UFH8guZ9WgTybx8gekTOWr8452Yjz54E,13995
13
+ cv2/barcode/__init__.pyi,sha256=rUP5152HBCKUQ3WUqPl2zS5nI2w9v-AHaCeXYqPglxk,1029
14
+ cv2/config-3.py,sha256=mnqt9yS4IgAfXpY7Af1ON11F4su-Mo0sp7QqRAwIOhw,724
15
+ cv2/config.py,sha256=l04tQJbuGpqaNB3xvzPhaXNoO_GsczAG3if_LyO8WE0,111
16
+ cv2/cuda/__init__.pyi,sha256=gNkBAoEdrvkxwo4brAXNBCU_RDWixz575CWi2YEvYK4,16036
17
+ cv2/cv2.abi3.so,sha256=jbX0ZuzuXpODBp5u45ca4U4wutGrGaS8801TfBQmLFE,65162889
18
+ cv2/data/__init__.py,sha256=125Pcte_OtB55ZxjWg5ko8ugpnogZ1sRMyP48dtBCMw,70
19
+ cv2/data/__pycache__/__init__.cpython-38.pyc,,
20
+ cv2/data/haarcascade_eye.xml,sha256=ccxk_DBaNV3GAGeID2-71D3RVb1j7jhEZhob2jSy_Yw,341406
21
+ cv2/data/haarcascade_eye_tree_eyeglasses.xml,sha256=4y-cZ5NcM-nRMx6xT6WFVP8Xg1wDdCZjvLl6iS6Talc,601661
22
+ cv2/data/haarcascade_frontalcatface.xml,sha256=rCusk07yQoTviisunY5X7vhKwdaUO00R5cnoWE3Aacg,411388
23
+ cv2/data/haarcascade_frontalcatface_extended.xml,sha256=_9DR0o8H0DdsidtMmEUAnChVzHbIz_dj1TMdyTYdqFQ,382918
24
+ cv2/data/haarcascade_frontalface_alt.xml,sha256=YoHfE0Wcwhj_BH0Csq44WbEv8UqT_-iVL3sz-te5aXs,676709
25
+ cv2/data/haarcascade_frontalface_alt2.xml,sha256=ewyWfZq7373gJeuceGlH0VG2QmBA0HqPlWLtj9kHJLQ,540616
26
+ cv2/data/haarcascade_frontalface_alt_tree.xml,sha256=Dl7kfswTJp1U3XpV-LU3UhZ8Ulh3IId3MjiPsHigSAo,2689040
27
+ cv2/data/haarcascade_frontalface_default.xml,sha256=D31FJ4ROtRTUpJSOgi2pD7sWo0oLu7xq3GSYdHpar7A,930127
28
+ cv2/data/haarcascade_fullbody.xml,sha256=BBdFxx7vG1yGrvIk8XznWwQtMzFMyPZ1dCT4vYzTCqE,476827
29
+ cv2/data/haarcascade_lefteye_2splits.xml,sha256=dMMjx4yBR1_JFY-sv7hmuwzKBr5B9XHfR9SsjQH5zkw,195369
30
+ cv2/data/haarcascade_license_plate_rus_16stages.xml,sha256=TRxEv3obxOIE-iWwRu0Kz_1_cTzBP-KVi2l3Elxg3eo,47775
31
+ cv2/data/haarcascade_lowerbody.xml,sha256=HmluHHxmxDmuIpz_-IcfQgN8NX6eHgkKK1nrwfj_XLs,395322
32
+ cv2/data/haarcascade_profileface.xml,sha256=s5pKO-RVOdsUan_B0-dhopLBluuIQhGF5qYVswVeYS0,828514
33
+ cv2/data/haarcascade_righteye_2splits.xml,sha256=TPDXK-pzB-mvfrmdSsvhXXEBpnwi_Nz77v1pKtN893Y,196170
34
+ cv2/data/haarcascade_russian_plate_number.xml,sha256=gUy1lUaCr1cOWDYfnl-LW1E6QRJ3a7nsrO-fDkymwtc,75482
35
+ cv2/data/haarcascade_smile.xml,sha256=TKHzBOq9C1rjAYDIGstT4Walhn5b4Xsxa9PzLP34fYo,188506
36
+ cv2/data/haarcascade_upperbody.xml,sha256=cyirT9sVkvU9mNfqWxudkOAa9dlfISrzeMfrV5BIu18,785819
37
+ cv2/detail/__init__.pyi,sha256=FXndW6oxsE46hjgKBezLvqJ_iEAcOCmNOAZSpbSM_-8,22374
38
+ cv2/dnn/__init__.pyi,sha256=lmSN_5FJ1DrbXpIPe8iZijAK2LJDeVb-_3VOJswt6gc,22683
39
+ cv2/fisheye/__init__.pyi,sha256=Oo9DhbwF5E2wsousnSQ9m4z6p-dMQGSxsTsDoO1Ni8g,7815
40
+ cv2/flann/__init__.pyi,sha256=ZxYG07bhFyFRA2d1lbPmAm_KEknsTcE1_NNw_Ksz1HQ,2677
41
+ cv2/gapi/__init__.py,sha256=6WBAjfq1FCiRADgYXGAKITHdBB6t0_jZ8hkTU8Biz-M,10298
42
+ cv2/gapi/__init__.pyi,sha256=zCLTsHvmbiGmlDUXPWqOGdgFcj66_iw7FXiTr4Y91m0,14636
43
+ cv2/gapi/__pycache__/__init__.cpython-38.pyc,,
44
+ cv2/gapi/core/__init__.pyi,sha256=_3OM_ITOrZomn7gs4HM-DRk8ngbjWkdr26KrmH3t4ks,142
45
+ cv2/gapi/core/cpu/__init__.pyi,sha256=MfRTDEPtcQekGnrvoaSSadxyylXPfa2lz8ucAkzjmh8,93
46
+ cv2/gapi/core/fluid/__init__.pyi,sha256=MfRTDEPtcQekGnrvoaSSadxyylXPfa2lz8ucAkzjmh8,93
47
+ cv2/gapi/core/ocl/__init__.pyi,sha256=MfRTDEPtcQekGnrvoaSSadxyylXPfa2lz8ucAkzjmh8,93
48
+ cv2/gapi/ie/__init__.pyi,sha256=rbOXOU39Wpt9Lhh1o1qr7Zj7qljqAu6aqoYsm4433yQ,1117
49
+ cv2/gapi/ie/detail/__init__.pyi,sha256=hGTS3yIiIq1B-djXgSQIPmeF7VDyeyucUuZOnd4O0OQ,269
50
+ cv2/gapi/imgproc/__init__.pyi,sha256=UUtPJcDK_UaE_TKN8K9Oz1TEChCQHDDB_eTI08mVXmU,71
51
+ cv2/gapi/imgproc/fluid/__init__.pyi,sha256=MfRTDEPtcQekGnrvoaSSadxyylXPfa2lz8ucAkzjmh8,93
52
+ cv2/gapi/oak/__init__.pyi,sha256=Tb7YXytKxnBFZZ8qTqHSZsDEpRt2937NXtbOQK23Ksc,1734
53
+ cv2/gapi/onnx/__init__.pyi,sha256=XAQ4M2p7kcm0gSL_2OJkjoI8h5AzlHQh6xDQEX7z5e4,1344
54
+ cv2/gapi/onnx/ep/__init__.pyi,sha256=3a7nzkkajyZBbTM2FKOI2eGM3GKTNGId_ZNEhghdrkU,1239
55
+ cv2/gapi/ot/__init__.pyi,sha256=XTMT90lnElxl_KfhFi5xDwQWvB0g5N8tf7Cgb8VHcAY,720
56
+ cv2/gapi/ot/cpu/__init__.pyi,sha256=MfRTDEPtcQekGnrvoaSSadxyylXPfa2lz8ucAkzjmh8,93
57
+ cv2/gapi/ov/__init__.pyi,sha256=3BqKzC_lV-wzhwu2cawCBvGbMG_zxt5D6anjhORXvuM,2647
58
+ cv2/gapi/own/__init__.pyi,sha256=GzL91pOQQNsGcBGmZ_XDAXaLoF4N9qVgj_IaYzduSNc,69
59
+ cv2/gapi/own/detail/__init__.pyi,sha256=sTC8JFcjDcVxnaFfFc-VmuxjHBg6RMzfafFHtS8yrFU,140
60
+ cv2/gapi/render/__init__.pyi,sha256=S4FWzy_CJqqs3dPYl3bXJoLQSGeVZdoBK7EmHvbPVOM,66
61
+ cv2/gapi/render/ocv/__init__.pyi,sha256=MfRTDEPtcQekGnrvoaSSadxyylXPfa2lz8ucAkzjmh8,93
62
+ cv2/gapi/streaming/__init__.pyi,sha256=qIOndKlPMevrSglTW-vVugzy_n7nITT6lr_zrlUv9cI,813
63
+ cv2/gapi/video/__init__.pyi,sha256=V0Emspufw7x2-knfd7kE8LnLjY_ujIz_TaxR_oIyAps,150
64
+ cv2/gapi/wip/__init__.pyi,sha256=f7mz60ehM9yrK0_Vt28NP--WietDE65EjM5O91LVx5M,1086
65
+ cv2/gapi/wip/draw/__init__.pyi,sha256=x2BhywI5C-uMHF1H6L9AwrgjRtKHFr032TOnqtE9a9Q,3162
66
+ cv2/gapi/wip/gst/__init__.pyi,sha256=8VtSKP9duTmY7ETAACwzVEWP9xdDW0pW82UtL_8Z7Aw,467
67
+ cv2/gapi/wip/onevpl/__init__.pyi,sha256=eLbVPey7JCU5YdRSUH6lLlD1eT-1s7YqZrQh6xNdIlo,397
68
+ cv2/ipp/__init__.pyi,sha256=WSHVIqIT97vmudtuJjhOJYiZ0iBdYx4AtB0iJqtdD0o,223
69
+ cv2/load_config_py2.py,sha256=xP_h2pObzfbN8tONV7CAQmGh94fQ-0t0HysrXDDlt_Q,151
70
+ cv2/load_config_py3.py,sha256=A9wfETdKZnybfbEN1SdtZAsMLVsueGa0zO93JzK9OFI,262
71
+ cv2/mat_wrapper/__init__.py,sha256=LGhjbGujJyyQ4A5WOOtOES5WbbbpmP-_aHUPQ2Vvnec,1099
72
+ cv2/mat_wrapper/__pycache__/__init__.cpython-38.pyc,,
73
+ cv2/misc/__init__.py,sha256=yr9PkxKslxRc87hhtIJRn5RommP9jaqksYr-ZDuj7cU,37
74
+ cv2/misc/__pycache__/__init__.cpython-38.pyc,,
75
+ cv2/misc/__pycache__/version.cpython-38.pyc,,
76
+ cv2/misc/version.py,sha256=iTExq1jwGgAv3jtYQHRI8pSpmfzPsjkG9brsH0bdYhk,90
77
+ cv2/ml/__init__.pyi,sha256=KGiSrNBU8YWqJzhV3owS_b_nKl_40EXwdGrmC1e41J4,22803
78
+ cv2/ocl/__init__.pyi,sha256=HpZMIJBr7LcDWOvXAlfDd5aDzMCyDH3OVUcNtU6Zkfw,5455
79
+ cv2/ogl/__init__.pyi,sha256=KxTX9DHYyXg2ipvOJiFeAsRivAjmvBkqeiLZV-0snII,1472
80
+ cv2/parallel/__init__.pyi,sha256=tc5nNoWrTkD7VAfhbajumKF79LBolpqlKjYX-lY2__8,129
81
+ cv2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
82
+ cv2/qt/fonts/DejaVuSans-Bold.ttf,sha256=5JIhu_F8A2EnQUMWn35vFthxX2XUnx2fIW6z1mFAAwg,672300
83
+ cv2/qt/fonts/DejaVuSans-BoldOblique.ttf,sha256=c1891zgbb04MpRm3JiDHWqlT7o7om4jL5POMo8I9ank,611212
84
+ cv2/qt/fonts/DejaVuSans-ExtraLight.ttf,sha256=kI1uyALygVXI3oYZK1p3qftBeS8HLgNSbwU2wjTz6aA,345204
85
+ cv2/qt/fonts/DejaVuSans-Oblique.ttf,sha256=O8nAL-_K3VF-WhWLLzQjPdNU1n9DAkhtiOhLykZ9HUM,611556
86
+ cv2/qt/fonts/DejaVuSans.ttf,sha256=FdotjxLmlQABscyCJcG6ct3OGTiDfTdwL_Ppv215vV4,720012
87
+ cv2/qt/fonts/DejaVuSansCondensed-Bold.ttf,sha256=-GxtQKUv_itA8Z0Llyykuc40f8BNz8TQtOkneocSwN0,631992
88
+ cv2/qt/fonts/DejaVuSansCondensed-BoldOblique.ttf,sha256=ZldpOhjs7-4mZ9nQ7LGrtoUk1ieZm9g2UDnBngS0I4E,580168
89
+ cv2/qt/fonts/DejaVuSansCondensed-Oblique.ttf,sha256=SPmU6BKEZmq0v4nvTXMIWwf65sLH4oggqyQ-mUHEgp4,576004
90
+ cv2/qt/fonts/DejaVuSansCondensed.ttf,sha256=afE1XJ7vCj0RpsBvPL8dRuq_2tzJk1iaO-k6RO2GeLQ,643852
91
+ cv2/qt/plugins/platforms/libqxcb.so,sha256=b2rHw7D7EZh7mHrUsrhrQYsQDiF0sFYEoPlJkSAFdjE,29305
92
+ cv2/samples/__init__.pyi,sha256=cjSW5vo2oMpIWHwP-3IY4hWjlKUTz8gd1MX7pLOCWKo,324
93
+ cv2/segmentation/__init__.pyi,sha256=jwKBUCRaXhHAM3FdzpLuGucGfNLWxWu5CDfLOpkcan4,1739
94
+ cv2/typing/__init__.py,sha256=HoaylcwVD3ZH1MQ0oTxswFmYExDxFkS7sAWW6BWOUg4,5137
95
+ cv2/typing/__pycache__/__init__.cpython-38.pyc,,
96
+ cv2/utils/__init__.py,sha256=fuw4GHHOXsxxKc-AadAEOKQq_I1Gr4G3yMlRvAbTP30,330
97
+ cv2/utils/__init__.pyi,sha256=q7PpnVUH597R_sF7AGrsRVDOIGKflT0b77ll-mkmb7g,3592
98
+ cv2/utils/__pycache__/__init__.cpython-38.pyc,,
99
+ cv2/utils/fs/__init__.pyi,sha256=lu2cK1Dbd7wRTOTju_kVVCvU4mNB5v5hSVpBxSXXvJg,87
100
+ cv2/utils/nested/__init__.pyi,sha256=n2J3aSxC2MrPKaKb4igY_d49luuuQqW7A_YTx6eZz9Q,573
101
+ cv2/version.py,sha256=zQjL6v-wBgcqGFq6XRAWaa39FmWEY4sLTKCZ3hptMlI,92
102
+ cv2/videoio_registry/__init__.pyi,sha256=h-7AlM3cFG5xxcPwZiVQ3n3ibe7BpGPlhgDcWOqZPA4,783
103
+ opencv_python-4.9.0.80.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
104
+ opencv_python-4.9.0.80.dist-info/LICENSE-3RD-PARTY.txt,sha256=ZKuaw_WkYDDW2Lvnl7aukYqNLQGG5HyiBLc0MbHrHMw,151157
105
+ opencv_python-4.9.0.80.dist-info/LICENSE.txt,sha256=CdcZBY54Kse8cbohyUThE2zeK7lXwOiIEh8CGNa18Cw,1070
106
+ opencv_python-4.9.0.80.dist-info/METADATA,sha256=4D_lTzaeG8QbRfiIFZ3bazaqGAHPb_09sMx-2sGJPeM,20251
107
+ opencv_python-4.9.0.80.dist-info/RECORD,,
108
+ opencv_python-4.9.0.80.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
109
+ opencv_python-4.9.0.80.dist-info/WHEEL,sha256=EAtJEfUIJ_UiIhsbDQlddQslMIo1TSnut_vtG8YV5KA,142
110
+ opencv_python-4.9.0.80.dist-info/top_level.txt,sha256=SY8vrf_sYOg99OP9euhz7q36pPy_2VK5vbeEWXwwSoc,4
111
+ opencv_python.libs/libQt5Core-39545cc7.so.5.15.0,sha256=8bqvorqRNim_5obrAiVWgcIehuvtrDQJoykWMJecCVs,7333489
112
+ opencv_python.libs/libQt5Gui-a7aedf18.so.5.15.0,sha256=_1PQrJUh1At_pGBnbLVhoKKObEU5CneH8jgSk5iyZOU,8856273
113
+ opencv_python.libs/libQt5Test-c38a5234.so.5.15.0,sha256=ul3nnDMWl-Rqf3wtgIlxvcyDaR9t5MjxlYSssH2rZpE,419025
114
+ opencv_python.libs/libQt5Widgets-e69d94fb.so.5.15.0,sha256=DHPfscZnGZ9rNQSDDdKfq0-96FuVq6V1xTLR7AuqGCY,8909633
115
+ opencv_python.libs/libQt5XcbQpa-5b2d853e.so.5.15.0,sha256=lDdFucfIAfgXK3dA-pYKsYiCEnr0Fas3BZ1v0Wd3OWo,1796105
116
+ opencv_python.libs/libX11-xcb-69166bdf.so.1.0.0,sha256=oIMXcY6MB8aPh8ogBGhuTdaxyFy06iw8-mTmO8o9WzU,8873
117
+ opencv_python.libs/libXau-00ec42fe.so.6.0.0,sha256=JjysEtjYterX3CORw1X-n8k5lA4eoi7ZjuVqjLYc5oQ,17049
118
+ opencv_python.libs/libavcodec-512f0acb.so.59.37.100,sha256=1rkzomPrXGOCDSOalbI613J191jX46XOmWT-kaRSr5E,13448513
119
+ opencv_python.libs/libavformat-3ff1be5b.so.59.27.100,sha256=xxzCzbu3y-zOVzReNrHi53HygUswkSyV4hd6FGpbK_E,2571489
120
+ opencv_python.libs/libavutil-a0a0531e.so.57.28.100,sha256=_HhiKqfwZH7fZ95HlYWD9p3ANOucUPLvqFPHvhxTq6Y,844673
121
+ opencv_python.libs/libcrypto-0c9efecc.so.1.1,sha256=aQ4FPG5fBY-U4QjCcI7gc2DQ5dGkokgE9N6ZXkugD9s,3481345
122
+ opencv_python.libs/libgfortran-91cc3cb1.so.3.0.0,sha256=VePrZzBsL_F-b4oIEOqg3LJulM2DkkxQZdUEDoeBRgg,1259665
123
+ opencv_python.libs/libopenblas-r0-f650aae0.3.3.so,sha256=eewCtT9XPNcRaonwTDl0cwGOf9oFcgs1TUNQXBnUeVg,37325001
124
+ opencv_python.libs/libpng16-7379b3c3.so.16.40.0,sha256=-ChEJT9joQEfhf4MZ0rIuU4N21x2VcTyBLh38q2m9ZA,1097009
125
+ opencv_python.libs/libquadmath-96973f99.so.0.0.0,sha256=k0wi3tDn0WnE1GeIdslgUa3z2UVF2pYvYLQWWbB12js,247609
126
+ opencv_python.libs/libssl-28bef1ac.so.1.1,sha256=3MoD1DoDL-u-QgZxgTrlZJpDvlPBflLEGO6RzVctbhE,736177
127
+ opencv_python.libs/libswresample-2ec4394e.so.4.7.100,sha256=53S-M_Gn06zoAaUbYkdaMuLvXEWu2Mv1_YLkiW2oJ9I,132417
128
+ opencv_python.libs/libswscale-2c3c8be7.so.6.7.100,sha256=Lp2HzwvDYmIHUUay0z4VqLo5jICmVQr3Z4uD1C1IXVA,619945
129
+ opencv_python.libs/libvpx-e947aa05.so.8.0.1,sha256=WmdWKHzsqUpZZiaf6-C28vIJJ9CwK_yMkaR_eAz274g,3291065
130
+ opencv_python.libs/libxcb-icccm-413c9f41.so.4.0.0,sha256=KrtUIHu46x9mIwMEkEYflhOFmYFjvUB3Ok1Dn9936eI,24377
131
+ opencv_python.libs/libxcb-image-e82a276d.so.0.0.0,sha256=QYC_KsToCXKQ2u87uOb2WJmK6Z-S4yynjqYWiI3stTY,25601
132
+ opencv_python.libs/libxcb-keysyms-21015570.so.1.0.0,sha256=PjX3WLcXNZucKONqtqBW4wPbmcaukPVyLPu2JCXZ7QQ,13209
133
+ opencv_python.libs/libxcb-randr-a96a5a87.so.0.1.0,sha256=LZmVHqS5soTrAUfIJ4cy0BKHrBk0Q8cy7IBJFbhsHvY,93921
134
+ opencv_python.libs/libxcb-render-637b984a.so.0.0.0,sha256=COOiubLk9Kv2S4wVA5QaRzgllJYpLLGXjYQAKM3hs2c,78105
135
+ opencv_python.libs/libxcb-render-util-43ce00f5.so.0.0.0,sha256=N0OPbas7C-jZx7kb3--foJiJPc5odPSj-hdma1yRG2E,22161
136
+ opencv_python.libs/libxcb-shape-25c2b258.so.0.0.0,sha256=8xHTe9DQmFzk-5HtT33th8bvgCroLJiEvXdAiN3i1io,21769
137
+ opencv_python.libs/libxcb-shm-7a199f70.so.0.0.0,sha256=XrF9nlIKkNrLG9HkXnn_XIeIHPwr20hRrTWETbzVGwE,21377
138
+ opencv_python.libs/libxcb-sync-89374f40.so.1.0.0,sha256=-w1wV0pfEQbSmW-QGzsRSADRNReahcQtlYgqIjKgHeE,35673
139
+ opencv_python.libs/libxcb-util-4d666913.so.1.0.0,sha256=44mg7PRdg-AK2vHz0GT1yzW0iN8d_GUFvhFGlrLtMo8,26281
140
+ opencv_python.libs/libxcb-xfixes-9be3ba6f.so.0.0.0,sha256=n5_94_1LwyIvg9S1I1dbu6a3ROBn28MQgT-maLnRtFM,45337
141
+ opencv_python.libs/libxcb-xinerama-ae147f87.so.0.0.0,sha256=iUXAB0Ox6t7vVAJOQEzTK4GVjW3AbnHOFsWyxml6RNo,17529
142
+ opencv_python.libs/libxcb-xkb-9ba31ab3.so.1.0.0,sha256=4toATK-D72nN4FjDv7ZCXjkMpU1Giroj5hr2ebVlOjk,157921
143
+ opencv_python.libs/libxkbcommon-71ae2972.so.0.0.0,sha256=H8s4pka9HOHar2gq0pty5lv99noGM1snj46Z0LdTAhI,269865
144
+ opencv_python.libs/libxkbcommon-x11-c65ed502.so.0.0.0,sha256=rW0xj4RCtgOJ1WRD7nOSLHXJdiSAPX0bafb8U52HQ4U,48105
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: skbuild 0.17.6
3
+ Root-Is-Purelib: false
4
+ Tag: cp37-abi3-manylinux_2_17_x86_64
5
+ Tag: cp37-abi3-manylinux2014_x86_64
6
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/opencv_python-4.9.0.80.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ cv2
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
import sys

# Single-source package version attribute (PEP 396):
# https://www.python.org/dev/peps/pep-0396/
__version__ = '0.4.8'

# Fail fast at import time on interpreters too old to run this package.
if sys.version_info[:2] < (2, 4):
    raise RuntimeError('PyASN1 requires Python 2.4 or later')
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1/debug.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#
import logging
import sys

from pyasn1 import __version__
from pyasn1 import error
from pyasn1.compat.octets import octs2ints

__all__ = ['Debug', 'setLogger', 'hexdump']

# Bit flags selecting which pyasn1 subsystems emit debug output.
DEBUG_NONE = 0x0000
DEBUG_ENCODER = 0x0001
DEBUG_DECODER = 0x0002
DEBUG_ALL = 0xffff

# Maps user-facing flag names (as passed to Debug(...)) to the bitmasks above.
FLAG_MAP = {
    'none': DEBUG_NONE,
    'encoder': DEBUG_ENCODER,
    'decoder': DEBUG_DECODER,
    'all': DEBUG_ALL
}

# Modules registered via registerLoggee(), keyed by module object; values are
# (attribute name, debug flags) pairs kept in sync by setLogger().
LOGGEE_MAP = {}
29
+
30
+
31
class Printer(object):
    """Callable sink that forwards debug messages to a stdlib logger."""

    # noinspection PyShadowingNames
    def __init__(self, logger=None, handler=None, formatter=None):
        # Supply package-wide defaults for any collaborator the caller omitted:
        # the 'pyasn1' logger, a stderr stream handler, a timestamped format.
        logger = logging.getLogger('pyasn1') if logger is None else logger
        handler = logging.StreamHandler() if handler is None else handler
        if formatter is None:
            formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')

        logger.setLevel(logging.DEBUG)
        handler.setFormatter(formatter)
        handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)

        self.__logger = logger

    def __call__(self, msg):
        # All messages go out at DEBUG severity.
        self.__logger.debug(msg)

    def __str__(self):
        return '<python logging>'
56
+
57
+
58
# logging.NullHandler first appeared in Python 2.7/3.1; provide a no-op
# stand-in on older interpreters so Debug() can silence output.
if hasattr(logging, 'NullHandler'):
    NullHandler = logging.NullHandler

else:
    # Python 2.6 and older
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
66
+
67
+
68
class Debug(object):
    """Accumulates debug category flags and routes messages to a printer.

    Instances act both as a bitmask (via ``&`` / ``rand``) so callers can
    cheaply test whether a category is enabled, and as a callable message
    sink.
    """

    # Shared fallback printer; logs through the 'pyasn1' logger to stderr.
    defaultPrinter = Printer()

    def __init__(self, *flags, **options):
        self._flags = DEBUG_NONE

        if 'loggerName' in options:
            # route our logs to parent logger
            self._printer = Printer(
                logger=logging.getLogger(options['loggerName']),
                handler=NullHandler()
            )

        elif 'printer' in options:
            self._printer = options.get('printer')

        else:
            self._printer = self.defaultPrinter

        self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags)))

        for flag in flags:
            # A leading '!' or '~' on a flag name clears that category
            # instead of setting it.
            inverse = flag and flag[0] in ('!', '~')
            if inverse:
                flag = flag[1:]
            try:
                if inverse:
                    self._flags &= ~FLAG_MAP[flag]
                else:
                    self._flags |= FLAG_MAP[flag]
            except KeyError:
                # Unknown flag names are reported as pyasn1 errors.
                raise error.PyAsn1Error('bad debug flag %s' % flag)

            self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled'))

    def __str__(self):
        return 'logger %s, flags %x' % (self._printer, self._flags)

    def __call__(self, msg):
        self._printer(msg)

    def __and__(self, flag):
        return self._flags & flag

    def __rand__(self, flag):
        return flag & self._flags
114
+
115
# Module-wide logger: either DEBUG_NONE (logging disabled) or a Debug
# instance.  Mutated only through setLogger().
_LOG = DEBUG_NONE


def setLogger(userLogger):
    """Install *userLogger* globally (a Debug instance, or falsy to disable).

    Also propagates the new logger to every module registered via
    registerLoggee(), honouring each module's flag mask.
    """
    global _LOG

    if userLogger:
        _LOG = userLogger
    else:
        _LOG = DEBUG_NONE

    # Update registered logging clients
    for module, (name, flags) in LOGGEE_MAP.items():
        # Hand the module the live logger only if any of its flags are set.
        setattr(module, name, _LOG & flags and _LOG or DEBUG_NONE)
129
+
130
+
131
def registerLoggee(module, name='LOG', flags=DEBUG_NONE):
    """Register module (given by dotted name) to receive logger updates.

    The current logger (masked by *flags*) is assigned to the module's
    *name* attribute now and on every subsequent setLogger() call.
    """
    LOGGEE_MAP[sys.modules[module]] = name, flags
    # Push the currently installed logger to the newly registered module.
    setLogger(_LOG)
    return _LOG
135
+
136
+
137
def hexdump(octets):
    """Render *octets* as space-separated hex bytes, 16 per numbered row."""
    chunks = []
    for n, x in enumerate(octs2ints(octets)):
        # Start a new row, prefixed with the byte offset, every 16 bytes.
        prefix = ('\n%.5d: ' % n) if n % 16 == 0 else ''
        chunks.append('%s%.2X' % (prefix, x))
    return ' '.join(chunks)
142
+
143
+
144
class Scope(object):
    """Stack of name tokens rendered as a dotted path (e.g. ``'a.b.c'``)."""

    def __init__(self):
        self._list = []

    def __str__(self):
        return '.'.join(self._list)

    def push(self, token):
        # Descend into a nested scope.
        self._list.append(token)

    def pop(self):
        # Leave the innermost scope, returning its token.
        return self._list.pop()


# Shared scope tracker used while logging codec activity.
scope = Scope()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyasn1/error.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#
# This file is part of pyasn1 software.
#
# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pyasn1/license.html
#


class PyAsn1Error(Exception):
    """Base pyasn1 exception

    `PyAsn1Error` is the base exception class (based on
    :class:`Exception`) that represents all possible ASN.1 related
    errors.
    """


class ValueConstraintError(PyAsn1Error):
    """ASN.1 type constraints violation exception

    The `ValueConstraintError` exception indicates an ASN.1 value
    constraint violation.

    It might happen on value object instantiation (for scalar types) or on
    serialization (for constructed types).
    """


class SubstrateUnderrunError(PyAsn1Error):
    """ASN.1 data structure deserialization error

    The `SubstrateUnderrunError` exception indicates insufficient serialised
    data on input of a de-serialization codec.
    """


class PyAsn1UnicodeError(PyAsn1Error, UnicodeError):
    """Unicode text processing error

    The `PyAsn1UnicodeError` exception is a base class for errors relating to
    unicode text de/serialization.

    Apart from inheriting from :class:`PyAsn1Error`, it also inherits from
    :class:`UnicodeError` to help the caller catching unicode-related errors.
    """
    def __init__(self, message, unicode_error=None):
        # Seed self.args with the wrapped UnicodeError's args (encoding,
        # object, start, end, reason) when one was supplied.
        if isinstance(unicode_error, UnicodeError):
            UnicodeError.__init__(self, *unicode_error.args)
        # NOTE(review): this second __init__ replaces self.args with just
        # (message,); kept as-is since callers may rely on it.
        PyAsn1Error.__init__(self, message)


class PyAsn1UnicodeDecodeError(PyAsn1UnicodeError, UnicodeDecodeError):
    """Unicode text decoding error

    The `PyAsn1UnicodeDecodeError` exception represents a failure to
    deserialize unicode text.

    Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
    from :class:`UnicodeDecodeError` to help the caller catching unicode-related
    errors.
    """


class PyAsn1UnicodeEncodeError(PyAsn1UnicodeError, UnicodeEncodeError):
    """Unicode text encoding error

    The `PyAsn1UnicodeEncodeError` exception represents a failure to
    serialize unicode text.

    Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
    from :class:`UnicodeEncodeError` to help the caller catching
    unicode-related errors.
    """
74
+
75
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/__init__.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# flake8: noqa

# Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
# Copyright (c) 2012-2020 The PyWavelets Developers
# <https://github.com/PyWavelets/pywt>
# See LICENSE for more details.

"""
Discrete forward and inverse wavelet transform, stationary wavelet transform,
wavelet packets signal decomposition and reconstruction module.
"""

from __future__ import division, print_function, absolute_import

# Re-export the public API of each implementation submodule at package level.
from ._extensions._pywt import *
from ._functions import *
from ._multilevel import *
from ._multidim import *
from ._thresholding import *
from ._wavelet_packets import *
from ._dwt import *
from ._swt import *
from ._cwt import *
from ._mra import *

from . import data

# Public API: every name pulled in above that is not private.
__all__ = [s for s in dir() if not s.startswith('_')]
try:
    # In Python 2.x the name of the tempvar leaks out of the list
    # comprehension. Delete it to not make it show up in the main namespace.
    del s
except NameError:
    pass

from pywt.version import version as __version__

# Expose `pywt.test()` for running the package's test suite via pytest.
from ._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_c99_config.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Autogenerated file containing compile-time definitions

# Non-zero when the build compiler supported C99 complex types.
# Do not edit by hand; this file is regenerated by the build system.
_have_c99_complex = 1
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_cwt.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from math import floor, ceil

from ._extensions._pywt import (DiscreteContinuousWavelet, ContinuousWavelet,
                                Wavelet, _check_dtype)
from ._functions import integrate_wavelet, scale2frequency


__all__ = ["cwt"]


import numpy as np

# Select the best available FFT backend; SciPy is an optional dependency.
try:
    # Prefer scipy.fft (new in SciPy 1.4)
    import scipy.fft
    fftmodule = scipy.fft
    next_fast_len = fftmodule.next_fast_len
except ImportError:
    try:
        import scipy.fftpack
        fftmodule = scipy.fftpack
        next_fast_len = fftmodule.next_fast_len
    except ImportError:
        fftmodule = np.fft

        # provide a fallback so scipy is an optional requirement
        def next_fast_len(n):
            """Round up size to the nearest power of two.

            Given a number of samples `n`, returns the next power of two
            following this number to take advantage of FFT speedup.
            This fallback is less efficient than `scipy.fftpack.next_fast_len`
            """
            return 2**ceil(np.log2(n))
+
36
+
37
def cwt(data, scales, wavelet, sampling_period=1., method='conv', axis=-1):
    """
    cwt(data, scales, wavelet)

    One dimensional Continuous Wavelet Transform.

    Parameters
    ----------
    data : array_like
        Input signal
    scales : array_like
        The wavelet scales to use. One can use
        ``f = scale2frequency(wavelet, scale)/sampling_period`` to determine
        what physical frequency, ``f``. Here, ``f`` is in hertz when the
        ``sampling_period`` is given in seconds.
    wavelet : Wavelet object or name
        Wavelet to use
    sampling_period : float
        Sampling period for the frequencies output (optional).
        The values computed for ``coefs`` are independent of the choice of
        ``sampling_period`` (i.e. ``scales`` is not scaled by the sampling
        period).
    method : {'conv', 'fft'}, optional
        The method used to compute the CWT. Can be any of:

        - ``conv`` uses ``numpy.convolve``.
        - ``fft`` uses frequency domain convolution.
        - ``auto`` uses automatic selection based on an estimate of the
          computational complexity at each scale.

        The ``conv`` method complexity is ``O(len(scale) * len(data))``.
        The ``fft`` method is ``O(N * log2(N))`` with
        ``N = len(scale) + len(data) - 1``. It is well suited for large size
        signals but slightly slower than ``conv`` on small ones.
    axis: int, optional
        Axis over which to compute the CWT. If not given, the last axis is
        used.

    Returns
    -------
    coefs : array_like
        Continuous wavelet transform of the input signal for the given scales
        and wavelet. The first axis of ``coefs`` corresponds to the scales.
        The remaining axes match the shape of ``data``.
    frequencies : array_like
        If the unit of sampling period are seconds and given, then frequencies
        are in hertz. Otherwise, a sampling period of 1 is assumed.

    Notes
    -----
    Size of coefficients arrays depends on the length of the input array and
    the length of given scales.

    Examples
    --------
    >>> import pywt
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> x = np.arange(512)
    >>> y = np.sin(2*np.pi*x/32)
    >>> coef, freqs=pywt.cwt(y,np.arange(1,129),'gaus1')
    >>> plt.matshow(coef) # doctest: +SKIP
    >>> plt.show() # doctest: +SKIP
    ----------
    >>> import pywt
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> t = np.linspace(-1, 1, 200, endpoint=False)
    >>> sig = np.cos(2 * np.pi * 7 * t) + np.real(np.exp(-7*(t-0.4)**2)*np.exp(1j*2*np.pi*2*(t-0.4)))
    >>> widths = np.arange(1, 31)
    >>> cwtmatr, freqs = pywt.cwt(sig, widths, 'mexh')
    >>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
    ...            vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())  # doctest: +SKIP
    >>> plt.show() # doctest: +SKIP
    """

    # accept array_like input; make a copy to ensure a contiguous array
    dt = _check_dtype(data)
    data = np.asarray(data, dtype=dt)
    dt_cplx = np.result_type(dt, np.complex64)
    if not isinstance(wavelet, (ContinuousWavelet, Wavelet)):
        wavelet = DiscreteContinuousWavelet(wavelet)
    if np.isscalar(scales):
        scales = np.array([scales])
    if not np.isscalar(axis):
        raise np.AxisError("axis must be a scalar.")

    # Output is complex only for complex-valued wavelets.
    dt_out = dt_cplx if wavelet.complex_cwt else dt
    out = np.empty((np.size(scales),) + data.shape, dtype=dt_out)
    precision = 10
    int_psi, x = integrate_wavelet(wavelet, precision=precision)
    int_psi = np.conj(int_psi) if wavelet.complex_cwt else int_psi

    # convert int_psi, x to the same precision as the data
    dt_psi = dt_cplx if int_psi.dtype.kind == 'c' else dt
    int_psi = np.asarray(int_psi, dtype=dt_psi)
    x = np.asarray(x, dtype=data.real.dtype)

    if method == 'fft':
        # size_scale0/fft_data cache the FFT of `data` across scales as long
        # as the padded FFT size stays the same.
        size_scale0 = -1
        fft_data = None
    elif not method == 'conv':
        raise ValueError("method must be 'conv' or 'fft'")

    if data.ndim > 1:
        # move axis to be transformed last (so it is contiguous)
        data = data.swapaxes(-1, axis)

        # reshape to (n_batch, data.shape[-1])
        data_shape_pre = data.shape
        data = data.reshape((-1, data.shape[-1]))

    for i, scale in enumerate(scales):
        # Resample the integrated wavelet to the current scale by index
        # selection over the precomputed grid.
        step = x[1] - x[0]
        j = np.arange(scale * (x[-1] - x[0]) + 1) / (scale * step)
        j = j.astype(int)  # floor
        if j[-1] >= int_psi.size:
            j = np.extract(j < int_psi.size, j)
        int_psi_scale = int_psi[j][::-1]

        if method == 'conv':
            if data.ndim == 1:
                conv = np.convolve(data, int_psi_scale)
            else:
                # batch convolution via loop
                conv_shape = list(data.shape)
                conv_shape[-1] += int_psi_scale.size - 1
                conv_shape = tuple(conv_shape)
                conv = np.empty(conv_shape, dtype=dt_out)
                for n in range(data.shape[0]):
                    conv[n, :] = np.convolve(data[n], int_psi_scale)
        else:
            # The padding is selected for:
            # - optimal FFT complexity
            # - to be larger than the two signals length to avoid circular
            #   convolution
            size_scale = next_fast_len(
                data.shape[-1] + int_psi_scale.size - 1
            )
            if size_scale != size_scale0:
                # Must recompute fft_data when the padding size changes.
                fft_data = fftmodule.fft(data, size_scale, axis=-1)
            size_scale0 = size_scale
            fft_wav = fftmodule.fft(int_psi_scale, size_scale, axis=-1)
            conv = fftmodule.ifft(fft_wav * fft_data, axis=-1)
            conv = conv[..., :data.shape[-1] + int_psi_scale.size - 1]

        # Differentiate the convolution with the *integrated* wavelet to get
        # the CWT coefficients, rescaled by sqrt(scale).
        coef = - np.sqrt(scale) * np.diff(conv, axis=-1)
        if out.dtype.kind != 'c':
            coef = coef.real
        # transform axis is always -1 due to the data reshape above
        d = (coef.shape[-1] - data.shape[-1]) / 2.
        if d > 0:
            coef = coef[..., floor(d):-ceil(d)]
        elif d < 0:
            raise ValueError(
                "Selected scale of {} too small.".format(scale))
        if data.ndim > 1:
            # restore original data shape and axis position
            coef = coef.reshape(data_shape_pre)
            coef = coef.swapaxes(axis, -1)
        out[i, ...] = coef

    frequencies = scale2frequency(wavelet, scales, precision)
    if np.isscalar(frequencies):
        frequencies = np.array([frequencies])
    frequencies /= sampling_period
    return out, frequencies
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_doc_utils.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utilities used to generate various figures in the documentation."""
2
+ from itertools import product
3
+
4
+ import numpy as np
5
+ from matplotlib import pyplot as plt
6
+
7
+ from ._dwt import pad
8
+
9
+ __all__ = ['wavedec_keys', 'wavedec2_keys', 'draw_2d_wp_basis',
10
+ 'draw_2d_fswavedecn_basis', 'boundary_mode_subplot']
11
+
12
+
13
def wavedec_keys(level):
    """Return the subband key strings produced by a wavedec decomposition.

    At each level an approximation ('a') and a detail ('d') key are
    appended to the current approximation prefix; every intermediate
    approximation key is dropped because it is refined further.
    """
    keys = {}
    prefix = ''
    for lev in range(level):
        keys[prefix + 'a'] = None
        keys[prefix + 'd'] = None
        prefix = 'a' * (lev + 1)
        # only the final level keeps its approximation subband
        if lev != level - 1:
            del keys[prefix]
    return list(keys)
24
+
25
+
26
def wavedec2_keys(level):
    """Return the subband key strings produced by a wavedec2 decomposition.

    At each level the four 2D subband keys ('a', 'h', 'v', 'd') are
    appended to the current approximation prefix; every intermediate
    approximation key is dropped because it is refined further.
    """
    keys = {}
    prefix = ''
    for lev in range(level):
        for subband in 'ahvd':
            keys[prefix + subband] = None
        prefix = 'a' * (lev + 1)
        # only the final level keeps its approximation subband
        if lev != level - 1:
            del keys[prefix]
    return list(keys)
37
+
38
+
39
+ def _box(bl, ur):
40
+ """(x, y) coordinates for the 4 lines making up a rectangular box.
41
+
42
+ Parameters
43
+ ==========
44
+ bl : float
45
+ The bottom left corner of the box
46
+ ur : float
47
+ The upper right corner of the box
48
+
49
+ Returns
50
+ =======
51
+ coords : 2-tuple
52
+ The first and second elements of the tuple are the x and y coordinates
53
+ of the box.
54
+ """
55
+ xl, xr = bl[0], ur[0]
56
+ yb, yt = bl[1], ur[1]
57
+ box_x = [xl, xr,
58
+ xr, xr,
59
+ xr, xl,
60
+ xl, xl]
61
+ box_y = [yb, yb,
62
+ yb, yt,
63
+ yt, yt,
64
+ yt, yb]
65
+ return (box_x, box_y)
66
+
67
+
68
def _2d_wp_basis_coords(shape, keys):
    """Compute box outlines and label centers for ``draw_2d_wp_basis``.

    Each key (a string of 'a'/'h'/'v'/'d' characters) selects a nested
    subband rectangle; the rectangle outline and its center point (used
    for labeling) are returned for every key.
    """
    boxes = []
    label_pos = {}  # center of each box, keyed by subband string
    for key in keys:
        x0 = y0 = 0
        for depth, subband in enumerate(key):
            half_x = shape[0] // 2 ** (depth + 1)
            half_y = shape[1] // 2 ** (depth + 1)
            # 'h' and 'd' subbands sit in the right half, 'v' and 'd'
            # in the lower half of the current quadrant
            if subband in 'hd':
                x0 += half_x
            if subband in 'vd':
                y0 += half_y
        xc, yc = _box((x0, -y0), (x0 + half_x, -y0 - half_y))
        boxes.append((xc, yc))
        label_pos[key] = (x0 + half_x // 2, -y0 - half_y // 2)
    return boxes, label_pos
86
+
87
+
88
def draw_2d_wp_basis(shape, keys, fmt='k', plot_kwargs={}, ax=None,
                     label_levels=0):
    """Plot a 2D representation of a WaveletPacket2D basis.

    Draws the rectangular outline of each subband named in ``keys`` for an
    image of the given ``shape``, optionally labeling subbands up to a
    maximum key length of ``label_levels``.

    Returns the matplotlib ``(fig, ax)`` pair used for drawing.

    NOTE(review): ``plot_kwargs`` is accepted but never used in this body —
    confirm whether it should be forwarded to ``ax.plot``.
    """
    coords, centers = _2d_wp_basis_coords(shape, keys)
    # create a new figure only when the caller did not supply an axis
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    else:
        fig = ax.get_figure()
    for coord in coords:
        ax.plot(coord[0], coord[1], fmt)
    ax.set_axis_off()
    ax.axis('square')
    if label_levels > 0:
        # label only subbands whose key is short enough (shallow levels)
        for key, c in centers.items():
            if len(key) <= label_levels:
                ax.text(c[0], c[1], key,
                        horizontalalignment='center',
                        verticalalignment='center')
    return fig, ax
107
+
108
+
109
def _2d_fswavedecn_coords(shape, levels):
    """Box outlines and label centers for ``draw_2d_fswavedecn_basis``.

    The fully separable basis is the outer product of two 1D wavedec
    decompositions, so every pair of 1D subband keys yields one rectangle.
    """
    boxes = []
    label_pos = {}  # center of each box, keyed by the (key0, key1) pair
    keys1d = wavedec_keys(levels)
    for key0, key1 in product(keys1d, repeat=2):
        x0 = y0 = 0
        # only 'd' characters shift the rectangle away from the origin
        for n0, ch in enumerate(key0):
            if ch == 'd':
                x0 += shape[0] // 2 ** (n0 + 1)
        for n1, ch in enumerate(key1):
            if ch == 'd':
                y0 += shape[1] // 2 ** (n1 + 1)
        # the depth of each key (final loop index) fixes the subband extent
        w0 = shape[0] // 2 ** (n0 + 1)
        w1 = shape[1] // 2 ** (n1 + 1)
        xc, yc = _box((x0, -y0), (x0 + w0, -y0 - w1))
        boxes.append((xc, yc))
        label_pos[(key0, key1)] = (x0 + w0 / 2, -y0 - w1 / 2)
    return boxes, label_pos
130
+
131
+
132
def draw_2d_fswavedecn_basis(shape, levels, fmt='k', plot_kwargs={}, ax=None,
                             label_levels=0):
    """Plot a 2D representation of a fully separable (fswavedecn) basis.

    Draws the rectangular outline of every subband produced by ``levels``
    of a fully separable decomposition of an image of the given ``shape``,
    optionally labeling subbands whose deepest key length is at most
    ``label_levels``.

    Returns the matplotlib ``(fig, ax)`` pair used for drawing.

    NOTE(review): ``plot_kwargs`` is accepted but never used in this body —
    confirm whether it should be forwarded to ``ax.plot``.
    """
    coords, centers = _2d_fswavedecn_coords(shape, levels)
    # create a new figure only when the caller did not supply an axis
    if ax is None:
        fig, ax = plt.subplots(1, 1)
    else:
        fig = ax.get_figure()
    for coord in coords:
        ax.plot(coord[0], coord[1], fmt)
    ax.set_axis_off()
    ax.axis('square')
    if label_levels > 0:
        for key, c in centers.items():
            # key is a (key0, key1) tuple; label by its deeper component
            lev = np.max([len(k) for k in key])
            if lev <= label_levels:
                ax.text(c[0], c[1], key,
                        horizontalalignment='center',
                        verticalalignment='center')
    return fig, ax
152
+
153
+
154
def boundary_mode_subplot(x, mode, ax, symw=True):
    """Plot an illustration of the boundary mode in a subplot axis.

    Parameters
    ----------
    x : ndarray
        1D signal whose extension is illustrated.
    mode : str
        Signal extension mode to demonstrate (e.g. 'symmetric',
        'periodization', 'smooth', ...).
    ax : matplotlib axis
        Axis to draw into.
    symw : bool, optional
        If True the vertical symmetry bars sit on sample positions
        (whole-sample symmetry); if False they are shifted left by half
        a sample (half-sample symmetry).
    """

    # if odd-length, periodization replicates the last sample to make it even
    if mode == 'periodization' and len(x) % 2 == 1:
        x = np.concatenate((x, (x[-1], )))

    # extend by two full signal lengths on each side
    npad = 2 * len(x)
    t = np.arange(len(x) + 2 * npad)
    xp = pad(x, (npad, npad), mode=mode)

    # plot the extended signal in black
    ax.plot(t, xp, 'k.')
    ax.set_title(mode)

    # plot the original signal in red
    if mode == 'periodization':
        # the duplicated last sample is not part of the original signal
        ax.plot(t[npad:npad + len(x) - 1], x[:-1], 'r.')
    else:
        ax.plot(t[npad:npad + len(x)], x, 'r.')

    # add vertical bars indicating points of symmetry or boundary extension
    o2 = np.ones(2)
    left = npad
    if symw:
        step = len(x) - 1
        rng = range(-2, 4)
    else:
        left -= 0.5
        step = len(x)
        rng = range(-2, 4)
    if mode in ['smooth', 'constant', 'zero']:
        # these modes only have a boundary at the signal edges themselves
        rng = range(0, 2)
    for rep in rng:
        ax.plot((left + rep * step) * o2, [xp.min() - .5, xp.max() + .5], 'k-')
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_dwt.py ADDED
@@ -0,0 +1,517 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numbers import Number
2
+
3
+ import numpy as np
4
+
5
+ from ._c99_config import _have_c99_complex
6
+ from ._extensions._pywt import Wavelet, Modes, _check_dtype, wavelist
7
+ from ._extensions._dwt import (dwt_single, dwt_axis, idwt_single, idwt_axis,
8
+ upcoef as _upcoef, downcoef as _downcoef,
9
+ dwt_max_level as _dwt_max_level,
10
+ dwt_coeff_len as _dwt_coeff_len)
11
+ from ._utils import string_types, _as_wavelet
12
+
13
+
14
+ __all__ = ["dwt", "idwt", "downcoef", "upcoef", "dwt_max_level",
15
+ "dwt_coeff_len", "pad"]
16
+
17
+
18
def dwt_max_level(data_len, filter_len):
    r"""
    dwt_max_level(data_len, filter_len)

    Compute the maximum useful level of decomposition.

    Parameters
    ----------
    data_len : int
        Input data length.
    filter_len : int, str or Wavelet
        The wavelet filter length. Alternatively, the name of a discrete
        wavelet or a Wavelet object can be specified.

    Returns
    -------
    max_level : int
        Maximum level.

    Notes
    -----
    Decomposition stops at the last level where at least one output
    coefficient is uncorrupted by edge effects, i.e. where the signal is
    still at least as long as the FIR filter:

    .. math::
        \mathtt{max\_level} = \left\lfloor\log_2\left(\mathtt{
            \frac{data\_len}{filter\_len - 1}}\right)\right\rfloor

    Examples
    --------
    >>> import pywt
    >>> w = pywt.Wavelet('sym5')
    >>> pywt.dwt_max_level(data_len=1000, filter_len=w.dec_len)
    6
    >>> pywt.dwt_max_level(1000, w)
    6
    >>> pywt.dwt_max_level(1000, 'sym5')
    6
    """
    # resolve filter_len to a plain integer filter length
    if isinstance(filter_len, Wavelet):
        flen = filter_len.dec_len
    elif isinstance(filter_len, string_types):
        if filter_len not in wavelist(kind='discrete'):
            raise ValueError(
                ("'{}', is not a recognized discrete wavelet. A list of "
                 "supported wavelet names can be obtained via "
                 "pywt.wavelist(kind='discrete')").format(filter_len))
        flen = Wavelet(filter_len).dec_len
    elif isinstance(filter_len, Number) and filter_len % 1 == 0:
        flen = filter_len
    else:
        raise ValueError(
            "filter_len must be an integer, discrete Wavelet object, or the "
            "name of a discrete wavelet.")

    if flen < 2:
        raise ValueError("invalid wavelet filter length")

    return _dwt_max_level(data_len, flen)
81
+
82
+
83
def dwt_coeff_len(data_len, filter_len, mode):
    """
    dwt_coeff_len(data_len, filter_len, mode='symmetric')

    Returns length of dwt output for given data length, filter length and mode

    Parameters
    ----------
    data_len : int
        Data length.
    filter_len : int
        Filter length.
    mode : str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`.

    Returns
    -------
    len : int
        Length of dwt output.

    Notes
    -----
    For all modes except periodization::

        len(cA) == len(cD) == floor((len(data) + wavelet.dec_len - 1) / 2)

    for periodization mode ("per")::

        len(cA) == len(cD) == ceil(len(data) / 2)

    """
    # a Wavelet object may be passed in place of an integer filter length
    if isinstance(filter_len, Wavelet):
        flen = filter_len.dec_len
    else:
        flen = filter_len
    return _dwt_coeff_len(data_len, flen, Modes.from_object(mode))
118
+
119
+
120
def dwt(data, wavelet, mode='symmetric', axis=-1):
    """
    dwt(data, wavelet, mode='symmetric', axis=-1)

    Single level Discrete Wavelet Transform.

    Parameters
    ----------
    data : array_like
        Input signal
    wavelet : Wavelet object or name
        Wavelet to use
    mode : str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`.
    axis: int, optional
        Axis over which to compute the DWT. If not given, the
        last axis is used.

    Returns
    -------
    (cA, cD) : tuple
        Approximation and detail coefficients.

    Notes
    -----
    Length of coefficients arrays depends on the selected mode.
    For all modes except periodization:

    ``len(cA) == len(cD) == floor((len(data) + wavelet.dec_len - 1) / 2)``

    For periodization mode ("per"):

    ``len(cA) == len(cD) == ceil(len(data) / 2)``

    Examples
    --------
    >>> import pywt
    >>> (cA, cD) = pywt.dwt([1, 2, 3, 4, 5, 6], 'db1')
    >>> cA
    array([ 2.12132034,  4.94974747,  7.77817459])
    >>> cD
    array([-0.70710678, -0.70710678, -0.70710678])

    """
    # Without C99 complex support in the extension, transform the real and
    # imaginary parts separately and recombine (the DWT is linear).
    if not _have_c99_complex and np.iscomplexobj(data):
        data = np.asarray(data)
        cA_r, cD_r = dwt(data.real, wavelet, mode, axis)
        cA_i, cD_i = dwt(data.imag, wavelet, mode, axis)
        return (cA_r + 1j*cA_i, cD_r + 1j*cD_i)

    # accept array_like input; make a copy to ensure a contiguous array
    dt = _check_dtype(data)
    data = np.asarray(data, dtype=dt, order='C')
    mode = Modes.from_object(mode)
    wavelet = _as_wavelet(wavelet)

    # normalize negative axis values, then bounds-check
    if axis < 0:
        axis = axis + data.ndim
    if not 0 <= axis < data.ndim:
        raise np.AxisError("Axis greater than data dimensions")

    if data.ndim == 1:
        cA, cD = dwt_single(data, wavelet, mode)
        # TODO: Check whether this makes a copy
        # re-cast to the caller's dtype (dwt_single may promote)
        cA, cD = np.asarray(cA, dt), np.asarray(cD, dt)
    else:
        # n-D input: transform along the requested axis only
        cA, cD = dwt_axis(data, wavelet, mode, axis=axis)

    return (cA, cD)
189
+
190
+
191
def idwt(cA, cD, wavelet, mode='symmetric', axis=-1):
    """
    idwt(cA, cD, wavelet, mode='symmetric', axis=-1)

    Single level Inverse Discrete Wavelet Transform.

    Parameters
    ----------
    cA : array_like or None
        Approximation coefficients.  If None, will be set to array of zeros
        with same shape as ``cD``.
    cD : array_like or None
        Detail coefficients.  If None, will be set to array of zeros
        with same shape as ``cA``.
    wavelet : Wavelet object or name
        Wavelet to use
    mode : str, optional (default: 'symmetric')
        Signal extension mode, see :ref:`Modes <ref-modes>`.
    axis: int, optional
        Axis over which to compute the inverse DWT. If not given, the
        last axis is used.

    Returns
    -------
    rec: array_like
        Single level reconstruction of signal from given coefficients.

    Examples
    --------
    >>> import pywt
    >>> (cA, cD) = pywt.dwt([1,2,3,4,5,6], 'db2', 'smooth')
    >>> pywt.idwt(cA, cD, 'db2', 'smooth')
    array([ 1.,  2.,  3.,  4.,  5.,  6.])

    One of the neat features of ``idwt`` is that one of the ``cA`` and ``cD``
    arguments can be set to None.  In that situation the reconstruction will be
    performed using only the other one.  Mathematically speaking, this is
    equivalent to passing a zero-filled array as one of the arguments.

    >>> (cA, cD) = pywt.dwt([1,2,3,4,5,6], 'db2', 'smooth')
    >>> A = pywt.idwt(cA, None, 'db2', 'smooth')
    >>> D = pywt.idwt(None, cD, 'db2', 'smooth')
    >>> A + D
    array([ 1.,  2.,  3.,  4.,  5.,  6.])

    """
    # TODO: Lots of possible allocations to eliminate (zeros_like, asarray(rec))
    # accept array_like input; make a copy to ensure a contiguous array

    if cA is None and cD is None:
        raise ValueError("At least one coefficient parameter must be "
                         "specified.")

    # for complex inputs: compute real and imaginary separately then combine
    if not _have_c99_complex and (np.iscomplexobj(cA) or np.iscomplexobj(cD)):
        # a missing coefficient array is equivalent to an array of zeros
        if cA is None:
            cD = np.asarray(cD)
            cA = np.zeros_like(cD)
        elif cD is None:
            cA = np.asarray(cA)
            cD = np.zeros_like(cA)
        return (idwt(cA.real, cD.real, wavelet, mode, axis) +
                1j*idwt(cA.imag, cD.imag, wavelet, mode, axis))

    # coerce each provided coefficient array to a contiguous supported dtype
    if cA is not None:
        dt = _check_dtype(cA)
        cA = np.asarray(cA, dtype=dt, order='C')
    if cD is not None:
        dt = _check_dtype(cD)
        cD = np.asarray(cD, dtype=dt, order='C')

    if cA is not None and cD is not None:
        if cA.dtype != cD.dtype:
            # need to upcast to common type
            if cA.dtype.kind == 'c' or cD.dtype.kind == 'c':
                dtype = np.complex128
            else:
                dtype = np.float64
            cA = cA.astype(dtype)
            cD = cD.astype(dtype)
    elif cA is None:
        cA = np.zeros_like(cD)
    elif cD is None:
        cD = np.zeros_like(cA)

    # cA and cD should be same dimension by here
    ndim = cA.ndim

    mode = Modes.from_object(mode)
    wavelet = _as_wavelet(wavelet)

    # normalize negative axis values, then bounds-check
    if axis < 0:
        axis = axis + ndim
    if not 0 <= axis < ndim:
        raise np.AxisError("Axis greater than coefficient dimensions")

    if ndim == 1:
        rec = idwt_single(cA, cD, wavelet, mode)
    else:
        rec = idwt_axis(cA, cD, wavelet, mode, axis=axis)

    return rec
293
+
294
+
295
def downcoef(part, data, wavelet, mode='symmetric', level=1):
    """
    downcoef(part, data, wavelet, mode='symmetric', level=1)

    Partial Discrete Wavelet Transform data decomposition.

    Like ``pywt.dwt`` but computes only one coefficient set — useful when
    only the approximation or only the details at a given level are needed.

    Parameters
    ----------
    part : str
        Coefficients type:

        * 'a' - approximations reconstruction is performed
        * 'd' - details reconstruction is performed

    data : array_like
        Input signal.
    wavelet : Wavelet object or name
        Wavelet to use
    mode : str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`.
    level : int, optional
        Decomposition level. Default is 1.

    Returns
    -------
    coeffs : ndarray
        1-D array of coefficients.

    See Also
    --------
    upcoef

    """
    # Without C99 complex support, process real/imaginary channels
    # independently and recombine.
    if not _have_c99_complex and np.iscomplexobj(data):
        re_part = downcoef(part, data.real, wavelet, mode, level)
        im_part = downcoef(part, data.imag, wavelet, mode, level)
        return re_part + 1j * im_part
    # accept array_like input; copy so the extension sees a contiguous array
    dt = _check_dtype(data)
    data = np.asarray(data, dtype=dt, order='C')
    if data.ndim > 1:
        raise ValueError("downcoef only supports 1d data.")
    if part not in 'ad':
        raise ValueError("Argument 1 must be 'a' or 'd', not '%s'." % part)
    mode = Modes.from_object(mode)
    wavelet = _as_wavelet(wavelet)
    return np.asarray(_downcoef(part == 'a', data, wavelet, mode, level))
344
+
345
+
346
def upcoef(part, coeffs, wavelet, level=1, take=0):
    """
    upcoef(part, coeffs, wavelet, level=1, take=0)

    Direct reconstruction from coefficients.

    Parameters
    ----------
    part : str
        Coefficients type:
        * 'a' - approximations reconstruction is performed
        * 'd' - details reconstruction is performed
    coeffs : array_like
        Coefficients array to recontruct
    wavelet : Wavelet object or name
        Wavelet to use
    level : int, optional
        Multilevel reconstruction level.  Default is 1.
    take : int, optional
        Take central part of length equal to 'take' from the result.
        Default is 0.

    Returns
    -------
    rec : ndarray
        1-D array with reconstructed data from coefficients.

    See Also
    --------
    downcoef

    Examples
    --------
    >>> import pywt
    >>> data = [1,2,3,4,5,6]
    >>> (cA, cD) = pywt.dwt(data, 'db2', 'smooth')
    >>> pywt.upcoef('a', cA, 'db2') + pywt.upcoef('d', cD, 'db2')
    array([-0.25      , -0.4330127 ,  1.        ,  2.        ,  3.        ,
            4.        ,  5.        ,  6.        ,  1.78589838, -1.03108891])
    >>> n = len(data)
    >>> pywt.upcoef('a', cA, 'db2', take=n) + pywt.upcoef('d', cD, 'db2', take=n)
    array([ 1.,  2.,  3.,  4.,  5.,  6.])

    """
    # Without C99 complex support, reconstruct real/imaginary channels
    # independently and recombine.
    if not _have_c99_complex and np.iscomplexobj(coeffs):
        re_part = upcoef(part, coeffs.real, wavelet, level, take)
        im_part = upcoef(part, coeffs.imag, wavelet, level, take)
        return re_part + 1j * im_part
    # accept array_like input; copy so the extension sees a contiguous array
    dt = _check_dtype(coeffs)
    coeffs = np.asarray(coeffs, dtype=dt, order='C')
    if coeffs.ndim > 1:
        raise ValueError("upcoef only supports 1d coeffs.")
    wavelet = _as_wavelet(wavelet)
    if part not in 'ad':
        raise ValueError("Argument 1 must be 'a' or 'd', not '%s'." % part)
    return np.asarray(_upcoef(part == 'a', coeffs, wavelet, level, take))
402
+
403
+
404
def pad(x, pad_widths, mode):
    """Extend a 1D signal using a given boundary mode.

    This function operates like :func:`numpy.pad` but supports all signal
    extension modes that can be used by PyWavelets discrete wavelet transforms.

    Parameters
    ----------
    x : ndarray
        The array to pad
    pad_widths : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ``((before_1, after_1), … (before_N, after_N))`` unique pad widths for
        each axis. ``((before, after),)`` yields same before and after pad for
        each axis. ``(pad,)`` or int is a shortcut for
        ``before = after = pad width`` for all axes.
    mode : str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to array with shape increased according to
        ``pad_widths``.

    Raises
    ------
    ValueError
        If any pad width is negative or ``mode`` is not a supported
        signal extension mode.

    Notes
    -----
    The performance of padding in dimensions > 1 may be substantially slower
    for modes ``'smooth'`` and ``'antisymmetric'`` as these modes are not
    supported efficiently by the underlying :func:`numpy.pad` function.

    Note that the behavior of the ``'constant'`` mode here follows the
    PyWavelets convention which is different from NumPy (it is equivalent to
    ``mode='edge'`` in :func:`numpy.pad`).
    """
    x = np.asanyarray(x)

    # process pad_widths exactly as in numpy.pad
    pad_widths = np.array(pad_widths)
    pad_widths = np.round(pad_widths).astype(np.intp, copy=False)
    if pad_widths.min() < 0:
        # zero widths are valid (no padding on that side); only negative
        # widths are rejected
        raise ValueError("pad_widths must be >= 0")
    pad_widths = np.broadcast_to(pad_widths, (x.ndim, 2)).tolist()

    if mode in ['symmetric', 'reflect']:
        xp = np.pad(x, pad_widths, mode=mode)
    elif mode in ['periodic', 'periodization']:
        if mode == 'periodization':
            # Promote odd-sized dimensions to even length by duplicating the
            # last value.
            edge_pad_widths = [(0, x.shape[ax] % 2)
                               for ax in range(x.ndim)]
            x = np.pad(x, edge_pad_widths, mode='edge')
        xp = np.pad(x, pad_widths, mode='wrap')
    elif mode == 'zero':
        xp = np.pad(x, pad_widths, mode='constant', constant_values=0)
    elif mode == 'constant':
        # PyWavelets 'constant' replicates the edge value (numpy 'edge')
        xp = np.pad(x, pad_widths, mode='edge')
    elif mode == 'smooth':
        def pad_smooth(vector, pad_width, iaxis, kwargs):
            # smooth (linear) extension to the left; skip when the width is
            # zero (a ``vector[-0:]``-style slice would cover the whole array)
            if pad_width[0]:
                left = vector[pad_width[0]]
                slope_left = (left - vector[pad_width[0] + 1])
                vector[:pad_width[0]] = \
                    left + np.arange(pad_width[0], 0, -1) * slope_left

            # smooth (linear) extension to the right
            if pad_width[1]:
                right = vector[-pad_width[1] - 1]
                slope_right = (right - vector[-pad_width[1] - 2])
                vector[-pad_width[1]:] = \
                    right + np.arange(1, pad_width[1] + 1) * slope_right
            return vector
        xp = np.pad(x, pad_widths, pad_smooth)
    elif mode == 'antisymmetric':
        def pad_antisymmetric(vector, pad_width, iaxis, kwargs):
            # implement by sign-flipping alternate segments of a symmetric
            # extension
            npad_l, npad_r = pad_width
            vsize_nonpad = vector.size - npad_l - npad_r
            # Note: must modify vector in-place.  The stop index is written
            # as ``vector.size - npad_r`` so that npad_r == 0 selects
            # through the end of the array (a -0 stop would select nothing).
            vector[:] = np.pad(vector[npad_l:vector.size - npad_r],
                               pad_width, mode='symmetric')
            vp = vector
            r_edge = npad_l + vsize_nonpad - 1
            l_edge = npad_l
            # width of each reflected segment
            seg_width = vsize_nonpad
            # flip reflected segments on the right of the original signal
            n = 1
            while r_edge <= vp.size:
                segment_slice = slice(r_edge + 1,
                                      min(r_edge + 1 + seg_width, vp.size))
                if n % 2:
                    vp[segment_slice] *= -1
                r_edge += seg_width
                n += 1

            # flip reflected segments on the left of the original signal
            n = 1
            while l_edge >= 0:
                segment_slice = slice(max(0, l_edge - seg_width), l_edge)
                if n % 2:
                    vp[segment_slice] *= -1
                l_edge -= seg_width
                n += 1
            return vector
        xp = np.pad(x, pad_widths, pad_antisymmetric)
    elif mode == 'antireflect':
        xp = np.pad(x, pad_widths, mode='reflect', reflect_type='odd')
    else:
        raise ValueError(
            ("unsupported mode: {}. The supported modes are {}").format(
                mode, Modes.modes))
    return xp
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_functions.py ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
2
+ # Copyright (c) 2012-2016 The PyWavelets Developers
3
+ # <https://github.com/PyWavelets/pywt>
4
+ # See COPYING for license details.
5
+
6
+ """
7
+ Other wavelet related functions.
8
+ """
9
+
10
+ from __future__ import division, print_function, absolute_import
11
+
12
+ import warnings
13
+
14
+ import numpy as np
15
+ from numpy.fft import fft
16
+
17
+ from ._extensions._pywt import DiscreteContinuousWavelet, Wavelet, ContinuousWavelet
18
+
19
+
20
+ __all__ = ["integrate_wavelet", "central_frequency", "scale2frequency", "qmf",
21
+ "orthogonal_filter_bank",
22
+ "intwave", "centrfrq", "scal2frq", "orthfilt"]
23
+
24
+
25
+ _DEPRECATION_MSG = ("`{old}` has been renamed to `{new}` and will "
26
+ "be removed in a future version of pywt.")
27
+
28
+
29
+ def _integrate(arr, step):
30
+ integral = np.cumsum(arr)
31
+ integral *= step
32
+ return integral
33
+
34
+
35
def intwave(*args, **kwargs):
    """Deprecated alias for :func:`integrate_wavelet`."""
    warnings.warn(
        _DEPRECATION_MSG.format(old='intwave', new='integrate_wavelet'),
        DeprecationWarning)
    return integrate_wavelet(*args, **kwargs)
39
+
40
+
41
def centrfrq(*args, **kwargs):
    """Deprecated alias for :func:`central_frequency`."""
    warnings.warn(
        _DEPRECATION_MSG.format(old='centrfrq', new='central_frequency'),
        DeprecationWarning)
    return central_frequency(*args, **kwargs)
45
+
46
+
47
def scal2frq(*args, **kwargs):
    """Deprecated alias for :func:`scale2frequency`."""
    warnings.warn(
        _DEPRECATION_MSG.format(old='scal2frq', new='scale2frequency'),
        DeprecationWarning)
    return scale2frequency(*args, **kwargs)
51
+
52
+
53
def orthfilt(*args, **kwargs):
    """Deprecated alias for :func:`orthogonal_filter_bank`."""
    warnings.warn(
        _DEPRECATION_MSG.format(old='orthfilt', new='orthogonal_filter_bank'),
        DeprecationWarning)
    return orthogonal_filter_bank(*args, **kwargs)
57
+
58
+
59
def integrate_wavelet(wavelet, precision=8):
    """
    Integrate `psi` wavelet function from -Inf to x using the rectangle
    integration method.

    Parameters
    ----------
    wavelet : Wavelet instance or str
        Wavelet to integrate.  If a string, should be the name of a wavelet.
    precision : int, optional
        Precision that will be used for wavelet function
        approximation computed with the wavefun(level=precision)
        Wavelet's method (default: 8).

    Returns
    -------
    [int_psi, x] :
        for orthogonal wavelets
    [int_psi_d, int_psi_r, x] :
        for other wavelets

    Examples
    --------
    >>> from pywt import Wavelet, integrate_wavelet
    >>> wavelet1 = Wavelet('db2')
    >>> [int_psi, x] = integrate_wavelet(wavelet1, precision=5)
    >>> wavelet2 = Wavelet('bior1.3')
    >>> [int_psi_d, int_psi_r, x] = integrate_wavelet(wavelet2, precision=5)

    """
    # FIXME: this function should really use scipy.integrate.quad

    # a (psi, x) tuple/list is a deprecated way to pass a sampled signal
    if type(wavelet) in (tuple, list):
        warnings.warn("Integration of a general signal is deprecated "
                      "and will be removed in a future version of pywt.",
                      DeprecationWarning)
    elif not isinstance(wavelet, (Wavelet, ContinuousWavelet)):
        wavelet = DiscreteContinuousWavelet(wavelet)

    if type(wavelet) in (tuple, list):
        psi, x = np.asarray(wavelet[0]), np.asarray(wavelet[1])
        return _integrate(psi, x[1] - x[0]), x

    approximations = wavelet.wavefun(precision)
    grid = approximations[-1]
    step = grid[1] - grid[0]

    n_parts = len(approximations)
    if n_parts == 2:
        # continuous wavelet: (psi, x)
        return _integrate(approximations[0], step), grid
    if n_parts == 3:
        # orthogonal wavelet: (phi, psi, x)
        return _integrate(approximations[1], step), grid
    # biorthogonal wavelet: (phi_d, psi_d, phi_r, psi_r, x)
    psi_d, psi_r = approximations[1], approximations[3]
    return _integrate(psi_d, step), _integrate(psi_r, step), grid
120
+
121
+
122
def central_frequency(wavelet, precision=8):
    """
    Computes the central frequency of the `psi` wavelet function.

    Parameters
    ----------
    wavelet : Wavelet instance, str or tuple
        Wavelet to integrate.  If a string, should be the name of a wavelet.
    precision : int, optional
        Precision that will be used for wavelet function
        approximation computed with the wavefun(level=precision)
        Wavelet's method (default: 8).

    Returns
    -------
    scalar

    """
    if not isinstance(wavelet, (Wavelet, ContinuousWavelet)):
        wavelet = DiscreteContinuousWavelet(wavelet)

    approximations = wavelet.wavefun(precision)

    if len(approximations) == 2:
        psi, x = approximations
    else:
        # (psi, x) for (phi, psi, x)
        # (psi_d, x) for (phi_d, psi_d, phi_r, psi_r, x)
        psi, x = approximations[1], approximations[-1]

    domain = float(x[-1] - x[0])
    assert domain > 0

    # dominant non-DC frequency bin of psi
    index = np.argmax(abs(fft(psi)[1:])) + 2
    if index > len(psi) / 2:
        # fold a bin from the mirrored (negative-frequency) half
        index = len(psi) - index + 2

    return 1.0 / (domain / (index - 1))
161
+
162
+
163
def scale2frequency(wavelet, scale, precision=8):
    """Convert a CWT scale to its (normalized) center frequency.

    Parameters
    ----------
    wavelet : Wavelet instance or str
        Wavelet to integrate.  If a string, should be the name of a wavelet.
    scale : scalar
    precision : int, optional
        Precision that will be used for wavelet function approximation computed
        with ``wavelet.wavefun(level=precision)``.  Default is 8.

    Returns
    -------
    freq : scalar

    """
    center = central_frequency(wavelet, precision=precision)
    return center / scale
181
+
182
+
183
def qmf(filt):
    """
    Returns the Quadrature Mirror Filter(QMF).

    The magnitude response of QMF is mirror image about `pi/2` of that of the
    input filter.

    Parameters
    ----------
    filt : array_like
        Input filter for which QMF needs to be computed.

    Returns
    -------
    qm_filter : ndarray
        Quadrature mirror of the input filter.

    """
    # reverse the filter, then negate every other tap
    mirror = np.array(filt)[::-1]
    mirror[1::2] = -mirror[1::2]
    return mirror
204
+
205
+
206
def orthogonal_filter_bank(scaling_filter):
    """
    Returns the orthogonal filter bank.

    The orthogonal filter bank consists of the HPFs and LPFs at
    decomposition and reconstruction stage for the input scaling filter.

    Parameters
    ----------
    scaling_filter : array_like
        Input scaling filter (father wavelet).

    Returns
    -------
    orth_filt_bank : tuple of 4 ndarrays
        The orthogonal filter bank of the input scaling filter in the order :
        1] Decomposition LPF
        2] Decomposition HPF
        3] Reconstruction LPF
        4] Reconstruction HPF

    """
    if len(scaling_filter) % 2 != 0:
        raise ValueError("`scaling_filter` length has to be even.")

    scaling_filter = np.asarray(scaling_filter, dtype=np.float64)

    # normalize so the lowpass filter sums to sqrt(2)
    rec_lo = np.sqrt(2) * scaling_filter / np.sum(scaling_filter)
    dec_lo = rec_lo[::-1]

    # highpass filters are the quadrature mirrors of the lowpass ones
    rec_hi = qmf(rec_lo)
    dec_hi = rec_hi[::-1]

    return (dec_lo, dec_hi, rec_lo, rec_hi)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_mra.py ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial, reduce
2
+
3
+ import numpy as np
4
+
5
+ from ._multilevel import (_prep_axes_wavedecn, wavedec, wavedec2, wavedecn,
6
+ waverec, waverec2, waverecn)
7
+ from ._swt import iswt, iswt2, iswtn, swt, swt2, swt_max_level, swtn
8
+ from ._utils import _modes_per_axis, _wavelets_per_axis
9
+
10
+ __all__ = ["mra", "mra2", "mran", "imra", "imra2", "imran"]
11
+
12
+
13
def mra(data, wavelet, level=None, axis=-1, transform='swt',
        mode='periodization'):
    """Forward 1D multiresolution analysis.

    It is a projection onto the wavelet subspaces.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string
        Wavelet to use
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the `dwt_max_level` function.
    axis: int, optional
        Axis over which to compute the DWT. If not given, the last axis is
        used. Currently only available when ``transform='dwt'``.
    transform : {'dwt', 'swt'}
        Whether to use the DWT or SWT for the transforms.
    mode : str, optional
        Signal extension mode, see `Modes` (default: 'symmetric'). This option
        is only used when transform='dwt'.

    Returns
    -------
    [cAn, {details_level_n}, ... {details_level_1}] : list
        For more information, see the detailed description in `wavedec`

    See Also
    --------
    imra, swt

    Notes
    -----
    This is sometimes referred to as an additive decomposition because the
    inverse transform (``imra``) is just the sum of the coefficient arrays
    [1]_. The decomposition using ``transform='dwt'`` corresponds to section
    2.2 while that using an undecimated transform (``transform='swt'``) is
    described in section 3.2 and appendix A.

    This transform does not share the variance partition property of ``swt``
    with `norm=True`. It does however, result in coefficients that are
    temporally aligned regardless of the symmetry of the wavelet used.

    The redundancy of this transform is ``(level + 1)``.

    References
    ----------
    .. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551

    """
    # Select matched forward/inverse transform pairs up front so the
    # projection loop below is transform-agnostic.
    if transform == 'swt':
        if mode != 'periodization':
            raise ValueError(
                "transform swt only supports mode='periodization'")
        kwargs = dict(wavelet=wavelet, axis=axis, norm=True)
        forward = partial(swt, level=level, trim_approx=True, **kwargs)
        inverse = partial(iswt, **kwargs)
        is_swt = True
    elif transform == 'dwt':
        kwargs = dict(wavelet=wavelet, mode=mode, axis=axis)
        forward = partial(wavedec, level=level, **kwargs)
        inverse = partial(waverec, **kwargs)
        is_swt = False
    else:
        raise ValueError("unrecognized transform: {}".format(transform))

    wav_coeffs = forward(data)

    mra_coeffs = []
    nc = len(wav_coeffs)

    if is_swt:
        # replicate same zeros array to save memory (SWT coefficients all
        # have the same shape, so a single shared zeros array suffices)
        z = np.zeros_like(wav_coeffs[0])
        tmp = [z, ] * nc
    else:
        # zero arrays have variable size in DWT case
        tmp = [np.zeros_like(c) for c in wav_coeffs]

    # Project onto each wavelet subspace by reconstructing with all other
    # coefficient bands zeroed out. NOTE: ``tmp`` is mutated in place and
    # restored after each iteration, so the order of these statements matters.
    for j in range(nc):
        # tmp has arrays of zeros except for the jth entry
        tmp[j] = wav_coeffs[j]

        # reconstruct
        rec = inverse(tmp)
        if rec.shape != data.shape:
            # trim any excess coefficients
            rec = rec[tuple([slice(sz) for sz in data.shape])]
        mra_coeffs.append(rec)

        # restore zeros
        if is_swt:
            tmp[j] = z
        else:
            tmp[j] = np.zeros_like(tmp[j])
    return mra_coeffs
114
+
115
+
116
def imra(mra_coeffs):
    """Inverse 1D multiresolution analysis via summation.

    Parameters
    ----------
    mra_coeffs : list of ndarray
        Multiresolution analysis coefficients as returned by `mra`.

    Returns
    -------
    rec : ndarray
        The reconstructed signal.

    See Also
    --------
    mra

    References
    ----------
    .. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    # The additive decomposition makes inversion a plain left-to-right sum.
    total = mra_coeffs[0]
    for band in mra_coeffs[1:]:
        total = total + band
    return total
141
+
142
+
143
def mra2(data, wavelet, level=None, axes=(-2, -1), transform='swt2',
         mode='periodization'):
    """Forward 2D multiresolution analysis.

    It is a projection onto wavelet subspaces.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in `axes`.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the `dwt_max_level` function.
    axes : 2-tuple of ints, optional
        Axes over which to compute the DWT. Repeated elements are not allowed.
        Currently only available when ``transform='dwt2'``.
    transform : {'dwt2', 'swt2'}
        Whether to use the DWT or SWT for the transforms.
    mode : str or 2-tuple of str, optional
        Signal extension mode, see `Modes` (default: 'symmetric'). This option
        is only used when transform='dwt2'.

    Returns
    -------
    coeffs : list
        For more information, see the detailed description in `wavedec2`

    Notes
    -----
    This is sometimes referred to as an additive decomposition because the
    inverse transform (``imra2``) is just the sum of the coefficient arrays
    [1]_. The decomposition using ``transform='dwt'`` corresponds to section
    2.2 while that using an undecimated transform (``transform='swt'``) is
    described in section 3.2 and appendix A.

    This transform does not share the variance partition property of ``swt2``
    with `norm=True`. It does however, result in coefficients that are
    temporally aligned regardless of the symmetry of the wavelet used.

    The redundancy of this transform is ``3 * level + 1``.

    See Also
    --------
    imra2, swt2

    References
    ----------
    .. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    # Select matched forward/inverse transform pairs up front so the
    # projection loops below are transform-agnostic.
    if transform == 'swt2':
        if mode != 'periodization':
            raise ValueError(
                "transform swt only supports mode='periodization'")
        if level is None:
            level = min(swt_max_level(s) for s in data.shape)
        kwargs = dict(wavelet=wavelet, axes=axes, norm=True)
        forward = partial(swt2, level=level, trim_approx=True, **kwargs)
        inverse = partial(iswt2, **kwargs)
    elif transform == 'dwt2':
        kwargs = dict(wavelet=wavelet, mode=mode, axes=axes)
        forward = partial(wavedec2, level=level, **kwargs)
        inverse = partial(waverec2, **kwargs)
    else:
        raise ValueError("unrecognized transform: {}".format(transform))

    wav_coeffs = forward(data)

    # Build a template coefficient list of all zeros: entry 0 is the
    # approximation band, entries 1..nc-1 are 3-element detail lists.
    mra_coeffs = []
    nc = len(wav_coeffs)
    z = np.zeros_like(wav_coeffs[0])
    tmp = [z]
    for j in range(1, nc):
        tmp.append([np.zeros_like(c) for c in wav_coeffs[j]])

    # Project onto the approximation subspace first.
    # tmp has arrays of zeros except for the jth entry
    tmp[0] = wav_coeffs[0]
    # reconstruct
    rec = inverse(tmp)
    if rec.shape != data.shape:
        # trim any excess coefficients
        rec = rec[tuple([slice(sz) for sz in data.shape])]
    mra_coeffs.append(rec)
    # restore zeros
    tmp[0] = z

    # Then project onto each of the 3 detail subbands of every level.
    # NOTE: ``tmp`` is mutated in place and restored after each
    # reconstruction, so the order of these statements matters.
    for j in range(1, nc):
        dcoeffs = []
        for n in range(3):
            # tmp has arrays of zeros except for the jth entry
            z = tmp[j][n]
            tmp[j][n] = wav_coeffs[j][n]
            # reconstruct
            rec = inverse(tmp)
            if rec.shape != data.shape:
                # trim any excess coefficients
                rec = rec[tuple([slice(sz) for sz in data.shape])]
            dcoeffs.append(rec)
            # restore zeros
            tmp[j][n] = z
        mra_coeffs.append(tuple(dcoeffs))
    return mra_coeffs
250
+
251
+
252
def imra2(mra_coeffs):
    """Inverse 2D multiresolution analysis via summation.

    Parameters
    ----------
    mra_coeffs : list
        Multiresolution analysis coefficients as returned by `mra2`:
        ``[approx, (detail, detail, detail), ...]``.

    Returns
    -------
    rec : ndarray
        The reconstructed signal.

    See Also
    --------
    mra2

    References
    ----------
    .. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    # Copy the approximation band before accumulating; using ``rec += v``
    # directly on mra_coeffs[0] would silently modify the caller's array
    # in place.
    rec = np.asarray(mra_coeffs[0]).copy()
    for j in range(1, len(mra_coeffs)):
        # each remaining entry is a 3-tuple of detail subband projections
        for n in range(3):
            rec += mra_coeffs[j][n]
    return rec
281
+
282
+
283
def mran(data, wavelet, level=None, axes=None, transform='swtn',
         mode='periodization'):
    """Forward nD multiresolution analysis.

    It is a projection onto the wavelet subspaces.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in `axes`.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the `dwt_max_level` function.
    axes : tuple of ints, optional
        Axes over which to compute the DWT. Repeated elements are not allowed.
    transform : {'dwtn', 'swtn'}
        Whether to use the DWT or SWT for the transforms.
    mode : str or tuple of str, optional
        Signal extension mode, see `Modes` (default: 'symmetric'). This option
        is only used when transform='dwtn'.

    Returns
    -------
    coeffs : list
        For more information, see the detailed description in `wavedecn`.

    See Also
    --------
    imran, swtn

    Notes
    -----
    This is sometimes referred to as an additive decomposition because the
    inverse transform (``imran``) is just the sum of the coefficient arrays
    [1]_. The decomposition using ``transform='dwt'`` corresponds to section
    2.2 while that using an undecimated transform (``transform='swt'``) is
    described in section 3.2 and appendix A.

    This transform does not share the variance partition property of ``swtn``
    with `norm=True`. It does however, result in coefficients that are
    temporally aligned regardless of the symmetry of the wavelet used.

    The redundancy of this transform is ``(2**n - 1) * level + 1`` where ``n``
    corresponds to the number of axes transformed.

    References
    ----------
    .. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    # Normalize axes and expand the wavelet argument to one wavelet per axis.
    axes, axes_shapes, ndim_transform = _prep_axes_wavedecn(data.shape, axes)
    wavelets = _wavelets_per_axis(wavelet, axes)

    # Select matched forward/inverse transform pairs up front so the
    # projection loops below are transform-agnostic.
    if transform == 'swtn':
        if mode != 'periodization':
            raise ValueError(
                "transform swt only supports mode='periodization'")
        if level is None:
            level = min(swt_max_level(s) for s in data.shape)
        kwargs = dict(wavelet=wavelets, axes=axes, norm=True)
        forward = partial(swtn, level=level, trim_approx=True, **kwargs)
        inverse = partial(iswtn, **kwargs)
    elif transform == 'dwtn':
        modes = _modes_per_axis(mode, axes)
        kwargs = dict(wavelet=wavelets, mode=modes, axes=axes)
        forward = partial(wavedecn, level=level, **kwargs)
        inverse = partial(waverecn, **kwargs)
    else:
        raise ValueError("unrecognized transform: {}".format(transform))

    wav_coeffs = forward(data)

    # Build a template coefficient list of all zeros: entry 0 is the
    # approximation array, entries 1..nc-1 are dicts of detail subbands
    # keyed by 'a'/'d' strings (wavedecn/swtn layout).
    mra_coeffs = []
    nc = len(wav_coeffs)
    z = np.zeros_like(wav_coeffs[0])
    tmp = [z]
    for j in range(1, nc):
        tmp.append({k: np.zeros_like(v) for k, v in wav_coeffs[j].items()})

    # Project onto the approximation subspace first.
    # tmp has arrays of zeros except for the jth entry
    tmp[0] = wav_coeffs[0]
    # reconstruct
    rec = inverse(tmp)
    if rec.shape != data.shape:
        # trim any excess coefficients
        rec = rec[tuple([slice(sz) for sz in data.shape])]
    mra_coeffs.append(rec)
    # restore zeros
    tmp[0] = z

    # Then project onto each detail subband of every level. NOTE: ``tmp``
    # is mutated in place and restored after each reconstruction, so the
    # order of these statements matters.
    for j in range(1, nc):
        dcoeffs = {}
        dkeys = list(wav_coeffs[j].keys())
        for k in dkeys:
            # tmp has arrays of zeros except for the jth entry
            z = tmp[j][k]
            tmp[j][k] = wav_coeffs[j][k]
            # reconstruct
            rec = inverse(tmp)
            if rec.shape != data.shape:
                # trim any excess coefficients
                rec = rec[tuple([slice(sz) for sz in data.shape])]
            dcoeffs[k] = rec
            # restore zeros
            tmp[j][k] = z
        mra_coeffs.append(dcoeffs)
    return mra_coeffs
397
+
398
+
399
def imran(mra_coeffs):
    """Inverse nD multiresolution analysis via summation.

    Parameters
    ----------
    mra_coeffs : list
        Multiresolution analysis coefficients as returned by `mran`:
        ``[approx, {subband: detail, ...}, ...]``.

    Returns
    -------
    rec : ndarray
        The reconstructed signal.

    See Also
    --------
    mran

    References
    ----------
    .. [1] Donald B. Percival and Harold O. Mofjeld. Analysis of Subtidal
        Coastal Sea Level Fluctuations Using Wavelets. Journal of the American
        Statistical Association Vol. 92, No. 439 (Sep., 1997), pp. 868-880.
        https://doi.org/10.2307/2965551
    """
    # Copy the approximation band before accumulating; using ``rec += v``
    # directly on mra_coeffs[0] would silently modify the caller's array
    # in place.
    rec = np.asarray(mra_coeffs[0]).copy()
    for j in range(1, len(mra_coeffs)):
        # each remaining entry is a dict of detail subband projections
        for v in mra_coeffs[j].values():
            rec += v
    return rec
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_multidim.py ADDED
@@ -0,0 +1,314 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
2
+ # Copyright (c) 2012-2016 The PyWavelets Developers
3
+ # <https://github.com/PyWavelets/pywt>
4
+ # See COPYING for license details.
5
+
6
+ """
7
+ 2D and nD Discrete Wavelet Transforms and Inverse Discrete Wavelet Transforms.
8
+ """
9
+
10
+ from __future__ import division, print_function, absolute_import
11
+
12
+ from itertools import product
13
+
14
+ import numpy as np
15
+
16
+ from ._c99_config import _have_c99_complex
17
+ from ._extensions._dwt import dwt_axis, idwt_axis
18
+ from ._utils import _wavelets_per_axis, _modes_per_axis
19
+
20
+
21
+ __all__ = ['dwt2', 'idwt2', 'dwtn', 'idwtn']
22
+
23
+
24
def dwt2(data, wavelet, mode='symmetric', axes=(-2, -1)):
    """
    2D Discrete Wavelet Transform.

    Parameters
    ----------
    data : array_like
        2D array with input data
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or 2-tuple of strings, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple of modes specifying the mode to use on each axis in
        ``axes``.
    axes : 2-tuple of ints, optional
        Axes over which to compute the DWT. Repeated elements mean the DWT will
        be performed multiple times along these axes.

    Returns
    -------
    (cA, (cH, cV, cD)) : tuple
        Approximation, horizontal detail, vertical detail and diagonal
        detail coefficients respectively. Horizontal refers to array axis 0
        (or ``axes[0]`` for user-specified ``axes``).

    Examples
    --------
    >>> import numpy as np
    >>> import pywt
    >>> data = np.ones((4,4), dtype=np.float64)
    >>> coeffs = pywt.dwt2(data, 'haar')
    >>> cA, (cH, cV, cD) = coeffs
    >>> cA
    array([[ 2.,  2.],
           [ 2.,  2.]])
    >>> cV
    array([[ 0.,  0.],
           [ 0.,  0.]])

    """
    axes = tuple(axes)
    data = np.asarray(data)
    # validate before delegating to the general n-dimensional transform
    if len(axes) != 2:
        raise ValueError("Expected 2 axes")
    if data.ndim < np.unique(axes).size:
        raise ValueError("Input array has fewer dimensions than the specified "
                         "axes")

    # dwt2 is just dwtn restricted to two axes, repackaged into the
    # conventional (cA, (cH, cV, cD)) layout.
    subbands = dwtn(data, wavelet, mode, axes)
    return subbands['aa'], (subbands['da'], subbands['ad'], subbands['dd'])
75
+
76
+
77
def idwt2(coeffs, wavelet, mode='symmetric', axes=(-2, -1)):
    """
    2-D Inverse Discrete Wavelet Transform.

    Reconstructs data from coefficient arrays.

    Parameters
    ----------
    coeffs : tuple
        (cA, (cH, cV, cD)) A tuple with approximation coefficients and three
        details coefficients 2D arrays like from ``dwt2``. If any of these
        components are set to ``None``, it will be treated as zeros.
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or 2-tuple of strings, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple of modes specifying the mode to use on each axis in
        ``axes``.
    axes : 2-tuple of ints, optional
        Axes over which to compute the IDWT. Repeated elements mean the IDWT
        will be performed multiple times along these axes.

    Examples
    --------
    >>> import numpy as np
    >>> import pywt
    >>> data = np.array([[1,2], [3,4]], dtype=np.float64)
    >>> coeffs = pywt.dwt2(data, 'haar')
    >>> pywt.idwt2(coeffs, 'haar')
    array([[ 1.,  2.],
           [ 3.,  4.]])

    """
    # L -low-pass data, H - high-pass data
    LL, (HL, LH, HH) = coeffs
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("Expected 2 axes")

    # Repackage the conventional tuple layout into the subband dictionary
    # understood by the general n-dimensional inverse transform.
    subband_dict = {'aa': LL, 'da': HL, 'ad': LH, 'dd': HH}
    return idwtn(subband_dict, wavelet, mode, axes)
119
+
120
+
121
def dwtn(data, wavelet, mode='symmetric', axes=None):
    """
    Single-level n-dimensional Discrete Wavelet Transform.

    Parameters
    ----------
    data : array_like
        n-dimensional array with input data.
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or tuple of string, optional
        Signal extension mode used in the decomposition,
        see :ref:`Modes <ref-modes>`. This can also be a tuple of modes
        specifying the mode to use on each axis in ``axes``.
    axes : sequence of ints, optional
        Axes over which to compute the DWT. Repeated elements mean the DWT will
        be performed multiple times along these axes. A value of ``None`` (the
        default) selects all axes.

        Axes may be repeated, but information about the original size may be
        lost if it is not divisible by ``2 ** nrepeats``. The reconstruction
        will be larger, with additional values derived according to the
        ``mode`` parameter. ``pywt.wavedecn`` should be used for multilevel
        decomposition.

    Returns
    -------
    coeffs : dict
        Results are arranged in a dictionary, where key specifies
        the transform type on each dimension and value is a n-dimensional
        coefficients array.

        For example, for a 2D case the result will look something like this::

            {'aa': <coeffs>  # A(LL) - approx. on 1st dim, approx. on 2nd dim
             'ad': <coeffs>  # V(LH) - approx. on 1st dim, det. on 2nd dim
             'da': <coeffs>  # H(HL) - det. on 1st dim, approx. on 2nd dim
             'dd': <coeffs>  # D(HH) - det. on 1st dim, det. on 2nd dim
            }

        For user-specified ``axes``, the order of the characters in the
        dictionary keys map to the specified ``axes``.

    """
    data = np.asarray(data)
    # Without C99 complex support, transform real and imaginary parts
    # separately and recombine.
    if not _have_c99_complex and np.iscomplexobj(data):
        real = dwtn(data.real, wavelet, mode, axes)
        imag = dwtn(data.imag, wavelet, mode, axes)
        return dict((k, real[k] + 1j * imag[k]) for k in real.keys())

    if data.dtype == np.dtype('object'):
        raise TypeError("Input must be a numeric array-like")
    if data.ndim < 1:
        raise ValueError("Input data must be at least 1D")

    if axes is None:
        axes = range(data.ndim)
    # normalize negative axis indices
    axes = [a + data.ndim if a < 0 else a for a in axes]

    # expand mode/wavelet arguments to one entry per transformed axis
    modes = _modes_per_axis(mode, axes)
    wavelets = _wavelets_per_axis(wavelet, axes)

    # Apply the 1D DWT successively along each axis. Each pass doubles the
    # number of subbands, appending 'a' (approximation) or 'd' (detail) to
    # the subband key for the axis just transformed.
    coeffs = [('', data)]
    for axis, wav, mode in zip(axes, wavelets, modes):
        new_coeffs = []
        for subband, x in coeffs:
            cA, cD = dwt_axis(x, wav, mode, axis)
            new_coeffs.extend([(subband + 'a', cA),
                               (subband + 'd', cD)])
        coeffs = new_coeffs
    return dict(coeffs)
193
+
194
+
195
+ def _fix_coeffs(coeffs):
196
+ missing_keys = [k for k, v in coeffs.items() if v is None]
197
+ if missing_keys:
198
+ raise ValueError(
199
+ "The following detail coefficients were set to None:\n"
200
+ "{0}\n"
201
+ "For multilevel transforms, rather than setting\n"
202
+ "\tcoeffs[key] = None\n"
203
+ "use\n"
204
+ "\tcoeffs[key] = np.zeros_like(coeffs[key])\n".format(
205
+ missing_keys))
206
+
207
+ invalid_keys = [k for k, v in coeffs.items() if
208
+ not set(k) <= set('ad')]
209
+ if invalid_keys:
210
+ raise ValueError(
211
+ "The following invalid keys were found in the detail "
212
+ "coefficient dictionary: {}.".format(invalid_keys))
213
+
214
+ key_lengths = [len(k) for k in coeffs.keys()]
215
+ if len(np.unique(key_lengths)) > 1:
216
+ raise ValueError(
217
+ "All detail coefficient names must have equal length.")
218
+
219
+ return dict((k, np.asarray(v)) for k, v in coeffs.items())
220
+
221
+
222
def idwtn(coeffs, wavelet, mode='symmetric', axes=None):
    """
    Single-level n-dimensional Inverse Discrete Wavelet Transform.

    Parameters
    ----------
    coeffs: dict
        Dictionary as in output of ``dwtn``. Missing or ``None`` items
        will be treated as zeros.
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or list of string, optional
        Signal extension mode used in the decomposition,
        see :ref:`Modes <ref-modes>`. This can also be a tuple of modes
        specifying the mode to use on each axis in ``axes``.
    axes : sequence of ints, optional
        Axes over which to compute the IDWT. Repeated elements mean the IDWT
        will be performed multiple times along these axes. A value of ``None``
        (the default) selects all axes.

        For the most accurate reconstruction, the axes should be provided in
        the same order as they were provided to ``dwtn``.

    Returns
    -------
    data: ndarray
        Original signal reconstructed from input data.

    """

    # drop the keys corresponding to value = None
    # (previously this filtering statement was duplicated; one pass suffices)
    coeffs = dict((k, v) for k, v in coeffs.items() if v is not None)

    # Raise error for invalid key combinations
    coeffs = _fix_coeffs(coeffs)

    # Without C99 complex support, invert real and imaginary parts
    # separately and recombine.
    if (not _have_c99_complex and
            any(np.iscomplexobj(v) for v in coeffs.values())):
        real_coeffs = dict((k, v.real) for k, v in coeffs.items())
        imag_coeffs = dict((k, v.imag) for k, v in coeffs.items())
        return (idwtn(real_coeffs, wavelet, mode, axes) +
                1j * idwtn(imag_coeffs, wavelet, mode, axes))

    # key length matches the number of axes transformed
    ndim_transform = max(len(key) for key in coeffs.keys())

    try:
        coeff_shapes = (v.shape for k, v in coeffs.items()
                        if v is not None and len(k) == ndim_transform)
        coeff_shape = next(coeff_shapes)
    except StopIteration:
        raise ValueError("`coeffs` must contain at least one non-null wavelet "
                         "band")
    if any(s != coeff_shape for s in coeff_shapes):
        raise ValueError("`coeffs` must all be of equal size (or None)")

    if axes is None:
        axes = range(ndim_transform)
        ndim = ndim_transform
    else:
        ndim = len(coeff_shape)
    # normalize negative axis indices
    axes = [a + ndim if a < 0 else a for a in axes]

    modes = _modes_per_axis(mode, axes)
    wavelets = _wavelets_per_axis(wavelet, axes)
    # Invert the per-axis transforms in reverse order of application,
    # merging pairs of subbands (key + 'a', key + 'd') at each step until
    # only the '' (fully reconstructed) entry remains.
    for key_length, (axis, wav, mode) in reversed(
            list(enumerate(zip(axes, wavelets, modes)))):
        if axis < 0 or axis >= ndim:
            raise np.AxisError("Axis greater than data dimensions")

        new_coeffs = {}
        new_keys = [''.join(coef) for coef in product('ad', repeat=key_length)]

        for key in new_keys:
            L = coeffs.get(key + 'a', None)
            H = coeffs.get(key + 'd', None)
            if L is not None and H is not None:
                if L.dtype != H.dtype:
                    # upcast to a common dtype (float64 or complex128)
                    if L.dtype.kind == 'c' or H.dtype.kind == 'c':
                        dtype = np.complex128
                    else:
                        dtype = np.float64
                    L = np.asarray(L, dtype=dtype)
                    H = np.asarray(H, dtype=dtype)
            new_coeffs[key] = idwt_axis(L, H, wav, mode, axis)
        coeffs = new_coeffs

    return coeffs['']
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_multilevel.py ADDED
@@ -0,0 +1,1561 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
2
+ # Copyright (c) 2012-2018 The PyWavelets Developers
3
+ # <https://github.com/PyWavelets/pywt>
4
+ # See COPYING for license details.
5
+
6
+ """
7
+ Multilevel 1D and 2D Discrete Wavelet Transform
8
+ and Inverse Discrete Wavelet Transform.
9
+ """
10
+
11
+ from __future__ import division, print_function, absolute_import
12
+
13
+ import numbers
14
+ import warnings
15
+ from itertools import product
16
+ from copy import copy
17
+ import numpy as np
18
+
19
+ from ._extensions._pywt import Wavelet, Modes
20
+ from ._extensions._dwt import dwt_max_level
21
+ from ._dwt import dwt, idwt, dwt_coeff_len
22
+ from ._multidim import dwt2, idwt2, dwtn, idwtn, _fix_coeffs
23
+ from ._utils import _as_wavelet, _wavelets_per_axis, _modes_per_axis
24
+
25
+ __all__ = ['wavedec', 'waverec', 'wavedec2', 'waverec2', 'wavedecn',
26
+ 'waverecn', 'coeffs_to_array', 'array_to_coeffs', 'ravel_coeffs',
27
+ 'unravel_coeffs', 'dwtn_max_level', 'wavedecn_size',
28
+ 'wavedecn_shapes', 'fswavedecn', 'fswaverecn', 'FswavedecnResult']
29
+
30
+
31
def _check_level(sizes, dec_lens, level):
    """Resolve/validate a decomposition level against the data sizes.

    Returns the maximum feasible level when ``level`` is None; raises for
    negative levels and warns when the requested level exceeds the maximum
    (boundary effects will dominate).
    """
    # promote scalars so per-axis handling below is uniform
    if np.isscalar(sizes):
        sizes = (sizes, )
    if np.isscalar(dec_lens):
        dec_lens = (dec_lens, )
    per_axis_max = [dwt_max_level(s, d) for s, d in zip(sizes, dec_lens)]
    max_level = np.min(per_axis_max)
    if level is None:
        return max_level
    if level < 0:
        raise ValueError(
            "Level value of %d is too low . Minimum level is 0." % level)
    if level > max_level:
        warnings.warn(
            ("Level value of {} is too high: all coefficients will experience "
             "boundary effects.").format(level))
    return level
47
+
48
+
49
def wavedec(data, wavelet, mode='symmetric', level=None, axis=-1):
    """
    Multilevel 1D Discrete Wavelet Transform of data.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string
        Wavelet to use
    mode : str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the ``dwt_max_level`` function.
    axis: int, optional
        Axis over which to compute the DWT. If not given, the
        last axis is used.

    Returns
    -------
    [cA_n, cD_n, cD_n-1, ..., cD2, cD1] : list
        Ordered list of coefficients arrays
        where ``n`` denotes the level of decomposition. The first element
        (``cA_n``) of the result is approximation coefficients array and the
        following elements (``cD_n`` - ``cD_1``) are details coefficients
        arrays.

    Examples
    --------
    >>> from pywt import wavedec
    >>> coeffs = wavedec([1,2,3,4,5,6,7,8], 'db1', level=2)
    >>> cA2, cD2, cD1 = coeffs
    >>> cD1
    array([-0.70710678, -0.70710678, -0.70710678, -0.70710678])
    >>> cD2
    array([-2., -2.])
    >>> cA2
    array([ 5., 13.])

    """
    data = np.asarray(data)

    wavelet = _as_wavelet(wavelet)
    try:
        axes_shape = data.shape[axis]
    except IndexError:
        raise np.AxisError("Axis greater than data dimensions")
    # Resolve/validate the number of levels against the axis length.
    level = _check_level(axes_shape, wavelet.dec_len, level)

    coeffs_list = []

    # Repeatedly split the running approximation ``a`` into a coarser
    # approximation and one detail array per level.
    a = data
    for i in range(level):
        a, d = dwt(a, wavelet, mode, axis)
        coeffs_list.append(d)

    # Results are assembled coarsest-first: [cA_n, cD_n, ..., cD_1].
    coeffs_list.append(a)
    coeffs_list.reverse()

    return coeffs_list
110
+
111
+
112
def waverec(coeffs, wavelet, mode='symmetric', axis=-1):
    """
    Multilevel 1D Inverse Discrete Wavelet Transform.

    Parameters
    ----------
    coeffs : array_like
        Coefficients list [cAn, cDn, cDn-1, ..., cD2, cD1]
    wavelet : Wavelet object or name string
        Wavelet to use
    mode : str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`.
    axis: int, optional
        Axis over which to compute the inverse DWT. If not given, the
        last axis is used.

    Notes
    -----
    It may sometimes be desired to run ``waverec`` with some sets of
    coefficients omitted.  This can best be done by setting the corresponding
    arrays to zero arrays of matching shape and dtype.  Explicitly removing
    list entries or setting them to None is not supported.

    Specifically, to ignore detail coefficients at level 2, one could do::

        coeffs[-2] = np.zeros_like(coeffs[-2])

    Examples
    --------
    >>> import pywt
    >>> coeffs = pywt.wavedec([1,2,3,4,5,6,7,8], 'db1', level=2)
    >>> pywt.waverec(coeffs, 'db1')
    array([ 1.,  2.,  3.,  4.,  5.,  6.,  7.,  8.])
    """

    if not isinstance(coeffs, (list, tuple)):
        raise ValueError("Expected sequence of coefficient arrays.")

    if len(coeffs) < 1:
        raise ValueError(
            "Coefficient list too short (minimum 1 arrays required).")
    elif len(coeffs) == 1:
        # level 0 transform (just returns the approximation coefficients)
        return coeffs[0]

    a, ds = coeffs[0], coeffs[1:]

    # Reconstruct one level at a time, coarsest level first.
    for d in ds:
        if d is not None and not isinstance(d, np.ndarray):
            raise ValueError((
                "Unexpected detail coefficient type: {}. Detail coefficients "
                "must be arrays as returned by wavedec. If you are using "
                "pywt.array_to_coeffs or pywt.unravel_coeffs, please specify "
                "output_format='wavedec'").format(type(d)))
        if (a is not None) and (d is not None):
            try:
                # The approximation from the previous idwt can be one sample
                # longer than the stored detail; trim it to match.
                if a.shape[axis] == d.shape[axis] + 1:
                    a = a[tuple(slice(s) for s in d.shape)]
                elif a.shape[axis] != d.shape[axis]:
                    raise ValueError("coefficient shape mismatch")
            except IndexError:
                raise np.AxisError("Axis greater than coefficient dimensions")
        a = idwt(a, d, wavelet, mode, axis)

    return a
177
+
178
+
179
def wavedec2(data, wavelet, mode='symmetric', level=None, axes=(-2, -1)):
    """
    Multilevel 2D Discrete Wavelet Transform.

    Parameters
    ----------
    data : ndarray
        2D input data
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use.  This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or 2-tuple of str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple containing a mode to apply along each axis in ``axes``.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the ``dwt_max_level`` function.
    axes : 2-tuple of ints, optional
        Axes over which to compute the DWT. Repeated elements are not allowed.

    Returns
    -------
    [cAn, (cHn, cVn, cDn), ... (cH1, cV1, cD1)] : list
        Coefficients list.  For user-specified ``axes``, ``cH*``
        corresponds to ``axes[0]`` while ``cV*`` corresponds to ``axes[1]``.
        The first element returned is the approximation coefficients for the
        nth level of decomposition.  Remaining elements are tuples of detail
        coefficients in descending order of decomposition level.
        (i.e. ``cH1`` are the horizontal detail coefficients at the first
        level)

    Examples
    --------
    >>> import pywt
    >>> import numpy as np
    >>> coeffs = pywt.wavedec2(np.ones((4,4)), 'db1')
    >>> # Levels:
    >>> len(coeffs)-1
    2
    >>> pywt.waverec2(coeffs, 'db1')
    array([[ 1.,  1.,  1.,  1.],
           [ 1.,  1.,  1.,  1.],
           [ 1.,  1.,  1.,  1.],
           [ 1.,  1.,  1.,  1.]])
    """
    data = np.asarray(data)
    if data.ndim < 2:
        raise ValueError("Expected input data to have at least 2 dimensions.")

    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("Expected 2 axes")
    if len(axes) != len(set(axes)):
        raise ValueError("The axes passed to wavedec2 must be unique.")
    try:
        axes_sizes = [data.shape[ax] for ax in axes]
    except IndexError:
        raise np.AxisError("Axis greater than data dimensions")

    # One wavelet (and decomposition filter length) per transformed axis.
    wavelets = _wavelets_per_axis(wavelet, axes)
    dec_lengths = [w.dec_len for w in wavelets]

    level = _check_level(axes_sizes, dec_lengths, level)

    coeffs_list = []

    # Each dwt2 call yields a coarser approximation ``a`` plus a
    # (cH, cV, cD) detail tuple for that level.
    a = data
    for i in range(level):
        a, ds = dwt2(a, wavelet, mode, axes)
        coeffs_list.append(ds)

    # Assemble coarsest-first: [cAn, (cHn, cVn, cDn), ..., (cH1, cV1, cD1)].
    coeffs_list.append(a)
    coeffs_list.reverse()

    return coeffs_list
254
+
255
+
256
def waverec2(coeffs, wavelet, mode='symmetric', axes=(-2, -1)):
    """
    Multilevel 2D Inverse Discrete Wavelet Transform.

    Parameters
    ----------
    coeffs : list or tuple
        Coefficients list [cAn, (cHn, cVn, cDn), ... (cH1, cV1, cD1)]
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use.  This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or 2-tuple of str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple containing a mode to apply along each axis in ``axes``.
    axes : 2-tuple of ints, optional
        Axes over which to compute the IDWT. Repeated elements are not allowed.

    Returns
    -------
    2D array of reconstructed data.

    Notes
    -----
    It may sometimes be desired to run ``waverec2`` with some sets of
    coefficients omitted.  This can best be done by setting the corresponding
    arrays to zero arrays of matching shape and dtype.  Explicitly removing
    list or tuple entries or setting them to None is not supported.

    Specifically, to ignore all detail coefficients at level 2, one could do::

        coeffs[-2] = tuple([np.zeros_like(v) for v in coeffs[-2]])

    Examples
    --------
    >>> import pywt
    >>> import numpy as np
    >>> coeffs = pywt.wavedec2(np.ones((4,4)), 'db1')
    >>> # Levels:
    >>> len(coeffs)-1
    2
    >>> pywt.waverec2(coeffs, 'db1')
    array([[ 1.,  1.,  1.,  1.],
           [ 1.,  1.,  1.,  1.],
           [ 1.,  1.,  1.,  1.],
           [ 1.,  1.,  1.,  1.]])
    """
    if not isinstance(coeffs, (list, tuple)):
        raise ValueError("Expected sequence of coefficient arrays.")

    if len(axes) != len(set(axes)):
        raise ValueError("The axes passed to waverec2 must be unique.")

    if len(coeffs) < 1:
        raise ValueError(
            "Coefficient list too short (minimum 1 array required).")
    elif len(coeffs) == 1:
        # level 0 transform (just returns the approximation coefficients)
        return coeffs[0]

    a, ds = coeffs[0], coeffs[1:]
    a = np.asarray(a)

    # Reconstruct one level at a time, coarsest level first.
    for d in ds:
        if not isinstance(d, (list, tuple)) or len(d) != 3:
            raise ValueError((
                "Unexpected detail coefficient type: {}. Detail coefficients "
                "must be a 3-tuple of arrays as returned by wavedec2. If you "
                "are using pywt.array_to_coeffs or pywt.unravel_coeffs, "
                "please specify output_format='wavedec2'").format(type(d)))
        d = tuple(np.asarray(coeff) if coeff is not None else None
                  for coeff in d)
        # All non-None details at one level must share a single shape.
        d_shapes = (coeff.shape for coeff in d if coeff is not None)
        try:
            d_shape = next(d_shapes)
        except StopIteration:
            # every detail at this level is None: keep ``a`` untrimmed
            idxs = slice(None), slice(None)
        else:
            if not all(s == d_shape for s in d_shapes):
                raise ValueError("All detail shapes must be the same length.")
            # Trim a trailing sample from ``a`` on any axis where the
            # previous idwt2 produced one element more than stored details.
            idxs = tuple(slice(None, -1 if a_len == d_len + 1 else None)
                         for a_len, d_len in zip(a.shape, d_shape))
        a = idwt2((a[idxs], d), wavelet, mode, axes)

    return a
338
+
339
+
340
+ def _prep_axes_wavedecn(shape, axes):
341
+ if len(shape) < 1:
342
+ raise ValueError("Expected at least 1D input data.")
343
+ ndim = len(shape)
344
+ if np.isscalar(axes):
345
+ axes = (axes, )
346
+ if axes is None:
347
+ axes = range(ndim)
348
+ else:
349
+ axes = tuple(axes)
350
+ if len(axes) != len(set(axes)):
351
+ raise ValueError("The axes passed to wavedecn must be unique.")
352
+ try:
353
+ axes_shapes = [shape[ax] for ax in axes]
354
+ except IndexError:
355
+ raise np.AxisError("Axis greater than data dimensions")
356
+ ndim_transform = len(axes)
357
+ return axes, axes_shapes, ndim_transform
358
+
359
+
360
def wavedecn(data, wavelet, mode='symmetric', level=None, axes=None):
    """
    Multilevel nD Discrete Wavelet Transform.

    Parameters
    ----------
    data : ndarray
        nD input data
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use.  This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or tuple of str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple containing a mode to apply along each axis in ``axes``.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the ``dwt_max_level`` function.
    axes : sequence of ints, optional
        Axes over which to compute the DWT. Axes may not be repeated. The
        default is None, which means transform all axes
        (``axes = range(data.ndim)``).

    Returns
    -------
    [cAn, {details_level_n}, ... {details_level_1}] : list
        Coefficients list.  Coefficients are listed in descending order of
        decomposition level.  ``cAn`` are the approximation coefficients at
        level ``n``.  Each ``details_level_i`` element is a dictionary
        containing detail coefficients at level ``i`` of the decomposition. As
        a concrete example, a 3D decomposition would have the following set of
        keys in each ``details_level_i`` dictionary::

            {'aad', 'ada', 'daa', 'add', 'dad', 'dda', 'ddd'}

        where the order of the characters in each key map to the specified
        ``axes``.

    Examples
    --------
    >>> import numpy as np
    >>> from pywt import wavedecn, waverecn
    >>> coeffs = wavedecn(np.ones((4, 4, 4)), 'db1')
    >>> # Levels:
    >>> len(coeffs)-1
    2
    >>> waverecn(coeffs, 'db1')  # doctest: +NORMALIZE_WHITESPACE
    array([[[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]],
           [[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]],
           [[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]],
           [[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]]])

    """
    data = np.asarray(data)
    # Normalize axes and collect the per-axis sizes that bound the depth.
    axes, axes_shapes, ndim_transform = _prep_axes_wavedecn(data.shape, axes)
    wavelets = _wavelets_per_axis(wavelet, axes)
    dec_lengths = [w.dec_len for w in wavelets]

    level = _check_level(axes_shapes, dec_lengths, level)

    coeffs_list = []

    a = data
    for i in range(level):
        coeffs = dwtn(a, wavelet, mode, axes)
        # The all-'a' key holds the approximation that feeds the next level;
        # the remaining keys are this level's detail dictionary.
        a = coeffs.pop('a' * ndim_transform)
        coeffs_list.append(coeffs)

    # Assemble coarsest-first: [cAn, {details_n}, ..., {details_1}].
    coeffs_list.append(a)
    coeffs_list.reverse()

    return coeffs_list
443
+
444
+
445
+ def _match_coeff_dims(a_coeff, d_coeff_dict):
446
+ # For each axis, compare the approximation coeff shape to one of the
447
+ # stored detail coeffs and truncate the last element along the axis
448
+ # if necessary.
449
+ if a_coeff is None:
450
+ return None
451
+ if not d_coeff_dict:
452
+ return a_coeff
453
+ d_coeff = d_coeff_dict[next(iter(d_coeff_dict))]
454
+ size_diffs = np.subtract(a_coeff.shape, d_coeff.shape)
455
+ if np.any((size_diffs < 0) | (size_diffs > 1)):
456
+ print(size_diffs)
457
+ raise ValueError("incompatible coefficient array sizes")
458
+ return a_coeff[tuple(slice(s) for s in d_coeff.shape)]
459
+
460
+
461
def waverecn(coeffs, wavelet, mode='symmetric', axes=None):
    """
    Multilevel nD Inverse Discrete Wavelet Transform.

    Parameters
    ----------
    coeffs : array_like
        Coefficients list [cAn, {details_level_n}, ... {details_level_1}]
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use.  This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or tuple of str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple containing a mode to apply along each axis in ``axes``.
    axes : sequence of ints, optional
        Axes over which to compute the IDWT.  Axes may not be repeated.

    Returns
    -------
    nD array of reconstructed data.

    Notes
    -----
    It may sometimes be desired to run ``waverecn`` with some sets of
    coefficients omitted.  This can best be done by setting the corresponding
    arrays to zero arrays of matching shape and dtype.  Explicitly removing
    list or dictionary entries or setting them to None is not supported.

    Specifically, to ignore all detail coefficients at level 2, one could do::

        coeffs[-2] = {k: np.zeros_like(v) for k, v in coeffs[-2].items()}

    Examples
    --------
    >>> import numpy as np
    >>> from pywt import wavedecn, waverecn
    >>> coeffs = wavedecn(np.ones((4, 4, 4)), 'db1')
    >>> # Levels:
    >>> len(coeffs)-1
    2
    >>> waverecn(coeffs, 'db1')  # doctest: +NORMALIZE_WHITESPACE
    array([[[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]],
           [[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]],
           [[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]],
           [[ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.],
            [ 1.,  1.,  1.,  1.]]])

    """
    if len(coeffs) < 1:
        raise ValueError(
            "Coefficient list too short (minimum 1 array required).")

    a, ds = coeffs[0], coeffs[1:]

    # this dictionary check must be prior to the call to _fix_coeffs
    if len(ds) > 0 and not all([isinstance(d, dict) for d in ds]):
        # Bug fix: the error message used to read "dicionary" (typo).
        raise ValueError((
            "Unexpected detail coefficient type: {}. Detail coefficients "
            "must be a dictionary of arrays as returned by wavedecn. If "
            "you are using pywt.array_to_coeffs or pywt.unravel_coeffs, "
            "please specify output_format='wavedecn'").format(type(ds[0])))

    # Raise error for invalid key combinations
    ds = list(map(_fix_coeffs, ds))

    if not ds:
        # level 0 transform (just returns the approximation coefficients)
        return coeffs[0]
    if a is None and not any(ds):
        raise ValueError(
            "At least one coefficient must contain a valid value.")

    coeff_ndims = []
    if a is not None:
        a = np.asarray(a)
        coeff_ndims.append(a.ndim)
    for d in ds:
        coeff_ndims += [v.ndim for k, v in d.items()]

    # test that all coefficients have a matching number of dimensions
    unique_coeff_ndims = np.unique(coeff_ndims)
    if len(unique_coeff_ndims) == 1:
        ndim = unique_coeff_ndims[0]
    else:
        raise ValueError(
            "All coefficients must have a matching number of dimensions")

    if np.isscalar(axes):
        axes = (axes, )
    if axes is None:
        axes = range(ndim)
    else:
        axes = tuple(axes)
        if len(axes) != len(set(axes)):
            raise ValueError("The axes passed to waverecn must be unique.")
    ndim_transform = len(axes)

    # Invert one level at a time, coarsest first.
    for idx, d in enumerate(ds):
        if a is None and not d:
            continue
        # The following if statement handles the case where the approximation
        # coefficient returned at the previous level may exceed the size of
        # the stored detail coefficients by 1 on any given axis.
        if idx > 0:
            a = _match_coeff_dims(a, d)
        d['a' * ndim_transform] = a
        a = idwtn(d, wavelet, mode, axes)

    return a
579
+
580
+
581
+ def _coeffs_wavedec_to_wavedecn(coeffs):
582
+ """Convert wavedec coefficients to the wavedecn format."""
583
+ if len(coeffs) == 0:
584
+ return coeffs
585
+ coeffs = copy(coeffs)
586
+ for n in range(1, len(coeffs)):
587
+ if coeffs[n] is None:
588
+ continue
589
+ if coeffs[n].ndim != 1:
590
+ raise ValueError("expected a 1D coefficient array")
591
+ coeffs[n] = dict(d=coeffs[n])
592
+ return coeffs
593
+
594
+
595
+ def _coeffs_wavedec2_to_wavedecn(coeffs):
596
+ """Convert wavedec2 coefficients to the wavedecn format."""
597
+ if len(coeffs) == 0:
598
+ return coeffs
599
+ coeffs = copy(coeffs)
600
+ for n in range(1, len(coeffs)):
601
+ if not isinstance(coeffs[n], (tuple, list)) or len(coeffs[n]) != 3:
602
+ raise ValueError("expected a 3-tuple of detail coefficients")
603
+ (da, ad, dd) = coeffs[n]
604
+ if da is None or ad is None or dd is None:
605
+ raise ValueError(
606
+ "Expected numpy arrays of detail coefficients. Setting "
607
+ "coefficients to None is not supported.")
608
+ coeffs[n] = dict(ad=ad, da=da, dd=dd)
609
+ return coeffs
610
+
611
+
612
+ def _determine_coeff_array_shape(coeffs, axes):
613
+ arr_shape = np.asarray(coeffs[0].shape)
614
+ axes = np.asarray(axes) # axes that were transformed
615
+ ndim_transform = len(axes)
616
+ ncoeffs = coeffs[0].size
617
+ for d in coeffs[1:]:
618
+ arr_shape[axes] += np.asarray(d['d'*ndim_transform].shape)[axes]
619
+ for k, v in d.items():
620
+ ncoeffs += v.size
621
+ arr_shape = tuple(arr_shape.tolist())
622
+ # if the total number of coefficients doesn't equal the size of the array
623
+ # then tight packing is not possible.
624
+ is_tight_packing = (np.prod(arr_shape) == ncoeffs)
625
+ return arr_shape, is_tight_packing
626
+
627
+
628
+ def _prepare_coeffs_axes(coeffs, axes):
629
+ """Helper function to check type of coeffs and axes.
630
+
631
+ This code is used by both coeffs_to_array and ravel_coeffs.
632
+ """
633
+ if not isinstance(coeffs, list) or len(coeffs) == 0:
634
+ raise ValueError("input must be a list of coefficients from wavedecn")
635
+ if coeffs[0] is None:
636
+ raise ValueError("coeffs_to_array does not support missing "
637
+ "coefficients.")
638
+ if not isinstance(coeffs[0], np.ndarray):
639
+ raise ValueError("first list element must be a numpy array")
640
+ ndim = coeffs[0].ndim
641
+
642
+ if len(coeffs) > 1:
643
+ # convert wavedec or wavedec2 format coefficients to waverecn format
644
+ if isinstance(coeffs[1], dict):
645
+ pass
646
+ elif isinstance(coeffs[1], np.ndarray):
647
+ coeffs = _coeffs_wavedec_to_wavedecn(coeffs)
648
+ elif isinstance(coeffs[1], (tuple, list)):
649
+ coeffs = _coeffs_wavedec2_to_wavedecn(coeffs)
650
+ else:
651
+ raise ValueError("invalid coefficient list")
652
+
653
+ if len(coeffs) == 1:
654
+ # no detail coefficients were found
655
+ return coeffs, axes, ndim, None
656
+
657
+ # Determine the number of dimensions that were transformed via key length
658
+ ndim_transform = len(list(coeffs[1].keys())[0])
659
+ if axes is None:
660
+ if ndim_transform < ndim:
661
+ raise ValueError(
662
+ "coeffs corresponds to a DWT performed over only a subset of "
663
+ "the axes. In this case, axes must be specified.")
664
+ axes = np.arange(ndim)
665
+
666
+ if len(axes) != ndim_transform:
667
+ raise ValueError(
668
+ "The length of axes doesn't match the number of dimensions "
669
+ "transformed.")
670
+
671
+ return coeffs, axes, ndim, ndim_transform
672
+
673
+
674
def coeffs_to_array(coeffs, padding=0, axes=None):
    """
    Arrange a wavelet coefficient list from ``wavedecn`` into a single array.

    Parameters
    ----------

    coeffs : array-like
        Dictionary of wavelet coefficients as returned by pywt.wavedecn
    padding : float or None, optional
        The value to use for the background if the coefficients cannot be
        tightly packed. If None, raise an error if the coefficients cannot be
        tightly packed.
    axes : sequence of ints, optional
        Axes over which the DWT that created ``coeffs`` was performed.  The
        default value of None corresponds to all axes.

    Returns
    -------
    coeff_arr : array-like
        Wavelet transform coefficient array.
    coeff_slices : list
        List of slices corresponding to each coefficient.  As a 2D example,
        ``coeff_arr[coeff_slices[1]['dd']]`` would extract the first level
        detail coefficients from ``coeff_arr``.

    See Also
    --------
    array_to_coeffs : the inverse of coeffs_to_array

    Notes
    -----
    Assume a 2D coefficient dictionary, c, from a two-level transform.

    Then all 2D coefficients will be stacked into a single larger 2D array
    as follows::

        +---------------+---------------+-------------------------------+
        |               |               |                               |
        |     c[0]      |  c[1]['da']   |                               |
        |               |               |                               |
        +---------------+---------------+           c[2]['da']          |
        |               |               |                               |
        | c[1]['ad']    |  c[1]['dd']   |                               |
        |               |               |                               |
        +---------------+---------------+ ------------------------------+
        |                               |                               |
        |                               |                               |
        |                               |                               |
        |          c[2]['ad']           |           c[2]['dd']          |
        |                               |                               |
        |                               |                               |
        |                               |                               |
        +-------------------------------+-------------------------------+

    If the transform was not performed with mode "periodization" or the signal
    length was not a multiple of ``2**level``, coefficients at each subsequent
    scale will not be exactly 1/2 the size of those at the previous level due
    to additional coefficients retained to handle the boundary condition. In
    these cases, the default setting of `padding=0` indicates to pad the
    individual coefficient arrays with 0 as needed so that they can be stacked
    into a single, contiguous array.

    Examples
    --------
    >>> import pywt
    >>> cam = pywt.data.camera()
    >>> coeffs = pywt.wavedecn(cam, wavelet='db2', level=3)
    >>> arr, coeff_slices = pywt.coeffs_to_array(coeffs)

    """

    coeffs, axes, ndim, ndim_transform = _prepare_coeffs_axes(coeffs, axes)

    # initialize with the approximation coefficients.
    a_coeffs = coeffs[0]
    a_shape = a_coeffs.shape

    if len(coeffs) == 1:
        # only a single approximation coefficient array was found
        return a_coeffs, [tuple([slice(None)] * ndim)]

    # determine size of output and if tight packing is possible
    arr_shape, is_tight_packing = _determine_coeff_array_shape(coeffs, axes)

    # preallocate output array
    if padding is None:
        if not is_tight_packing:
            raise ValueError("array coefficients cannot be tightly packed")
        coeff_arr = np.empty(arr_shape, dtype=a_coeffs.dtype)
    else:
        coeff_arr = np.full(arr_shape, padding, dtype=a_coeffs.dtype)

    # the approximation band occupies the leading corner of the array
    a_slices = tuple([slice(s) for s in a_shape])
    coeff_arr[a_slices] = a_coeffs

    # initialize list of coefficient slices
    coeff_slices = []
    coeff_slices.append(a_slices)

    # loop over the detail coefficients, adding them to coeff_arr
    ds = coeffs[1:]
    for coeff_dict in ds:
        coeff_slices.append({})  # new dictionary for detail coefficients
        if np.any([d is None for d in coeff_dict.values()]):
            raise ValueError("coeffs_to_array does not support missing "
                             "coefficients.")
        d_shape = coeff_dict['d' * ndim_transform].shape
        for key in coeff_dict.keys():
            d = coeff_dict[key]
            slice_array = [slice(None), ] * ndim
            # each 'a'/'d' character selects the low/high half along the
            # corresponding transformed axis
            for i, let in enumerate(key):
                ax_i = axes[i]  # axis corresponding to this transform index
                if let == 'a':
                    slice_array[ax_i] = slice(d.shape[ax_i])
                elif let == 'd':
                    slice_array[ax_i] = slice(a_shape[ax_i],
                                              a_shape[ax_i] + d.shape[ax_i])
                else:
                    raise ValueError("unexpected letter: {}".format(let))
            slice_array = tuple(slice_array)
            coeff_arr[slice_array] = d
            coeff_slices[-1][key] = slice_array
        # grow the occupied region before placing the next (finer) level
        a_shape = [a_shape[n] + d_shape[n] for n in range(ndim)]
    return coeff_arr, coeff_slices
799
+
800
+
801
def array_to_coeffs(arr, coeff_slices, output_format='wavedecn'):
    """
    Convert a combined array of coefficients back to a list compatible with
    ``waverecn``.

    Parameters
    ----------

    arr : array-like
        An array containing all wavelet coefficients.  This should have been
        generated via ``coeffs_to_array``.
    coeff_slices : list of tuples
        List of slices corresponding to each coefficient as obtained from
        ``array_to_coeffs``.
    output_format : {'wavedec', 'wavedec2', 'wavedecn'}
        Make the form of the coefficients compatible with this type of
        multilevel transform.

    Returns
    -------
    coeffs: array-like
        Wavelet transform coefficient array.

    See Also
    --------
    coeffs_to_array : the inverse of array_to_coeffs

    Notes
    -----
    A single large array containing all coefficients will have subsets stored,
    into a ``waverecn`` list, c, as indicated below::

        +---------------+---------------+-------------------------------+
        |               |               |                               |
        |     c[0]      |  c[1]['da']   |                               |
        |               |               |                               |
        +---------------+---------------+           c[2]['da']          |
        |               |               |                               |
        | c[1]['ad']    |  c[1]['dd']   |                               |
        |               |               |                               |
        +---------------+---------------+ ------------------------------+
        |                               |                               |
        |                               |                               |
        |                               |                               |
        |          c[2]['ad']           |           c[2]['dd']          |
        |                               |                               |
        |                               |                               |
        |                               |                               |
        +-------------------------------+-------------------------------+

    Examples
    --------
    >>> import pywt
    >>> from numpy.testing import assert_array_almost_equal
    >>> cam = pywt.data.camera()
    >>> coeffs = pywt.wavedecn(cam, wavelet='db2', level=3)
    >>> arr, coeff_slices = pywt.coeffs_to_array(coeffs)
    >>> coeffs_from_arr = pywt.array_to_coeffs(arr, coeff_slices,
    ...                                        output_format='wavedecn')
    >>> cam_recon = pywt.waverecn(coeffs_from_arr, wavelet='db2')
    >>> assert_array_almost_equal(cam, cam_recon)

    """
    arr = np.asarray(arr)
    if len(coeff_slices) == 0:
        raise ValueError("empty list of coefficient slices")
    # slot 0 always holds the approximation band
    coeffs = [arr[coeff_slices[0]]]

    # extract the detail coefficients for each level in turn
    for level_slices in coeff_slices[1:]:
        if output_format == 'wavedec':
            detail = arr[level_slices['d']]
        elif output_format == 'wavedec2':
            detail = (arr[level_slices['da']],
                      arr[level_slices['ad']],
                      arr[level_slices['dd']])
        elif output_format == 'wavedecn':
            detail = {key: arr[sl] for key, sl in level_slices.items()}
        else:
            raise ValueError(
                "Unrecognized output format: {}".format(output_format))
        coeffs.append(detail)
    return coeffs
888
+
889
+
890
def wavedecn_shapes(shape, wavelet, mode='symmetric', level=None, axes=None):
    """Subband shapes for a multilevel nD discrete wavelet transform.

    Parameters
    ----------
    shape : sequence of ints
        The shape of the data to be transformed.
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use.  This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or tuple of str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple containing a mode to apply along each axis in ``axes``.
    level : int, optional
        Decomposition level (must be >= 0). If level is None (default) then it
        will be calculated using the ``dwt_max_level`` function.
    axes : sequence of ints, optional
        Axes over which to compute the DWT. Axes may not be repeated. The
        default is None, which means transform all axes
        (``axes = range(data.ndim)``).

    Returns
    -------
    shapes : [cAn, {details_level_n}, ... {details_level_1}] : list
        Coefficients shape list.  Mirrors the output of ``wavedecn``, except
        it contains only the shapes of the coefficient arrays rather than the
        arrays themselves.

    Examples
    --------
    >>> import pywt
    >>> pywt.wavedecn_shapes((64, 32), wavelet='db2', level=3, axes=(0, ))
    [(10, 32), {'d': (10, 32)}, {'d': (18, 32)}, {'d': (33, 32)}]
    """
    axes, axes_shapes, ndim_transform = _prep_axes_wavedecn(shape, axes)
    wavelets = _wavelets_per_axis(wavelet, axes)
    modes = _modes_per_axis(mode, axes)
    dec_lengths = [w.dec_len for w in wavelets]

    # NOTE(review): unlike ``wavedecn``, this passes the scalar pair
    # (min(axes_shapes), max(dec_lengths)) to _check_level rather than the
    # per-axis lists -- presumably a conservative bound; confirm this matches
    # wavedecn's level selection for mixed wavelets.
    level = _check_level(min(axes_shapes), max(dec_lengths), level)

    shapes = []
    for i in range(level):
        # every combination of 'a'/'d' per transformed axis
        detail_keys = [''.join(c) for c in product('ad', repeat=len(axes))]
        new_shapes = {k: list(shape) for k in detail_keys}
        # NOTE(review): the loop variable ``mode`` below shadows the ``mode``
        # parameter; harmless here since ``modes`` was computed above, but
        # worth renaming.
        for axis, wav, mode in zip(axes, wavelets, modes):
            s = dwt_coeff_len(shape[axis], filter_len=wav.dec_len, mode=mode)
            for k in detail_keys:
                new_shapes[k][axis] = s
        for k, v in new_shapes.items():
            new_shapes[k] = tuple(v)
        shapes.append(new_shapes)
        # the all-'a' shape feeds the next (coarser) level
        shape = new_shapes.pop('a' * ndim_transform)
    shapes.append(shape)
    shapes.reverse()
    return shapes
946
+
947
+
948
def wavedecn_size(shapes):
    """Compute the total number of wavedecn coefficients.

    Parameters
    ----------
    shapes : list
        Either a list of coefficient shapes (as produced by
        ``wavedecn_shapes``) or a list of the coefficient arrays themselves
        (as produced by ``wavedecn``).

    Returns
    -------
    size : int
        The total number of coefficients across all levels.
    """
    def _ncoeff(entry):
        # ``entry`` is either an ndarray (count its elements) or a shape
        # tuple (take the product of its dimensions).
        if isinstance(entry, np.ndarray):
            return entry.size
        return np.prod(entry)

    # The first list entry is the approximation coefficient (array or shape).
    total = _ncoeff(shapes[0])

    # Remaining entries are per-level dicts of detail coefficients.
    for level_dict in shapes[1:]:
        for key, val in level_dict.items():
            if val is None:
                raise ValueError(
                    "Setting coefficient arrays to None is not supported.")
            total += _ncoeff(val)
    return total
989
+
990
+
991
def dwtn_max_level(shape, wavelet, axes=None):
    """Compute the maximum decomposition level for n-dimensional data.

    The result is the largest number of decomposition levels that can be
    used with ``wavedec``, ``wavedec2`` or ``wavedecn`` for data of the
    given shape.

    Parameters
    ----------
    shape : sequence of ints
        Input data shape.
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    axes : sequence of ints, optional
        Axes over which to compute the DWT. Axes may not be repeated.

    Returns
    -------
    level : int
        Maximum level.

    Notes
    -----
    The level returned is the smallest ``dwt_max_level`` over all axes,
    i.e. the transform depth is limited by the most restrictive axis.

    Examples
    --------
    >>> import pywt
    >>> pywt.dwtn_max_level((64, 32), 'db2')
    3
    """
    # Resolve which axes are transformed and the data extent along each.
    axes, axes_shapes, ndim_transform = _prep_axes_wavedecn(shape, axes)

    # One Wavelet object per transformed axis.
    wavelets = _wavelets_per_axis(wavelet, axes)

    # The smallest per-axis maximum bounds the overall decomposition depth.
    return min(dwt_max_level(n, wav.dec_len)
               for n, wav in zip(axes_shapes, wavelets))
1032
+
1033
+
1034
def ravel_coeffs(coeffs, axes=None):
    """Ravel a set of multilevel wavelet coefficients into a single 1D array.

    Parameters
    ----------
    coeffs : array-like
        A list of multilevel wavelet coefficients as returned by
        ``wavedec``, ``wavedec2`` or ``wavedecn``. This function is also
        compatible with the output of ``swt``, ``swt2`` and ``swtn`` if those
        functions were called with ``trim_approx=True``.
    axes : sequence of ints, optional
        Axes over which the DWT that created ``coeffs`` was performed. The
        default value of None corresponds to all axes.

    Returns
    -------
    coeff_arr : array-like
        Wavelet transform coefficient array. All coefficients have been
        concatenated into a single array.
    coeff_slices : list
        List of slices corresponding to each coefficient. As a 2D example,
        ``coeff_arr[coeff_slices[1]['dd']]`` would extract the first level
        detail coefficients from ``coeff_arr``.
    coeff_shapes : list
        List of shapes corresponding to each coefficient. For example, in 2D,
        ``coeff_shapes[1]['dd']`` would contain the original shape of the first
        level detail coefficients array.

    Raises
    ------
    ValueError
        If any detail coefficient array in ``coeffs`` is None.

    See Also
    --------
    unravel_coeffs : the inverse of ravel_coeffs

    Examples
    --------
    >>> import pywt
    >>> cam = pywt.data.camera()
    >>> coeffs = pywt.wavedecn(cam, wavelet='db2', level=3)
    >>> arr, coeff_slices, coeff_shapes = pywt.ravel_coeffs(coeffs)

    """
    coeffs, axes, ndim, ndim_transform = _prepare_coeffs_axes(coeffs, axes)

    # initialize with the approximation coefficients.
    a_coeffs = coeffs[0]
    a_size = a_coeffs.size

    if len(coeffs) == 1:
        # only a single approximation coefficient array was found
        return a_coeffs.ravel(), [slice(a_size), ], [a_coeffs.shape, ]

    # preallocate output array
    arr_size = wavedecn_size(coeffs)
    coeff_arr = np.empty((arr_size, ), dtype=a_coeffs.dtype)

    a_slice = slice(a_size)
    coeff_arr[a_slice] = a_coeffs.ravel()

    # initialize lists of coefficient slices and shapes with the
    # approximation entry
    coeff_slices = [a_slice]
    coeff_shapes = [a_coeffs.shape]

    # loop over the detail coefficients, embedding them in coeff_arr
    offset = a_size
    for coeff_dict in coeffs[1:]:
        # new dictionaries for detail coefficient slices and shapes
        coeff_slices.append({})
        coeff_shapes.append({})
        # bug fix: the error message previously referred to coeffs_to_array
        if any(d is None for d in coeff_dict.values()):
            raise ValueError("ravel_coeffs does not support missing "
                             "coefficients.")
        # sort to make sure key order is consistent across Python versions
        for key in sorted(coeff_dict.keys()):
            d = coeff_dict[key]
            sl = slice(offset, offset + d.size)
            offset += d.size
            coeff_arr[sl] = d.ravel()
            coeff_slices[-1][key] = sl
            coeff_shapes[-1][key] = d.shape
    return coeff_arr, coeff_slices, coeff_shapes
1117
+
1118
+
1119
def unravel_coeffs(arr, coeff_slices, coeff_shapes, output_format='wavedecn'):
    """Rebuild a multilevel coefficient list from its raveled 1D form.

    Parameters
    ----------
    arr : array-like
        The flat array of wavelet coefficients produced by ``ravel_coeffs``.
    coeff_slices : list of tuples
        Per-coefficient slices into ``arr``, as returned by ``ravel_coeffs``.
    coeff_shapes : list of tuples
        Per-coefficient original shapes, as returned by ``ravel_coeffs``.
    output_format : {'wavedec', 'wavedec2', 'wavedecn', 'swt', 'swt2', 'swtn'}, optional
        Which multilevel transform the output list should be compatible
        with. Default is ``'wavedecn'``.

    Returns
    -------
    coeffs: list
        Wavelet transform coefficients in the layout dictated by
        ``output_format``.

    See Also
    --------
    ravel_coeffs : the inverse of unravel_coeffs

    Examples
    --------
    >>> import pywt
    >>> from numpy.testing import assert_array_almost_equal
    >>> cam = pywt.data.camera()
    >>> coeffs = pywt.wavedecn(cam, wavelet='db2', level=3)
    >>> arr, coeff_slices, coeff_shapes = pywt.ravel_coeffs(coeffs)
    >>> coeffs_from_arr = pywt.unravel_coeffs(arr, coeff_slices, coeff_shapes,
    ...                                       output_format='wavedecn')
    >>> cam_recon = pywt.waverecn(coeffs_from_arr, wavelet='db2')
    >>> assert_array_almost_equal(cam, cam_recon)

    """
    arr = np.asarray(arr)

    # guard clauses: the two bookkeeping lists must be non-empty and of
    # matching length
    if len(coeff_slices) == 0:
        raise ValueError("empty list of coefficient slices")
    if len(coeff_shapes) == 0:
        raise ValueError("empty list of coefficient shapes")
    if len(coeff_shapes) != len(coeff_slices):
        raise ValueError("coeff_shapes and coeff_slices have unequal length")

    def _extract(sl, shp):
        # pull one coefficient band out of the flat array and restore shape
        return arr[sl].reshape(shp)

    # the approximation coefficients always come first
    coeffs = [_extract(coeff_slices[0], coeff_shapes[0])]

    # detail coefficients for each level, formatted per output_format
    for slice_dict, shape_dict in zip(coeff_slices[1:], coeff_shapes[1:]):
        if output_format in ('wavedec', 'swt'):
            detail = _extract(slice_dict['d'], shape_dict['d'])
        elif output_format in ('wavedec2', 'swt2'):
            detail = tuple(_extract(slice_dict[k], shape_dict[k])
                           for k in ('da', 'ad', 'dd'))
        elif output_format in ('wavedecn', 'swtn'):
            detail = {k: _extract(sl, shape_dict[k])
                      for k, sl in slice_dict.items()}
        else:
            raise ValueError(
                "Unrecognized output format: {}".format(output_format))
        coeffs.append(detail)
    return coeffs
1192
+
1193
+
1194
+ def _check_fswavedecn_axes(data, axes):
1195
+ """Axes checks common to fswavedecn, fswaverecn."""
1196
+ if len(axes) != len(set(axes)):
1197
+ raise np.AxisError("The axes passed to fswavedecn must be unique.")
1198
+ try:
1199
+ [data.shape[ax] for ax in axes]
1200
+ except IndexError:
1201
+ raise np.AxisError("Axis greater than data dimensions")
1202
+
1203
+
1204
class FswavedecnResult(object):
    """Object representing fully separable wavelet transform coefficients.

    Parameters
    ----------
    coeffs : ndarray
        The coefficient array.
    coeff_slices : list
        List of slices corresponding to each detail or approximation
        coefficient array.
    wavelets : list of pywt.DiscreteWavelet objects
        The wavelets used. Will be a list with length equal to
        ``len(axes)``.
    mode_enums : list of int
        The border modes used. Will be a list with length equal to
        ``len(axes)``.
    axes : tuple of int
        The set of axes over which the transform was performed.

    """
    def __init__(self, coeffs, coeff_slices, wavelets, mode_enums,
                 axes):
        self._coeffs = coeffs
        self._coeff_slices = coeff_slices
        self._axes = axes
        # bug fix: these validations used np.all on a generator expression,
        # which is always truthy, so they never actually fired. Use the
        # builtin ``all`` instead.
        if not all(isinstance(w, Wavelet) for w in wavelets):
            raise ValueError(
                "wavelets must contain pywt.Wavelet objects")
        self._wavelets = wavelets
        if not all(isinstance(m, int) for m in mode_enums):
            raise ValueError(
                "mode_enums must be integers")
        self._mode_enums = mode_enums

    @property
    def coeffs(self):
        """ndarray: All coefficients stacked into a single array."""
        return self._coeffs

    @coeffs.setter
    def coeffs(self, c):
        if c.shape != self._coeffs.shape:
            raise ValueError("new coefficient array must match the existing "
                             "coefficient shape")
        self._coeffs = c

    @property
    def coeff_slices(self):
        """List: List of coefficient slices."""
        return self._coeff_slices

    @property
    def ndim(self):
        """int: Number of data dimensions."""
        return self.coeffs.ndim

    @property
    def ndim_transform(self):
        """int: Number of axes transformed."""
        return len(self.axes)

    @property
    def axes(self):
        """tuple of int: The axes the transform was performed along."""
        return self._axes

    @property
    def levels(self):
        """List of int: Levels of decomposition along each transformed axis."""
        return [len(s) - 1 for s in self.coeff_slices]

    @property
    def wavelets(self):
        """List of pywt.DiscreteWavelet: wavelet for each transformed axis."""
        return self._wavelets

    @property
    def wavelet_names(self):
        """List of str: name of the wavelet for each transformed axis."""
        return [w.name for w in self._wavelets]

    @property
    def modes(self):
        """List of str: The border mode used along each transformed axis."""
        # invert the Modes name -> enum mapping to recover the string names
        names_dict = {getattr(Modes, mode): mode
                      for mode in Modes.modes}
        return [names_dict[m] for m in self._mode_enums]

    def _get_coef_sl(self, levels):
        """Build the nd-slice selecting the subband at the given levels."""
        sl = [slice(None), ] * self.ndim
        for n, (ax, lev) in enumerate(zip(self.axes, levels)):
            sl[ax] = self.coeff_slices[n][lev]
        return tuple(sl)

    @property
    def approx(self):
        """ndarray: The approximation coefficients."""
        sl = self._get_coef_sl((0, )*self.ndim)
        return self._coeffs[sl]

    @approx.setter
    def approx(self, a):
        sl = self._get_coef_sl((0, )*self.ndim)
        if self._coeffs[sl].shape != a.shape:
            raise ValueError(
                "x does not match the shape of the requested coefficient")
        self._coeffs[sl] = a

    def _validate_index(self, levels):
        """Validate ``levels`` and return it as a tuple of ints.

        Raises
        ------
        ValueError
            If ``levels`` has the wrong length, contains negative or
            non-integral entries, or exceeds the available decomposition
            levels.
        """
        levels = tuple(levels)

        if len(levels) != len(self.axes):
            raise ValueError(
                "levels must match the number of transformed axes")

        # check that all elements are non-negative integers
        if (not all(isinstance(lev, numbers.Number) for lev in levels) or
                np.any(np.asarray(levels) % 1 > 0) or
                any(lev < 0 for lev in levels)):
            raise ValueError("Index must be a tuple of non-negative integers")
        # convert integer-valued floats to int
        levels = tuple(int(lev) for lev in levels)

        # check for out of range levels
        if any(lev > maxlev for lev, maxlev in zip(levels, self.levels)):
            raise ValueError(
                "Specified indices exceed the number of transform levels.")
        # bug fix: the normalized tuple was previously computed but
        # discarded, so integer-valued floats passed validation and then
        # crashed when used to index the slice lists. Return it for callers.
        return levels

    def __getitem__(self, levels):
        """Retrieve a coefficient subband.

        Parameters
        ----------
        levels : tuple of int
            The number of degrees of decomposition along each transformed
            axis.
        """
        levels = self._validate_index(levels)
        sl = self._get_coef_sl(levels)
        return self._coeffs[sl]

    def __setitem__(self, levels, x):
        """Assign values to a coefficient subband.

        Parameters
        ----------
        levels : tuple of int
            The number of degrees of decomposition along each transformed
            axis.
        x : ndarray
            The data corresponding to assign. It must match the expected
            shape and dtype of the specified subband.
        """
        levels = self._validate_index(levels)
        sl = self._get_coef_sl(levels)
        current_dtype = self._coeffs[sl].dtype
        if self._coeffs[sl].shape != x.shape:
            raise ValueError(
                "x does not match the shape of the requested coefficient")
        if x.dtype != current_dtype:
            # bug fix: the implicit string concatenation was missing a
            # space ("...array todtype ...")
            warnings.warn("dtype mismatch: converting the provided array to "
                          "dtype {}".format(current_dtype))
        self._coeffs[sl] = x

    def detail_keys(self):
        """Return a list of all detail coefficient keys.

        Returns
        -------
        keys : list of tuple of int
            All per-axis level-index tuples except the pure approximation
            ``(0, ..., 0)``.
        """
        keys = list(product(*(range(lev + 1) for lev in self.levels)))
        keys.remove((0, )*len(self.axes))
        return sorted(keys)
1379
+
1380
+
1381
def fswavedecn(data, wavelet, mode='symmetric', levels=None, axes=None):
    """Fully Separable Wavelet Decomposition.

    This is a variant of the multilevel discrete wavelet transform where all
    levels of decomposition are performed along a single axis prior to moving
    onto the next axis. Unlike in ``wavedecn``, the number of levels of
    decomposition are not required to be the same along each axis which can be
    a benefit for anisotropic data.

    Parameters
    ----------
    data: array_like
        Input data
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple containing a wavelet to
        apply along each axis in ``axes``.
    mode : str or tuple of str, optional
        Signal extension mode, see :ref:`Modes <ref-modes>`. This can
        also be a tuple containing a mode to apply along each axis in ``axes``.
    levels : int or sequence of ints, optional
        Decomposition levels along each axis (must be >= 0). If an integer is
        provided, the same number of levels are used for all axes. If
        ``levels`` is None (default), ``dwt_max_level`` will be used to compute
        the maximum number of levels possible for each axis.
    axes : sequence of ints, optional
        Axes over which to compute the transform. Axes may not be repeated. The
        default is to transform along all axes.

    Returns
    -------
    fswavedecn_result : FswavedecnResult object
        Contains the wavelet coefficients, slice objects to allow obtaining
        the coefficients per detail or approximation level, and more.
        See ``FswavedecnResult`` for details.

    Examples
    --------
    >>> from pywt import fswavedecn
    >>> fs_result = fswavedecn(np.ones((32, 32)), 'sym2', levels=(1, 3))
    >>> print(fs_result.detail_keys())
    [(0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3)]
    >>> approx_coeffs = fs_result.approx
    >>> detail_1_2 = fs_result[(1, 2)]


    Notes
    -----
    This transformation has been variously referred to as the (fully) separable
    wavelet transform (e.g. refs [1]_, [3]_), the tensor-product wavelet
    ([2]_) or the hyperbolic wavelet transform ([4]_). It is well suited to
    data with anisotropic smoothness.

    In [2]_ it was demonstrated that the fully separable transform performs at
    least as well as the DWT for image compression. Computation time is a
    factor 2 larger than that for the DWT.

    See Also
    --------
    fswaverecn : inverse of fswavedecn

    References
    ----------
    .. [1] PH Westerink. Subband Coding of Images. Ph.D. dissertation, Dept.
       Elect. Eng., Inf. Theory Group, Delft Univ. Technol., Delft, The
       Netherlands, 1989.  (see Section 2.3)
       http://resolver.tudelft.nl/uuid:a4d195c3-1f89-4d66-913d-db9af0969509

    .. [2] CP Rosiene and TQ Nguyen. Tensor-product wavelet vs. Mallat
       decomposition: A comparative analysis, in Proc. IEEE Int. Symp.
       Circuits and Systems, Orlando, FL, Jun. 1999, pp. 431-434.

    .. [3] V Velisavljevic, B Beferull-Lozano, M Vetterli and PL Dragotti.
       Directionlets: Anisotropic Multidirectional Representation With
       Separable Filtering. IEEE Transactions on Image Processing, Vol. 15,
       No. 7, July 2006.

    .. [4] RA DeVore, SV Konyagin and VN Temlyakov. "Hyperbolic wavelet
       approximation," Constr. Approx. 14 (1998), 1-26.
    """
    data = np.asarray(data)
    if axes is None:
        axes = tuple(np.arange(data.ndim))
    _check_fswavedecn_axes(data, axes)

    if levels is None or np.isscalar(levels):
        # broadcast a single (or absent) level specification to every axis;
        # a per-axis value of None lets wavedec pick its maximum level
        levels = [levels, ] * len(axes)
    if len(levels) != len(axes):
        raise ValueError("levels must match the length of the axes list")

    modes = _modes_per_axis(mode, axes)
    wavelets = _wavelets_per_axis(wavelet, axes)

    coeff_slices = [slice(None), ] * len(axes)
    coeffs_arr = data
    for ax_count, (ax, lev, wav, mode) in enumerate(
            zip(axes, levels, wavelets, modes)):
        # full multilevel 1D transform along the current axis only
        coeffs = wavedec(coeffs_arr, wav, mode=mode, level=lev, axis=ax)

        # Slice objects for accessing coefficient subsets.
        # These can be used to access specific detail coefficient arrays
        # (e.g. as needed for inverse transformation via fswaverecn).
        c_shapes = [c.shape[ax] for c in coeffs]
        c_offsets = np.cumsum([0, ] + c_shapes)
        coeff_slices[ax_count] = [
            slice(c_offsets[d], c_offsets[d+1]) for d in range(len(c_shapes))]

        # stack the coefficients from all levels into a single array
        coeffs_arr = np.concatenate(coeffs, axis=ax)

    return FswavedecnResult(coeffs_arr, coeff_slices, wavelets, modes, axes)
1491
+
1492
+
1493
def fswaverecn(fswavedecn_result):
    """Fully Separable Inverse Wavelet Reconstruction.

    Parameters
    ----------
    fswavedecn_result : FswavedecnResult object
        FswavedecnResult object from ``fswavedecn``.

    Returns
    -------
    reconstructed : ndarray
        Array of reconstructed data.

    Notes
    -----
    This transformation has been variously referred to as the (fully) separable
    wavelet transform (e.g. refs [1]_, [3]_), the tensor-product wavelet
    ([2]_) or the hyperbolic wavelet transform ([4]_). It is well suited to
    data with anisotropic smoothness.

    In [2]_ it was demonstrated that the fully separable transform performs at
    least as well as the DWT for image compression. Computation time is a
    factor 2 larger than that for the DWT.

    See Also
    --------
    fswavedecn : inverse of fswaverecn

    References
    ----------
    .. [1] PH Westerink. Subband Coding of Images. Ph.D. dissertation, Dept.
       Elect. Eng., Inf. Theory Group, Delft Univ. Technol., Delft, The
       Netherlands, 1989.  (see Section 2.3)
       http://resolver.tudelft.nl/uuid:a4d195c3-1f89-4d66-913d-db9af0969509

    .. [2] CP Rosiene and TQ Nguyen. Tensor-product wavelet vs. Mallat
       decomposition: A comparative analysis, in Proc. IEEE Int. Symp.
       Circuits and Systems, Orlando, FL, Jun. 1999, pp. 431-434.

    .. [3] V Velisavljevic, B Beferull-Lozano, M Vetterli and PL Dragotti.
       Directionlets: Anisotropic Multidirectional Representation With
       Separable Filtering. IEEE Transactions on Image Processing, Vol. 15,
       No. 7, July 2006.

    .. [4] RA DeVore, SV Konyagin and VN Temlyakov. "Hyperbolic wavelet
       approximation," Constr. Approx. 14 (1998), 1-26.
    """
    coeffs_arr = fswavedecn_result.coeffs
    coeff_slices = fswavedecn_result.coeff_slices
    axes = fswavedecn_result.axes
    modes = fswavedecn_result.modes
    wavelets = fswavedecn_result.wavelets

    _check_fswavedecn_axes(coeffs_arr, axes)
    if len(axes) != len(coeff_slices):
        raise ValueError("dimension mismatch")

    arr = coeffs_arr
    csl = [slice(None), ] * arr.ndim
    # NOTE(review): a reversed axis ordering was apparently considered here
    # (see the commented-out variant below); forward order appears
    # sufficient since each axis is inverted independently -- confirm
    # against fswavedecn.
    # for ax_count, (ax, wav, mode) in reversed(
    #         list(enumerate(zip(axes, wavelets, modes)))):
    for ax_count, (ax, wav, mode) in enumerate(zip(axes, wavelets, modes)):
        coeffs = []
        for sl in coeff_slices[ax_count]:
            # extract each per-level coefficient band along the current axis
            csl[ax] = sl
            coeffs.append(arr[tuple(csl)])
        # restore the full-axis slice before moving to the next axis
        csl[ax] = slice(None)
        arr = waverec(coeffs, wav, mode=mode, axis=ax)
    return arr
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_pytest.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""common test-related code."""
import os
import sys
import multiprocessing
import numpy as np
import pytest


# Public names re-exported for use by the individual test modules.
__all__ = ['uses_matlab', # skip if pymatbridge and Matlab unavailable
           'uses_futures',  # skip if futures unavailable
           'uses_pymatbridge',  # skip if no PYWT_XSLOW environment variable
           'uses_precomputed',  # skip if PYWT_XSLOW environment variable found
           'matlab_result_dict_cwt',  # dict with precomputed Matlab dwt data
           'matlab_result_dict_dwt',  # dict with precomputed Matlab cwt data
           'futures',  # the futures module or None
           'max_workers',  # the number of workers available to futures
           'size_set',  # the set of Matlab tests to run
           ]

try:
    if sys.version_info[0] == 2:
        # Python 2: "futures" is the third-party backport package
        import futures
    else:
        # Python 3: futures ships in the standard library
        from concurrent import futures
    max_workers = multiprocessing.cpu_count()
    futures_available = True
except ImportError:
    # NOTE(review): max_workers is left undefined on this path, so a
    # ``from pywt._pytest import *`` would fail when futures is missing
    # (only reachable on Python 2 without the backport) -- confirm.
    futures_available = False
    futures = None

# check if pymatbridge + MATLAB tests should be run
matlab_result_dict_dwt = None
matlab_result_dict_cwt = None
matlab_missing = True
use_precomputed = True
size_set = 'reduced'
if 'PYWT_XSLOW' in os.environ:
    try:
        # Probe for a usable MATLAB bridge; fall back to precomputed data
        # (and the reduced test-size set) when it is not importable.
        from pymatbridge import Matlab
        mlab = Matlab()
        matlab_missing = False
        use_precomputed = False
        size_set = 'full'
    except ImportError:
        print("To run Matlab compatibility tests you need to have MathWorks "
              "MATLAB, MathWorks Wavelet Toolbox and the pymatbridge Python "
              "package installed.")
if use_precomputed:
    # load dictionaries of precomputed results
    data_dir = os.path.join(os.path.dirname(__file__), 'tests', 'data')
    matlab_data_file_cwt = os.path.join(
        data_dir, 'cwt_matlabR2015b_result.npz')
    matlab_result_dict_cwt = np.load(matlab_data_file_cwt)

    matlab_data_file_dwt = os.path.join(
        data_dir, 'dwt_matlabR2012a_result.npz')
    matlab_result_dict_dwt = np.load(matlab_data_file_dwt)

# pytest skip markers used as decorators by the test modules
uses_futures = pytest.mark.skipif(
    not futures_available, reason='futures not available')
uses_matlab = pytest.mark.skipif(
    matlab_missing, reason='pymatbridge and/or Matlab not available')
uses_pymatbridge = pytest.mark.skipif(
    use_precomputed,
    reason='PYWT_XSLOW set: skipping tests against precomputed Matlab results')
uses_precomputed = pytest.mark.skipif(
    not use_precomputed,
    reason='PYWT_XSLOW not set: test against precomputed matlab tests')
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_pytesttester.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Pytest test running.
3
+
4
+ This module implements the ``test()`` function for NumPy modules. The usual
5
+ boiler plate for doing that is to put the following in the module
6
+ ``__init__.py`` file::
7
+
8
+ from pywt._pytesttester import PytestTester
9
+ test = PytestTester(__name__).test
10
+ del PytestTester
11
+
12
+
13
+ Warnings filtering and other runtime settings should be dealt with in the
14
+ ``pytest.ini`` file in the pywt repo root. The behavior of the test depends on
15
+ whether or not that file is found as follows:
16
+
17
+ * ``pytest.ini`` is present (develop mode)
18
+ All warnings except those explicitly filtered out are raised as errors.
19
+ * ``pytest.ini`` is absent (release mode)
20
+ DeprecationWarnings and PendingDeprecationWarnings are ignored, other
21
+ warnings are passed through.
22
+
23
+ In practice, tests run from the PyWavelets repo are run in develop mode. That
24
+ includes the standard ``python runtests.py`` invocation.
25
+
26
+ """
27
+ from __future__ import division, absolute_import, print_function
28
+
29
+ import sys
30
+ import os
31
+
32
+ __all__ = ['PytestTester']
33
+
34
+
35
def _show_pywt_info():
    """Print the PyWavelets version and its C99 complex support status."""
    import pywt
    from pywt._c99_config import _have_c99_complex
    print("PyWavelets version %s" % pywt.__version__)
    support = "with" if _have_c99_complex else "without"
    print("Compiled %s C99 complex support." % support)
43
+
44
+
45
class PytestTester(object):
    """
    Pytest test runner.

    An instance is normally exposed as a module-level ``test`` function::

        from pywt.testing import PytestTester
        test = PytestTester(__name__).test
        del PytestTester

    Calling the instance discovers and runs all tests associated with the
    named module and its sub-modules.

    Attributes
    ----------
    module_name : str
        Full path to the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.

    """
    def __init__(self, module_name):
        # The dotted name is resolved lazily via sys.modules in __call__.
        self.module_name = module_name

    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for the module using pytest.

        Parameters
        ----------
        label : {'fast', 'full'}, optional
            'fast' skips tests decorated with ``pytest.mark.slow``; 'full'
            runs everything. Any other value is forwarded as a pytest
            marker expression.
        verbose : int, optional
            Verbosity value for test outputs, in the range 1-3. Default is 1.
        extra_argv : list, optional
            List with any extra arguments to pass to pytests.
        doctests : bool, optional
            .. note:: Not supported
        coverage : bool, optional
            If True, report coverage of NumPy code. Default is False.
            Requires installation of (pip) pytest-cov.
        durations : int, optional
            If < 0, do nothing, If 0, report time of all tests, if > 0,
            report the time of the slowest `timer` tests. Default is -1.
        tests : test or list of tests
            Tests to be executed with pytest '--pyargs'

        Returns
        -------
        result : bool
            Return True on success, false otherwise.

        """
        import pytest

        if doctests:
            raise ValueError("Doctests not supported")

        module = sys.modules[self.module_name]
        module_path = os.path.abspath(module.__path__[0])

        # Base arguments: show locals on failure; the "-q" offsets
        # verbosity by cancelling one "-v".
        args = ["-l", "-q"]

        # Filter out annoying import messages. Want these in both develop
        # and release mode.
        args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed", ]

        if extra_argv:
            args += list(extra_argv)

        if verbose > 1:
            args += ["-" + "v" * (verbose - 1)]

        if coverage:
            args += ["--cov=" + module_path]

        if label == "fast":
            args += ["-m", "not slow"]
        elif label != "full":
            args += ["-m", label]

        if durations >= 0:
            args += ["--durations=%s" % durations]

        targets = [self.module_name] if tests is None else tests
        args += ["--pyargs"] + list(targets)

        # Report version/configuration info before running the suite.
        _show_pywt_info()

        try:
            status = pytest.main(args)
        except SystemExit as exc:
            status = exc.code

        return status == 0
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pywt/_swt.py ADDED
@@ -0,0 +1,824 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from itertools import product
3
+
4
+ import numpy as np
5
+
6
+ from ._c99_config import _have_c99_complex
7
+ from ._extensions._dwt import idwt_single
8
+ from ._extensions._swt import swt_max_level, swt as _swt, swt_axis as _swt_axis
9
+ from ._extensions._pywt import Wavelet, Modes, _check_dtype
10
+ from ._multidim import idwt2, idwtn
11
+ from ._utils import _as_wavelet, _wavelets_per_axis
12
+
13
+
14
+ __all__ = ["swt", "swt_max_level", 'iswt', 'swt2', 'iswt2', 'swtn', 'iswtn']
15
+
16
+
17
def _rescale_wavelet_filterbank(wavelet, sf):
    """Return a copy of ``wavelet`` with every filter scaled by ``sf``.

    The rescaled wavelet keeps the (bi)orthogonality flags of the source
    wavelet so downstream normalization checks behave the same.
    """
    scaled_filters = [np.asarray(filt) * sf for filt in wavelet.filter_bank]
    rescaled = Wavelet(wavelet.name + 'r', scaled_filters)

    # Preserve the orthogonality attributes of the original wavelet.
    rescaled.orthogonal = wavelet.orthogonal
    rescaled.biorthogonal = wavelet.biorthogonal
    return rescaled
25
+
26
+
27
def swt(data, wavelet, level=None, start_level=0, axis=-1,
        trim_approx=False, norm=False):
    """
    Multilevel 1D stationary wavelet transform.

    Parameters
    ----------
    data :
        Input signal.
    wavelet :
        Wavelet to use (Wavelet object or name).
    level : int, optional
        The number of decomposition steps to perform.  If None, the
        maximum level for the transformed axis length is used.
    start_level : int, optional
        The level at which the decomposition will begin (it allows one to
        skip a given number of transform steps and compute
        coefficients starting from start_level) (default: 0).
    axis: int, optional
        Axis over which to compute the SWT. If not given, the
        last axis is used.
    trim_approx : bool, optional
        If True, approximation coefficients at the final level are retained
        and the output layout matches ``pywt.wavedec``.
    norm : bool, optional
        If True, transform is normalized so that the energy of the
        coefficients will be equal to the energy of ``data`` (exactly so
        when the wavelet is orthogonal and ``trim_approx`` is True).

    Returns
    -------
    coeffs : list
        ``[(cAn, cDn), ..., (cA2, cD2), (cA1, cD1)]`` where n equals
        ``level``; with ``start_level = m`` the first m steps are skipped.
        If ``trim_approx`` is True the layout is ``[cAn, cDn, ..., cD1]``
        as in ``pywt.wavedec``.

    Notes
    -----
    The implementation follows the "algorithm a-trous" and requires the
    signal length along the transformed axis to be a multiple of
    ``2**level``; pad with a function such as ``numpy.pad`` otherwise.
    The transform is shift-invariant at the cost of redundancy.  When the
    wavelet is orthogonal and both ``norm`` and ``trim_approx`` are True,
    energy is conserved and variance is partitioned across scales
    (closely related to the MODWT of Percival & Walden, Cambridge
    University Press, 2000).
    """
    # Without native C99 complex support, transform real and imaginary
    # parts independently and recombine.
    if not _have_c99_complex and np.iscomplexobj(data):
        data = np.asarray(data)
        common = dict(wavelet=wavelet, level=level, start_level=start_level,
                      trim_approx=trim_approx, axis=axis, norm=norm)
        real_part = swt(data.real, **common)
        imag_part = swt(data.imag, **common)
        if trim_approx:
            return [re + 1j * im for re, im in zip(real_part, imag_part)]
        return [(a_re + 1j * a_im, d_re + 1j * d_im)
                for (a_re, d_re), (a_im, d_im) in zip(real_part, imag_part)]

    # Accept array_like input; copy so the extension code gets a
    # contiguous array of a supported dtype.
    data = np.array(data, dtype=_check_dtype(data))

    wavelet = _as_wavelet(wavelet)
    if norm:
        if not wavelet.orthogonal:
            warnings.warn(
                "norm=True, but the wavelet is not orthogonal: \n"
                "\tThe conditions for energy preservation are not satisfied.")
        # Rescale filters so the forward transform preserves energy.
        wavelet = _rescale_wavelet_filterbank(wavelet, 1 / np.sqrt(2))

    if axis < 0:
        axis += data.ndim
    if not 0 <= axis < data.ndim:
        raise np.AxisError("Axis greater than data dimensions")

    if level is None:
        level = swt_max_level(data.shape[axis])

    # 1D input has a dedicated fast path; otherwise transform along axis.
    if data.ndim == 1:
        return _swt(data, wavelet, level, start_level, trim_approx)
    return _swt_axis(data, wavelet, level, start_level, axis, trim_approx)
153
+
154
+
155
def iswt(coeffs, wavelet, norm=False, axis=-1):
    """
    Multilevel 1D inverse discrete stationary wavelet transform.

    Parameters
    ----------
    coeffs : array_like
        Coefficients list of tuples::

            [(cAn, cDn), ..., (cA2, cD2), (cA1, cD1)]

        where cA is approximation, cD is details. Index 1 corresponds to
        ``start_level`` from ``pywt.swt``.
    wavelet : Wavelet object or name string
        Wavelet to use
    norm : bool, optional
        Controls the normalization used by the inverse transform. This must
        be set equal to the value that was used by ``pywt.swt`` to preserve the
        energy of a round-trip transform.
    axis : int, optional
        Axis the forward transform was applied over.  For 1D coefficient
        arrays this must be 0 or -1; n-dimensional coefficient arrays are
        delegated to ``iswtn`` with ``axes=(axis,)``.

    Returns
    -------
    1D array of reconstructed data.

    Examples
    --------
    >>> import pywt
    >>> coeffs = pywt.swt([1,2,3,4,5,6,7,8], 'db2', level=2)
    >>> pywt.iswt(coeffs, 'db2')
    array([ 1., 2., 3., 4., 5., 6., 7., 8.])
    """
    # If swt was called with trim_approx=False, the first element is an
    # (approx, details) tuple; otherwise it is the approximation array.
    trim_approx = not isinstance(coeffs[0], (tuple, list))
    cA = coeffs[0] if trim_approx else coeffs[0][0]
    if cA.ndim > 1:
        # convert to swtn coefficient format and call iswtn
        if trim_approx:
            coeffs_nd = [cA] + [{'d': d} for d in coeffs[1:]]
        else:
            coeffs_nd = [{'a': a, 'd': d} for a, d in coeffs]
        return iswtn(coeffs_nd, wavelet, axes=(axis,), norm=norm)
    elif axis != 0 and axis != -1:
        # 1D coefficients only have one valid axis
        raise np.AxisError("Axis greater than data dimensions")
    if not _have_c99_complex and np.iscomplexobj(cA):
        # no native complex support: invert real/imag parts separately
        if trim_approx:
            coeffs_real = [c.real for c in coeffs]
            coeffs_imag = [c.imag for c in coeffs]
        else:
            coeffs_real = [(ca.real, cd.real) for ca, cd in coeffs]
            coeffs_imag = [(ca.imag, cd.imag) for ca, cd in coeffs]
        kwargs = dict(wavelet=wavelet, norm=norm)
        y = iswt(coeffs_real, **kwargs)
        return y + 1j * iswt(coeffs_imag, **kwargs)

    if trim_approx:
        # drop the leading approximation array; only details are iterated
        coeffs = coeffs[1:]

    if cA.ndim != 1:
        raise ValueError("iswt only supports 1D data")

    # copy to avoid modification of input data
    dt = _check_dtype(cA)
    output = np.array(cA, dtype=dt, copy=True)

    # num_levels, equivalent to the decomposition level, n
    num_levels = len(coeffs)
    wavelet = _as_wavelet(wavelet)
    if norm:
        # undo the 1/sqrt(2) filter scaling applied by swt(..., norm=True)
        wavelet = _rescale_wavelet_filterbank(wavelet, np.sqrt(2))
    mode = Modes.from_object('periodization')
    # reconstruct from the coarsest level (largest step) down to level 1
    for j in range(num_levels, 0, -1):
        step_size = int(pow(2, j-1))
        last_index = step_size
        if trim_approx:
            cD = coeffs[-j]
        else:
            _, cD = coeffs[-j]
        cD = np.asarray(cD, dtype=_check_dtype(cD))
        if cD.dtype != output.dtype:
            # upcast to a common dtype (float64 or complex128)
            if output.dtype.kind == 'c' or cD.dtype.kind == 'c':
                dtype = np.complex128
            else:
                dtype = np.float64
            output = np.asarray(output, dtype=dtype)
            cD = np.asarray(cD, dtype=dtype)
        for first in range(last_index):  # 0 to last_index - 1

            # Getting the indices that we will transform
            indices = np.arange(first, len(cD), step_size)

            # select the even indices
            even_indices = indices[0::2]
            # select the odd indices
            odd_indices = indices[1::2]

            # perform the inverse dwt on the selected indices,
            # making sure to use periodic boundary conditions
            # Note: indexing with an array of ints returns a contiguous
            # copy as required by idwt_single.
            x1 = idwt_single(output[even_indices],
                             cD[even_indices],
                             wavelet, mode)
            x2 = idwt_single(output[odd_indices],
                             cD[odd_indices],
                             wavelet, mode)

            # perform a circular shift right
            x2 = np.roll(x2, 1)

            # average the two phase-shifted reconstructions and insert
            # back into the correct indices (in place, so subsequent
            # offsets at this level see the updated approximation)
            output[indices] = (x1 + x2)/2.

    return output
269
+
270
+
271
def swt2(data, wavelet, level, start_level=0, axes=(-2, -1),
         trim_approx=False, norm=False):
    """
    Multilevel 2D stationary wavelet transform.

    Parameters
    ----------
    data : array_like
        2D array with input data.
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use. This can also be a tuple of wavelets to apply per
        axis in ``axes``.
    level : int
        The number of decomposition steps to perform.
    start_level : int, optional
        The level at which the decomposition will start (default: 0).
    axes : 2-tuple of ints, optional
        Axes over which to compute the SWT. Repeated elements are not
        allowed.
    trim_approx : bool, optional
        If True, approximation coefficients are retained only at the final
        level, matching the ``pywt.wavedec2`` output layout.
    norm : bool, optional
        If True, transform is normalized so that the energy of the
        coefficients will be equal to the energy of ``data`` (exactly so
        when the wavelet is orthogonal and ``trim_approx`` is True).

    Returns
    -------
    coeffs : list
        With ``trim_approx=False``, one entry per level (coarsest first)::

            [(cA_m+level, (cH_m+level, cV_m+level, cD_m+level)),
             ...,
             (cA_m+1, (cH_m+1, cV_m+1, cD_m+1)),
             (cA_m, (cH_m, cV_m, cD_m))]

        where cA/cH/cV/cD are approximation, horizontal, vertical and
        diagonal details and m is ``start_level``.  With
        ``trim_approx=True`` the first entry is the final-level cA and the
        rest are ``(cH, cV, cD)`` tuples.

    Notes
    -----
    Follows the "algorithm a-trous": the signal length along the
    transformed axes must be a multiple of ``2**level`` (pad with
    ``numpy.pad`` otherwise).  The transform is shift-invariant at the
    cost of redundancy; with an orthogonal wavelet, ``norm=True`` and
    ``trim_approx=True``, energy is conserved and variance is partitioned
    across scales.
    """
    axes = tuple(axes)
    data = np.asarray(data)

    # Validate the axes before delegating to the n-dimensional transform.
    if len(axes) != 2:
        raise ValueError("Expected 2 axes")
    if len(axes) != len(set(axes)):
        raise ValueError("The axes passed to swt2 must be unique.")
    if data.ndim < len(np.unique(axes)):
        raise ValueError("Input array has fewer dimensions than the "
                         "specified axes")

    # Compute via swtn, then repack its dict-per-level output into the
    # 2D (cA, (cH, cV, cD)) convention.
    nd_coeffs = swtn(data, wavelet, level, start_level, axes, trim_approx,
                     norm)
    if trim_approx:
        approx = nd_coeffs[0]
        details = [(c['da'], c['ad'], c['dd']) for c in nd_coeffs[1:]]
        return [approx] + details
    return [(c['aa'], (c['da'], c['ad'], c['dd'])) for c in nd_coeffs]
378
+
379
+
380
def iswt2(coeffs, wavelet, norm=False, axes=(-2, -1)):
    """
    Multilevel 2D inverse discrete stationary wavelet transform.

    Parameters
    ----------
    coeffs : list
        Approximation and details coefficients::

            [
                (cA_n,
                    (cH_n, cV_n, cD_n)
                ),
                ...,
                (cA_2,
                    (cH_2, cV_2, cD_2)
                ),
                (cA_1,
                    (cH_1, cV_1, cD_1)
                )
            ]

        where cA is approximation, cH is horizontal details, cV is
        vertical details, cD is diagonal details and n is the number of
        levels. Index 1 corresponds to ``start_level`` from ``pywt.swt2``.
    wavelet : Wavelet object or name string, or 2-tuple of wavelets
        Wavelet to use. This can also be a 2-tuple of wavelets to apply per
        axis.
    norm : bool, optional
        Controls the normalization used by the inverse transform. This must
        be set equal to the value that was used by ``pywt.swt2`` to preserve
        the energy of a round-trip transform.
    axes : 2-tuple of ints, optional
        Axes the forward transform was computed over.  Anything other than
        the default ``(-2, -1)``, or coefficients that are not 2D, is
        handled by delegating to ``iswtn``.

    Returns
    -------
    2D array of reconstructed data.

    Examples
    --------
    >>> import pywt
    >>> coeffs = pywt.swt2([[1,2,3,4],[5,6,7,8],
    ...                     [9,10,11,12],[13,14,15,16]],
    ...                    'db1', level=2)
    >>> pywt.iswt2(coeffs, 'db1')
    array([[ 1., 2., 3., 4.],
           [ 5., 6., 7., 8.],
           [ 9., 10., 11., 12.],
           [ 13., 14., 15., 16.]])

    """

    # If swt was called with trim_approx=False, first element is a tuple
    trim_approx = not isinstance(coeffs[0], (tuple, list))
    cA = coeffs[0] if trim_approx else coeffs[0][0]
    if cA.ndim != 2 or axes != (-2, -1):
        # convert to swtn coefficient format and call iswtn instead
        if trim_approx:
            coeffs_nd = [cA] + [{'da': h, 'ad': v, 'dd': d}
                                for h, v, d in coeffs[1:]]
        else:
            coeffs_nd = [{'aa': a, 'da': h, 'ad': v, 'dd': d}
                         for a, (h, v, d) in coeffs]
        return iswtn(coeffs_nd, wavelet, axes=axes, norm=norm)
    if not _have_c99_complex and np.iscomplexobj(cA):
        # no native complex support: invert real/imag parts separately
        if trim_approx:
            coeffs_real = [cA.real]
            coeffs_real += [(h.real, v.real, d.real) for h, v, d in coeffs[1:]]
            coeffs_imag = [cA.imag]
            coeffs_imag += [(h.imag, v.imag, d.imag) for h, v, d in coeffs[1:]]
        else:
            coeffs_real = [(a.real, (h.real, v.real, d.real))
                           for a, (h, v, d) in coeffs]
            coeffs_imag = [(a.imag, (h.imag, v.imag, d.imag))
                           for a, (h, v, d) in coeffs]
        kwargs = dict(wavelet=wavelet, norm=norm)
        y = iswt2(coeffs_real, **kwargs)
        return y + 1j * iswt2(coeffs_imag, **kwargs)

    if trim_approx:
        # drop the leading approximation array; only details are iterated
        coeffs = coeffs[1:]

    # copy to avoid modification of input data
    dt = _check_dtype(cA)
    output = np.array(cA, dtype=dt, copy=True)

    if output.ndim != 2:
        raise ValueError(
            "iswt2 only supports 2D arrays. see iswtn for a general "
            "n-dimensionsal ISWT")
    # num_levels, equivalent to the decomposition level, n
    num_levels = len(coeffs)
    wavelets = _wavelets_per_axis(wavelet, axes=(0, 1))
    if norm:
        # undo the 1/sqrt(2) filter scaling applied by swt2(..., norm=True)
        wavelets = [_rescale_wavelet_filterbank(wav, np.sqrt(2))
                    for wav in wavelets]

    # reconstruct from the coarsest level (largest step) down to level 1
    for j in range(num_levels):
        step_size = int(pow(2, num_levels-j-1))
        last_index = step_size
        if trim_approx:
            (cH, cV, cD) = coeffs[j]
        else:
            _, (cH, cV, cD) = coeffs[j]
        # We are going to assume cH, cV, and cD are of equal size
        if (cH.shape != cV.shape) or (cH.shape != cD.shape):
            raise RuntimeError(
                "Mismatch in shape of intermediate coefficient arrays")

        # make sure output shares the common dtype
        # (conversion of dtype for individual coeffs is handled within idwt2 )
        common_dtype = np.result_type(*(
            [dt, ] + [_check_dtype(c) for c in [cH, cV, cD]]))
        if output.dtype != common_dtype:
            output = output.astype(common_dtype)

        # loop over every (row, column) phase offset at this level
        for first_h in range(last_index):  # 0 to last_index - 1
            for first_w in range(last_index):  # 0 to last_index - 1
                # Getting the indices that we will transform
                indices_h = slice(first_h, cH.shape[0], step_size)
                indices_w = slice(first_w, cH.shape[1], step_size)

                even_idx_h = slice(first_h, cH.shape[0], 2*step_size)
                even_idx_w = slice(first_w, cH.shape[1], 2*step_size)
                odd_idx_h = slice(first_h + step_size, cH.shape[0], 2*step_size)
                odd_idx_w = slice(first_w + step_size, cH.shape[1], 2*step_size)

                # perform the inverse dwt on the selected indices,
                # making sure to use periodic boundary conditions
                # (four reconstructions: one per even/odd phase combination)
                x1 = idwt2((output[even_idx_h, even_idx_w],
                            (cH[even_idx_h, even_idx_w],
                             cV[even_idx_h, even_idx_w],
                             cD[even_idx_h, even_idx_w])),
                           wavelets, 'periodization')
                x2 = idwt2((output[even_idx_h, odd_idx_w],
                            (cH[even_idx_h, odd_idx_w],
                             cV[even_idx_h, odd_idx_w],
                             cD[even_idx_h, odd_idx_w])),
                           wavelets, 'periodization')
                x3 = idwt2((output[odd_idx_h, even_idx_w],
                            (cH[odd_idx_h, even_idx_w],
                             cV[odd_idx_h, even_idx_w],
                             cD[odd_idx_h, even_idx_w])),
                           wavelets, 'periodization')
                x4 = idwt2((output[odd_idx_h, odd_idx_w],
                            (cH[odd_idx_h, odd_idx_w],
                             cV[odd_idx_h, odd_idx_w],
                             cD[odd_idx_h, odd_idx_w])),
                           wavelets, 'periodization')

                # perform a circular shifts to realign the odd phases
                x2 = np.roll(x2, 1, axis=1)
                x3 = np.roll(x3, 1, axis=0)
                x4 = np.roll(x4, 1, axis=0)
                x4 = np.roll(x4, 1, axis=1)
                # average the four phase reconstructions in place so later
                # offsets at this level see the updated approximation
                output[indices_h, indices_w] = (x1 + x2 + x3 + x4) / 4

    return output
537
+
538
+
539
def swtn(data, wavelet, level, start_level=0, axes=None, trim_approx=False,
         norm=False):
    """
    n-dimensional stationary wavelet transform.

    Parameters
    ----------
    data : array_like
        n-dimensional array with input data.
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple of wavelets to apply per
        axis in ``axes``.
    level : int
        The number of decomposition steps to perform.
    start_level : int, optional
        The level at which the decomposition will start (default: 0).
    axes : sequence of ints, optional
        Axes over which to compute the SWT. A value of ``None`` (the
        default) selects all axes. Axes may not be repeated.
    trim_approx : bool, optional
        If True, approximation coefficients are retained only at the final
        level, matching the ``pywt.wavedecn`` output layout.
    norm : bool, optional
        If True, transform is normalized so that the energy of the
        coefficients will be equal to the energy of ``data`` (exactly so
        when the wavelets are orthogonal and ``trim_approx`` is True).

    Returns
    -------
    [{coeffs_level_n}, ..., {coeffs_level_1}]: list of dict
        One dict per level; each key is a string of 'a'/'d' characters
        (one per transformed axis, in ``axes`` order) naming the subband,
        e.g. for 2D::

            {'aa': <coeffs>  # A(LL) - approx. on both dims
             'ad': <coeffs>  # V(LH) - approx. on 1st dim, det. on 2nd dim
             'da': <coeffs>  # H(HL) - det. on 1st dim, approx. on 2nd dim
             'dd': <coeffs>  # D(HH) - det. on both dims
            }

        With ``trim_approx=True`` the first list element is the final-level
        approximation array and the remaining dicts hold only details.

    Notes
    -----
    Follows the "algorithm a-trous": the signal length along transformed
    axes must be a multiple of ``2**level`` (pad with ``numpy.pad``
    otherwise).  The transform is shift-invariant at the cost of
    redundancy; with orthogonal wavelets, ``norm=True`` and
    ``trim_approx=True``, energy is conserved and variance is partitioned
    across scales.
    """
    data = np.asarray(data)

    # Without native C99 complex support, transform real and imaginary
    # parts independently and recombine.
    if not _have_c99_complex and np.iscomplexobj(data):
        common = dict(wavelet=wavelet, level=level, start_level=start_level,
                      trim_approx=trim_approx, axes=axes, norm=norm)
        re_coeffs = swtn(data.real, **common)
        im_coeffs = swtn(data.imag, **common)
        if trim_approx:
            out = [re_coeffs[0] + 1j * im_coeffs[0]]
            first_detail = 1
        else:
            out = []
            first_detail = 0
        out += [{key: rdct[key] + 1j * idct[key] for key in rdct}
                for rdct, idct in zip(re_coeffs[first_detail:],
                                      im_coeffs[first_detail:])]
        return out

    if data.dtype == np.dtype('object'):
        raise TypeError("Input must be a numeric array-like")
    if data.ndim < 1:
        raise ValueError("Input data must be at least 1D")

    # Normalize and validate the transform axes.
    if axes is None:
        axes = range(data.ndim)
    axes = [ax + data.ndim if ax < 0 else ax for ax in axes]
    if any(ax < 0 or ax >= data.ndim for ax in axes):
        raise np.AxisError("Axis greater than data dimensions")
    if len(axes) != len(set(axes)):
        raise ValueError("The axes passed to swtn must be unique.")
    num_axes = len(axes)

    wavelets = _wavelets_per_axis(wavelet, axes)
    if norm:
        if not np.all([wav.orthogonal for wav in wavelets]):
            warnings.warn(
                "norm=True, but the wavelets used are not orthogonal: \n"
                "\tThe conditions for energy preservation are not satisfied.")
        # Rescale filters so the forward transform preserves energy.
        wavelets = [_rescale_wavelet_filterbank(wav, 1 / np.sqrt(2))
                    for wav in wavelets]

    ret = []
    approx_key = 'a' * num_axes
    for lvl in range(start_level, start_level + level):
        # Transform one axis at a time; each pass doubles the subband
        # count and appends an 'a'/'d' character to every subband key.
        subbands = [('', data)]
        for ax, wav in zip(axes, wavelets):
            expanded = []
            for key, arr in subbands:
                cA, cD = _swt_axis(arr, wav, level=1, start_level=lvl,
                                   axis=ax)[0]
                expanded.append((key + 'a', cA))
                expanded.append((key + 'd', cD))
            subbands = expanded

        level_coeffs = dict(subbands)
        ret.append(level_coeffs)

        # data for the next level is this level's pure approximation
        data = level_coeffs[approx_key]
        if trim_approx:
            level_coeffs.pop(approx_key)
    if trim_approx:
        ret.append(data)
    # order from the coarsest level down, like wavedecn
    ret.reverse()
    return ret
677
+
678
+
679
def iswtn(coeffs, wavelet, axes=None, norm=False):
    """
    Multilevel nD inverse discrete stationary wavelet transform.

    Parameters
    ----------
    coeffs : list
        [{coeffs_level_n}, ..., {coeffs_level_1}]: list of dict
    wavelet : Wavelet object or name string, or tuple of wavelets
        Wavelet to use. This can also be a tuple of wavelets to apply per
        axis in ``axes``.
    axes : sequence of ints, optional
        Axes over which to compute the inverse SWT. Axes may not be repeated.
        The default is ``None``, which means transform all axes
        (``axes = range(data.ndim)``).
    norm : bool, optional
        Controls the normalization used by the inverse transform. This must
        be set equal to the value that was used by ``pywt.swtn`` to preserve
        the energy of a round-trip transform.

    Returns
    -------
    nD array of reconstructed data.

    Examples
    --------
    >>> import pywt
    >>> coeffs = pywt.swtn([[1,2,3,4],[5,6,7,8],
    ...                     [9,10,11,12],[13,14,15,16]],
    ...                    'db1', level=2)
    >>> pywt.iswtn(coeffs, 'db1')
    array([[ 1., 2., 3., 4.],
           [ 5., 6., 7., 8.],
           [ 9., 10., 11., 12.],
           [ 13., 14., 15., 16.]])

    """

    # key length matches the number of axes transformed
    ndim_transform = max(len(key) for key in coeffs[-1].keys())
    # If swtn was called with trim_approx=True, the first element is the
    # approximation array rather than a dict of subbands.
    trim_approx = not isinstance(coeffs[0], dict)
    cA = coeffs[0] if trim_approx else coeffs[0]['a'*ndim_transform]

    if not _have_c99_complex and np.iscomplexobj(cA):
        # no native complex support: invert real/imag parts separately
        if trim_approx:
            coeffs_real = [coeffs[0].real]
            coeffs_imag = [coeffs[0].imag]
            coeffs = coeffs[1:]
        else:
            coeffs_real = []
            coeffs_imag = []
        coeffs_real += [{k: v.real for k, v in c.items()} for c in coeffs]
        coeffs_imag += [{k: v.imag for k, v in c.items()} for c in coeffs]
        kwargs = dict(wavelet=wavelet, axes=axes, norm=norm)
        y = iswtn(coeffs_real, **kwargs)
        return y + 1j * iswtn(coeffs_imag, **kwargs)

    if trim_approx:
        # drop the leading approximation array; only detail dicts remain
        coeffs = coeffs[1:]

    # copy to avoid modification of input data
    dt = _check_dtype(cA)
    output = np.array(cA, dtype=dt, copy=True)
    ndim = output.ndim

    if axes is None:
        axes = range(output.ndim)
    axes = [a + ndim if a < 0 else a for a in axes]
    if len(axes) != len(set(axes)):
        raise ValueError("The axes passed to swtn must be unique.")
    if ndim_transform != len(axes):
        raise ValueError("The number of axes used in iswtn must match the "
                         "number of dimensions transformed in swtn.")

    # num_levels, equivalent to the decomposition level, n
    num_levels = len(coeffs)
    wavelets = _wavelets_per_axis(wavelet, axes)
    if norm:
        # undo the 1/sqrt(2) filter scaling applied by swtn(..., norm=True)
        wavelets = [_rescale_wavelet_filterbank(wav, np.sqrt(2))
                    for wav in wavelets]

    # initialize various slice objects used in the loops below
    # these will remain slice(None) only on axes that aren't transformed
    indices = [slice(None), ]*ndim
    even_indices = [slice(None), ]*ndim
    odd_indices = [slice(None), ]*ndim
    odd_even_slices = [slice(None), ]*ndim

    # reconstruct from the coarsest level (largest step) down to level 1
    for j in range(num_levels):
        step_size = int(pow(2, num_levels-j-1))
        last_index = step_size
        if not trim_approx:
            a = coeffs[j].pop('a'*ndim_transform)  # will restore later
        details = coeffs[j]
        # make sure dtype matches the coarsest level approximation coefficients
        common_dtype = np.result_type(*(
            [dt, ] + [v.dtype for v in details.values()]))
        if output.dtype != common_dtype:
            output = output.astype(common_dtype)

        # We assume all coefficient arrays are of equal size
        shapes = [v.shape for k, v in details.items()]
        if len(set(shapes)) != 1:
            raise RuntimeError(
                "Mismatch in shape of intermediate coefficient arrays")

        # shape of a single coefficient array, excluding non-transformed axes
        coeff_trans_shape = tuple([shapes[0][ax] for ax in axes])

        # nested loop over all combinations of axis offsets at this level
        for firsts in product(*([range(last_index), ]*ndim_transform)):
            for first, sh, ax in zip(firsts, coeff_trans_shape, axes):
                indices[ax] = slice(first, sh, step_size)
                even_indices[ax] = slice(first, sh, 2*step_size)
                odd_indices[ax] = slice(first+step_size, sh, 2*step_size)

            # nested loop over all combinations of odd/even indices;
            # the pre-update approximation is preserved in ``approx`` while
            # ``output`` accumulates the phase-shifted reconstructions
            approx = output.copy()
            output[tuple(indices)] = 0
            ntransforms = 0
            for odds in product(*([(0, 1), ]*ndim_transform)):
                for o, ax in zip(odds, axes):
                    if o:
                        odd_even_slices[ax] = odd_indices[ax]
                    else:
                        odd_even_slices[ax] = even_indices[ax]
                # extract the odd/even indices for all detail coefficients
                details_slice = {}
                for key, value in details.items():
                    details_slice[key] = value[tuple(odd_even_slices)]
                details_slice['a'*ndim_transform] = approx[
                    tuple(odd_even_slices)]

                # perform the inverse dwt on the selected indices,
                # making sure to use periodic boundary conditions
                x = idwtn(details_slice, wavelets, 'periodization', axes=axes)
                for o, ax in zip(odds, axes):
                    # circular shift along any odd indexed axis
                    if o:
                        x = np.roll(x, 1, axis=ax)
                output[tuple(indices)] += x
                ntransforms += 1
            output[tuple(indices)] /= ntransforms  # normalize
        if not trim_approx:
            coeffs[j]['a'*ndim_transform] = a  # restore approx coeffs to dict
    return output