prasb committed on
Commit 3f3f9bd · verified · 1 Parent(s): cae1119

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +6 -0
  2. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow.libs/libpng16-52f22300.so.16.37.0 +3 -0
  3. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/INSTALLER +1 -0
  4. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/LICENSE +6 -0
  5. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/LICENSE.APACHE +202 -0
  6. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/LICENSE.PSF +41 -0
  7. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/RECORD +189 -0
  8. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/REQUESTED +0 -0
  9. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/WHEEL +5 -0
  10. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/top_level.txt +3 -0
  11. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/INSTALLER +1 -0
  12. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/__init__.py +4 -0
  13. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/constants.py +4 -0
  14. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/errors.py +25 -0
  15. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/store.py +107 -0
  16. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/utils.py +38 -0
  17. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/version.py +2 -0
  18. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/LICENSE +21 -0
  19. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/METADATA +309 -0
  20. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/RECORD +31 -0
  21. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/REQUESTED +0 -0
  22. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/WHEEL +5 -0
  23. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/top_level.txt +1 -0
  24. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/__init__.py +51 -0
  25. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_error.py +30 -0
  26. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_soft.py +47 -0
  27. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_windows.py +65 -0
  28. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/version.py +16 -0
  29. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5t.cpython-38-x86_64-linux-gnu.so +3 -0
  30. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/_dask.py +364 -0
  31. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/_parallel_backends.py +610 -0
  32. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/_store_backends.py +414 -0
  33. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/backports.py +78 -0
  34. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/disk.py +136 -0
  35. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/func_inspect.py +365 -0
  36. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/hashing.py +266 -0
  37. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/numpy_pickle_compat.py +243 -0
  38. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/numpy_pickle_utils.py +253 -0
  39. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/parallel.py +1074 -0
  40. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/pool.py +352 -0
  41. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/testing.py +77 -0
  42. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/__init__.py +92 -0
  43. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/conftest.py +51 -0
  44. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/masking.py +940 -0
  45. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/signal.py +870 -0
  46. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/version.py +183 -0
  47. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/INSTALLER +1 -0
  48. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/LICENSE +27 -0
  49. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/METADATA +335 -0
  50. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/RECORD +13 -0
.gitattributes CHANGED
@@ -297,3 +297,9 @@ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5d.cp
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5f.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/wrapt/_wrappers.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/scikit_learn.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5t.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/torchaudio/_torchaudio.so filter=lfs diff=lfs merge=lfs -text
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow.libs/libpng16-52f22300.so.16.37.0 filter=lfs diff=lfs merge=lfs -text
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/scikit_image.libs/libgomp-a34b3233.so.1.0.0 filter=lfs diff=lfs merge=lfs -text
+ my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/scipy.libs/libquadmath-96973f99.so.0.0.0 filter=lfs diff=lfs merge=lfs -text
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/Pillow.libs/libpng16-52f22300.so.16.37.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5cc800050906e9a3489fe60fbf57f8dca649c9627f87696ce3369f32c9457d56
+ size 277816
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ conda
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/LICENSE ADDED
@@ -0,0 +1,6 @@
+ This software is made available under the terms of *either* of the licenses
+ found in LICENSE.APACHE or LICENSE.BSD. Contributions to cryptography are made
+ under the terms of *both* these licenses.
+
+ The code used in the OS random engine is derived from CPython, and is licensed
+ under the terms of the PSF License Agreement.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/LICENSE.APACHE ADDED
@@ -0,0 +1,202 @@
1
+
2
+ Apache License
3
+ Version 2.0, January 2004
4
+ https://www.apache.org/licenses/
5
+
6
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7
+
8
+ 1. Definitions.
9
+
10
+ "License" shall mean the terms and conditions for use, reproduction,
11
+ and distribution as defined by Sections 1 through 9 of this document.
12
+
13
+ "Licensor" shall mean the copyright owner or entity authorized by
14
+ the copyright owner that is granting the License.
15
+
16
+ "Legal Entity" shall mean the union of the acting entity and all
17
+ other entities that control, are controlled by, or are under common
18
+ control with that entity. For the purposes of this definition,
19
+ "control" means (i) the power, direct or indirect, to cause the
20
+ direction or management of such entity, whether by contract or
21
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
22
+ outstanding shares, or (iii) beneficial ownership of such entity.
23
+
24
+ "You" (or "Your") shall mean an individual or Legal Entity
25
+ exercising permissions granted by this License.
26
+
27
+ "Source" form shall mean the preferred form for making modifications,
28
+ including but not limited to software source code, documentation
29
+ source, and configuration files.
30
+
31
+ "Object" form shall mean any form resulting from mechanical
32
+ transformation or translation of a Source form, including but
33
+ not limited to compiled object code, generated documentation,
34
+ and conversions to other media types.
35
+
36
+ "Work" shall mean the work of authorship, whether in Source or
37
+ Object form, made available under the License, as indicated by a
38
+ copyright notice that is included in or attached to the work
39
+ (an example is provided in the Appendix below).
40
+
41
+ "Derivative Works" shall mean any work, whether in Source or Object
42
+ form, that is based on (or derived from) the Work and for which the
43
+ editorial revisions, annotations, elaborations, or other modifications
44
+ represent, as a whole, an original work of authorship. For the purposes
45
+ of this License, Derivative Works shall not include works that remain
46
+ separable from, or merely link (or bind by name) to the interfaces of,
47
+ the Work and Derivative Works thereof.
48
+
49
+ "Contribution" shall mean any work of authorship, including
50
+ the original version of the Work and any modifications or additions
51
+ to that Work or Derivative Works thereof, that is intentionally
52
+ submitted to Licensor for inclusion in the Work by the copyright owner
53
+ or by an individual or Legal Entity authorized to submit on behalf of
54
+ the copyright owner. For the purposes of this definition, "submitted"
55
+ means any form of electronic, verbal, or written communication sent
56
+ to the Licensor or its representatives, including but not limited to
57
+ communication on electronic mailing lists, source code control systems,
58
+ and issue tracking systems that are managed by, or on behalf of, the
59
+ Licensor for the purpose of discussing and improving the Work, but
60
+ excluding communication that is conspicuously marked or otherwise
61
+ designated in writing by the copyright owner as "Not a Contribution."
62
+
63
+ "Contributor" shall mean Licensor and any individual or Legal Entity
64
+ on behalf of whom a Contribution has been received by Licensor and
65
+ subsequently incorporated within the Work.
66
+
67
+ 2. Grant of Copyright License. Subject to the terms and conditions of
68
+ this License, each Contributor hereby grants to You a perpetual,
69
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70
+ copyright license to reproduce, prepare Derivative Works of,
71
+ publicly display, publicly perform, sublicense, and distribute the
72
+ Work and such Derivative Works in Source or Object form.
73
+
74
+ 3. Grant of Patent License. Subject to the terms and conditions of
75
+ this License, each Contributor hereby grants to You a perpetual,
76
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77
+ (except as stated in this section) patent license to make, have made,
78
+ use, offer to sell, sell, import, and otherwise transfer the Work,
79
+ where such license applies only to those patent claims licensable
80
+ by such Contributor that are necessarily infringed by their
81
+ Contribution(s) alone or by combination of their Contribution(s)
82
+ with the Work to which such Contribution(s) was submitted. If You
83
+ institute patent litigation against any entity (including a
84
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
85
+ or a Contribution incorporated within the Work constitutes direct
86
+ or contributory patent infringement, then any patent licenses
87
+ granted to You under this License for that Work shall terminate
88
+ as of the date such litigation is filed.
89
+
90
+ 4. Redistribution. You may reproduce and distribute copies of the
91
+ Work or Derivative Works thereof in any medium, with or without
92
+ modifications, and in Source or Object form, provided that You
93
+ meet the following conditions:
94
+
95
+ (a) You must give any other recipients of the Work or
96
+ Derivative Works a copy of this License; and
97
+
98
+ (b) You must cause any modified files to carry prominent notices
99
+ stating that You changed the files; and
100
+
101
+ (c) You must retain, in the Source form of any Derivative Works
102
+ that You distribute, all copyright, patent, trademark, and
103
+ attribution notices from the Source form of the Work,
104
+ excluding those notices that do not pertain to any part of
105
+ the Derivative Works; and
106
+
107
+ (d) If the Work includes a "NOTICE" text file as part of its
108
+ distribution, then any Derivative Works that You distribute must
109
+ include a readable copy of the attribution notices contained
110
+ within such NOTICE file, excluding those notices that do not
111
+ pertain to any part of the Derivative Works, in at least one
112
+ of the following places: within a NOTICE text file distributed
113
+ as part of the Derivative Works; within the Source form or
114
+ documentation, if provided along with the Derivative Works; or,
115
+ within a display generated by the Derivative Works, if and
116
+ wherever such third-party notices normally appear. The contents
117
+ of the NOTICE file are for informational purposes only and
118
+ do not modify the License. You may add Your own attribution
119
+ notices within Derivative Works that You distribute, alongside
120
+ or as an addendum to the NOTICE text from the Work, provided
121
+ that such additional attribution notices cannot be construed
122
+ as modifying the License.
123
+
124
+ You may add Your own copyright statement to Your modifications and
125
+ may provide additional or different license terms and conditions
126
+ for use, reproduction, or distribution of Your modifications, or
127
+ for any such Derivative Works as a whole, provided Your use,
128
+ reproduction, and distribution of the Work otherwise complies with
129
+ the conditions stated in this License.
130
+
131
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
132
+ any Contribution intentionally submitted for inclusion in the Work
133
+ by You to the Licensor shall be under the terms and conditions of
134
+ this License, without any additional terms or conditions.
135
+ Notwithstanding the above, nothing herein shall supersede or modify
136
+ the terms of any separate license agreement you may have executed
137
+ with Licensor regarding such Contributions.
138
+
139
+ 6. Trademarks. This License does not grant permission to use the trade
140
+ names, trademarks, service marks, or product names of the Licensor,
141
+ except as required for reasonable and customary use in describing the
142
+ origin of the Work and reproducing the content of the NOTICE file.
143
+
144
+ 7. Disclaimer of Warranty. Unless required by applicable law or
145
+ agreed to in writing, Licensor provides the Work (and each
146
+ Contributor provides its Contributions) on an "AS IS" BASIS,
147
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148
+ implied, including, without limitation, any warranties or conditions
149
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150
+ PARTICULAR PURPOSE. You are solely responsible for determining the
151
+ appropriateness of using or redistributing the Work and assume any
152
+ risks associated with Your exercise of permissions under this License.
153
+
154
+ 8. Limitation of Liability. In no event and under no legal theory,
155
+ whether in tort (including negligence), contract, or otherwise,
156
+ unless required by applicable law (such as deliberate and grossly
157
+ negligent acts) or agreed to in writing, shall any Contributor be
158
+ liable to You for damages, including any direct, indirect, special,
159
+ incidental, or consequential damages of any character arising as a
160
+ result of this License or out of the use or inability to use the
161
+ Work (including but not limited to damages for loss of goodwill,
162
+ work stoppage, computer failure or malfunction, or any and all
163
+ other commercial damages or losses), even if such Contributor
164
+ has been advised of the possibility of such damages.
165
+
166
+ 9. Accepting Warranty or Additional Liability. While redistributing
167
+ the Work or Derivative Works thereof, You may choose to offer,
168
+ and charge a fee for, acceptance of support, warranty, indemnity,
169
+ or other liability obligations and/or rights consistent with this
170
+ License. However, in accepting such obligations, You may act only
171
+ on Your own behalf and on Your sole responsibility, not on behalf
172
+ of any other Contributor, and only if You agree to indemnify,
173
+ defend, and hold each Contributor harmless for any liability
174
+ incurred by, or claims asserted against, such Contributor by reason
175
+ of your accepting any such warranty or additional liability.
176
+
177
+ END OF TERMS AND CONDITIONS
178
+
179
+ APPENDIX: How to apply the Apache License to your work.
180
+
181
+ To apply the Apache License to your work, attach the following
182
+ boilerplate notice, with the fields enclosed by brackets "[]"
183
+ replaced with your own identifying information. (Don't include
184
+ the brackets!) The text should be enclosed in the appropriate
185
+ comment syntax for the file format. We also recommend that a
186
+ file or class name and description of purpose be included on the
187
+ same "printed page" as the copyright notice for easier
188
+ identification within third-party archives.
189
+
190
+ Copyright [yyyy] [name of copyright owner]
191
+
192
+ Licensed under the Apache License, Version 2.0 (the "License");
193
+ you may not use this file except in compliance with the License.
194
+ You may obtain a copy of the License at
195
+
196
+ https://www.apache.org/licenses/LICENSE-2.0
197
+
198
+ Unless required by applicable law or agreed to in writing, software
199
+ distributed under the License is distributed on an "AS IS" BASIS,
200
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201
+ See the License for the specific language governing permissions and
202
+ limitations under the License.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/LICENSE.PSF ADDED
@@ -0,0 +1,41 @@
+ 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
+ the Individual or Organization ("Licensee") accessing and otherwise using Python
+ 2.7.12 software in source or binary form and its associated documentation.
+
+ 2. Subject to the terms and conditions of this License Agreement, PSF hereby
+ grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+ analyze, test, perform and/or display publicly, prepare derivative works,
+ distribute, and otherwise use Python 2.7.12 alone or in any derivative
+ version, provided, however, that PSF's License Agreement and PSF's notice of
+ copyright, i.e., "Copyright © 2001-2016 Python Software Foundation; All Rights
+ Reserved" are retained in Python 2.7.12 alone or in any derivative version
+ prepared by Licensee.
+
+ 3. In the event Licensee prepares a derivative work that is based on or
+ incorporates Python 2.7.12 or any part thereof, and wants to make the
+ derivative work available to others as provided herein, then Licensee hereby
+ agrees to include in any such work a brief summary of the changes made to Python
+ 2.7.12.
+
+ 4. PSF is making Python 2.7.12 available to Licensee on an "AS IS" basis.
+ PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
+ EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
+ WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
+ USE OF PYTHON 2.7.12 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
+
+ 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.7.12
+ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
+ MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.7.12, OR ANY DERIVATIVE
+ THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+ 6. This License Agreement will automatically terminate upon a material breach of
+ its terms and conditions.
+
+ 7. Nothing in this License Agreement shall be deemed to create any relationship
+ of agency, partnership, or joint venture between PSF and Licensee. This License
+ Agreement does not grant permission to use PSF trademarks or trade name in a
+ trademark sense to endorse or promote products or services of Licensee, or any
+ third party.
+
+ 8. By copying, installing or otherwise using Python 2.7.12, Licensee agrees
+ to be bound by the terms and conditions of this License Agreement.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/RECORD ADDED
@@ -0,0 +1,189 @@
1
+ cryptography-3.4.8.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ cryptography-3.4.8.dist-info/LICENSE,sha256=Q9rSzHUqtyHNmp827OcPtTq3cTVR8tPYaU2OjFoG1uI,323
3
+ cryptography-3.4.8.dist-info/LICENSE.APACHE,sha256=qsc7MUj20dcRHbyjIJn2jSbGRMaBOuHk8F9leaomY_4,11360
4
+ cryptography-3.4.8.dist-info/LICENSE.BSD,sha256=YCxMdILeZHndLpeTzaJ15eY9dz2s0eymiSMqtwCPtPs,1532
5
+ cryptography-3.4.8.dist-info/LICENSE.PSF,sha256=aT7ApmKzn5laTyUrA6YiKUVHDBtvEsoCkY5O_g32S58,2415
6
+ cryptography-3.4.8.dist-info/METADATA,sha256=YmATOFBmnPoAnlGZIf8XwOw2GGEcUv9QGgqgusVOqNs,5171
7
+ cryptography-3.4.8.dist-info/RECORD,,
8
+ cryptography-3.4.8.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ cryptography-3.4.8.dist-info/WHEEL,sha256=lZtnO0mZxqr35T145kHCmb-YnzMXkVWJhJ6jnbb7-XE,103
10
+ cryptography-3.4.8.dist-info/direct_url.json,sha256=id5KSvB64YamuPh3QnIux4QOgiFnFq41ltIFKfgnthc,108
11
+ cryptography-3.4.8.dist-info/top_level.txt,sha256=rR2wh6A6juD02TBZNJqqonh8x9UP9Sa5Z9Hl1pCPCiM,31
12
+ cryptography/__about__.py,sha256=Gma4uMyERDaqXMloHsN56Lo-XunkiH9-joZKZJPG5a8,805
13
+ cryptography/__init__.py,sha256=qZ9_96xJ8au-AKkdk2Kq60RKN7zGaim_8YY_rAy3_QY,511
14
+ cryptography/__pycache__/__about__.cpython-38.pyc,,
15
+ cryptography/__pycache__/__init__.cpython-38.pyc,,
16
+ cryptography/__pycache__/exceptions.cpython-38.pyc,,
17
+ cryptography/__pycache__/fernet.cpython-38.pyc,,
18
+ cryptography/__pycache__/utils.cpython-38.pyc,,
19
+ cryptography/exceptions.py,sha256=W25jw80RaAL0NOppZt48x1LSmgqaZqAObTtUExWCh3k,1194
20
+ cryptography/fernet.py,sha256=Kn_d3z5YFnFP2t9pbX9wpsm7nvlrY7oKO3XLthdstmg,6538
21
+ cryptography/hazmat/__init__.py,sha256=OYlvgprzULzZlsf3yYTsd6VUVyQmpsbHjgJdNnsyRwE,418
22
+ cryptography/hazmat/__pycache__/__init__.cpython-38.pyc,,
23
+ cryptography/hazmat/__pycache__/_der.cpython-38.pyc,,
24
+ cryptography/hazmat/__pycache__/_oid.cpython-38.pyc,,
25
+ cryptography/hazmat/__pycache__/_types.cpython-38.pyc,,
26
+ cryptography/hazmat/_der.py,sha256=1Kf4nwKRUt56KpG3a9Idgn0YFeUcnYecoN60p5oZRcA,5221
27
+ cryptography/hazmat/_oid.py,sha256=GVsyziASzIVcnAP_C7dx4czeI_VIccYu9GNV03rWjI0,2372
28
+ cryptography/hazmat/_types.py,sha256=TWd5Q_pS_iDOoUdP3MrYbNbPwwM2hSdONh7230eByto,646
29
+ cryptography/hazmat/backends/__init__.py,sha256=StVq0WWDbGTx0nsqMxVclREpGYp4j467m-k87xuDQRY,576
30
+ cryptography/hazmat/backends/__pycache__/__init__.cpython-38.pyc,,
31
+ cryptography/hazmat/backends/__pycache__/interfaces.cpython-38.pyc,,
32
+ cryptography/hazmat/backends/interfaces.py,sha256=7_PB6ZpxcRhPSXrZcseOy1u9nQcdb6jXpgf_FDliPQU,10472
33
+ cryptography/hazmat/backends/openssl/__init__.py,sha256=7rpz1Z3eV9vZy_d2iLrwC8Oz0vEruDFrjJlc6W2ZDXA,271
34
+ cryptography/hazmat/backends/openssl/__pycache__/__init__.cpython-38.pyc,,
35
+ cryptography/hazmat/backends/openssl/__pycache__/aead.cpython-38.pyc,,
36
+ cryptography/hazmat/backends/openssl/__pycache__/backend.cpython-38.pyc,,
37
+ cryptography/hazmat/backends/openssl/__pycache__/ciphers.cpython-38.pyc,,
38
+ cryptography/hazmat/backends/openssl/__pycache__/cmac.cpython-38.pyc,,
39
+ cryptography/hazmat/backends/openssl/__pycache__/decode_asn1.cpython-38.pyc,,
40
+ cryptography/hazmat/backends/openssl/__pycache__/dh.cpython-38.pyc,,
41
+ cryptography/hazmat/backends/openssl/__pycache__/dsa.cpython-38.pyc,,
42
+ cryptography/hazmat/backends/openssl/__pycache__/ec.cpython-38.pyc,,
43
+ cryptography/hazmat/backends/openssl/__pycache__/ed25519.cpython-38.pyc,,
44
+ cryptography/hazmat/backends/openssl/__pycache__/ed448.cpython-38.pyc,,
45
+ cryptography/hazmat/backends/openssl/__pycache__/encode_asn1.cpython-38.pyc,,
46
+ cryptography/hazmat/backends/openssl/__pycache__/hashes.cpython-38.pyc,,
47
+ cryptography/hazmat/backends/openssl/__pycache__/hmac.cpython-38.pyc,,
48
+ cryptography/hazmat/backends/openssl/__pycache__/ocsp.cpython-38.pyc,,
49
+ cryptography/hazmat/backends/openssl/__pycache__/poly1305.cpython-38.pyc,,
50
+ cryptography/hazmat/backends/openssl/__pycache__/rsa.cpython-38.pyc,,
51
+ cryptography/hazmat/backends/openssl/__pycache__/utils.cpython-38.pyc,,
52
+ cryptography/hazmat/backends/openssl/__pycache__/x25519.cpython-38.pyc,,
53
+ cryptography/hazmat/backends/openssl/__pycache__/x448.cpython-38.pyc,,
54
+ cryptography/hazmat/backends/openssl/__pycache__/x509.cpython-38.pyc,,
55
+ cryptography/hazmat/backends/openssl/aead.py,sha256=zt8ZQ-JethHblWEfwAnB5-09JIL9K8qU1NXwPTjeVYA,5700
56
+ cryptography/hazmat/backends/openssl/backend.py,sha256=HC-d83ZUru3Z11Q7UnjFuko8Jp-ZEHCjzkpocJEfctM,105287
57
+ cryptography/hazmat/backends/openssl/ciphers.py,sha256=fUn5DLrbhI_upLKMvU0aX2_An1dOX8T14PgdZXZr6hU,8611
58
+ cryptography/hazmat/backends/openssl/cmac.py,sha256=KXcwF1XlY0Ew6sTBqPj0I1vr62dfMwCjeV3qBosIw8s,2846
59
+ cryptography/hazmat/backends/openssl/decode_asn1.py,sha256=9s52X0DBtY4zSM0-nPze7A7nho3aM5nCbRa5T4bCvEU,32254
60
+ cryptography/hazmat/backends/openssl/dh.py,sha256=cVPA_PKT4BlT4OvHiJm5ZIDmxNeXBnWy2My4uz8wYpo,10565
61
+ cryptography/hazmat/backends/openssl/dsa.py,sha256=eyWzcpZggJuHLD4U3F9-neLyUqIoEN0MAiSwPIcEw2I,10684
62
+ cryptography/hazmat/backends/openssl/ec.py,sha256=AOKJntDH0-vRCH_BquHiC8RpkM4ENFv509IX7Myuong,13371
63
+ cryptography/hazmat/backends/openssl/ed25519.py,sha256=bSlMfJedRoyzZXoJeaehj_0H_j6Ye5doQHgnib602-Q,5789
64
+ cryptography/hazmat/backends/openssl/ed448.py,sha256=dpJf1zt_o8vfVcXYi_PD8d9H-jBbYEp-d6ZIYDKlC1s,5743
65
+ cryptography/hazmat/backends/openssl/encode_asn1.py,sha256=aiTahXPWVoG-e_0a8aSlE-OIosoT605P_SKZOpB-mJM,23988
66
+ cryptography/hazmat/backends/openssl/hashes.py,sha256=_XZc3glydVD88e0qoHqvOuQ_0xfl2sq0ywfZF4dH91s,3090
67
+ cryptography/hazmat/backends/openssl/hmac.py,sha256=ATz-rzSjGiRjL9_I5WJRO3R7QCiujd0izNqYrqPAHsA,2933
68
+ cryptography/hazmat/backends/openssl/ocsp.py,sha256=pV4Js2tyOcZPdeeNjFl835COi200yRTt-0PUx9MRGlY,14617
69
+ cryptography/hazmat/backends/openssl/poly1305.py,sha256=0hJDAb4pl9dJ_2xgt-XkNfyFA6U_IFXCe5jzOg7gkG0,2327
70
+ cryptography/hazmat/backends/openssl/rsa.py,sha256=3GaXjh3j2LwK4idwSHfaqxVMhhDPKftw8CerJDyRLmQ,20919
71
+ cryptography/hazmat/backends/openssl/utils.py,sha256=k3i_ARXsPvGTEtUUbnWkg9CkiJgPP4Y0VTTLtOEzEmU,2283
72
+ cryptography/hazmat/backends/openssl/x25519.py,sha256=kCnWzuchrJn1Nne4zeotKvlkMty9p3VuM8y1EWo70vQ,4622
73
+ cryptography/hazmat/backends/openssl/x448.py,sha256=8OKYMNXDR7UlViU3sNIH5qmLMGP7J-F3OeEaRK0aots,4141
74
+ cryptography/hazmat/backends/openssl/x509.py,sha256=mbiJfQrTu_G3jttY_FXRZvqZ8wkjiHcMiPsPlwVHyOg,22831
75
+ cryptography/hazmat/bindings/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
76
+ cryptography/hazmat/bindings/__pycache__/__init__.cpython-38.pyc,,
77
+ cryptography/hazmat/bindings/_openssl.abi3.so,sha256=PnatPMQXvFl3qIjIdZuEUvpwbX7xKE8920rex6Y2u6g,915208
78
+ cryptography/hazmat/bindings/_padding.abi3.so,sha256=onQk-dheo-36oyeufSX-XN2G_z0sbtrz-_qT5dd2iLk,16568
79
+ cryptography/hazmat/bindings/_rust.abi3.so,sha256=PdNgtLAf8-ZO9SLwuCwvQqoRh-6l4iiJOqQybltT5KU,1741984
80
+ cryptography/hazmat/bindings/openssl/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
81
+ cryptography/hazmat/bindings/openssl/__pycache__/__init__.cpython-38.pyc,,
82
+ cryptography/hazmat/bindings/openssl/__pycache__/_conditional.cpython-38.pyc,,
83
+ cryptography/hazmat/bindings/openssl/__pycache__/binding.cpython-38.pyc,,
84
+ cryptography/hazmat/bindings/openssl/_conditional.py,sha256=2yZw_Ekya_GKKWUMzUbj3yYrLFZQNproXx1N4HL7TbU,8251
85
+ cryptography/hazmat/bindings/openssl/binding.py,sha256=mIwnL3fICywOLt-iXZIvw2ijSaOIvdYs1Lwk2FUcxYs,5812
86
+ cryptography/hazmat/primitives/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
87
+ cryptography/hazmat/primitives/__pycache__/__init__.cpython-38.pyc,,
88
+ cryptography/hazmat/primitives/__pycache__/_asymmetric.cpython-38.pyc,,
89
+ cryptography/hazmat/primitives/__pycache__/_cipheralgorithm.cpython-38.pyc,,
90
+ cryptography/hazmat/primitives/__pycache__/_serialization.cpython-38.pyc,,
91
+ cryptography/hazmat/primitives/__pycache__/cmac.cpython-38.pyc,,
92
+ cryptography/hazmat/primitives/__pycache__/constant_time.cpython-38.pyc,,
93
+ cryptography/hazmat/primitives/__pycache__/hashes.cpython-38.pyc,,
94
+ cryptography/hazmat/primitives/__pycache__/hmac.cpython-38.pyc,,
95
+ cryptography/hazmat/primitives/__pycache__/keywrap.cpython-38.pyc,,
96
+ cryptography/hazmat/primitives/__pycache__/padding.cpython-38.pyc,,
97
+ cryptography/hazmat/primitives/__pycache__/poly1305.cpython-38.pyc,,
98
+ cryptography/hazmat/primitives/_asymmetric.py,sha256=nVJwmxkakirAXfFp410pC4kY_CinzN5FSJwhEn2IE34,485
99
+ cryptography/hazmat/primitives/_cipheralgorithm.py,sha256=sV8-SjhhY4WtHsaLI7e2x4o2cYAAqP8YWBjhC6k1u10,1000
100
+ cryptography/hazmat/primitives/_serialization.py,sha256=nl1g48RG17TWhegK8WKlBlXquMae_lmUSzgZnEqdwbU,1307
101
+ cryptography/hazmat/primitives/asymmetric/__init__.py,sha256=DwsPrun2J00dimo7mq73llEb-O-N4qaOwEx5SwQbleI,909
102
+ cryptography/hazmat/primitives/asymmetric/__pycache__/__init__.cpython-38.pyc,,
103
+ cryptography/hazmat/primitives/asymmetric/__pycache__/dh.cpython-38.pyc,,
104
+ cryptography/hazmat/primitives/asymmetric/__pycache__/dsa.cpython-38.pyc,,
105
+ cryptography/hazmat/primitives/asymmetric/__pycache__/ec.cpython-38.pyc,,
106
+ cryptography/hazmat/primitives/asymmetric/__pycache__/ed25519.cpython-38.pyc,,
107
+ cryptography/hazmat/primitives/asymmetric/__pycache__/ed448.cpython-38.pyc,,
108
+ cryptography/hazmat/primitives/asymmetric/__pycache__/padding.cpython-38.pyc,,
109
+ cryptography/hazmat/primitives/asymmetric/__pycache__/rsa.cpython-38.pyc,,
110
+ cryptography/hazmat/primitives/asymmetric/__pycache__/utils.cpython-38.pyc,,
111
+ cryptography/hazmat/primitives/asymmetric/__pycache__/x25519.cpython-38.pyc,,
112
+ cryptography/hazmat/primitives/asymmetric/__pycache__/x448.cpython-38.pyc,,
113
+ cryptography/hazmat/primitives/asymmetric/dh.py,sha256=dyNhMSOqPNPVuVtvpUNVwPiPHkeqFrKy6lYSPTn4VqI,6303
114
+ cryptography/hazmat/primitives/asymmetric/dsa.py,sha256=TdeZwnJq8ODqcoreu4jr1LFoFYtxA_z_6mhF8dYc5Yg,8116
115
+ cryptography/hazmat/primitives/asymmetric/ec.py,sha256=1e0IpF8SbzrKPbPD4BYTazOaVrVCXMd406x5hzlB3_0,14613
116
+ cryptography/hazmat/primitives/asymmetric/ed25519.py,sha256=Q42f1Cpnlt9UTSfh29T8xcdEgiNaiWr2Wic3sL_eJnk,2719
117
+ cryptography/hazmat/primitives/asymmetric/ed448.py,sha256=SmBsd5pf3RaJoVxETIAcXC_DB6YGsrJUOrWE1BPx3T0,2630
118
+ cryptography/hazmat/primitives/asymmetric/padding.py,sha256=ETdsTtHWSER0ZmTWoCVnWPkG9wvBIxGtal-e6xxl0i4,2115
119
+ cryptography/hazmat/primitives/asymmetric/rsa.py,sha256=Ekxr0B_O2IUre0kw_oIiLJNtx46ADqC6caypjI6d_0w,12004
120
+ cryptography/hazmat/primitives/asymmetric/utils.py,sha256=prIqN-UBc7RfOzFMgM8ON2s3DX8MrXeUlUH1LnmG8gg,1225
121
+ cryptography/hazmat/primitives/asymmetric/x25519.py,sha256=-nbaGlgT1sufO9Ic-urwKDql8Da0U3GL6hZJIMqHgVc,2588
122
+ cryptography/hazmat/primitives/asymmetric/x448.py,sha256=38mR8pqTBFWz5Emv9cQGlqtv_Qg37Bmrla0kRc2HmrU,2549
123
+ cryptography/hazmat/primitives/ciphers/__init__.py,sha256=njx_RoatYaxZD0rYhYGi84WQnTZkMSpK67UfWIqkQpE,582
124
+ cryptography/hazmat/primitives/ciphers/__pycache__/__init__.cpython-38.pyc,,
125
+ cryptography/hazmat/primitives/ciphers/__pycache__/aead.cpython-38.pyc,,
126
+ cryptography/hazmat/primitives/ciphers/__pycache__/algorithms.cpython-38.pyc,,
127
+ cryptography/hazmat/primitives/ciphers/__pycache__/base.cpython-38.pyc,,
128
+ cryptography/hazmat/primitives/ciphers/__pycache__/modes.cpython-38.pyc,,
129
+ cryptography/hazmat/primitives/ciphers/aead.py,sha256=eKzVH2mf-5aFSaBOG9JnJAAd7XBnf9w4BH2Uu2ZT01w,6833
130
+ cryptography/hazmat/primitives/ciphers/algorithms.py,sha256=EEJCTrUCe8iHN2O1f_bwR2UqhOemhi53-34WsQ6DddI,3829
131
+ cryptography/hazmat/primitives/ciphers/base.py,sha256=w8_AWJwX1PrWpvjeB-_RF3iobalR3Hu3HIMDOMr92c8,7164
132
+ cryptography/hazmat/primitives/ciphers/modes.py,sha256=mOnOgXyoD0N9NsSOkZvA8qMA3V5O7HubVwYiWVJvRFs,6549
133
+ cryptography/hazmat/primitives/cmac.py,sha256=Kkzk8VQHe-_cYeVab24S4ODMWJOZkC4bLWLvCoMWyvQ,2158
134
+ cryptography/hazmat/primitives/constant_time.py,sha256=6bkW00QjhKusdgsQbexXhMlGX0XRN59XNmxWS2W38NA,387
135
+ cryptography/hazmat/primitives/hashes.py,sha256=cLNJcKKsI8E6ZhENKkppsJ_8S6W97y0tHzXa-ABBhtY,6051
136
+ cryptography/hazmat/primitives/hmac.py,sha256=rhrLt6LwlzbIvnqpmOQVT6L_4Xd9xBsUBunPCkHcvWs,2332
137
+ cryptography/hazmat/primitives/kdf/__init__.py,sha256=DcZhzfLG8d8IYBH771lGTVU5S87OQDpu3nrfOwZnsmA,715
138
+ cryptography/hazmat/primitives/kdf/__pycache__/__init__.cpython-38.pyc,,
139
+ cryptography/hazmat/primitives/kdf/__pycache__/concatkdf.cpython-38.pyc,,
140
+ cryptography/hazmat/primitives/kdf/__pycache__/hkdf.cpython-38.pyc,,
141
+ cryptography/hazmat/primitives/kdf/__pycache__/kbkdf.cpython-38.pyc,,
142
+ cryptography/hazmat/primitives/kdf/__pycache__/pbkdf2.cpython-38.pyc,,
143
+ cryptography/hazmat/primitives/kdf/__pycache__/scrypt.cpython-38.pyc,,
144
+ cryptography/hazmat/primitives/kdf/__pycache__/x963kdf.cpython-38.pyc,,
145
+ cryptography/hazmat/primitives/kdf/concatkdf.py,sha256=F9wepne-IRmhTZ9J4H_XLDI0Rl8LccY6wvhVA0jQ4Tc,4576
146
+ cryptography/hazmat/primitives/kdf/hkdf.py,sha256=doR70wjOcA56hxhhQtV2M-ekajjjr5hoT5F8KMxoZdo,3807
147
+ cryptography/hazmat/primitives/kdf/kbkdf.py,sha256=teuWbRvCZShWiRnv0eg-sXrxm-g7Ss02Ulb3vVbzPvc,5195
148
+ cryptography/hazmat/primitives/kdf/pbkdf2.py,sha256=4HaLcppspYe8od6vur0E408qYgQPjJKtI9kDrWesIdo,2261
149
+ cryptography/hazmat/primitives/kdf/scrypt.py,sha256=vCMYGRp-Q--9DxiDQHbkVVRXkhrQTR0qkC0LriV6Hy8,2248
150
+ cryptography/hazmat/primitives/kdf/x963kdf.py,sha256=N5-2KOA2Z-7kAxjhhU5quNcRpmThyQC5dhU-Cw95jWk,2458
151
+ cryptography/hazmat/primitives/keywrap.py,sha256=ibpVZ19OGcoEVrSE7cizdoMDdRDaqcATeVRK5_4MCO4,5927
152
+ cryptography/hazmat/primitives/padding.py,sha256=PYlgTNHZUYROnQZ1oeeqKm1WyzkqLlwIpRUgdASHOG8,6193
153
+ cryptography/hazmat/primitives/poly1305.py,sha256=_Dtv6oCMn94rAhQ6pjie9mO_MiDLVL5It3Z5sdpCU3c,1711
154
+ cryptography/hazmat/primitives/serialization/__init__.py,sha256=RALEthF7wRjlMyTvSq09XmKQey74tsSdDCCsDaD6yQU,1129
155
+ cryptography/hazmat/primitives/serialization/__pycache__/__init__.cpython-38.pyc,,
156
+ cryptography/hazmat/primitives/serialization/__pycache__/base.cpython-38.pyc,,
157
+ cryptography/hazmat/primitives/serialization/__pycache__/pkcs12.cpython-38.pyc,,
158
+ cryptography/hazmat/primitives/serialization/__pycache__/pkcs7.cpython-38.pyc,,
159
+ cryptography/hazmat/primitives/serialization/__pycache__/ssh.cpython-38.pyc,,
160
+ cryptography/hazmat/primitives/serialization/base.py,sha256=OYqk2UnIR5IAKP1QRNifhoQw-HX3etcWudn3W2JVIyg,1440
161
+ cryptography/hazmat/primitives/serialization/pkcs12.py,sha256=JuWr5Vqz6zEpjh3j7ME1SCk3TFDNhONjQds_Se7XpFg,2270
162
+ cryptography/hazmat/primitives/serialization/pkcs7.py,sha256=CsmnGEbtLKm2o6D7h_a-EvHQOfwlHxrV96VkjnrNX7s,5223
163
+ cryptography/hazmat/primitives/serialization/ssh.py,sha256=doX0irj_Q1wd1N_JU-Xic_5zUkMH_zZKcQUUOB-axGk,22293
164
+ cryptography/hazmat/primitives/twofactor/__init__.py,sha256=ZHo4zwWidFP2RWFl8luiNuYkVMZPghzx54izPNSCtD4,222
165
+ cryptography/hazmat/primitives/twofactor/__pycache__/__init__.cpython-38.pyc,,
166
+ cryptography/hazmat/primitives/twofactor/__pycache__/hotp.cpython-38.pyc,,
167
+ cryptography/hazmat/primitives/twofactor/__pycache__/totp.cpython-38.pyc,,
168
+ cryptography/hazmat/primitives/twofactor/__pycache__/utils.cpython-38.pyc,,
169
+ cryptography/hazmat/primitives/twofactor/hotp.py,sha256=JXph-N0S8CDM-laRoV_G-Welhn7PvcpgXTxRbp_yEjk,2826
170
+ cryptography/hazmat/primitives/twofactor/totp.py,sha256=2GTFsdUdA585-N_sqfPhlBBWDY-ExaH1HKH1p3XPWmk,1912
171
+ cryptography/hazmat/primitives/twofactor/utils.py,sha256=8TG5oyaz8CxHCXqqh26iAny9w_W1e9SgVdCZaeEzOwU,982
172
+ cryptography/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
173
+ cryptography/utils.py,sha256=dyYUz2jr1tTsYQ3SaX3_cBYu720kopdatNy_83L1Mkc,4861
174
+ cryptography/x509/__init__.py,sha256=4_Xsv7yVMCGbpIbSgc4SPxDX-3Mn83gN07Us1PAM_eA,7634
175
+ cryptography/x509/__pycache__/__init__.cpython-38.pyc,,
176
+ cryptography/x509/__pycache__/base.cpython-38.pyc,,
177
+ cryptography/x509/__pycache__/certificate_transparency.cpython-38.pyc,,
178
+ cryptography/x509/__pycache__/extensions.cpython-38.pyc,,
179
+ cryptography/x509/__pycache__/general_name.cpython-38.pyc,,
180
+ cryptography/x509/__pycache__/name.cpython-38.pyc,,
181
+ cryptography/x509/__pycache__/ocsp.cpython-38.pyc,,
182
+ cryptography/x509/__pycache__/oid.cpython-38.pyc,,
183
+ cryptography/x509/base.py,sha256=duSe4bIuBiJ5g2NC8-VSxDfqHZ0CEEcXZKhcBGq-eeA,28193
184
+ cryptography/x509/certificate_transparency.py,sha256=rzJvxd1FVfc5gOjUT-T2VF5vcOC597UrrI_5JJwZprI,979
185
+ cryptography/x509/extensions.py,sha256=M-n_8gEjO5_03ufGHoK_6w8YSSiNyWvHUJ5Kgq5zoN4,54019
186
+ cryptography/x509/general_name.py,sha256=5dld2ktZnCEg3l14UyKk6DSlzFHXlc6WxW5J8R8Mk-Q,8161
187
+ cryptography/x509/name.py,sha256=PpRua5nWFLZtOg77XdaybGVNspO8ZvQ7ddNDn203vys,8529
188
+ cryptography/x509/ocsp.py,sha256=ERB5osTWbNieLj945Xoq0NjBkzqodo_WBL7ORaC2fDg,14738
189
+ cryptography/x509/oid.py,sha256=1PxP9Pr_lh77zqyvTJefeRozK3VYaRlNmWfYfDWr2Ak,12619
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: bdist_wheel (0.37.0)
+ Root-Is-Purelib: false
+ Tag: cp38-cp38-linux_x86_64
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/cryptography-3.4.8.dist-info/top_level.txt ADDED
@@ -0,0 +1,3 @@
+ _openssl
+ _padding
+ cryptography
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docker_pycreds-0.4.0.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/__init__.py ADDED
@@ -0,0 +1,4 @@
+ # flake8: noqa
+ from .store import Store
+ from .errors import StoreError, CredentialsNotFound
+ from .constants import *
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/constants.py ADDED
@@ -0,0 +1,4 @@
+ PROGRAM_PREFIX = 'docker-credential-'
+ DEFAULT_LINUX_STORE = 'secretservice'
+ DEFAULT_OSX_STORE = 'osxkeychain'
+ DEFAULT_WIN32_STORE = 'wincred'
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/errors.py ADDED
@@ -0,0 +1,25 @@
+ class StoreError(RuntimeError):
+     pass
+
+
+ class CredentialsNotFound(StoreError):
+     pass
+
+
+ class InitializationError(StoreError):
+     pass
+
+
+ def process_store_error(cpe, program):
+     message = cpe.output.decode('utf-8')
+     if 'credentials not found in native keychain' in message:
+         return CredentialsNotFound(
+             'No matching credentials in {0}'.format(
+                 program
+             )
+         )
+     return StoreError(
+         'Credentials store {0} exited with "{1}".'.format(
+             program, cpe.output.decode('utf-8').strip()
+         )
+     )
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/store.py ADDED
@@ -0,0 +1,107 @@
+ import json
+ import os
+ import subprocess
+
+ import six
+
+ from . import constants
+ from . import errors
+ from .utils import create_environment_dict
+ from .utils import find_executable
+
+
+ class Store(object):
+     def __init__(self, program, environment=None):
+         """ Create a store object that acts as an interface to
+             perform the basic operations for storing, retrieving
+             and erasing credentials using `program`.
+         """
+         self.program = constants.PROGRAM_PREFIX + program
+         self.exe = find_executable(self.program)
+         self.environment = environment
+         if self.exe is None:
+             raise errors.InitializationError(
+                 '{0} not installed or not available in PATH'.format(
+                     self.program
+                 )
+             )
+
+     def get(self, server):
+         """ Retrieve credentials for `server`. If no credentials are found,
+             a `StoreError` will be raised.
+         """
+         if not isinstance(server, six.binary_type):
+             server = server.encode('utf-8')
+         data = self._execute('get', server)
+         result = json.loads(data.decode('utf-8'))
+
+         # docker-credential-pass will return an object for inexistent servers
+         # whereas other helpers will exit with returncode != 0. For
+         # consistency, if no significant data is returned,
+         # raise CredentialsNotFound
+         if result['Username'] == '' and result['Secret'] == '':
+             raise errors.CredentialsNotFound(
+                 'No matching credentials in {}'.format(self.program)
+             )
+
+         return result
+
+     def store(self, server, username, secret):
+         """ Store credentials for `server`. Raises a `StoreError` if an error
+             occurs.
+         """
+         data_input = json.dumps({
+             'ServerURL': server,
+             'Username': username,
+             'Secret': secret
+         }).encode('utf-8')
+         return self._execute('store', data_input)
+
+     def erase(self, server):
+         """ Erase credentials for `server`. Raises a `StoreError` if an error
+             occurs.
+         """
+         if not isinstance(server, six.binary_type):
+             server = server.encode('utf-8')
+         self._execute('erase', server)
+
+     def list(self):
+         """ List stored credentials. Requires v0.4.0+ of the helper.
+         """
+         data = self._execute('list', None)
+         return json.loads(data.decode('utf-8'))
+
+     def _execute(self, subcmd, data_input):
+         output = None
+         env = create_environment_dict(self.environment)
+         try:
+             if six.PY3:
+                 output = subprocess.check_output(
+                     [self.exe, subcmd], input=data_input, env=env,
+                 )
+             else:
+                 process = subprocess.Popen(
+                     [self.exe, subcmd], stdin=subprocess.PIPE,
+                     stdout=subprocess.PIPE, env=env,
+                 )
+                 output, err = process.communicate(data_input)
+                 if process.returncode != 0:
+                     raise subprocess.CalledProcessError(
+                         returncode=process.returncode, cmd='', output=output
+                     )
+         except subprocess.CalledProcessError as e:
+             raise errors.process_store_error(e, self.program)
+         except OSError as e:
+             if e.errno == os.errno.ENOENT:
+                 raise errors.StoreError(
+                     '{0} not installed or not available in PATH'.format(
+                         self.program
+                     )
+                 )
+             else:
+                 raise errors.StoreError(
+                     'Unexpected OS error "{0}", errno={1}'.format(
+                         e.strerror, e.errno
+                     )
+                 )
+         return output
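
Note: the `Store` class added above is a thin wrapper around an external `docker-credential-*` helper binary. A minimal usage sketch, assuming a helper such as `docker-credential-secretservice` is installed and on PATH (the registry URL and credential values below are illustrative, not part of the commit):

```python
# Minimal sketch of the Store API shown in the diff above.
# Assumes docker-credential-secretservice is installed and on PATH.
from dockerpycreds import Store, CredentialsNotFound

store = Store('secretservice')  # resolves to docker-credential-secretservice

# Save credentials for a registry (illustrative values).
store.store(server='https://index.docker.io/v1/',
            username='example-user', secret='example-token')

# Read them back; a missing entry raises CredentialsNotFound.
try:
    creds = store.get('https://index.docker.io/v1/')
    print(creds['Username'])
except CredentialsNotFound:
    print('no credentials stored for this registry')

# Remove them again.
store.erase('https://index.docker.io/v1/')
```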
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/utils.py ADDED
@@ -0,0 +1,38 @@
+ import distutils.spawn
+ import os
+ import sys
+
+
+ def find_executable(executable, path=None):
+     """
+     As distutils.spawn.find_executable, but on Windows, look up
+     every extension declared in PATHEXT instead of just `.exe`
+     """
+     if sys.platform != 'win32':
+         return distutils.spawn.find_executable(executable, path)
+
+     if path is None:
+         path = os.environ['PATH']
+
+     paths = path.split(os.pathsep)
+     extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
+     base, ext = os.path.splitext(executable)
+
+     if not os.path.isfile(executable):
+         for p in paths:
+             for ext in extensions:
+                 f = os.path.join(p, base + ext)
+                 if os.path.isfile(f):
+                     return f
+         return None
+     else:
+         return executable
+
+
+ def create_environment_dict(overrides):
+     """
+     Create and return a copy of os.environ with the specified overrides
+     """
+     result = os.environ.copy()
+     result.update(overrides or {})
+     return result
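
Note: these two helpers are what `Store.__init__` and `Store._execute` rely on. A small illustrative sketch (the helper name below is an assumption, not taken from the commit):

```python
# Illustrative only: locate a credential helper and build the environment
# passed to it, mirroring what Store does internally.
from dockerpycreds.utils import find_executable, create_environment_dict

exe = find_executable('docker-credential-secretservice')  # None if not on PATH
env = create_environment_dict({'LANG': 'C'})  # copy of os.environ plus overrides
print(exe, env.get('LANG'))
```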
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/dockerpycreds/version.py ADDED
@@ -0,0 +1,2 @@
+ version = "0.4.0"
+ version_info = tuple([int(d) for d in version.split("-")[0].split(".")])
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2018 Alex Rogozhnikov
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/METADATA ADDED
@@ -0,0 +1,309 @@
1
+ Metadata-Version: 2.1
2
+ Name: einops
3
+ Version: 0.4.1
4
+ Summary: A new flavour of deep learning operations
5
+ Home-page: https://github.com/arogozhnikov/einops
6
+ Author: Alex Rogozhnikov
7
+ License: UNKNOWN
8
+ Keywords: deep learning,neural networks,tensor manipulation,machine learning,scientific computations,einops
9
+ Platform: UNKNOWN
10
+ Classifier: Intended Audience :: Science/Research
11
+ Classifier: Programming Language :: Python :: 3
12
+ Description-Content-Type: text/markdown
13
+ License-File: LICENSE
14
+
15
+ <!-- this link magically rendered as video, unfortunately not in docs -->
16
+
17
+ <!-- https://user-images.githubusercontent.com/6318811/116849688-0ca41c00-aba4-11eb-8ccf-74744f6cbc23.mp4 -->
18
+
19
+ <a href='http://arogozhnikov.github.io/images/einops/einops_video.mp4' >
20
+ <div align="center">
21
+ <img src="http://arogozhnikov.github.io/images/einops/einops_video.gif" alt="einops package examples" />
22
+ <br>
23
+ <small><a href='http://arogozhnikov.github.io/images/einops/einops_video.mp4'>This video in high quality (mp4)</a></small>
24
+ <br><br>
25
+ </div>
26
+ </a>
27
+
28
+ # einops
29
+ [![Run tests](https://github.com/arogozhnikov/einops/actions/workflows/run_tests.yml/badge.svg)](https://github.com/arogozhnikov/einops/actions/workflows/run_tests.yml)
30
+ [![PyPI version](https://badge.fury.io/py/einops.svg)](https://badge.fury.io/py/einops)
31
+ [![Documentation](https://img.shields.io/badge/documentation-link-blue.svg)](https://einops.rocks/)
32
+ ![Supported python versions](https://raw.githubusercontent.com/arogozhnikov/einops/master/docs/resources/python_badge.svg)
33
+
34
+
35
+ Flexible and powerful tensor operations for readable and reliable code.
36
+ Supports numpy, pytorch, tensorflow, jax, and [others](#supported-frameworks).
37
+
38
+ ## Recent updates:
39
+
40
+ - torch.jit.script is supported for pytorch layers
41
+ - powerful EinMix added to einops. [Einmix tutorial notebook](https://github.com/arogozhnikov/einops/blob/master/docs/3-einmix-layer.ipynb)
42
+
43
+ <!--<div align="center">
44
+ <img src="http://arogozhnikov.github.io/images/einops/einops_logo_350x350.png"
45
+ alt="einops package logo" width="250" height="250" />
46
+ <br><br>
47
+ </div> -->
48
+
49
+ ## Tweets
50
+
51
+ > In case you need convincing arguments for setting aside time to learn about einsum and einops...
52
+ [Tim Rocktäschel, FAIR](https://twitter.com/_rockt/status/1230818967205425152)
53
+
54
+ > Writing better code with PyTorch and einops 👌
55
+ [Andrej Karpathy, AI at Tesla](https://twitter.com/karpathy/status/1290826075916779520)
56
+
57
+ > Slowly but surely, einops is seeping in to every nook and cranny of my code. If you find yourself shuffling around bazillion dimensional tensors, this might change your life
58
+ [Nasim Rahaman, MILA (Montreal)](https://twitter.com/nasim_rahaman/status/1216022614755463169)
59
+
60
+ [More testimonials](https://einops.rocks/pages/testimonials/)
61
+
62
+ ## Contents
63
+
64
+ - [Installation](#Installation)
65
+ - [Documentation](https://einops.rocks/)
66
+ - [Tutorial](#Tutorials)
67
+ - [API micro-reference](#API)
68
+ - [Why using einops](#Why-using-einops-notation)
69
+ - [Supported frameworks](#Supported-frameworks)
70
+ - [Contributing](#Contributing)
71
+ - [Repository](https://github.com/arogozhnikov/einops) and [discussions](https://github.com/arogozhnikov/einops/discussions)
72
+
73
+ ## Installation <a name="Installation"></a>
74
+
75
+ Plain and simple:
76
+ ```bash
77
+ pip install einops
78
+ ```
79
+
80
+ <!--
81
+ `einops` has no mandatory dependencies (code examples also require jupyter, pillow + backends).
82
+ To obtain the latest github version
83
+
84
+ ```bash
85
+ pip install https://github.com/arogozhnikov/einops/archive/master.zip
86
+ ```
87
+ -->
88
+
89
+ ## Tutorials <a name="Tutorials"></a>
90
+
91
+ Tutorials are the most convenient way to see `einops` in action
92
+
93
+ - part 1: [einops fundamentals](https://github.com/arogozhnikov/einops/blob/master/docs/1-einops-basics.ipynb)
94
+ - part 2: [einops for deep learning](https://github.com/arogozhnikov/einops/blob/master/docs/2-einops-for-deep-learning.ipynb)
95
+ - part 3: [improve pytorch code with einops](https://arogozhnikov.github.io/einops/pytorch-examples.html)
96
+
97
+
98
+ ## API <a name="API"></a>
99
+
100
+ `einops` has a minimalistic yet powerful API.
101
+
102
+ Three operations provided ([einops tutorial](https://github.com/arogozhnikov/einops/blob/master/docs/)
103
+ shows those cover stacking, reshape, transposition, squeeze/unsqueeze, repeat, tile, concatenate, view and numerous reductions)
104
+
105
+ ```python
106
+ from einops import rearrange, reduce, repeat
107
+ # rearrange elements according to the pattern
108
+ output_tensor = rearrange(input_tensor, 't b c -> b c t')
109
+ # combine rearrangement and reduction
110
+ output_tensor = reduce(input_tensor, 'b c (h h2) (w w2) -> b h w c', 'mean', h2=2, w2=2)
111
+ # copy along a new axis
112
+ output_tensor = repeat(input_tensor, 'h w -> h w c', c=3)
113
+ ```
114
+ And two corresponding layers (`einops` keeps a separate version for each framework) with the same API.
115
+
116
+ ```python
117
+ from einops.layers.chainer import Rearrange, Reduce
118
+ from einops.layers.gluon import Rearrange, Reduce
119
+ from einops.layers.keras import Rearrange, Reduce
120
+ from einops.layers.torch import Rearrange, Reduce
121
+ from einops.layers.tensorflow import Rearrange, Reduce
122
+ ```
123
+
124
+ Layers behave similarly to operations and have the same parameters
125
+ (with the exception of the first argument, which is passed during call)
126
+
127
+ ```python
128
+ layer = Rearrange(pattern, **axes_lengths)
129
+ layer = Reduce(pattern, reduction, **axes_lengths)
130
+
131
+ # apply created layer to a tensor / variable
132
+ x = layer(x)
133
+ ```
134
+
135
+ Example of using layers within a model:
136
+ ```python
137
+ # example given for pytorch, but code in other frameworks is almost identical
138
+ from torch.nn import Sequential, Conv2d, MaxPool2d, Linear, ReLU
139
+ from einops.layers.torch import Rearrange
140
+
141
+ model = Sequential(
142
+ Conv2d(3, 6, kernel_size=5),
143
+ MaxPool2d(kernel_size=2),
144
+ Conv2d(6, 16, kernel_size=5),
145
+ MaxPool2d(kernel_size=2),
146
+ # flattening
147
+ Rearrange('b c h w -> b (c h w)'),
148
+ Linear(16*5*5, 120),
149
+ ReLU(),
150
+ Linear(120, 10),
151
+ )
152
+ ```
153
+
154
+ <!---
155
+ Additionally two auxiliary functions provided
156
+ ```python
157
+ from einops import asnumpy, parse_shape
158
+ # einops.asnumpy converts tensors of imperative frameworks to numpy
159
+ numpy_tensor = asnumpy(input_tensor)
160
+ # einops.parse_shape gives a shape of axes of interest
161
+ parse_shape(input_tensor, 'batch _ h w') # e.g {'batch': 64, 'h': 128, 'w': 160}
162
+ ```
163
+ -->
164
+
165
+ ## Naming <a name="Naming"></a>
166
+
167
+ `einops` stands for Einstein-Inspired Notation for operations
168
+ (though "Einstein operations" is more attractive and easier to remember).
169
+
170
+ The notation was loosely inspired by Einstein summation (in particular by the `numpy.einsum` operation).
171
+
172
+ ## Why use `einops` notation?! <a name="Why-using-einops-notation"></a>
173
+
174
+
175
+ ### Semantic information (being verbose in expectations)
176
+
177
+ ```python
178
+ y = x.view(x.shape[0], -1)
179
+ y = rearrange(x, 'b c h w -> b (c h w)')
180
+ ```
181
+ While these two lines are doing the same job in *some* context,
182
+ the second one provides information about the input and output.
183
+ In other words, `einops` focuses on interface: *what is the input and output*, not *how* the output is computed.
184
+
185
+ The next operation looks similar:
186
+
187
+ ```python
188
+ y = rearrange(x, 'time c h w -> time (c h w)')
189
+ ```
190
+ but it gives the reader a hint:
191
+ this is not an independent batch of images we are processing,
192
+ but rather a sequence (video).
193
+
194
+ Semantic information makes the code easier to read and maintain.
195
+
196
+ ### Convenient checks
197
+
198
+ Reconsider the same example:
199
+
200
+ ```python
201
+ y = x.view(x.shape[0], -1) # x: (batch, 256, 19, 19)
202
+ y = rearrange(x, 'b c h w -> b (c h w)')
203
+ ```
204
+ The second line checks that the input has four dimensions,
205
+ but you can also specify particular dimensions.
206
+ That's opposed to just writing comments about shapes, since, as we know,
208
+ [comments don't work and don't prevent mistakes](https://medium.freecodecamp.org/code-comments-the-good-the-bad-and-the-ugly-be9cc65fbf83):
209
+
209
+ ```python
210
+ y = x.view(x.shape[0], -1) # x: (batch, 256, 19, 19)
211
+ y = rearrange(x, 'b c h w -> b (c h w)', c=256, h=19, w=19)
212
+ ```
213
+
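+ As a quick sketch (not part of the original example; the batch size and the mismatched value below are made up), the check fails loudly at the call site instead of producing a silently wrong shape:
+
+ ```python
+ import numpy as np
+ from einops import rearrange
+
+ x = np.zeros((10, 256, 19, 19))                               # (batch, c, h, w)
+ y = rearrange(x, 'b c h w -> b (c h w)', c=256, h=19, w=19)   # check passes
+
+ # a mismatch is reported immediately:
+ # rearrange(x, 'b c h w -> b (c h w)', c=512)  # raises an einops error
+ ```
+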
214
+ ### Result is strictly determined
215
+
216
+ Below we have at least two ways to define the depth-to-space operation
217
+ ```python
218
+ # depth-to-space
219
+ rearrange(x, 'b c (h h2) (w w2) -> b (c h2 w2) h w', h2=2, w2=2)
220
+ rearrange(x, 'b c (h h2) (w w2) -> b (h2 w2 c) h w', h2=2, w2=2)
221
+ ```
222
+ There are at least four more ways to do it. Which one is used by the framework?
223
+
224
+ Frameworks leave these details unspecified, since *usually* it makes no difference,
225
+ but it can make a big difference (e.g. if you use grouped convolutions in the next stage),
226
+ and you'd like to specify this in your code.
227
+
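+ To make the point concrete, here is a small illustrative sketch (the array below is arbitrary): the two patterns above give tensors of the same shape but with different channel ordering, and the pattern records which one you meant.
+
+ ```python
+ import numpy as np
+ from einops import rearrange
+
+ x = np.arange(16).reshape(1, 4, 2, 2)
+ a = rearrange(x, 'b c (h h2) (w w2) -> b (c h2 w2) h w', h2=2, w2=2)
+ b = rearrange(x, 'b c (h h2) (w w2) -> b (h2 w2 c) h w', h2=2, w2=2)
+
+ assert a.shape == b.shape == (1, 16, 1, 1)
+ assert not np.array_equal(a, b)  # same shape, different element order
+ ```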
228
+
229
+ ### Uniformity
230
+
231
+ ```python
232
+ reduce(x, 'b c (x dx) -> b c x', 'max', dx=2)
233
+ reduce(x, 'b c (x dx) (y dy) -> b c x y', 'max', dx=2, dy=3)
234
+ reduce(x, 'b c (x dx) (y dy) (z dz) -> b c x y z', 'max', dx=2, dy=3, dz=4)
235
+ ```
236
+ These examples demonstrate that we don't need separate operations for 1d/2d/3d pooling;
237
+ they are all defined in a uniform way.
238
+
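+ For instance, a minimal sketch of the 2d case above (the input shape is made up for illustration):
+
+ ```python
+ import numpy as np
+ from einops import reduce
+
+ x = np.random.rand(10, 3, 32, 64)                                # (batch, channels, h, w)
+ y = reduce(x, 'b c (x dx) (y dy) -> b c x y', 'max', dx=2, dy=2)  # 2d max-pooling
+ assert y.shape == (10, 3, 16, 32)
+ ```
+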
239
+ Space-to-depth and depth-to-space are defined in many frameworks, but how about width-to-height? Here you go:
240
+
241
+ ```python
242
+ rearrange(x, 'b c h (w w2) -> b c (h w2) w', w2=2)
243
+ ```
244
+
245
+ ### Framework independent behavior
246
+
247
+ Even simple functions are defined differently by different frameworks
248
+
249
+ ```python
250
+ y = x.flatten() # or flatten(x)
251
+ ```
252
+
253
+ Suppose `x` has shape `(3, 4, 5)`; then `y` has shape ...
254
+
255
+ - numpy, cupy, chainer, pytorch: `(60,)`
256
+ - keras, tensorflow.layers, mxnet and gluon: `(3, 20)`
257
+
258
+ `einops` works the same way in all frameworks.
259
+
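+ With an explicit pattern there is nothing to guess; a short sketch (numpy is used here only for illustration, the same patterns work for tensors from the other backends):
+
+ ```python
+ import numpy as np
+ from einops import rearrange
+
+ x = np.zeros((3, 4, 5))
+ rearrange(x, 'a b c -> (a b c)').shape  # (60,)   full flatten
+ rearrange(x, 'a b c -> a (b c)').shape  # (3, 20) keep the first axis
+ ```
+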
260
+ ### Independence of framework terminology
261
+
262
+ Example: `tile` vs `repeat` causes a lot of confusion. To copy an image along its width:
263
+ ```python
264
+ np.tile(image, (1, 2)) # in numpy
265
+ image.repeat(1, 2) # pytorch's repeat ~ numpy's tile
266
+ ```
267
+
268
+ With einops you don't need to decipher which axis was repeated:
269
+ ```python
270
+ repeat(image, 'h w -> h (tile w)', tile=2) # in numpy
271
+ repeat(image, 'h w -> h (tile w)', tile=2) # in pytorch
272
+ repeat(image, 'h w -> h (tile w)', tile=2) # in tf
273
+ repeat(image, 'h w -> h (tile w)', tile=2) # in jax
274
+ repeat(image, 'h w -> h (tile w)', tile=2) # in mxnet
275
+ ... (etc.)
276
+ ```
277
+
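+ A small sketch of how the pattern itself states which axis gets copied (the sizes are made up for illustration):
+
+ ```python
+ import numpy as np
+ from einops import repeat
+
+ image = np.random.rand(30, 40)
+ wide = repeat(image, 'h w -> h (tile w)', tile=2)  # (30, 80): copied along width
+ tall = repeat(image, 'h w -> (tile h) w', tile=2)  # (60, 40): copied along height
+ ```
+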
278
+ Testimonials provide users' perspectives on the same question.
279
+
280
+ ## Supported frameworks <a name="Supported-frameworks"></a>
281
+
282
+ Einops works with ...
283
+
284
+ - [numpy](http://www.numpy.org/)
285
+ - [pytorch](https://pytorch.org/)
286
+ - [tensorflow](https://www.tensorflow.org/)
287
+ - [jax](https://github.com/google/jax)
288
+ - [cupy](https://cupy.chainer.org/)
289
+ - [chainer](https://chainer.org/)
290
+ - [gluon](https://gluon.mxnet.io/)
291
+ - [tf.keras](https://www.tensorflow.org/guide/keras)
292
+ - [mxnet](https://mxnet.apache.org/) (experimental)
293
+
294
+
295
+ ## Contributing <a name="Contributing"></a>
296
+
297
+ Best ways to contribute are
298
+
299
+ - spread the word about `einops`
300
+ - if you like explaining things, more tutorials/tear-downs of implementations are welcome
301
+ - tutorials in other languages are very welcome
302
+ - do you have a project or code example to share? Let me know in github discussions
303
+ - use `einops` in your papers!
304
+
305
+ ## Supported python versions
306
+
307
+ `einops` works with python 3.6 or later.
308
+
309
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/RECORD ADDED
@@ -0,0 +1,31 @@
1
+ einops-0.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ einops-0.4.1.dist-info/LICENSE,sha256=MNmENkKW9R_67K1LAe4SfpUlDFBokY1LZvyWIGcj5DQ,1073
3
+ einops-0.4.1.dist-info/METADATA,sha256=UdOBa4tijnwPJI48dGASJt4-czHTJ4LLiY4dfdRXffI,10737
4
+ einops-0.4.1.dist-info/RECORD,,
5
+ einops-0.4.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ einops-0.4.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
7
+ einops-0.4.1.dist-info/top_level.txt,sha256=zh9ckJ4QUP-fUBSO5-UKAcNKvC_lzMGMYS6_nnoT4Tc,7
8
+ einops/__init__.py,sha256=8uWtV9MPDvreSsu6VG0E-j2TlxrckMGW2Zy5fCRFu6I,297
9
+ einops/__pycache__/__init__.cpython-38.pyc,,
10
+ einops/__pycache__/_backends.cpython-38.pyc,,
11
+ einops/__pycache__/_torch_specific.cpython-38.pyc,,
12
+ einops/__pycache__/einops.cpython-38.pyc,,
13
+ einops/__pycache__/parsing.cpython-38.pyc,,
14
+ einops/_backends.py,sha256=xyh2XysbubzGMqyRZPu8ld5-1bob693ESp6vkZR4gV8,17132
15
+ einops/_torch_specific.py,sha256=-VuZFXozi6GPtKQFWvb7BhzswyOfjByXGG0GLvvhDXg,2804
16
+ einops/einops.py,sha256=LkY-JdbOUqf_GtuiskedET4aaSs0yJKfVaLoBwS6UP8,27879
17
+ einops/layers/__init__.py,sha256=JHwHQUP5sBIYhSwRrjhZYxGIdw8-UTEWUPbeEduBuBY,2824
18
+ einops/layers/__pycache__/__init__.cpython-38.pyc,,
19
+ einops/layers/__pycache__/_einmix.cpython-38.pyc,,
20
+ einops/layers/__pycache__/chainer.cpython-38.pyc,,
21
+ einops/layers/__pycache__/gluon.cpython-38.pyc,,
22
+ einops/layers/__pycache__/keras.cpython-38.pyc,,
23
+ einops/layers/__pycache__/tensorflow.cpython-38.pyc,,
24
+ einops/layers/__pycache__/torch.cpython-38.pyc,,
25
+ einops/layers/_einmix.py,sha256=k1Wt5z7KmJF9nj345ZhUXeRBcV1D2bNkz35yF82zB_E,8249
26
+ einops/layers/chainer.py,sha256=VisqqyZiEpDl7NdCSjVSa4u7aXgZuNpA0hglkfGydiM,1927
27
+ einops/layers/gluon.py,sha256=Ll85s1OWKqRAhSwFS33jQwbTicD1MnhrH4lbnlqvoPU,2101
28
+ einops/layers/keras.py,sha256=RTsR-aim1Sco5VXI2W1Qs639hJRJ0hWIilTZCs3Ftn4,212
29
+ einops/layers/tensorflow.py,sha256=xNsVaKIMoB2kZeSeFUKXq29LWz-Fppt2K2aRln5s0-Y,3269
30
+ einops/layers/torch.py,sha256=IOdwPR2uL_ZFuzWthGz6p-8af1zg801UmjB8uTBA5HY,2379
31
+ einops/parsing.py,sha256=75hvgp6iWvvLUe67IaQujmox1tjvF9ZsBMaXQYnQmqU,6637
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/einops-0.4.1.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
1
+ einops
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/__init__.py ADDED
@@ -0,0 +1,51 @@
1
+ """
2
+ A platform independent file lock that supports the with-statement.
3
+
4
+ .. autodata:: filelock.__version__
5
+ :no-value:
6
+
7
+ """
8
+ from __future__ import annotations
9
+
10
+ import sys
11
+ import warnings
12
+ from typing import TYPE_CHECKING
13
+
14
+ from ._api import AcquireReturnProxy, BaseFileLock
15
+ from ._error import Timeout
16
+ from ._soft import SoftFileLock
17
+ from ._unix import UnixFileLock, has_fcntl
18
+ from ._windows import WindowsFileLock
19
+ from .version import version
20
+
21
+ #: version of the project as a string
22
+ __version__: str = version
23
+
24
+
25
+ if sys.platform == "win32": # pragma: win32 cover
26
+ _FileLock: type[BaseFileLock] = WindowsFileLock
27
+ else: # pragma: win32 no cover # noqa: PLR5501
28
+ if has_fcntl:
29
+ _FileLock: type[BaseFileLock] = UnixFileLock
30
+ else:
31
+ _FileLock = SoftFileLock
32
+ if warnings is not None:
33
+ warnings.warn("only soft file lock is available", stacklevel=2)
34
+
35
+ if TYPE_CHECKING:
36
+ FileLock = SoftFileLock
37
+ else:
38
+ #: Alias for the lock, which should be used for the current platform.
39
+ FileLock = _FileLock
40
+
41
+
42
+ __all__ = [
43
+ "__version__",
44
+ "FileLock",
45
+ "SoftFileLock",
46
+ "Timeout",
47
+ "UnixFileLock",
48
+ "WindowsFileLock",
49
+ "BaseFileLock",
50
+ "AcquireReturnProxy",
51
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_error.py ADDED
@@ -0,0 +1,30 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+
6
+ class Timeout(TimeoutError): # noqa: N818
7
+ """Raised when the lock could not be acquired in *timeout* seconds."""
8
+
9
+ def __init__(self, lock_file: str) -> None:
10
+ super().__init__()
11
+ self._lock_file = lock_file
12
+
13
+ def __reduce__(self) -> str | tuple[Any, ...]:
14
+ return self.__class__, (self._lock_file,) # Properly pickle the exception
15
+
16
+ def __str__(self) -> str:
17
+ return f"The file lock '{self._lock_file}' could not be acquired."
18
+
19
+ def __repr__(self) -> str:
20
+ return f"{self.__class__.__name__}({self.lock_file!r})"
21
+
22
+ @property
23
+ def lock_file(self) -> str:
24
+ """:return: The path of the file lock."""
25
+ return self._lock_file
26
+
27
+
28
+ __all__ = [
29
+ "Timeout",
30
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_soft.py ADDED
@@ -0,0 +1,47 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import sys
5
+ from contextlib import suppress
6
+ from errno import EACCES, EEXIST
7
+ from pathlib import Path
8
+
9
+ from ._api import BaseFileLock
10
+ from ._util import ensure_directory_exists, raise_on_not_writable_file
11
+
12
+
13
+ class SoftFileLock(BaseFileLock):
14
+ """Simply watches the existence of the lock file."""
15
+
16
+ def _acquire(self) -> None:
17
+ raise_on_not_writable_file(self.lock_file)
18
+ ensure_directory_exists(self.lock_file)
19
+ # first check for existence and read-only mode, as open() would mask this case as EEXIST
20
+ flags = (
21
+ os.O_WRONLY # open for writing only
22
+ | os.O_CREAT
23
+ | os.O_EXCL # together with above raise EEXIST if the file specified by filename exists
24
+ | os.O_TRUNC # truncate the file to zero byte
25
+ )
26
+ try:
27
+ file_handler = os.open(self.lock_file, flags, self._context.mode)
28
+ except OSError as exception: # re-raise unless expected exception
29
+ if not (
30
+ exception.errno == EEXIST # lock already exist
31
+ or (exception.errno == EACCES and sys.platform == "win32") # has no access to this lock
32
+ ): # pragma: win32 no cover
33
+ raise
34
+ else:
35
+ self._context.lock_file_fd = file_handler
36
+
37
+ def _release(self) -> None:
38
+ assert self._context.lock_file_fd is not None # noqa: S101
39
+ os.close(self._context.lock_file_fd) # the lock file is definitely not None
40
+ self._context.lock_file_fd = None
41
+ with suppress(OSError): # the file is already deleted and that's what we want
42
+ Path(self.lock_file).unlink()
43
+
44
+
45
+ __all__ = [
46
+ "SoftFileLock",
47
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/_windows.py ADDED
@@ -0,0 +1,65 @@
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import sys
5
+ from contextlib import suppress
6
+ from errno import EACCES
7
+ from pathlib import Path
8
+ from typing import cast
9
+
10
+ from ._api import BaseFileLock
11
+ from ._util import ensure_directory_exists, raise_on_not_writable_file
12
+
13
+ if sys.platform == "win32": # pragma: win32 cover
14
+ import msvcrt
15
+
16
+ class WindowsFileLock(BaseFileLock):
17
+ """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
18
+
19
+ def _acquire(self) -> None:
20
+ raise_on_not_writable_file(self.lock_file)
21
+ ensure_directory_exists(self.lock_file)
22
+ flags = (
23
+ os.O_RDWR # open for read and write
24
+ | os.O_CREAT # create file if not exists
25
+ | os.O_TRUNC # truncate file if not empty
26
+ )
27
+ try:
28
+ fd = os.open(self.lock_file, flags, self._context.mode)
29
+ except OSError as exception:
30
+ if exception.errno != EACCES: # has no access to this lock
31
+ raise
32
+ else:
33
+ try:
34
+ msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
35
+ except OSError as exception:
36
+ os.close(fd) # close file first
37
+ if exception.errno != EACCES: # file is already locked
38
+ raise
39
+ else:
40
+ self._context.lock_file_fd = fd
41
+
42
+ def _release(self) -> None:
43
+ fd = cast(int, self._context.lock_file_fd)
44
+ self._context.lock_file_fd = None
45
+ msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
46
+ os.close(fd)
47
+
48
+ with suppress(OSError): # Probably another instance of the application has acquired the file lock.
49
+ Path(self.lock_file).unlink()
50
+
51
+ else: # pragma: win32 no cover
52
+
53
+ class WindowsFileLock(BaseFileLock):
54
+ """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""
55
+
56
+ def _acquire(self) -> None:
57
+ raise NotImplementedError
58
+
59
+ def _release(self) -> None:
60
+ raise NotImplementedError
61
+
62
+
63
+ __all__ = [
64
+ "WindowsFileLock",
65
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock/version.py ADDED
@@ -0,0 +1,16 @@
1
+ # file generated by setuptools_scm
2
+ # don't change, don't track in version control
3
+ TYPE_CHECKING = False
4
+ if TYPE_CHECKING:
5
+ from typing import Tuple, Union
6
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
7
+ else:
8
+ VERSION_TUPLE = object
9
+
10
+ version: str
11
+ __version__: str
12
+ __version_tuple__: VERSION_TUPLE
13
+ version_tuple: VERSION_TUPLE
14
+
15
+ __version__ = version = '3.13.1'
16
+ __version_tuple__ = version_tuple = (3, 13, 1)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/h5py/h5t.cpython-38-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb221d3a6600c61dd539917c6744220aa72dbd66b25a24795eeea1f3e43ab4a9
3
+ size 950896
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/_dask.py ADDED
@@ -0,0 +1,364 @@
1
+ from __future__ import print_function, division, absolute_import
2
+
3
+ import asyncio
4
+ import concurrent.futures
5
+ import contextlib
6
+
7
+ import time
8
+ from uuid import uuid4
9
+ import weakref
10
+
11
+ from .parallel import AutoBatchingMixin, ParallelBackendBase, BatchedCalls
12
+ from .parallel import parallel_backend
13
+
14
+ try:
15
+ import distributed
16
+ except ImportError:
17
+ distributed = None
18
+
19
+ if distributed is not None:
20
+ from dask.utils import funcname, itemgetter
21
+ from dask.sizeof import sizeof
22
+ from dask.distributed import (
23
+ Client,
24
+ as_completed,
25
+ get_client,
26
+ secede,
27
+ rejoin
28
+ )
29
+ from distributed.utils import thread_state
30
+
31
+ try:
32
+ # asyncio.TimeoutError, Python3-only error thrown by recent versions of
33
+ # distributed
34
+ from distributed.utils import TimeoutError as _TimeoutError
35
+ except ImportError:
36
+ from tornado.gen import TimeoutError as _TimeoutError
37
+
38
+
39
+ def is_weakrefable(obj):
40
+ try:
41
+ weakref.ref(obj)
42
+ return True
43
+ except TypeError:
44
+ return False
45
+
46
+
47
+ class _WeakKeyDictionary:
48
+ """A variant of weakref.WeakKeyDictionary for unhashable objects.
49
+
50
+ This datastructure is used to store futures for broadcasted data objects
51
+ such as large numpy arrays or pandas dataframes that are not hashable and
52
+ therefore cannot be used as keys of traditional python dicts.
53
+
54
+ Furthermore using a dict with id(array) as key is not safe because the
55
+ Python is likely to reuse id of recently collected arrays.
56
+ """
57
+
58
+ def __init__(self):
59
+ self._data = {}
60
+
61
+ def __getitem__(self, obj):
62
+ ref, val = self._data[id(obj)]
63
+ if ref() is not obj:
64
+ # In case of a race condition with on_destroy.
65
+ raise KeyError(obj)
66
+ return val
67
+
68
+ def __setitem__(self, obj, value):
69
+ key = id(obj)
70
+ try:
71
+ ref, _ = self._data[key]
72
+ if ref() is not obj:
73
+ # In case of race condition with on_destroy.
74
+ raise KeyError(obj)
75
+ except KeyError:
76
+ # Insert the new entry in the mapping along with a weakref
77
+ # callback to automatically delete the entry from the mapping
78
+ # as soon as the object used as key is garbage collected.
79
+ def on_destroy(_):
80
+ del self._data[key]
81
+ ref = weakref.ref(obj, on_destroy)
82
+ self._data[key] = ref, value
83
+
84
+ def __len__(self):
85
+ return len(self._data)
86
+
87
+ def clear(self):
88
+ self._data.clear()
89
+
90
+
91
+ def _funcname(x):
92
+ try:
93
+ if isinstance(x, list):
94
+ x = x[0][0]
95
+ except Exception:
96
+ pass
97
+ return funcname(x)
98
+
99
+
100
+ def _make_tasks_summary(tasks):
101
+ """Summarize of list of (func, args, kwargs) function calls"""
102
+ unique_funcs = {func for func, args, kwargs in tasks}
103
+
104
+ if len(unique_funcs) == 1:
105
+ mixed = False
106
+ else:
107
+ mixed = True
108
+ return len(tasks), mixed, _funcname(tasks)
109
+
110
+
111
+ class Batch:
112
+ """dask-compatible wrapper that executes a batch of tasks"""
113
+ def __init__(self, tasks):
114
+ # collect some metadata from the tasks to ease Batch calls
115
+ # introspection when debugging
116
+ self._num_tasks, self._mixed, self._funcname = _make_tasks_summary(
117
+ tasks
118
+ )
119
+
120
+ def __call__(self, tasks=None):
121
+ results = []
122
+ with parallel_backend('dask'):
123
+ for func, args, kwargs in tasks:
124
+ results.append(func(*args, **kwargs))
125
+ return results
126
+
127
+ def __repr__(self):
128
+ descr = f"batch_of_{self._funcname}_{self._num_tasks}_calls"
129
+ if self._mixed:
130
+ descr = "mixed_" + descr
131
+ return descr
132
+
133
+
134
+ def _joblib_probe_task():
135
+ # Noop used by the joblib connector to probe when workers are ready.
136
+ pass
137
+
138
+
139
+ class DaskDistributedBackend(AutoBatchingMixin, ParallelBackendBase):
140
+ MIN_IDEAL_BATCH_DURATION = 0.2
141
+ MAX_IDEAL_BATCH_DURATION = 1.0
142
+ supports_timeout = True
143
+
144
+ def __init__(self, scheduler_host=None, scatter=None,
145
+ client=None, loop=None, wait_for_workers_timeout=10,
146
+ **submit_kwargs):
147
+ super().__init__()
148
+
149
+ if distributed is None:
150
+ msg = ("You are trying to use 'dask' as a joblib parallel backend "
151
+ "but dask is not installed. Please install dask "
152
+ "to fix this error.")
153
+ raise ValueError(msg)
154
+
155
+ if client is None:
156
+ if scheduler_host:
157
+ client = Client(scheduler_host, loop=loop,
158
+ set_as_default=False)
159
+ else:
160
+ try:
161
+ client = get_client()
162
+ except ValueError as e:
163
+ msg = ("To use Joblib with Dask first create a Dask Client"
164
+ "\n\n"
165
+ " from dask.distributed import Client\n"
166
+ " client = Client()\n"
167
+ "or\n"
168
+ " client = Client('scheduler-address:8786')")
169
+ raise ValueError(msg) from e
170
+
171
+ self.client = client
172
+
173
+ if scatter is not None and not isinstance(scatter, (list, tuple)):
174
+ raise TypeError("scatter must be a list/tuple, got "
175
+ "`%s`" % type(scatter).__name__)
176
+
177
+ if scatter is not None and len(scatter) > 0:
178
+ # Keep a reference to the scattered data to keep the ids the same
179
+ self._scatter = list(scatter)
180
+ scattered = self.client.scatter(scatter, broadcast=True)
181
+ self.data_futures = {id(x): f for x, f in zip(scatter, scattered)}
182
+ else:
183
+ self._scatter = []
184
+ self.data_futures = {}
185
+ self.wait_for_workers_timeout = wait_for_workers_timeout
186
+ self.submit_kwargs = submit_kwargs
187
+ self.waiting_futures = as_completed(
188
+ [],
189
+ loop=client.loop,
190
+ with_results=True,
191
+ raise_errors=False
192
+ )
193
+ self._results = {}
194
+ self._callbacks = {}
195
+
196
+ async def _collect(self):
197
+ while self._continue:
198
+ async for future, result in self.waiting_futures:
199
+ cf_future = self._results.pop(future)
200
+ callback = self._callbacks.pop(future)
201
+ if future.status == "error":
202
+ typ, exc, tb = result
203
+ cf_future.set_exception(exc)
204
+ else:
205
+ cf_future.set_result(result)
206
+ callback(result)
207
+ await asyncio.sleep(0.01)
208
+
209
+ def __reduce__(self):
210
+ return (DaskDistributedBackend, ())
211
+
212
+ def get_nested_backend(self):
213
+ return DaskDistributedBackend(client=self.client), -1
214
+
215
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
216
+ self.parallel = parallel
217
+ return self.effective_n_jobs(n_jobs)
218
+
219
+ def start_call(self):
220
+ self._continue = True
221
+ self.client.loop.add_callback(self._collect)
222
+ self.call_data_futures = _WeakKeyDictionary()
223
+
224
+ def stop_call(self):
225
+ # The explicit call to clear is required to break a cycling reference
226
+ # to the futures.
227
+ self._continue = False
228
+ # wait for the future collection routine (self._backend._collect) to
229
+ # finish in order to limit asyncio warnings due to aborting _collect
230
+ # during a following backend termination call
231
+ time.sleep(0.01)
232
+ self.call_data_futures.clear()
233
+
234
+ def effective_n_jobs(self, n_jobs):
235
+ effective_n_jobs = sum(self.client.ncores().values())
236
+ if effective_n_jobs != 0 or not self.wait_for_workers_timeout:
237
+ return effective_n_jobs
238
+
239
+ # If there is no worker, schedule a probe task to wait for the workers
240
+ # to come up and be available. If the dask cluster is in adaptive mode
241
+ # task might cause the cluster to provision some workers.
242
+ try:
243
+ self.client.submit(_joblib_probe_task).result(
244
+ timeout=self.wait_for_workers_timeout)
245
+ except _TimeoutError as e:
246
+ error_msg = (
247
+ "DaskDistributedBackend has no worker after {} seconds. "
248
+ "Make sure that workers are started and can properly connect "
249
+ "to the scheduler and increase the joblib/dask connection "
250
+ "timeout with:\n\n"
251
+ "parallel_backend('dask', wait_for_workers_timeout={})"
252
+ ).format(self.wait_for_workers_timeout,
253
+ max(10, 2 * self.wait_for_workers_timeout))
254
+ raise TimeoutError(error_msg) from e
255
+ return sum(self.client.ncores().values())
256
+
257
+ async def _to_func_args(self, func):
258
+ itemgetters = dict()
259
+
260
+ # Futures that are dynamically generated during a single call to
261
+ # Parallel.__call__.
262
+ call_data_futures = getattr(self, 'call_data_futures', None)
263
+
264
+ async def maybe_to_futures(args):
265
+ out = []
266
+ for arg in args:
267
+ arg_id = id(arg)
268
+ if arg_id in itemgetters:
269
+ out.append(itemgetters[arg_id])
270
+ continue
271
+
272
+ f = self.data_futures.get(arg_id, None)
273
+ if f is None and call_data_futures is not None:
274
+ try:
275
+ f = await call_data_futures[arg]
276
+ except KeyError:
277
+ pass
278
+ if f is None:
279
+ if is_weakrefable(arg) and sizeof(arg) > 1e3:
280
+ # Automatically scatter large objects to some of
281
+ # the workers to avoid duplicated data transfers.
282
+ # Rely on automated inter-worker data stealing if
283
+ # more workers need to reuse this data
284
+ # concurrently.
285
+ # set hash=False - nested scatter calls (i.e
286
+ # calling client.scatter inside a dask worker)
287
+ # using hash=True often raise CancelledError,
288
+ # see dask/distributed#3703
289
+ _coro = self.client.scatter(
290
+ arg,
291
+ asynchronous=True,
292
+ hash=False
293
+ )
294
+ # Centralize the scattering of identical arguments
295
+ # between concurrent apply_async callbacks by
296
+ # exposing the running coroutine in
297
+ # call_data_futures before it completes.
298
+ t = asyncio.Task(_coro)
299
+ call_data_futures[arg] = t
300
+
301
+ f = await t
302
+
303
+ if f is not None:
304
+ out.append(f)
305
+ else:
306
+ out.append(arg)
307
+ return out
308
+
309
+ tasks = []
310
+ for f, args, kwargs in func.items:
311
+ args = list(await maybe_to_futures(args))
312
+ kwargs = dict(zip(kwargs.keys(),
313
+ await maybe_to_futures(kwargs.values())))
314
+ tasks.append((f, args, kwargs))
315
+
316
+ return (Batch(tasks), tasks)
317
+
318
+ def apply_async(self, func, callback=None):
319
+
320
+ cf_future = concurrent.futures.Future()
321
+ cf_future.get = cf_future.result # achieve AsyncResult API
322
+
323
+ async def f(func, callback):
324
+ batch, tasks = await self._to_func_args(func)
325
+ key = f'{repr(batch)}-{uuid4().hex}'
326
+
327
+ dask_future = self.client.submit(
328
+ batch, tasks=tasks, key=key, **self.submit_kwargs
329
+ )
330
+ self.waiting_futures.add(dask_future)
331
+ self._callbacks[dask_future] = callback
332
+ self._results[dask_future] = cf_future
333
+
334
+ self.client.loop.add_callback(f, func, callback)
335
+
336
+ return cf_future
337
+
338
+ def abort_everything(self, ensure_ready=True):
339
+ """ Tell the client to cancel any task submitted via this instance
340
+
341
+ joblib.Parallel will never access those results
342
+ """
343
+ with self.waiting_futures.lock:
344
+ self.waiting_futures.futures.clear()
345
+ while not self.waiting_futures.queue.empty():
346
+ self.waiting_futures.queue.get()
347
+
348
+ @contextlib.contextmanager
349
+ def retrieval_context(self):
350
+ """Override ParallelBackendBase.retrieval_context to avoid deadlocks.
351
+
352
+ This removes thread from the worker's thread pool (using 'secede').
353
+ Seceding avoids deadlock in nested parallelism settings.
354
+ """
355
+ # See 'joblib.Parallel.__call__' and 'joblib.Parallel.retrieve' for how
356
+ # this is used.
357
+ if hasattr(thread_state, 'execution_state'):
358
+ # we are in a worker. Secede to avoid deadlock.
359
+ secede()
360
+
361
+ yield
362
+
363
+ if hasattr(thread_state, 'execution_state'):
364
+ rejoin()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/_parallel_backends.py ADDED
@@ -0,0 +1,610 @@
1
+ """
2
+ Backends for embarrassingly parallel code.
3
+ """
4
+
5
+ import gc
6
+ import os
7
+ import warnings
8
+ import threading
9
+ import functools
10
+ import contextlib
11
+ from abc import ABCMeta, abstractmethod
12
+
13
+ from .my_exceptions import WorkerInterrupt
14
+ from ._multiprocessing_helpers import mp
15
+
16
+ if mp is not None:
17
+ from .pool import MemmappingPool
18
+ from multiprocessing.pool import ThreadPool
19
+ from .executor import get_memmapping_executor
20
+
21
+ # Compat between concurrent.futures and multiprocessing TimeoutError
22
+ from multiprocessing import TimeoutError
23
+ from concurrent.futures._base import TimeoutError as CfTimeoutError
24
+ from .externals.loky import process_executor, cpu_count
25
+
26
+
27
+ class ParallelBackendBase(metaclass=ABCMeta):
28
+ """Helper abc which defines all methods a ParallelBackend must implement"""
29
+
30
+ supports_timeout = False
31
+ supports_inner_max_num_threads = False
32
+ nesting_level = None
33
+
34
+ def __init__(self, nesting_level=None, inner_max_num_threads=None,
35
+ **kwargs):
36
+ super().__init__(**kwargs)
37
+ self.nesting_level = nesting_level
38
+ self.inner_max_num_threads = inner_max_num_threads
39
+
40
+ MAX_NUM_THREADS_VARS = [
41
+ 'OMP_NUM_THREADS', 'OPENBLAS_NUM_THREADS', 'MKL_NUM_THREADS',
42
+ 'BLIS_NUM_THREADS', 'VECLIB_MAXIMUM_THREADS', 'NUMBA_NUM_THREADS',
43
+ 'NUMEXPR_NUM_THREADS',
44
+ ]
45
+
46
+ TBB_ENABLE_IPC_VAR = "ENABLE_IPC"
47
+
48
+ @abstractmethod
49
+ def effective_n_jobs(self, n_jobs):
50
+ """Determine the number of jobs that can actually run in parallel
51
+
52
+ n_jobs is the number of workers requested by the callers. Passing
53
+ n_jobs=-1 means requesting all available workers for instance matching
54
+ the number of CPU cores on the worker host(s).
55
+
56
+ This method should return a guesstimate of the number of workers that
57
+ can actually perform work concurrently. The primary use case is to make
58
+ it possible for the caller to know in how many chunks to slice the
59
+ work.
60
+
61
+ In general working on larger data chunks is more efficient (less
62
+ scheduling overhead and better use of CPU cache prefetching heuristics)
63
+ as long as all the workers have enough work to do.
64
+ """
65
+
66
+ @abstractmethod
67
+ def apply_async(self, func, callback=None):
68
+ """Schedule a func to be run"""
69
+
70
+ def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
71
+ **backend_args):
72
+ """Reconfigure the backend and return the number of workers.
73
+
74
+ This makes it possible to reuse an existing backend instance for
75
+ successive independent calls to Parallel with different parameters.
76
+ """
77
+ self.parallel = parallel
78
+ return self.effective_n_jobs(n_jobs)
79
+
80
+ def start_call(self):
81
+ """Call-back method called at the beginning of a Parallel call"""
82
+
83
+ def stop_call(self):
84
+ """Call-back method called at the end of a Parallel call"""
85
+
86
+ def terminate(self):
87
+ """Shutdown the workers and free the shared memory."""
88
+
89
+ def compute_batch_size(self):
90
+ """Determine the optimal batch size"""
91
+ return 1
92
+
93
+ """Callback indicating how long it took to run a batch"""
94
+ """Callback indicate how long it took to run a batch"""
95
+
96
+ def get_exceptions(self):
97
+ """List of exception types to be captured."""
98
+ return []
99
+
100
+ def abort_everything(self, ensure_ready=True):
101
+ """Abort any running tasks
102
+
103
+ This is called when an exception has been raised when executing a task
104
+ and all the remaining tasks will be ignored and can therefore be
105
+ aborted to spare computation resources.
106
+
107
+ If ensure_ready is True, the backend should be left in an operating
108
+ state as future tasks might be re-submitted via that same backend
109
+ instance.
110
+
111
+ If ensure_ready is False, the implementer of this method can decide
112
+ to leave the backend in a closed / terminated state as no new task
113
+ are expected to be submitted to this backend.
114
+
115
+ Setting ensure_ready to False is an optimization that can be leveraged
116
+ when aborting tasks via killing processes from a local process pool
117
+ managed by the backend itself: if we expect no new tasks, there is no
118
+ point in re-creating new workers.
119
+ """
120
+ # Does nothing by default: to be overridden in subclasses when
121
+ # canceling tasks is possible.
122
+ pass
123
+
124
+ def get_nested_backend(self):
125
+ """Backend instance to be used by nested Parallel calls.
126
+
127
+ By default a thread-based backend is used for the first level of
128
+ nesting. Beyond, switch to sequential backend to avoid spawning too
129
+ many threads on the host.
130
+ """
131
+ nesting_level = getattr(self, 'nesting_level', 0) + 1
132
+ if nesting_level > 1:
133
+ return SequentialBackend(nesting_level=nesting_level), None
134
+ else:
135
+ return ThreadingBackend(nesting_level=nesting_level), None
136
+
137
+ @contextlib.contextmanager
138
+ def retrieval_context(self):
139
+ """Context manager to manage an execution context.
140
+
141
+ Calls to Parallel.retrieve will be made inside this context.
142
+
143
+ By default, this does nothing. It may be useful for subclasses to
144
+ handle nested parallelism. In particular, it may be required to avoid
145
+ deadlocks if a backend manages a fixed number of workers, when those
146
+ workers may be asked to do nested Parallel calls. Without
147
+ 'retrieval_context' this could lead to deadlock, as all the workers
148
+ managed by the backend may be "busy" waiting for the nested parallel
149
+ calls to finish, but the backend has no free workers to execute those
150
+ tasks.
151
+ """
152
+ yield
153
+
154
+ def _prepare_worker_env(self, n_jobs):
155
+ """Return environment variables limiting threadpools in external libs.
156
+
157
+ This function return a dict containing environment variables to pass
158
+ when creating a pool of process. These environment variables limit the
159
+ number of threads to `n_threads` for OpenMP, MKL, Accelerated and
160
+ OpenBLAS libraries in the child processes.
161
+ """
162
+ explicit_n_threads = self.inner_max_num_threads
163
+ default_n_threads = str(max(cpu_count() // n_jobs, 1))
164
+
165
+ # Set the inner environment variables to self.inner_max_num_threads if
166
+ # it is given. Else, default to cpu_count // n_jobs unless the variable
167
+ # is already present in the parent process environment.
168
+ env = {}
169
+ for var in self.MAX_NUM_THREADS_VARS:
170
+ if explicit_n_threads is None:
171
+ var_value = os.environ.get(var, None)
172
+ if var_value is None:
173
+ var_value = default_n_threads
174
+ else:
175
+ var_value = str(explicit_n_threads)
176
+
177
+ env[var] = var_value
178
+
179
+ if self.TBB_ENABLE_IPC_VAR not in os.environ:
180
+ # To avoid over-subscription when using TBB, let the TBB schedulers
181
+ # use Inter Process Communication to coordinate:
182
+ env[self.TBB_ENABLE_IPC_VAR] = "1"
183
+ return env
184
+
185
+ @staticmethod
186
+ def in_main_thread():
187
+ return isinstance(threading.current_thread(), threading._MainThread)
188
+
189
+
190
+ class SequentialBackend(ParallelBackendBase):
191
+ """A ParallelBackend which will execute all batches sequentially.
192
+
193
+ Does not use/create any threading objects, and hence has minimal
194
+ overhead. Used when n_jobs == 1.
195
+ """
196
+
197
+ uses_threads = True
198
+ supports_sharedmem = True
199
+
200
+ def effective_n_jobs(self, n_jobs):
201
+ """Determine the number of jobs which are going to run in parallel"""
202
+ if n_jobs == 0:
203
+ raise ValueError('n_jobs == 0 in Parallel has no meaning')
204
+ return 1
205
+
206
+ def apply_async(self, func, callback=None):
207
+ """Schedule a func to be run"""
208
+ result = ImmediateResult(func)
209
+ if callback:
210
+ callback(result)
211
+ return result
212
+
213
+ def get_nested_backend(self):
214
+ # import is not top level to avoid cyclic import errors.
215
+ from .parallel import get_active_backend
216
+
217
+ # SequentialBackend should neither change the nesting level, the
218
+ # default backend or the number of jobs. Just return the current one.
219
+ return get_active_backend()
220
+
221
+
222
+ class PoolManagerMixin(object):
223
+ """A helper class for managing pool of workers."""
224
+
225
+ _pool = None
226
+
227
+ def effective_n_jobs(self, n_jobs):
228
+ """Determine the number of jobs which are going to run in parallel"""
229
+ if n_jobs == 0:
230
+ raise ValueError('n_jobs == 0 in Parallel has no meaning')
231
+ elif mp is None or n_jobs is None:
232
+ # multiprocessing is not available or disabled, fallback
233
+ # to sequential mode
234
+ return 1
235
+ elif n_jobs < 0:
236
+ n_jobs = max(cpu_count() + 1 + n_jobs, 1)
237
+ return n_jobs
238
+
239
+ def terminate(self):
240
+ """Shutdown the process or thread pool"""
241
+ if self._pool is not None:
242
+ self._pool.close()
243
+ self._pool.terminate() # terminate does a join()
244
+ self._pool = None
245
+
246
+ def _get_pool(self):
247
+ """Used by apply_async to make it possible to implement lazy init"""
248
+ return self._pool
249
+
250
+ def apply_async(self, func, callback=None):
251
+ """Schedule a func to be run"""
252
+ return self._get_pool().apply_async(
253
+ SafeFunction(func), callback=callback)
254
+
255
+ def abort_everything(self, ensure_ready=True):
256
+ """Shutdown the pool and restart a new one with the same parameters"""
257
+ self.terminate()
258
+ if ensure_ready:
259
+ self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel,
260
+ **self.parallel._backend_args)
261
+
262
+
263
+ class AutoBatchingMixin(object):
264
+ """A helper class for automagically batching jobs."""
265
+
266
+ # In seconds, should be big enough to hide multiprocessing dispatching
267
+ # overhead.
268
+ # This setting was found by running benchmarks/bench_auto_batching.py
269
+ # with various parameters on various platforms.
270
+ MIN_IDEAL_BATCH_DURATION = .2
271
+
272
+ # Should not be too high to avoid stragglers: long jobs running alone
273
+ # on a single worker while other workers have no work to process any more.
274
+ MAX_IDEAL_BATCH_DURATION = 2
275
+
276
+ # Batching counters default values
277
+ _DEFAULT_EFFECTIVE_BATCH_SIZE = 1
278
+ _DEFAULT_SMOOTHED_BATCH_DURATION = 0.0
279
+
280
+ def __init__(self, **kwargs):
281
+ super().__init__(**kwargs)
282
+ self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
283
+ self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
284
+
285
+ def compute_batch_size(self):
286
+ """Determine the optimal batch size"""
287
+ old_batch_size = self._effective_batch_size
288
+ batch_duration = self._smoothed_batch_duration
289
+ if (batch_duration > 0 and
290
+ batch_duration < self.MIN_IDEAL_BATCH_DURATION):
291
+ # The current batch size is too small: the duration of the
292
+ # processing of a batch of task is not large enough to hide
293
+ # the scheduling overhead.
294
+ ideal_batch_size = int(old_batch_size *
295
+ self.MIN_IDEAL_BATCH_DURATION /
296
+ batch_duration)
297
+ # Multiply by two to limit oscillations between min and max.
298
+ ideal_batch_size *= 2
299
+
300
+ # don't increase the batch size too fast to limit huge batch sizes
302
+ # potentially leading to starving workers
302
+ batch_size = min(2 * old_batch_size, ideal_batch_size)
303
+
304
+ batch_size = max(batch_size, 1)
305
+
306
+ self._effective_batch_size = batch_size
307
+ if self.parallel.verbose >= 10:
308
+ self.parallel._print(
309
+ "Batch computation too fast (%.4fs.) "
310
+ "Setting batch_size=%d.", (batch_duration, batch_size))
311
+ elif (batch_duration > self.MAX_IDEAL_BATCH_DURATION and
312
+ old_batch_size >= 2):
313
+ # The current batch size is too big. If we schedule overly long
314
+ # running batches some CPUs might wait with nothing left to do
315
+ # while a couple of CPUs are left processing a few long running
316
+ # batches. Better reduce the batch size a bit to limit the
317
+ # likelihood of scheduling such stragglers.
318
+
319
+ # decrease the batch size quickly to limit potential starving
320
+ ideal_batch_size = int(
321
+ old_batch_size * self.MIN_IDEAL_BATCH_DURATION / batch_duration
322
+ )
323
+ # Multiply by two to limit oscillations between min and max.
324
+ batch_size = max(2 * ideal_batch_size, 1)
325
+ self._effective_batch_size = batch_size
326
+ if self.parallel.verbose >= 10:
327
+ self.parallel._print(
328
+ "Batch computation too slow (%.4fs.) "
329
+ "Setting batch_size=%d.", (batch_duration, batch_size))
330
+ else:
331
+ # No batch size adjustment
332
+ batch_size = old_batch_size
333
+
334
+ if batch_size != old_batch_size:
335
+ # Reset estimation of the smoothed mean batch duration: this
336
+ # estimate is updated in the multiprocessing apply_async
337
+ # CallBack as long as the batch_size is constant. Therefore
338
+ # we need to reset the estimate whenever we re-tune the batch
339
+ # size.
340
+ self._smoothed_batch_duration = \
341
+ self._DEFAULT_SMOOTHED_BATCH_DURATION
342
+
343
+ return batch_size
344
+
345
+ def batch_completed(self, batch_size, duration):
346
+ """Callback indicating how long it took to run a batch"""
347
+ if batch_size == self._effective_batch_size:
348
+ # Update the smoothed streaming estimate of the duration of a batch
349
+ # from dispatch to completion
350
+ old_duration = self._smoothed_batch_duration
351
+ if old_duration == self._DEFAULT_SMOOTHED_BATCH_DURATION:
352
+ # First record of duration for this batch size after the last
353
+ # reset.
354
+ new_duration = duration
355
+ else:
356
+ # Update the exponentially weighted average of the duration of
357
+ # batch for the current effective size.
358
+ new_duration = 0.8 * old_duration + 0.2 * duration
359
+ self._smoothed_batch_duration = new_duration
360
+
361
+ def reset_batch_stats(self):
362
+ """Reset batch statistics to default values.
363
+
364
+ This avoids interferences with future jobs.
365
+ """
366
+ self._effective_batch_size = self._DEFAULT_EFFECTIVE_BATCH_SIZE
367
+ self._smoothed_batch_duration = self._DEFAULT_SMOOTHED_BATCH_DURATION
368
+
369
+
370
+ class ThreadingBackend(PoolManagerMixin, ParallelBackendBase):
371
+ """A ParallelBackend which will use a thread pool to execute batches in.
372
+
373
+ This is a low-overhead backend but it suffers from the Python Global
374
+ Interpreter Lock if the called function relies a lot on Python objects.
375
+ Mostly useful when the execution bottleneck is a compiled extension that
376
+ explicitly releases the GIL (for instance a Cython loop wrapped in a "with
377
+ nogil" block or an expensive call to a library such as NumPy).
378
+
379
+ The actual thread pool is lazily initialized: the actual thread pool
380
+ construction is delayed to the first call to apply_async.
381
+
382
+ ThreadingBackend is used as the default backend for nested calls.
383
+ """
384
+
385
+ supports_timeout = True
386
+ uses_threads = True
387
+ supports_sharedmem = True
388
+
389
+ def configure(self, n_jobs=1, parallel=None, **backend_args):
390
+ """Build a process or thread pool and return the number of workers"""
391
+ n_jobs = self.effective_n_jobs(n_jobs)
392
+ if n_jobs == 1:
393
+ # Avoid unnecessary overhead and use sequential backend instead.
394
+ raise FallbackToBackend(
395
+ SequentialBackend(nesting_level=self.nesting_level))
396
+ self.parallel = parallel
397
+ self._n_jobs = n_jobs
398
+ return n_jobs
399
+
400
+ def _get_pool(self):
401
+ """Lazily initialize the thread pool
402
+
403
+ The actual pool of worker threads is only initialized at the first
404
+ call to apply_async.
405
+ """
406
+ if self._pool is None:
407
+ self._pool = ThreadPool(self._n_jobs)
408
+ return self._pool
409
+
410
+
411
+ class MultiprocessingBackend(PoolManagerMixin, AutoBatchingMixin,
412
+ ParallelBackendBase):
413
+ """A ParallelBackend which will use a multiprocessing.Pool.
414
+
415
+ Will introduce some communication and memory overhead when exchanging
416
+ input and output data with the worker Python processes.
417
+ However, does not suffer from the Python Global Interpreter Lock.
418
+ """
419
+
420
+ supports_timeout = True
421
+
422
+ def effective_n_jobs(self, n_jobs):
423
+ """Determine the number of jobs which are going to run in parallel.
424
+
425
+ This also checks if we are attempting to create a nested parallel
426
+ loop.
427
+ """
428
+ if mp is None:
429
+ return 1
430
+
431
+ if mp.current_process().daemon:
432
+ # Daemonic processes cannot have children
433
+ if n_jobs != 1:
434
+ warnings.warn(
435
+ 'Multiprocessing-backed parallel loops cannot be nested,'
436
+ ' setting n_jobs=1',
437
+ stacklevel=3)
438
+ return 1
439
+
440
+ if process_executor._CURRENT_DEPTH > 0:
441
+ # Mixing loky and multiprocessing in nested loop is not supported
442
+ if n_jobs != 1:
443
+ warnings.warn(
444
+ 'Multiprocessing-backed parallel loops cannot be nested,'
445
+ ' below loky, setting n_jobs=1',
446
+ stacklevel=3)
447
+ return 1
448
+
449
+ elif not (self.in_main_thread() or self.nesting_level == 0):
450
+ # Prevent posix fork inside in non-main posix threads
451
+ if n_jobs != 1:
452
+ warnings.warn(
453
+ 'Multiprocessing-backed parallel loops cannot be nested'
454
+ ' below threads, setting n_jobs=1',
455
+ stacklevel=3)
456
+ return 1
457
+
458
+ return super(MultiprocessingBackend, self).effective_n_jobs(n_jobs)
459
+
460
+ def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
461
+ **memmappingpool_args):
462
+ """Build a process or thread pool and return the number of workers"""
463
+ n_jobs = self.effective_n_jobs(n_jobs)
464
+ if n_jobs == 1:
465
+ raise FallbackToBackend(
466
+ SequentialBackend(nesting_level=self.nesting_level))
467
+
468
+ # Make sure to free as much memory as possible before forking
469
+ gc.collect()
470
+ self._pool = MemmappingPool(n_jobs, **memmappingpool_args)
471
+ self.parallel = parallel
472
+ return n_jobs
473
+
474
+ def terminate(self):
475
+ """Shutdown the process or thread pool"""
476
+ super(MultiprocessingBackend, self).terminate()
477
+ self.reset_batch_stats()
478
+
479
+
480
+ class LokyBackend(AutoBatchingMixin, ParallelBackendBase):
481
+ """Managing pool of workers with loky instead of multiprocessing."""
482
+
483
+ supports_timeout = True
484
+ supports_inner_max_num_threads = True
485
+
486
+ def configure(self, n_jobs=1, parallel=None, prefer=None, require=None,
487
+ idle_worker_timeout=300, **memmappingexecutor_args):
488
+ """Build a process executor and return the number of workers"""
489
+ n_jobs = self.effective_n_jobs(n_jobs)
490
+ if n_jobs == 1:
491
+ raise FallbackToBackend(
492
+ SequentialBackend(nesting_level=self.nesting_level))
493
+
494
+ self._workers = get_memmapping_executor(
495
+ n_jobs, timeout=idle_worker_timeout,
496
+ env=self._prepare_worker_env(n_jobs=n_jobs),
497
+ context_id=parallel._id, **memmappingexecutor_args)
498
+ self.parallel = parallel
499
+ return n_jobs
500
+
501
+ def effective_n_jobs(self, n_jobs):
502
+ """Determine the number of jobs which are going to run in parallel"""
503
+ if n_jobs == 0:
504
+ raise ValueError('n_jobs == 0 in Parallel has no meaning')
505
+ elif mp is None or n_jobs is None:
506
+ # multiprocessing is not available or disabled, fallback
507
+ # to sequential mode
508
+ return 1
509
+ elif mp.current_process().daemon:
510
+ # Daemonic processes cannot have children
511
+ if n_jobs != 1:
512
+ warnings.warn(
513
+ 'Loky-backed parallel loops cannot be called in a'
514
+ ' multiprocessing, setting n_jobs=1',
515
+ stacklevel=3)
516
+ return 1
517
+ elif not (self.in_main_thread() or self.nesting_level == 0):
518
+ # Prevent posix fork inside in non-main posix threads
519
+ if n_jobs != 1:
520
+ warnings.warn(
521
+ 'Loky-backed parallel loops cannot be nested below '
522
+ 'threads, setting n_jobs=1',
523
+ stacklevel=3)
524
+ return 1
525
+ elif n_jobs < 0:
526
+ n_jobs = max(cpu_count() + 1 + n_jobs, 1)
527
+ return n_jobs
528
+
529
+ def apply_async(self, func, callback=None):
530
+ """Schedule a func to be run"""
531
+ future = self._workers.submit(SafeFunction(func))
532
+ future.get = functools.partial(self.wrap_future_result, future)
533
+ if callback is not None:
534
+ future.add_done_callback(callback)
535
+ return future
536
+
537
+ @staticmethod
538
+ def wrap_future_result(future, timeout=None):
539
+ """Wrapper for Future.result to implement the same behaviour as
540
+ AsyncResults.get from multiprocessing."""
541
+ try:
542
+ return future.result(timeout=timeout)
543
+ except CfTimeoutError as e:
544
+ raise TimeoutError from e
545
+
546
+ def terminate(self):
547
+ if self._workers is not None:
548
+ # Don't terminate the workers as we want to reuse them in later
549
+ # calls, but cleanup the temporary resources that the Parallel call
550
+ # created. This 'hack' requires a private, low-level operation.
551
+ self._workers._temp_folder_manager._unlink_temporary_resources(
552
+ context_id=self.parallel._id
553
+ )
554
+ self._workers = None
555
+
556
+ self.reset_batch_stats()
557
+
558
+ def abort_everything(self, ensure_ready=True):
559
+ """Shutdown the workers and restart a new one with the same parameters
560
+ """
561
+ self._workers.terminate(kill_workers=True)
562
+ self._workers = None
563
+
564
+ if ensure_ready:
565
+ self.configure(n_jobs=self.parallel.n_jobs, parallel=self.parallel)
566
+
567
+
568
+ class ImmediateResult(object):
569
+ def __init__(self, batch):
570
+ # Don't delay the application, to avoid keeping the input
571
+ # arguments in memory
572
+ self.results = batch()
573
+
574
+ def get(self):
575
+ return self.results
576
+
577
+
578
+ class SafeFunction(object):
579
+ """Wrapper that handles the serialization of exception tracebacks.
580
+
581
+ TODO python2_drop: check whether SafeFunction is still needed since we
582
+ dropped support for Python 2. If not needed anymore it should be
583
+ deprecated.
584
+
585
+ If an exception is triggered when calling the inner function, a copy of
586
+ the full traceback is captured to make it possible to serialize
587
+ it so that it can be rendered in a different Python process.
588
+
589
+ """
590
+ def __init__(self, func):
591
+ self.func = func
592
+
593
+ def __call__(self, *args, **kwargs):
594
+ try:
595
+ return self.func(*args, **kwargs)
596
+ except KeyboardInterrupt as e:
597
+ # We capture the KeyboardInterrupt and reraise it as
598
+ # something different, as multiprocessing does not
599
+ # interrupt processing for a KeyboardInterrupt
600
+ raise WorkerInterrupt() from e
601
+ except BaseException:
602
+ # Rely on Python 3 built-in Remote Traceback reporting
603
+ raise
604
+
605
+
606
+ class FallbackToBackend(Exception):
607
+ """Raised when configuration should fallback to another backend"""
608
+
609
+ def __init__(self, backend):
610
+ self.backend = backend
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/_store_backends.py ADDED
@@ -0,0 +1,414 @@
1
+ """Storage providers backends for Memory caching."""
2
+
3
+ import re
4
+ import os
5
+ import os.path
6
+ import datetime
7
+ import json
8
+ import shutil
9
+ import warnings
10
+ import collections
11
+ import operator
12
+ import threading
13
+ from abc import ABCMeta, abstractmethod
14
+
15
+ from .backports import concurrency_safe_rename
16
+ from .disk import mkdirp, memstr_to_bytes, rm_subdirs
17
+ from . import numpy_pickle
18
+
19
+ CacheItemInfo = collections.namedtuple('CacheItemInfo',
20
+ 'path size last_access')
21
+
22
+
23
+ def concurrency_safe_write(object_to_write, filename, write_func):
24
+ """Writes an object into a unique file in a concurrency-safe way."""
25
+ thread_id = id(threading.current_thread())
26
+ temporary_filename = '{}.thread-{}-pid-{}'.format(
27
+ filename, thread_id, os.getpid())
28
+ write_func(object_to_write, temporary_filename)
29
+
30
+ return temporary_filename
31
+
32
+
33
+ class StoreBackendBase(metaclass=ABCMeta):
34
+ """Helper Abstract Base Class which defines all methods that
35
+ a StorageBackend must implement."""
36
+
37
+ location = None
38
+
39
+ @abstractmethod
40
+ def _open_item(self, f, mode):
41
+ """Opens an item on the store and return a file-like object.
42
+
43
+ This method is private and only used by the StoreBackendMixin object.
44
+
45
+ Parameters
46
+ ----------
47
+ f: a file-like object
48
+ The file-like object where an item is stored and retrieved
49
+ mode: string, optional
50
+ the mode in which the file-like object is opened allowed valued are
51
+ 'rb', 'wb'
52
+
53
+ Returns
54
+ -------
55
+ a file-like object
56
+ """
57
+
58
+ @abstractmethod
59
+ def _item_exists(self, location):
60
+ """Checks if an item location exists in the store.
61
+
62
+ This method is private and only used by the StoreBackendMixin object.
63
+
64
+ Parameters
65
+ ----------
66
+ location: string
67
+ The location of an item. On a filesystem, this corresponds to the
68
+ absolute path, including the filename, of a file.
69
+
70
+ Returns
71
+ -------
72
+ True if the item exists, False otherwise
73
+ """
74
+
75
+ @abstractmethod
76
+ def _move_item(self, src, dst):
77
+ """Moves an item from src to dst in the store.
78
+
79
+ This method is private and only used by the StoreBackendMixin object.
80
+
81
+ Parameters
82
+ ----------
83
+ src: string
84
+ The source location of an item
85
+ dst: string
86
+ The destination location of an item
87
+ """
88
+
89
+ @abstractmethod
90
+ def create_location(self, location):
91
+ """Creates a location on the store.
92
+
93
+ Parameters
94
+ ----------
95
+ location: string
96
+ The location in the store. On a filesystem, this corresponds to a
97
+ directory.
98
+ """
99
+
100
+ @abstractmethod
101
+ def clear_location(self, location):
102
+ """Clears a location on the store.
103
+
104
+ Parameters
105
+ ----------
106
+ location: string
107
+ The location in the store. On a filesystem, this corresponds to a
108
+ directory or a filename absolute path
109
+ """
110
+
111
+ @abstractmethod
112
+ def get_items(self):
113
+ """Returns the whole list of items available in the store.
114
+
115
+ Returns
116
+ -------
117
+ The list of items identified by their ids (e.g filename in a
118
+ filesystem).
119
+ """
120
+
121
+ @abstractmethod
122
+ def configure(self, location, verbose=0, backend_options=dict()):
123
+ """Configures the store.
124
+
125
+ Parameters
126
+ ----------
127
+ location: string
128
+ The base location used by the store. On a filesystem, this
129
+ corresponds to a directory.
130
+ verbose: int
131
+ The level of verbosity of the store
132
+ backend_options: dict
133
+ Contains a dictionary of named parameters used to configure the
134
+ store backend.
135
+ """
136
+
137
+
138
+ class StoreBackendMixin(object):
139
+ """Class providing all logic for managing the store in a generic way.
140
+
141
+ The StoreBackend subclass has to implement 3 methods: create_location,
142
+ clear_location and configure. The StoreBackend also has to provide
143
+ a private _open_item, _item_exists and _move_item methods. The _open_item
144
+ method has to have the same signature as the builtin open and return a
145
+ file-like object.
146
+ """
147
+
148
+ def load_item(self, path, verbose=1, msg=None):
149
+ """Load an item from the store given its path as a list of
150
+ strings."""
151
+ full_path = os.path.join(self.location, *path)
152
+
153
+ if verbose > 1:
154
+ if verbose < 10:
155
+ print('{0}...'.format(msg))
156
+ else:
157
+ print('{0} from {1}'.format(msg, full_path))
158
+
159
+ mmap_mode = (None if not hasattr(self, 'mmap_mode')
160
+ else self.mmap_mode)
161
+
162
+ filename = os.path.join(full_path, 'output.pkl')
163
+ if not self._item_exists(filename):
164
+ raise KeyError("Non-existing item (may have been "
165
+ "cleared).\nFile %s does not exist" % filename)
166
+
167
+ # file-like object cannot be used when mmap_mode is set
168
+ if mmap_mode is None:
169
+ with self._open_item(filename, "rb") as f:
170
+ item = numpy_pickle.load(f)
171
+ else:
172
+ item = numpy_pickle.load(filename, mmap_mode=mmap_mode)
173
+ return item
174
+
175
+ def dump_item(self, path, item, verbose=1):
176
+ """Dump an item in the store at the path given as a list of
177
+ strings."""
178
+ try:
179
+ item_path = os.path.join(self.location, *path)
180
+ if not self._item_exists(item_path):
181
+ self.create_location(item_path)
182
+ filename = os.path.join(item_path, 'output.pkl')
183
+ if verbose > 10:
184
+ print('Persisting in %s' % item_path)
185
+
186
+ def write_func(to_write, dest_filename):
187
+ with self._open_item(dest_filename, "wb") as f:
188
+ numpy_pickle.dump(to_write, f,
189
+ compress=self.compress)
190
+
191
+ self._concurrency_safe_write(item, filename, write_func)
192
+ except: # noqa: E722
193
+ " Race condition in the creation of the directory "
194
+
195
+ def clear_item(self, path):
196
+ """Clear the item at the path, given as a list of strings."""
197
+ item_path = os.path.join(self.location, *path)
198
+ if self._item_exists(item_path):
199
+ self.clear_location(item_path)
200
+
201
+ def contains_item(self, path):
202
+ """Check if there is an item at the path, given as a list of
203
+ strings"""
204
+ item_path = os.path.join(self.location, *path)
205
+ filename = os.path.join(item_path, 'output.pkl')
206
+
207
+ return self._item_exists(filename)
208
+
209
+ def get_item_info(self, path):
210
+ """Return information about item."""
211
+ return {'location': os.path.join(self.location,
212
+ *path)}
213
+
214
+ def get_metadata(self, path):
215
+ """Return actual metadata of an item."""
216
+ try:
217
+ item_path = os.path.join(self.location, *path)
218
+ filename = os.path.join(item_path, 'metadata.json')
219
+ with self._open_item(filename, 'rb') as f:
220
+ return json.loads(f.read().decode('utf-8'))
221
+ except: # noqa: E722
222
+ return {}
223
+
224
+ def store_metadata(self, path, metadata):
225
+ """Store metadata of a computation."""
226
+ try:
227
+ item_path = os.path.join(self.location, *path)
228
+ self.create_location(item_path)
229
+ filename = os.path.join(item_path, 'metadata.json')
230
+
231
+ def write_func(to_write, dest_filename):
232
+ with self._open_item(dest_filename, "wb") as f:
233
+ f.write(json.dumps(to_write).encode('utf-8'))
234
+
235
+ self._concurrency_safe_write(metadata, filename, write_func)
236
+ except: # noqa: E722
237
+ pass
238
+
239
+ def contains_path(self, path):
240
+ """Check cached function is available in store."""
241
+ func_path = os.path.join(self.location, *path)
242
+ return self.object_exists(func_path)
243
+
244
+ def clear_path(self, path):
245
+ """Clear all items with a common path in the store."""
246
+ func_path = os.path.join(self.location, *path)
247
+ if self._item_exists(func_path):
248
+ self.clear_location(func_path)
249
+
250
+ def store_cached_func_code(self, path, func_code=None):
251
+ """Store the code of the cached function."""
252
+ func_path = os.path.join(self.location, *path)
253
+ if not self._item_exists(func_path):
254
+ self.create_location(func_path)
255
+
256
+ if func_code is not None:
257
+ filename = os.path.join(func_path, "func_code.py")
258
+ with self._open_item(filename, 'wb') as f:
259
+ f.write(func_code.encode('utf-8'))
260
+
261
+ def get_cached_func_code(self, path):
262
+ """Store the code of the cached function."""
263
+ path += ['func_code.py', ]
264
+ filename = os.path.join(self.location, *path)
265
+ try:
266
+ with self._open_item(filename, 'rb') as f:
267
+ return f.read().decode('utf-8')
268
+ except: # noqa: E722
269
+ raise
270
+
271
+ def get_cached_func_info(self, path):
272
+ """Return information related to the cached function if it exists."""
273
+ return {'location': os.path.join(self.location, *path)}
274
+
275
+ def clear(self):
276
+ """Clear the whole store content."""
277
+ self.clear_location(self.location)
278
+
279
+ def reduce_store_size(self, bytes_limit):
280
+ """Reduce store size to keep it under the given bytes limit."""
281
+ items_to_delete = self._get_items_to_delete(bytes_limit)
282
+
283
+ for item in items_to_delete:
284
+ if self.verbose > 10:
285
+ print('Deleting item {0}'.format(item))
286
+ try:
287
+ self.clear_location(item.path)
288
+ except OSError:
289
+ # Even with ignore_errors=True shutil.rmtree can raise OSError
290
+ # with:
291
+ # [Errno 116] Stale file handle if another process has deleted
292
+ # the folder already.
293
+ pass
294
+
295
+ def _get_items_to_delete(self, bytes_limit):
296
+ """Get items to delete to keep the store under a size limit."""
297
+ if isinstance(bytes_limit, str):
298
+ bytes_limit = memstr_to_bytes(bytes_limit)
299
+
300
+ items = self.get_items()
301
+ size = sum(item.size for item in items)
302
+
303
+ to_delete_size = size - bytes_limit
304
+ if to_delete_size < 0:
305
+ return []
306
+
307
+ # We want to delete first the cache items that were accessed a
308
+ # long time ago
309
+ items.sort(key=operator.attrgetter('last_access'))
310
+
311
+ items_to_delete = []
312
+ size_so_far = 0
313
+
314
+ for item in items:
315
+ if size_so_far > to_delete_size:
316
+ break
317
+
318
+ items_to_delete.append(item)
319
+ size_so_far += item.size
320
+
321
+ return items_to_delete
322
+
323
+ def _concurrency_safe_write(self, to_write, filename, write_func):
324
+ """Writes an object into a file in a concurrency-safe way."""
325
+ temporary_filename = concurrency_safe_write(to_write,
326
+ filename, write_func)
327
+ self._move_item(temporary_filename, filename)
328
+
329
+ def __repr__(self):
330
+ """Printable representation of the store location."""
331
+ return '{class_name}(location="{location}")'.format(
332
+ class_name=self.__class__.__name__, location=self.location)
333
+
334
+
335
+ class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):
336
+ """A StoreBackend used with local or network file systems."""
337
+
338
+ _open_item = staticmethod(open)
339
+ _item_exists = staticmethod(os.path.exists)
340
+ _move_item = staticmethod(concurrency_safe_rename)
341
+
342
+ def clear_location(self, location):
343
+ """Delete location on store."""
344
+ if (location == self.location):
345
+ rm_subdirs(location)
346
+ else:
347
+ shutil.rmtree(location, ignore_errors=True)
348
+
349
+ def create_location(self, location):
350
+ """Create object location on store"""
351
+ mkdirp(location)
352
+
353
+ def get_items(self):
354
+ """Returns the whole list of items available in the store."""
355
+ items = []
356
+
357
+ for dirpath, _, filenames in os.walk(self.location):
358
+ is_cache_hash_dir = re.match('[a-f0-9]{32}',
359
+ os.path.basename(dirpath))
360
+
361
+ if is_cache_hash_dir:
362
+ output_filename = os.path.join(dirpath, 'output.pkl')
363
+ try:
364
+ last_access = os.path.getatime(output_filename)
365
+ except OSError:
366
+ try:
367
+ last_access = os.path.getatime(dirpath)
368
+ except OSError:
369
+ # The directory has already been deleted
370
+ continue
371
+
372
+ last_access = datetime.datetime.fromtimestamp(last_access)
373
+ try:
374
+ full_filenames = [os.path.join(dirpath, fn)
375
+ for fn in filenames]
376
+ dirsize = sum(os.path.getsize(fn)
377
+ for fn in full_filenames)
378
+ except OSError:
379
+ # Either output_filename or one of the files in
380
+ # dirpath does not exist any more. We assume this
381
+ # directory is being cleaned by another process already
382
+ continue
383
+
384
+ items.append(CacheItemInfo(dirpath, dirsize,
385
+ last_access))
386
+
387
+ return items
388
+
389
+ def configure(self, location, verbose=1, backend_options=None):
390
+ """Configure the store backend.
391
+
392
+ For this backend, valid store options are 'compress' and 'mmap_mode'
393
+ """
394
+ if backend_options is None:
395
+ backend_options = {}
396
+
397
+ # setup location directory
398
+ self.location = location
399
+ if not os.path.exists(self.location):
400
+ mkdirp(self.location)
401
+
402
+ # item can be stored compressed for faster I/O
403
+ self.compress = backend_options.get('compress', False)
404
+
405
+ # FileSystemStoreBackend can be used with mmap_mode options under
406
+ # certain conditions.
407
+ mmap_mode = backend_options.get('mmap_mode')
408
+ if self.compress and mmap_mode is not None:
409
+ warnings.warn('Compressed items cannot be memmapped in a '
410
+ 'filesystem store. Option will be ignored.',
411
+ stacklevel=2)
412
+
413
+ self.mmap_mode = mmap_mode
414
+ self.verbose = verbose
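The mixin above is easiest to see in action through the filesystem backend. Below is a minimal, illustrative sketch of driving FileSystemStoreBackend directly (normally Memory does this for you); the import path joblib._store_backends is an assumption based on the file being added, and the temporary directory is only for demonstration.

import tempfile
# module path assumed; it matches the store-backends file shown above
from joblib._store_backends import FileSystemStoreBackend

backend = FileSystemStoreBackend()
backend.configure(location=tempfile.mkdtemp(), verbose=0,
                  backend_options={'compress': False})

# Items are addressed by a path given as a list of strings
# (typically [func_name, args_hash] when driven by Memory).
backend.dump_item(['demo_func', 'abc123'], {'result': 42})
assert backend.contains_item(['demo_func', 'abc123'])
print(backend.load_item(['demo_func', 'abc123']))  # {'result': 42}

backend.clear()  # wipe the whole store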
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/backports.py ADDED
@@ -0,0 +1,78 @@
1
+ """
2
+ Backports of fixes for joblib dependencies
3
+ """
4
+ import os
5
+ import time
6
+
7
+ from distutils.version import LooseVersion
8
+ from os.path import basename
9
+ from multiprocessing import util
10
+
11
+
12
+ try:
13
+ import numpy as np
14
+
15
+ def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
16
+ shape=None, order='C', unlink_on_gc_collect=False):
17
+ """Custom memmap constructor compatible with numpy.memmap.
18
+
19
+ This function:
20
+ - is a backport the numpy memmap offset fix (See
21
+ https://github.com/numpy/numpy/pull/8443 for more details.
22
+ The numpy fix is available starting numpy 1.13)
23
+ - adds ``unlink_on_gc_collect``, which specifies explicitly whether
24
+ the process re-constructing the memmap owns a reference to the
25
+ underlying file. If set to True, it adds a finalizer to the
26
+ newly-created memmap that sends a maybe_unlink request for the
27
+ memmaped file to resource_tracker.
28
+ """
29
+ util.debug(
30
+ "[MEMMAP READ] creating a memmap (shape {}, filename {}, "
31
+ "pid {})".format(shape, basename(filename), os.getpid())
32
+ )
33
+
34
+ mm = np.memmap(filename, dtype=dtype, mode=mode, offset=offset,
35
+ shape=shape, order=order)
36
+ if LooseVersion(np.__version__) < '1.13':
37
+ mm.offset = offset
38
+ if unlink_on_gc_collect:
39
+ from ._memmapping_reducer import add_maybe_unlink_finalizer
40
+ add_maybe_unlink_finalizer(mm)
41
+ return mm
42
+ except ImportError:
43
+ def make_memmap(filename, dtype='uint8', mode='r+', offset=0,
44
+ shape=None, order='C', unlink_on_gc_collect=False):
45
+ raise NotImplementedError(
46
+ "'joblib.backports.make_memmap' should not be used "
47
+ 'if numpy is not installed.')
48
+
49
+
50
+ if os.name == 'nt':
51
+ # https://github.com/joblib/joblib/issues/540
52
+ access_denied_errors = (5, 13)
53
+ from os import replace
54
+
55
+ def concurrency_safe_rename(src, dst):
56
+ """Renames ``src`` into ``dst`` overwriting ``dst`` if it exists.
57
+
58
+ On Windows os.replace can yield permission errors if executed by two
59
+ different processes.
60
+ """
61
+ max_sleep_time = 1
62
+ total_sleep_time = 0
63
+ sleep_time = 0.001
64
+ while total_sleep_time < max_sleep_time:
65
+ try:
66
+ replace(src, dst)
67
+ break
68
+ except Exception as exc:
69
+ if getattr(exc, 'winerror', None) in access_denied_errors:
70
+ time.sleep(sleep_time)
71
+ total_sleep_time += sleep_time
72
+ sleep_time *= 2
73
+ else:
74
+ raise
75
+ else:
76
+ raise
77
+ else:
78
+ from os import replace as concurrency_safe_rename # noqa
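A small usage sketch for the rename helper above: on POSIX it is simply os.replace, while on Windows it retries on transient access-denied errors. The import path joblib.backports matches the file being added; the filenames are purely illustrative.

import os
import tempfile
# module path assumed from the file added above
from joblib.backports import concurrency_safe_rename

tmp_dir = tempfile.mkdtemp()
src = os.path.join(tmp_dir, 'output.pkl.thread-1-pid-1234')  # illustrative temp name
dst = os.path.join(tmp_dir, 'output.pkl')
with open(src, 'wb') as f:
    f.write(b'payload')

concurrency_safe_rename(src, dst)   # atomically replaces dst if it already exists
assert os.path.exists(dst) and not os.path.exists(src)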
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/disk.py ADDED
@@ -0,0 +1,136 @@
1
+ """
2
+ Disk management utilities.
3
+ """
4
+
5
+ # Authors: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Lars Buitinck
7
+ # Copyright (c) 2010 Gael Varoquaux
8
+ # License: BSD Style, 3 clauses.
9
+
10
+
11
+ import os
12
+ import sys
13
+ import time
14
+ import errno
15
+ import shutil
16
+
17
+ from multiprocessing import util
18
+
19
+
20
+ try:
21
+ WindowsError
22
+ except NameError:
23
+ WindowsError = OSError
24
+
25
+
26
+ def disk_used(path):
27
+ """ Return the disk usage in a directory."""
28
+ size = 0
29
+ for file in os.listdir(path) + ['.']:
30
+ stat = os.stat(os.path.join(path, file))
31
+ if hasattr(stat, 'st_blocks'):
32
+ size += stat.st_blocks * 512
33
+ else:
34
+ # on some platform st_blocks is not available (e.g., Windows)
35
+ # approximate by rounding to next multiple of 512
36
+ size += (stat.st_size // 512 + 1) * 512
37
+ # We need to convert to int to avoid having longs on some systems (we
38
+ # don't want longs to avoid problems with SQLite)
39
+ return int(size / 1024.)
40
+
41
+
42
+ def memstr_to_bytes(text):
43
+ """ Convert a memory text to its value in bytes.
44
+ """
45
+ kilo = 1024
46
+ units = dict(K=kilo, M=kilo ** 2, G=kilo ** 3)
47
+ try:
48
+ size = int(units[text[-1]] * float(text[:-1]))
49
+ except (KeyError, ValueError) as e:
50
+ raise ValueError(
51
+ "Invalid literal for size give: %s (type %s) should be "
52
+ "alike '10G', '500M', '50K'." % (text, type(text))) from e
53
+ return size
54
+
55
+
56
+ def mkdirp(d):
57
+ """Ensure directory d exists (like mkdir -p on Unix)
58
+ No guarantee that the directory is writable.
59
+ """
60
+ try:
61
+ os.makedirs(d)
62
+ except OSError as e:
63
+ if e.errno != errno.EEXIST:
64
+ raise
65
+
66
+
67
+ # if a rmtree operation fails in rm_subdirs, wait for this much time (in secs),
68
+ # then retry up to RM_SUBDIRS_N_RETRY times. If it still fails, raise the
69
+ # exception. This mechanism ensures that the sub-process gc has time to
70
+ # collect and close the memmaps before we fail.
71
+ RM_SUBDIRS_RETRY_TIME = 0.1
72
+ RM_SUBDIRS_N_RETRY = 5
73
+
74
+
75
+ def rm_subdirs(path, onerror=None):
76
+ """Remove all subdirectories in this path.
77
+
78
+ The directory indicated by `path` is left in place, and its subdirectories
79
+ are erased.
80
+
81
+ If onerror is set, it is called to handle the error with arguments (func,
82
+ path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
83
+ path is the argument to that function that caused it to fail; and
84
+ exc_info is a tuple returned by sys.exc_info(). If onerror is None,
85
+ an exception is raised.
86
+ """
87
+
88
+ # NOTE this code is adapted from the one in shutil.rmtree, and is
89
+ # just as fast
90
+
91
+ names = []
92
+ try:
93
+ names = os.listdir(path)
94
+ except os.error:
95
+ if onerror is not None:
96
+ onerror(os.listdir, path, sys.exc_info())
97
+ else:
98
+ raise
99
+
100
+ for name in names:
101
+ fullname = os.path.join(path, name)
102
+ delete_folder(fullname, onerror=onerror)
103
+
104
+
105
+ def delete_folder(folder_path, onerror=None, allow_non_empty=True):
106
+ """Utility function to cleanup a temporary folder if it still exists."""
107
+ if os.path.isdir(folder_path):
108
+ if onerror is not None:
109
+ shutil.rmtree(folder_path, False, onerror)
110
+ else:
111
+ # allow the rmtree to fail once, wait and re-try.
112
+ # if the error is raised again, fail
113
+ err_count = 0
114
+ while True:
115
+ files = os.listdir(folder_path)
116
+ try:
117
+ if len(files) == 0 or allow_non_empty:
118
+ shutil.rmtree(
119
+ folder_path, ignore_errors=False, onerror=None
120
+ )
121
+ util.debug(
122
+ "Sucessfully deleted {}".format(folder_path))
123
+ break
124
+ else:
125
+ raise OSError(
126
+ "Expected empty folder {} but got {} "
127
+ "files.".format(folder_path, len(files))
128
+ )
129
+ except (OSError, WindowsError):
130
+ err_count += 1
131
+ if err_count > RM_SUBDIRS_N_RETRY:
132
+ # the folder cannot be deleted right now. It maybe
133
+ # because some temporary files have not been deleted
134
+ # yet.
135
+ raise
136
+ time.sleep(RM_SUBDIRS_RETRY_TIME)
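For reference, a short sketch of the two helpers from this file that the rest of the upload leans on, memstr_to_bytes and mkdirp. The import path joblib.disk matches the file above; the /tmp path is only illustrative.

# module path assumed from the file added above
from joblib.disk import memstr_to_bytes, mkdirp

print(memstr_to_bytes('500K'))   # 512000
print(memstr_to_bytes('2M'))     # 2097152
print(memstr_to_bytes('10G'))    # 10737418240

mkdirp('/tmp/joblib_demo/a/b')   # like `mkdir -p`: creates parents, no error if present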
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/func_inspect.py ADDED
@@ -0,0 +1,365 @@
1
+ """
2
+ My own variation on function-specific inspect-like features.
3
+ """
4
+
5
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
6
+ # Copyright (c) 2009 Gael Varoquaux
7
+ # License: BSD Style, 3 clauses.
8
+
9
+ import inspect
10
+ import warnings
11
+ import re
12
+ import os
13
+ import collections
14
+
15
+ from itertools import islice
16
+ from tokenize import open as open_py_source
17
+
18
+ from .logger import pformat
19
+
20
+ full_argspec_fields = ('args varargs varkw defaults kwonlyargs '
21
+ 'kwonlydefaults annotations')
22
+ full_argspec_type = collections.namedtuple('FullArgSpec', full_argspec_fields)
23
+
24
+
25
+ def get_func_code(func):
26
+ """ Attempts to retrieve a reliable function code hash.
27
+
28
+ The reason we don't use inspect.getsource is that it caches the
29
+ source, whereas we want this to be modified on the fly when the
30
+ function is modified.
31
+
32
+ Returns
33
+ -------
34
+ func_code: string
35
+ The function code
36
+ source_file: string
37
+ The path to the file in which the function is defined.
38
+ first_line: int
39
+ The first line of the code in the source file.
40
+
41
+ Notes
42
+ ------
43
+ This function does a bit more magic than inspect, and is thus
44
+ more robust.
45
+ """
46
+ source_file = None
47
+ try:
48
+ code = func.__code__
49
+ source_file = code.co_filename
50
+ if not os.path.exists(source_file):
51
+ # Use inspect for lambda functions and functions defined in an
52
+ # interactive shell, or in doctests
53
+ source_code = ''.join(inspect.getsourcelines(func)[0])
54
+ line_no = 1
55
+ if source_file.startswith('<doctest '):
56
+ source_file, line_no = re.match(
57
+ r'\<doctest (.*\.rst)\[(.*)\]\>', source_file).groups()
58
+ line_no = int(line_no)
59
+ source_file = '<doctest %s>' % source_file
60
+ return source_code, source_file, line_no
61
+ # Try to retrieve the source code.
62
+ with open_py_source(source_file) as source_file_obj:
63
+ first_line = code.co_firstlineno
64
+ # All the lines after the function definition:
65
+ source_lines = list(islice(source_file_obj, first_line - 1, None))
66
+ return ''.join(inspect.getblock(source_lines)), source_file, first_line
67
+ except:
68
+ # If the source code fails, we use the hash. This is fragile and
69
+ # might change from one session to another.
70
+ if hasattr(func, '__code__'):
71
+ # Python 3.X
72
+ return str(func.__code__.__hash__()), source_file, -1
73
+ else:
74
+ # Weird objects like numpy ufunc don't have __code__
75
+ # This is fragile, as quite often the id of the object is
76
+ # in the repr, so it might not persist across sessions,
77
+ # however it will work for ufuncs.
78
+ return repr(func), source_file, -1
79
+
80
+
81
+ def _clean_win_chars(string):
82
+ """Windows cannot encode some characters in filename."""
83
+ import urllib
84
+ if hasattr(urllib, 'quote'):
85
+ quote = urllib.quote
86
+ else:
87
+ # In Python 3, quote is elsewhere
88
+ import urllib.parse
89
+ quote = urllib.parse.quote
90
+ for char in ('<', '>', '!', ':', '\\'):
91
+ string = string.replace(char, quote(char))
92
+ return string
93
+
94
+
95
+ def get_func_name(func, resolv_alias=True, win_characters=True):
96
+ """ Return the function import path (as a list of module names), and
97
+ a name for the function.
98
+
99
+ Parameters
100
+ ----------
101
+ func: callable
102
+ The func to inspect
103
+ resolv_alias: boolean, optional
104
+ If true, possible local aliases are indicated.
105
+ win_characters: boolean, optional
106
+ If true, substitute special characters using urllib.quote
107
+ This is useful in Windows, as it cannot encode some filenames
108
+ """
109
+ if hasattr(func, '__module__'):
110
+ module = func.__module__
111
+ else:
112
+ try:
113
+ module = inspect.getmodule(func)
114
+ except TypeError:
115
+ if hasattr(func, '__class__'):
116
+ module = func.__class__.__module__
117
+ else:
118
+ module = 'unknown'
119
+ if module is None:
120
+ # Happens in doctests, eg
121
+ module = ''
122
+ if module == '__main__':
123
+ try:
124
+ filename = os.path.abspath(inspect.getsourcefile(func))
125
+ except:
126
+ filename = None
127
+ if filename is not None:
128
+ # mangling of full path to filename
129
+ parts = filename.split(os.sep)
130
+ if parts[-1].startswith('<ipython-input'):
131
+ # We're in an IPython (or notebook) session. parts[-1] comes
132
+ # from func.__code__.co_filename and is of the form
133
+ # <ipython-input-N-XYZ>, where:
134
+ # - N is the cell number where the function was defined
135
+ # - XYZ is a hash representing the function's code (and name).
136
+ # It will be consistent across sessions and kernel restarts,
137
+ # and will change if the function's code/name changes
138
+ # We remove N so that cache is properly hit if the cell where
139
+ # the func is defined is re-executed.
140
+ # The XYZ hash should avoid collisions between functions with
141
+ # the same name, both within the same notebook but also across
142
+ # notebooks
143
+ splitted = parts[-1].split('-')
144
+ parts[-1] = '-'.join(splitted[:2] + splitted[3:])
145
+ elif len(parts) > 2 and parts[-2].startswith('ipykernel_'):
146
+ # In a notebook session (ipykernel). Filename seems to be 'xyz'
147
+ # of above. parts[-2] has the structure ipykernel_XXXXXX where
148
+ # XXXXXX is a six-digit number identifying the current run (?).
149
+ # If we split it off, the function again has the same
150
+ # identifier across runs.
151
+ parts[-2] = 'ipykernel'
152
+ filename = '-'.join(parts)
153
+ if filename.endswith('.py'):
154
+ filename = filename[:-3]
155
+ module = module + '-' + filename
156
+ module = module.split('.')
157
+ if hasattr(func, 'func_name'):
158
+ name = func.func_name
159
+ elif hasattr(func, '__name__'):
160
+ name = func.__name__
161
+ else:
162
+ name = 'unknown'
163
+ # Hack to detect functions not defined at the module-level
164
+ if resolv_alias:
165
+ # TODO: Maybe add a warning here?
166
+ if hasattr(func, 'func_globals') and name in func.func_globals:
167
+ if not func.func_globals[name] is func:
168
+ name = '%s-alias' % name
169
+ if inspect.ismethod(func):
170
+ # We need to add the name of the class
171
+ if hasattr(func, 'im_class'):
172
+ klass = func.im_class
173
+ module.append(klass.__name__)
174
+ if os.name == 'nt' and win_characters:
175
+ # Windows can't encode certain characters in filenames
176
+ name = _clean_win_chars(name)
177
+ module = [_clean_win_chars(s) for s in module]
178
+ return module, name
179
+
180
+
181
+ def _signature_str(function_name, arg_sig):
182
+ """Helper function to output a function signature"""
183
+ return '{}{}'.format(function_name, arg_sig)
184
+
185
+
186
+ def _function_called_str(function_name, args, kwargs):
187
+ """Helper function to output a function call"""
188
+ template_str = '{0}({1}, {2})'
189
+
190
+ args_str = repr(args)[1:-1]
191
+ kwargs_str = ', '.join('%s=%s' % (k, v)
192
+ for k, v in kwargs.items())
193
+ return template_str.format(function_name, args_str,
194
+ kwargs_str)
195
+
196
+
197
+ def filter_args(func, ignore_lst, args=(), kwargs=dict()):
198
+ """ Filters the given args and kwargs using a list of arguments to
199
+ ignore, and a function specification.
200
+
201
+ Parameters
202
+ ----------
203
+ func: callable
204
+ Function giving the argument specification
205
+ ignore_lst: list of strings
206
+ List of arguments to ignore (either a name of an argument
207
+ in the function spec, or '*', or '**')
208
+ *args: list
209
+ Positional arguments passed to the function.
210
+ **kwargs: dict
211
+ Keyword arguments passed to the function
212
+
213
+ Returns
214
+ -------
215
+ filtered_args: list
216
+ List of filtered positional and keyword arguments.
217
+ """
218
+ args = list(args)
219
+ if isinstance(ignore_lst, str):
220
+ # Catch a common mistake
221
+ raise ValueError(
222
+ 'ignore_lst must be a list of parameters to ignore '
223
+ '%s (type %s) was given' % (ignore_lst, type(ignore_lst)))
224
+ # Special case for functools.partial objects
225
+ if (not inspect.ismethod(func) and not inspect.isfunction(func)):
226
+ if ignore_lst:
227
+ warnings.warn('Cannot inspect object %s, ignore list will '
228
+ 'not work.' % func, stacklevel=2)
229
+ return {'*': args, '**': kwargs}
230
+ arg_sig = inspect.signature(func)
231
+ arg_names = []
232
+ arg_defaults = []
233
+ arg_kwonlyargs = []
234
+ arg_varargs = None
235
+ arg_varkw = None
236
+ for param in arg_sig.parameters.values():
237
+ if param.kind is param.POSITIONAL_OR_KEYWORD:
238
+ arg_names.append(param.name)
239
+ elif param.kind is param.KEYWORD_ONLY:
240
+ arg_names.append(param.name)
241
+ arg_kwonlyargs.append(param.name)
242
+ elif param.kind is param.VAR_POSITIONAL:
243
+ arg_varargs = param.name
244
+ elif param.kind is param.VAR_KEYWORD:
245
+ arg_varkw = param.name
246
+ if param.default is not param.empty:
247
+ arg_defaults.append(param.default)
248
+ if inspect.ismethod(func):
249
+ # First argument is 'self', it has been removed by Python
250
+ # we need to add it back:
251
+ args = [func.__self__, ] + args
252
+ # func is an instance method, inspect.signature(func) does not
253
+ # include self, we need to fetch it from the class method, i.e
254
+ # func.__func__
255
+ class_method_sig = inspect.signature(func.__func__)
256
+ self_name = next(iter(class_method_sig.parameters))
257
+ arg_names = [self_name] + arg_names
258
+ # XXX: Maybe I need an inspect.isbuiltin to detect C-level methods, such
259
+ # as on ndarrays.
260
+
261
+ _, name = get_func_name(func, resolv_alias=False)
262
+ arg_dict = dict()
263
+ arg_position = -1
264
+ for arg_position, arg_name in enumerate(arg_names):
265
+ if arg_position < len(args):
266
+ # Positional argument or keyword argument given as positional
267
+ if arg_name not in arg_kwonlyargs:
268
+ arg_dict[arg_name] = args[arg_position]
269
+ else:
270
+ raise ValueError(
271
+ "Keyword-only parameter '%s' was passed as "
272
+ 'positional parameter for %s:\n'
273
+ ' %s was called.'
274
+ % (arg_name,
275
+ _signature_str(name, arg_sig),
276
+ _function_called_str(name, args, kwargs))
277
+ )
278
+
279
+ else:
280
+ position = arg_position - len(arg_names)
281
+ if arg_name in kwargs:
282
+ arg_dict[arg_name] = kwargs[arg_name]
283
+ else:
284
+ try:
285
+ arg_dict[arg_name] = arg_defaults[position]
286
+ except (IndexError, KeyError) as e:
287
+ # Missing argument
288
+ raise ValueError(
289
+ 'Wrong number of arguments for %s:\n'
290
+ ' %s was called.'
291
+ % (_signature_str(name, arg_sig),
292
+ _function_called_str(name, args, kwargs))
293
+ ) from e
294
+
295
+ varkwargs = dict()
296
+ for arg_name, arg_value in sorted(kwargs.items()):
297
+ if arg_name in arg_dict:
298
+ arg_dict[arg_name] = arg_value
299
+ elif arg_varkw is not None:
300
+ varkwargs[arg_name] = arg_value
301
+ else:
302
+ raise TypeError("Ignore list for %s() contains an unexpected "
303
+ "keyword argument '%s'" % (name, arg_name))
304
+
305
+ if arg_varkw is not None:
306
+ arg_dict['**'] = varkwargs
307
+ if arg_varargs is not None:
308
+ varargs = args[arg_position + 1:]
309
+ arg_dict['*'] = varargs
310
+
311
+ # Now remove the arguments to be ignored
312
+ for item in ignore_lst:
313
+ if item in arg_dict:
314
+ arg_dict.pop(item)
315
+ else:
316
+ raise ValueError("Ignore list: argument '%s' is not defined for "
317
+ "function %s"
318
+ % (item,
319
+ _signature_str(name, arg_sig))
320
+ )
321
+ # XXX: Return a sorted list of pairs?
322
+ return arg_dict
323
+
324
+
325
+ def _format_arg(arg):
326
+ formatted_arg = pformat(arg, indent=2)
327
+ if len(formatted_arg) > 1500:
328
+ formatted_arg = '%s...' % formatted_arg[:700]
329
+ return formatted_arg
330
+
331
+
332
+ def format_signature(func, *args, **kwargs):
333
+ # XXX: Should this use inspect.formatargvalues/formatargspec?
334
+ module, name = get_func_name(func)
335
+ module = [m for m in module if m]
336
+ if module:
337
+ module.append(name)
338
+ module_path = '.'.join(module)
339
+ else:
340
+ module_path = name
341
+ arg_str = list()
342
+ previous_length = 0
343
+ for arg in args:
344
+ formatted_arg = _format_arg(arg)
345
+ if previous_length > 80:
346
+ formatted_arg = '\n%s' % formatted_arg
347
+ previous_length = len(formatted_arg)
348
+ arg_str.append(formatted_arg)
349
+ arg_str.extend(['%s=%s' % (v, _format_arg(i)) for v, i in kwargs.items()])
350
+ arg_str = ', '.join(arg_str)
351
+
352
+ signature = '%s(%s)' % (name, arg_str)
353
+ return module_path, signature
354
+
355
+
356
+ def format_call(func, args, kwargs, object_name="Memory"):
357
+ """ Returns a nicely formatted statement displaying the function
358
+ call with the given arguments.
359
+ """
360
+ path, signature = format_signature(func, *args, **kwargs)
361
+ msg = '%s\n[%s] Calling %s...\n%s' % (80 * '_', object_name,
362
+ path, signature)
363
+ return msg
364
+ # XXX: Not using logging framework
365
+ # self.debug(msg)
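A hedged sketch of how the argument-filtering helper above behaves: filter_args is what builds the canonical argument dict that gets hashed for caching, and names listed in ignore_lst are dropped from it. The example function is hypothetical; the import path joblib.func_inspect matches the file above.

# module path assumed from the file added above
from joblib.func_inspect import filter_args

def area(width, height=2, *, scale=1):
    return width * height * scale

# 'scale' is ignored, so calls differing only in scale would share a cache entry.
print(filter_args(area, ['scale'], args=(3,), kwargs={'height': 4}))
# -> {'width': 3, 'height': 4}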
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/hashing.py ADDED
@@ -0,0 +1,266 @@
1
+ """
2
+ Fast cryptographic hash of Python objects, with a special case for fast
3
+ hashing of numpy arrays.
4
+ """
5
+
6
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
7
+ # Copyright (c) 2009 Gael Varoquaux
8
+ # License: BSD Style, 3 clauses.
9
+
10
+ import pickle
11
+ import hashlib
12
+ import sys
13
+ import types
14
+ import struct
15
+ import io
16
+ import decimal
17
+
18
+
19
+ Pickler = pickle._Pickler
20
+
21
+
22
+ class _ConsistentSet(object):
23
+ """ Class used to ensure the hash of Sets is preserved
24
+ whatever the order of its items.
25
+ """
26
+ def __init__(self, set_sequence):
27
+ # Forces order of elements in set to ensure consistent hash.
28
+ try:
29
+ # Trying first to order the set assuming the type of elements is
30
+ # consistent and orderable.
31
+ # This fails on python 3 when elements are unorderable
32
+ # but we keep it in a try as it's faster.
33
+ self._sequence = sorted(set_sequence)
34
+ except (TypeError, decimal.InvalidOperation):
35
+ # If elements are unorderable, sorting them using their hash.
36
+ # This is slower but works in any case.
37
+ self._sequence = sorted((hash(e) for e in set_sequence))
38
+
39
+
40
+ class _MyHash(object):
41
+ """ Class used to hash objects that won't normally pickle """
42
+
43
+ def __init__(self, *args):
44
+ self.args = args
45
+
46
+
47
+ class Hasher(Pickler):
48
+ """ A subclass of pickler, to do cryptographic hashing, rather than
49
+ pickling.
50
+ """
51
+
52
+ def __init__(self, hash_name='md5'):
53
+ self.stream = io.BytesIO()
54
+ # By default we want a pickle protocol that only changes with
55
+ # the major python version and not the minor one
56
+ protocol = 3
57
+ Pickler.__init__(self, self.stream, protocol=protocol)
58
+ # Initialise the hash obj
59
+ self._hash = hashlib.new(hash_name)
60
+
61
+ def hash(self, obj, return_digest=True):
62
+ try:
63
+ self.dump(obj)
64
+ except pickle.PicklingError as e:
65
+ e.args += ('PicklingError while hashing %r: %r' % (obj, e),)
66
+ raise
67
+ dumps = self.stream.getvalue()
68
+ self._hash.update(dumps)
69
+ if return_digest:
70
+ return self._hash.hexdigest()
71
+
72
+ def save(self, obj):
73
+ if isinstance(obj, (types.MethodType, type({}.pop))):
74
+ # the Pickler cannot pickle instance methods; here we decompose
75
+ # them into components that make them uniquely identifiable
76
+ if hasattr(obj, '__func__'):
77
+ func_name = obj.__func__.__name__
78
+ else:
79
+ func_name = obj.__name__
80
+ inst = obj.__self__
81
+ if type(inst) == type(pickle):
82
+ obj = _MyHash(func_name, inst.__name__)
83
+ elif inst is None:
84
+ # type(None) or type(module) do not pickle
85
+ obj = _MyHash(func_name, inst)
86
+ else:
87
+ cls = obj.__self__.__class__
88
+ obj = _MyHash(func_name, inst, cls)
89
+ Pickler.save(self, obj)
90
+
91
+ def memoize(self, obj):
92
+ # We want hashing to be sensitive to value instead of reference.
93
+ # For example we want ['aa', 'aa'] and ['aa', 'aaZ'[:2]]
94
+ # to hash to the same value and that's why we disable memoization
95
+ # for strings
96
+ if isinstance(obj, (bytes, str)):
97
+ return
98
+ Pickler.memoize(self, obj)
99
+
100
+ # The dispatch table of the pickler is not accessible in Python
101
+ # 3, as these lines are only bugware for IPython, we skip them.
102
+ def save_global(self, obj, name=None, pack=struct.pack):
103
+ # We have to override this method in order to deal with objects
104
+ # defined interactively in IPython that are not injected in
105
+ # __main__
106
+ kwargs = dict(name=name, pack=pack)
107
+ del kwargs['pack']
108
+ try:
109
+ Pickler.save_global(self, obj, **kwargs)
110
+ except pickle.PicklingError:
111
+ Pickler.save_global(self, obj, **kwargs)
112
+ module = getattr(obj, "__module__", None)
113
+ if module == '__main__':
114
+ my_name = name
115
+ if my_name is None:
116
+ my_name = obj.__name__
117
+ mod = sys.modules[module]
118
+ if not hasattr(mod, my_name):
119
+ # IPython doesn't inject the variables defined
120
+ # interactively in __main__
121
+ setattr(mod, my_name, obj)
122
+
123
+ dispatch = Pickler.dispatch.copy()
124
+ # builtin
125
+ dispatch[type(len)] = save_global
126
+ # type
127
+ dispatch[type(object)] = save_global
128
+ # classobj
129
+ dispatch[type(Pickler)] = save_global
130
+ # function
131
+ dispatch[type(pickle.dump)] = save_global
132
+
133
+ def _batch_setitems(self, items):
134
+ # forces order of keys in dict to ensure consistent hash.
135
+ try:
136
+ # Trying first to compare dict assuming the type of keys is
137
+ # consistent and orderable.
138
+ # This fails on python 3 when keys are unorderable
139
+ # but we keep it in a try as it's faster.
140
+ Pickler._batch_setitems(self, iter(sorted(items)))
141
+ except TypeError:
142
+ # If keys are unorderable, sorting them using their hash. This is
143
+ # slower but works in any case.
144
+ Pickler._batch_setitems(self, iter(sorted((hash(k), v)
145
+ for k, v in items)))
146
+
147
+ def save_set(self, set_items):
148
+ # forces order of items in Set to ensure consistent hash
149
+ Pickler.save(self, _ConsistentSet(set_items))
150
+
151
+ dispatch[type(set())] = save_set
152
+
153
+
154
+ class NumpyHasher(Hasher):
155
+ """ Special case the hasher for when numpy is loaded.
156
+ """
157
+
158
+ def __init__(self, hash_name='md5', coerce_mmap=False):
159
+ """
160
+ Parameters
161
+ ----------
162
+ hash_name: string
163
+ The hash algorithm to be used
164
+ coerce_mmap: boolean
165
+ Make no difference between np.memmap and np.ndarray
166
+ objects.
167
+ """
168
+ self.coerce_mmap = coerce_mmap
169
+ Hasher.__init__(self, hash_name=hash_name)
170
+ # delayed import of numpy, to avoid tight coupling
171
+ import numpy as np
172
+ self.np = np
173
+ if hasattr(np, 'getbuffer'):
174
+ self._getbuffer = np.getbuffer
175
+ else:
176
+ self._getbuffer = memoryview
177
+
178
+ def save(self, obj):
179
+ """ Subclass the save method, to hash ndarray subclass, rather
180
+ than pickling them. Off course, this is a total abuse of
181
+ the Pickler class.
182
+ """
183
+ if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
184
+ # Compute a hash of the object
185
+ # The update function of the hash requires a c_contiguous buffer.
186
+ if obj.shape == ():
187
+ # 0d arrays need to be flattened because viewing them as bytes
188
+ # raises a ValueError exception.
189
+ obj_c_contiguous = obj.flatten()
190
+ elif obj.flags.c_contiguous:
191
+ obj_c_contiguous = obj
192
+ elif obj.flags.f_contiguous:
193
+ obj_c_contiguous = obj.T
194
+ else:
195
+ # Cater for non-single-segment arrays: this creates a
196
+ # copy, and thus aleviates this issue.
197
+ # XXX: There might be a more efficient way of doing this
198
+ obj_c_contiguous = obj.flatten()
199
+
200
+ # memoryview is not supported for some dtypes, e.g. datetime64, see
201
+ # https://github.com/numpy/numpy/issues/4983. The
202
+ # workaround is to view the array as bytes before
203
+ # taking the memoryview.
204
+ self._hash.update(
205
+ self._getbuffer(obj_c_contiguous.view(self.np.uint8)))
206
+
207
+ # We store the class, to be able to distinguish between
208
+ # Objects with the same binary content, but different
209
+ # classes.
210
+ if self.coerce_mmap and isinstance(obj, self.np.memmap):
211
+ # We don't make the difference between memmap and
212
+ # normal ndarrays, to be able to reload previously
213
+ # computed results with memmap.
214
+ klass = self.np.ndarray
215
+ else:
216
+ klass = obj.__class__
217
+ # We also return the dtype and the shape, to distinguish
218
+ # different views on the same data with different dtypes.
219
+
220
+ # The object will be pickled by the pickler hashed at the end.
221
+ obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
222
+ elif isinstance(obj, self.np.dtype):
223
+ # numpy.dtype consistent hashing is tricky to get right. This comes
224
+ # from the fact that atomic np.dtype objects are interned:
225
+ # ``np.dtype('f4') is np.dtype('f4')``. The situation is
226
+ # complicated by the fact that this interning does not resist a
227
+ # simple pickle.load/dump roundtrip:
228
+ # ``pickle.loads(pickle.dumps(np.dtype('f4'))) is not
229
+ # np.dtype('f4')``. Because pickle relies on memoization during
230
+ # pickling, it is easy to
231
+ # produce different hashes for seemingly identical objects, such as
232
+ # ``[np.dtype('f4'), np.dtype('f4')]``
233
+ # and ``[np.dtype('f4'), pickle.loads(pickle.dumps('f4'))]``.
234
+ # To prevent memoization from interfering with hashing, we isolate
235
+ # the serialization (and thus the pickle memoization) of each dtype
236
+ # using each time a different ``pickle.dumps`` call unrelated to
237
+ # the current Hasher instance.
238
+ self._hash.update("_HASHED_DTYPE".encode('utf-8'))
239
+ self._hash.update(pickle.dumps(obj))
240
+ return
241
+ Hasher.save(self, obj)
242
+
243
+
244
+ def hash(obj, hash_name='md5', coerce_mmap=False):
245
+ """ Quick calculation of a hash to identify uniquely Python objects
246
+ containing numpy arrays.
247
+
248
+
249
+ Parameters
250
+ -----------
251
+ hash_name: 'md5' or 'sha1'
252
+ Hashing algorithm used. sha1 is supposedly safer, but md5 is
253
+ faster.
254
+ coerce_mmap: boolean
255
+ Make no difference between np.memmap and np.ndarray
256
+ """
257
+ valid_hash_names = ('md5', 'sha1')
258
+ if hash_name not in valid_hash_names:
259
+ raise ValueError("Valid options for 'hash_name' are {}. "
260
+ "Got hash_name={!r} instead."
261
+ .format(valid_hash_names, hash_name))
262
+ if 'numpy' in sys.modules:
263
+ hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
264
+ else:
265
+ hasher = Hasher(hash_name=hash_name)
266
+ return hasher.hash(obj)
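The public entry point is the hash() function at the end of the file. A minimal sketch of the behaviour it documents, assuming the usual joblib.hashing module path and numpy being installed (as it is in this environment):

import numpy as np
# module path assumed from the file added above
from joblib.hashing import hash as joblib_hash

a = np.arange(10)
b = np.arange(10)

# Hashing is value-based, so equal content gives equal digests...
assert joblib_hash(a) == joblib_hash(b)
# ...while a different dtype (or shape/strides) changes the digest.
assert joblib_hash(a) != joblib_hash(a.astype(np.float64))

print(joblib_hash({'x': a, 'msg': 'hello'}, hash_name='sha1'))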
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/numpy_pickle_compat.py ADDED
@@ -0,0 +1,243 @@
1
+ """Numpy pickle compatibility functions."""
2
+
3
+ import pickle
4
+ import os
5
+ import zlib
6
+ import inspect
7
+
8
+ from io import BytesIO
9
+
10
+ from .numpy_pickle_utils import _ZFILE_PREFIX
11
+ from .numpy_pickle_utils import Unpickler
12
+ from .numpy_pickle_utils import _ensure_native_byte_order
13
+
14
+ def hex_str(an_int):
15
+ """Convert an int to an hexadecimal string."""
16
+ return '{:#x}'.format(an_int)
17
+
18
+
19
+ def asbytes(s):
20
+ if isinstance(s, bytes):
21
+ return s
22
+ return s.encode('latin1')
23
+
24
+
25
+ _MAX_LEN = len(hex_str(2 ** 64))
26
+ _CHUNK_SIZE = 64 * 1024
27
+
28
+
29
+ def read_zfile(file_handle):
30
+ """Read the z-file and return the content as a string.
31
+
32
+ Z-files are raw data compressed with zlib used internally by joblib
33
+ for persistence. Backward compatibility is not guaranteed. Do not
34
+ use for external purposes.
35
+ """
36
+ file_handle.seek(0)
37
+ header_length = len(_ZFILE_PREFIX) + _MAX_LEN
38
+ length = file_handle.read(header_length)
39
+ length = length[len(_ZFILE_PREFIX):]
40
+ length = int(length, 16)
41
+
42
+ # With python2 and joblib version <= 0.8.4 compressed pickle header is one
43
+ # character wider so we need to ignore an additional space if present.
44
+ # Note: the first byte of the zlib data is guaranteed not to be a
45
+ # space according to
46
+ # https://tools.ietf.org/html/rfc6713#section-2.1
47
+ next_byte = file_handle.read(1)
48
+ if next_byte != b' ':
49
+ # The zlib compressed data has started and we need to go back
50
+ # one byte
51
+ file_handle.seek(header_length)
52
+
53
+ # We use the known length of the data to tell Zlib the size of the
54
+ # buffer to allocate.
55
+ data = zlib.decompress(file_handle.read(), 15, length)
56
+ assert len(data) == length, (
57
+ "Incorrect data length while decompressing %s."
58
+ "The file could be corrupted." % file_handle)
59
+ return data
60
+
61
+
62
+ def write_zfile(file_handle, data, compress=1):
63
+ """Write the data in the given file as a Z-file.
64
+
65
+ Z-files are raw data compressed with zlib used internally by joblib
66
+ for persistence. Backward compatibility is not guaranteed. Do not
67
+ use for external purposes.
68
+ """
69
+ file_handle.write(_ZFILE_PREFIX)
70
+ length = hex_str(len(data))
71
+ # Store the length of the data
72
+ file_handle.write(asbytes(length.ljust(_MAX_LEN)))
73
+ file_handle.write(zlib.compress(asbytes(data), compress))
74
+
75
+ ###############################################################################
76
+ # Utility objects for persistence.
77
+
78
+
79
+ class NDArrayWrapper(object):
80
+ """An object to be persisted instead of numpy arrays.
81
+
82
+ The only thing this object does, is to carry the filename in which
83
+ the array has been persisted, and the array subclass.
84
+ """
85
+
86
+ def __init__(self, filename, subclass, allow_mmap=True):
87
+ """Constructor. Store the useful information for later."""
88
+ self.filename = filename
89
+ self.subclass = subclass
90
+ self.allow_mmap = allow_mmap
91
+
92
+ def read(self, unpickler):
93
+ """Reconstruct the array."""
94
+ filename = os.path.join(unpickler._dirname, self.filename)
95
+ # Load the array from the disk
96
+ # use getattr instead of self.allow_mmap to ensure backward compat
97
+ # with NDArrayWrapper instances pickled with joblib < 0.9.0
98
+ allow_mmap = getattr(self, 'allow_mmap', True)
99
+ kwargs = {}
100
+ if allow_mmap:
101
+ kwargs['mmap_mode'] = unpickler.mmap_mode
102
+ if "allow_pickle" in inspect.signature(unpickler.np.load).parameters:
103
+ # Required in numpy 1.16.3 and later to acknowledge the security
104
+ # risk.
105
+ kwargs["allow_pickle"] = True
106
+ array = unpickler.np.load(filename, **kwargs)
107
+
108
+ # Detect byte order mis-match and swap as needed.
109
+ array = _ensure_native_byte_order(array)
110
+
111
+ # Reconstruct subclasses. This does not work with old
112
+ # versions of numpy
113
+ if (hasattr(array, '__array_prepare__') and
114
+ self.subclass not in (unpickler.np.ndarray,
115
+ unpickler.np.memmap)):
116
+ # We need to reconstruct another subclass
117
+ new_array = unpickler.np.core.multiarray._reconstruct(
118
+ self.subclass, (0,), 'b')
119
+ return new_array.__array_prepare__(array)
120
+ else:
121
+ return array
122
+
123
+
124
+ class ZNDArrayWrapper(NDArrayWrapper):
125
+ """An object to be persisted instead of numpy arrays.
126
+
127
+ This object store the Zfile filename in which
128
+ the data array has been persisted, and the meta information to
129
+ retrieve it.
130
+ The reason that we store the raw buffer data of the array and
131
+ the meta information, rather than the array representation routine
132
+ (tobytes), is that it enables us to fully exploit the strided
133
+ model and avoid memory copies (a and a.T store equally fast). In
134
+ addition saving the heavy information separately can avoid
135
+ creating large temporary buffers when unpickling data with
136
+ large arrays.
137
+ """
138
+
139
+ def __init__(self, filename, init_args, state):
140
+ """Constructor. Store the useful information for later."""
141
+ self.filename = filename
142
+ self.state = state
143
+ self.init_args = init_args
144
+
145
+ def read(self, unpickler):
146
+ """Reconstruct the array from the meta-information and the z-file."""
147
+ # Here we are simply reproducing the unpickling mechanism for numpy
148
+ # arrays
149
+ filename = os.path.join(unpickler._dirname, self.filename)
150
+ array = unpickler.np.core.multiarray._reconstruct(*self.init_args)
151
+ with open(filename, 'rb') as f:
152
+ data = read_zfile(f)
153
+ state = self.state + (data,)
154
+ array.__setstate__(state)
155
+ return array
156
+
157
+
158
+ class ZipNumpyUnpickler(Unpickler):
159
+ """A subclass of the Unpickler to unpickle our numpy pickles."""
160
+
161
+ dispatch = Unpickler.dispatch.copy()
162
+
163
+ def __init__(self, filename, file_handle, mmap_mode=None):
164
+ """Constructor."""
165
+ self._filename = os.path.basename(filename)
166
+ self._dirname = os.path.dirname(filename)
167
+ self.mmap_mode = mmap_mode
168
+ self.file_handle = self._open_pickle(file_handle)
169
+ Unpickler.__init__(self, self.file_handle)
170
+ try:
171
+ import numpy as np
172
+ except ImportError:
173
+ np = None
174
+ self.np = np
175
+
176
+ def _open_pickle(self, file_handle):
177
+ return BytesIO(read_zfile(file_handle))
178
+
179
+ def load_build(self):
180
+ """Set the state of a newly created object.
181
+
182
+ We capture it to replace our place-holder objects,
183
+ NDArrayWrapper, by the array we are interested in. We
184
+ replace them directly in the stack of pickler.
185
+ """
186
+ Unpickler.load_build(self)
187
+ if isinstance(self.stack[-1], NDArrayWrapper):
188
+ if self.np is None:
189
+ raise ImportError("Trying to unpickle an ndarray, "
190
+ "but numpy didn't import correctly")
191
+ nd_array_wrapper = self.stack.pop()
192
+ array = nd_array_wrapper.read(self)
193
+ self.stack.append(array)
194
+
195
+ dispatch[pickle.BUILD[0]] = load_build
196
+
197
+
198
+ def load_compatibility(filename):
199
+ """Reconstruct a Python object from a file persisted with joblib.dump.
200
+
201
+ This function ensures the compatibility with joblib old persistence format
202
+ (<= 0.9.3).
203
+
204
+ Parameters
205
+ -----------
206
+ filename: string
207
+ The name of the file from which to load the object
208
+
209
+ Returns
210
+ -------
211
+ result: any Python object
212
+ The object stored in the file.
213
+
214
+ See Also
215
+ --------
216
+ joblib.dump : function to save an object
217
+
218
+ Notes
219
+ -----
220
+
221
+ This function can load numpy array files saved separately during the
222
+ dump.
223
+ """
224
+ with open(filename, 'rb') as file_handle:
225
+ # We are careful to open the file handle early and keep it open to
226
+ # avoid race-conditions on renames. That said, if data is stored in
227
+ # companion files, moving the directory will create a race when
228
+ # joblib tries to access the companion files.
229
+ unpickler = ZipNumpyUnpickler(filename, file_handle=file_handle)
230
+ try:
231
+ obj = unpickler.load()
232
+ except UnicodeDecodeError as exc:
233
+ # More user-friendly error message
234
+ new_exc = ValueError(
235
+ 'You may be trying to read with '
236
+ 'python 3 a joblib pickle generated with python 2. '
237
+ 'This feature is not supported by joblib.')
238
+ new_exc.__cause__ = exc
239
+ raise new_exc
240
+ finally:
241
+ if hasattr(unpickler, 'file_handle'):
242
+ unpickler.file_handle.close()
243
+ return obj
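A quick round-trip sketch for the z-file helpers above. They are internal to the legacy (joblib < 0.10) persistence format, so this only illustrates the framing, not a supported API; the import path is assumed from the file being added.

from io import BytesIO
# module path assumed from the file added above
from joblib.numpy_pickle_compat import write_zfile, read_zfile

buf = BytesIO()
write_zfile(buf, b'hello world', compress=3)

assert read_zfile(buf) == b'hello world'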
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/numpy_pickle_utils.py ADDED
@@ -0,0 +1,253 @@
1
+ """Utilities for fast persistence of big data, with optional compression."""
2
+
3
+ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
4
+ # Copyright (c) 2009 Gael Varoquaux
5
+ # License: BSD Style, 3 clauses.
6
+
7
+ import pickle
8
+ import io
9
+ import sys
10
+ import warnings
11
+ import contextlib
12
+
13
+ from .compressor import _ZFILE_PREFIX
14
+ from .compressor import _COMPRESSORS
15
+
16
+ try:
17
+ import numpy as np
18
+ except ImportError:
19
+ np = None
20
+
21
+ Unpickler = pickle._Unpickler
22
+ Pickler = pickle._Pickler
23
+ xrange = range
24
+
25
+
26
+ try:
27
+ # The python standard library can be built without bz2 so we make bz2
28
+ # usage optional.
29
+ # see https://github.com/scikit-learn/scikit-learn/issues/7526 for more
30
+ # details.
31
+ import bz2
32
+ except ImportError:
33
+ bz2 = None
34
+
35
+ # Buffer size used in io.BufferedReader and io.BufferedWriter
36
+ _IO_BUFFER_SIZE = 1024 ** 2
37
+
38
+
39
+ def _is_raw_file(fileobj):
40
+ """Check if fileobj is a raw file object, e.g created with open."""
41
+ fileobj = getattr(fileobj, 'raw', fileobj)
42
+ return isinstance(fileobj, io.FileIO)
43
+
44
+
45
+ def _get_prefixes_max_len():
46
+ # Compute the max prefix len of registered compressors.
47
+ prefixes = [len(compressor.prefix) for compressor in _COMPRESSORS.values()]
48
+ prefixes += [len(_ZFILE_PREFIX)]
49
+ return max(prefixes)
50
+
51
+
52
+ def _is_numpy_array_byte_order_mismatch(array):
53
+ """Check if numpy array is having byte order mis-match"""
54
+ return ((sys.byteorder == 'big' and
55
+ (array.dtype.byteorder == '<' or
56
+ (array.dtype.byteorder == '|' and array.dtype.fields and
57
+ all(e[0].byteorder == '<'
58
+ for e in array.dtype.fields.values())))) or
59
+ (sys.byteorder == 'little' and
60
+ (array.dtype.byteorder == '>' or
61
+ (array.dtype.byteorder == '|' and array.dtype.fields and
62
+ all(e[0].byteorder == '>'
63
+ for e in array.dtype.fields.values())))))
64
+
65
+
66
+ def _ensure_native_byte_order(array):
67
+ """Use the byte order of the host while preserving values
68
+
69
+ Does nothing if array already uses the system byte order.
70
+ """
71
+ if _is_numpy_array_byte_order_mismatch(array):
72
+ array = array.byteswap().newbyteorder('=')
73
+ return array
74
+
75
+
76
+ ###############################################################################
77
+ # Cache file utilities
78
+ def _detect_compressor(fileobj):
79
+ """Return the compressor matching fileobj.
80
+
81
+ Parameters
82
+ ----------
83
+ fileobj: file object
84
+
85
+ Returns
86
+ -------
87
+ str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat', 'not-compressed'}
88
+ """
89
+ # Read the magic number in the first bytes of the file.
90
+ max_prefix_len = _get_prefixes_max_len()
91
+ if hasattr(fileobj, 'peek'):
92
+ # Peek allows to read those bytes without moving the cursor in the
93
+ # file whic.
94
+ first_bytes = fileobj.peek(max_prefix_len)
95
+ else:
96
+ # Fallback to seek if the fileobject is not peekable.
97
+ first_bytes = fileobj.read(max_prefix_len)
98
+ fileobj.seek(0)
99
+
100
+ if first_bytes.startswith(_ZFILE_PREFIX):
101
+ return "compat"
102
+ else:
103
+ for name, compressor in _COMPRESSORS.items():
104
+ if first_bytes.startswith(compressor.prefix):
105
+ return name
106
+
107
+ return "not-compressed"
108
+
109
+
110
+ def _buffered_read_file(fobj):
111
+ """Return a buffered version of a read file object."""
112
+ return io.BufferedReader(fobj, buffer_size=_IO_BUFFER_SIZE)
113
+
114
+
115
+ def _buffered_write_file(fobj):
116
+ """Return a buffered version of a write file object."""
117
+ return io.BufferedWriter(fobj, buffer_size=_IO_BUFFER_SIZE)
118
+
119
+
120
+ @contextlib.contextmanager
121
+ def _read_fileobject(fileobj, filename, mmap_mode=None):
122
+ """Utility function opening the right fileobject from a filename.
123
+
124
+ The magic number is used to choose between the type of file object to open:
125
+ * regular file object (default)
126
+ * zlib file object
127
+ * gzip file object
128
+ * bz2 file object
129
+ * lzma file object (for xz and lzma compressor)
130
+
131
+ Parameters
132
+ ----------
133
+ fileobj: file object
134
+ compressor: str in {'zlib', 'gzip', 'bz2', 'lzma', 'xz', 'compat',
135
+ 'not-compressed'}
136
+ filename: str
137
+ filename path corresponding to the fileobj parameter.
138
+ mmap_mode: str
139
+ memory map mode that should be used to open the pickle file. This
140
+ parameter is useful to verify that the user is not trying to one with
141
+ compression. Default: None.
142
+
143
+ Returns
144
+ -------
145
+ a file like object
146
+
147
+ """
148
+ # Detect if the fileobj contains compressed data.
149
+ compressor = _detect_compressor(fileobj)
150
+
151
+ if compressor == 'compat':
152
+ # Compatibility with old pickle mode: simply return the input
153
+ # filename "as-is" and let the compatibility function be called by the
154
+ # caller.
155
+ warnings.warn("The file '%s' has been generated with a joblib "
156
+ "version less than 0.10. "
157
+ "Please regenerate this pickle file." % filename,
158
+ DeprecationWarning, stacklevel=2)
159
+ yield filename
160
+ else:
161
+ if compressor in _COMPRESSORS:
162
+ # based on the compressor detected in the file, we open the
163
+ # correct decompressor file object, wrapped in a buffer.
164
+ compressor_wrapper = _COMPRESSORS[compressor]
165
+ inst = compressor_wrapper.decompressor_file(fileobj)
166
+ fileobj = _buffered_read_file(inst)
167
+
168
+ # Checking if incompatible load parameters with the type of file:
169
+ # mmap_mode cannot be used with compressed file or in memory buffers
170
+ # such as io.BytesIO.
171
+ if mmap_mode is not None:
172
+ if isinstance(fileobj, io.BytesIO):
173
+ warnings.warn('In memory persistence is not compatible with '
174
+ 'mmap_mode "%(mmap_mode)s" flag passed. '
175
+ 'mmap_mode option will be ignored.'
176
+ % locals(), stacklevel=2)
177
+ elif compressor != 'not-compressed':
178
+ warnings.warn('mmap_mode "%(mmap_mode)s" is not compatible '
179
+ 'with compressed file %(filename)s. '
180
+ '"%(mmap_mode)s" flag will be ignored.'
181
+ % locals(), stacklevel=2)
182
+ elif not _is_raw_file(fileobj):
183
+ warnings.warn('"%(fileobj)r" is not a raw file, mmap_mode '
184
+ '"%(mmap_mode)s" flag will be ignored.'
185
+ % locals(), stacklevel=2)
186
+
187
+ yield fileobj
188
+
189
+
190
+ def _write_fileobject(filename, compress=("zlib", 3)):
191
+ """Return the right compressor file object in write mode."""
192
+ compressmethod = compress[0]
193
+ compresslevel = compress[1]
194
+
195
+ if compressmethod in _COMPRESSORS.keys():
196
+ file_instance = _COMPRESSORS[compressmethod].compressor_file(
197
+ filename, compresslevel=compresslevel)
198
+ return _buffered_write_file(file_instance)
199
+ else:
200
+ file_instance = _COMPRESSORS['zlib'].compressor_file(
201
+ filename, compresslevel=compresslevel)
202
+ return _buffered_write_file(file_instance)
203
+
204
+
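These write helpers back joblib's public persistence functions; a minimal sketch of the corresponding user-facing call, with a hypothetical path and the same ('zlib', 3) default shown above:

import joblib

joblib.dump({'a': 1}, '/tmp/cache.pkl.z', compress=('zlib', 3))   # hypothetical path
print(joblib.load('/tmp/cache.pkl.z'))                            # -> {'a': 1}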
205
+ # Utility functions/variables from numpy required for writing arrays.
206
+ # We need at least the functions introduced in version 1.9 of numpy. Here,
207
+ # we use the ones from numpy 1.10.2.
208
+ BUFFER_SIZE = 2 ** 18 # size of buffer for reading npz files in bytes
209
+
210
+
211
+ def _read_bytes(fp, size, error_template="ran out of data"):
212
+ """Read from file-like object until size bytes are read.
213
+
214
+ TODO python2_drop: is it still needed? The docstring mentions python 2.6
215
+ and it looks like this can be at least simplified ...
216
+
217
+ Raises ValueError if EOF is encountered before size bytes are read.
218
+ Non-blocking objects only supported if they derive from io objects.
219
+
220
+ Required as e.g. ZipExtFile in python 2.6 can return less data than
221
+ requested.
222
+
223
+ This function was taken from numpy/lib/format.py in version 1.10.2.
224
+
225
+ Parameters
226
+ ----------
227
+ fp: file-like object
228
+ size: int
229
+ error_template: str
230
+
231
+ Returns
232
+ -------
233
+ a bytes object
234
+ The data read in bytes.
235
+
236
+ """
237
+ data = bytes()
238
+ while True:
239
+ # io files (default in python3) return None or raise on
240
+ # would-block, python2 file will truncate, probably nothing can be
241
+ # done about that. note that regular files can't be non-blocking
242
+ try:
243
+ r = fp.read(size - len(data))
244
+ data += r
245
+ if len(r) == 0 or len(data) == size:
246
+ break
247
+ except io.BlockingIOError:
248
+ pass
249
+ if len(data) != size:
250
+ msg = "EOF: reading %s, expected %d bytes got %d"
251
+ raise ValueError(msg % (error_template, size, len(data)))
252
+ else:
253
+ return data
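A small sketch of the helper's contract with illustrative values: it either returns exactly ``size`` bytes or raises ValueError.

import io

buf = io.BytesIO(b'abcdef')
print(_read_bytes(buf, 4))             # b'abcd'
try:
    _read_bytes(buf, 4)                # only 2 bytes remain
except ValueError as e:
    print(e)                           # EOF: reading ran out of data, expected 4 bytes got 2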
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/parallel.py ADDED
@@ -0,0 +1,1074 @@
1
+ """
2
+ Helpers for embarrassingly parallel code.
3
+ """
4
+ # Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
5
+ # Copyright: 2010, Gael Varoquaux
6
+ # License: BSD 3 clause
7
+
8
+ from __future__ import division
9
+
10
+ import os
11
+ import sys
12
+ from math import sqrt
13
+ import functools
14
+ import time
15
+ import threading
16
+ import itertools
17
+ from uuid import uuid4
18
+ from numbers import Integral
19
+ import warnings
20
+ import queue
21
+
22
+ from ._multiprocessing_helpers import mp
23
+
24
+ from .logger import Logger, short_format_time
25
+ from .disk import memstr_to_bytes
26
+ from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
27
+ ThreadingBackend, SequentialBackend,
28
+ LokyBackend)
29
+ from .externals.cloudpickle import dumps, loads
30
+ from .externals import loky
31
+
32
+ # Make sure that those two classes are part of the public joblib.parallel API
33
+ # so that 3rd party backend implementers can import them from here.
34
+ from ._parallel_backends import AutoBatchingMixin # noqa
35
+ from ._parallel_backends import ParallelBackendBase # noqa
36
+
37
+
38
+ BACKENDS = {
39
+ 'multiprocessing': MultiprocessingBackend,
40
+ 'threading': ThreadingBackend,
41
+ 'sequential': SequentialBackend,
42
+ 'loky': LokyBackend,
43
+ }
44
+ # name of the backend used by default by Parallel outside of any context
45
+ # managed by ``parallel_backend``.
46
+ DEFAULT_BACKEND = 'loky'
47
+ DEFAULT_N_JOBS = 1
48
+ DEFAULT_THREAD_BACKEND = 'threading'
49
+
50
+ # Thread local value that can be overridden by the ``parallel_backend`` context
51
+ # manager
52
+ _backend = threading.local()
53
+
54
+ VALID_BACKEND_HINTS = ('processes', 'threads', None)
55
+ VALID_BACKEND_CONSTRAINTS = ('sharedmem', None)
56
+
57
+
58
+ def _register_dask():
59
+ """ Register Dask Backend if called with parallel_backend("dask") """
60
+ try:
61
+ from ._dask import DaskDistributedBackend
62
+ register_parallel_backend('dask', DaskDistributedBackend)
63
+ except ImportError as e:
64
+ msg = ("To use the dask.distributed backend you must install both "
65
+ "the `dask` and distributed modules.\n\n"
66
+ "See https://dask.pydata.org/en/latest/install.html for more "
67
+ "information.")
68
+ raise ImportError(msg) from e
69
+
70
+
71
+ EXTERNAL_BACKENDS = {
72
+ 'dask': _register_dask,
73
+ }
74
+
75
+
76
+ def get_active_backend(prefer=None, require=None, verbose=0):
77
+ """Return the active default backend"""
78
+ if prefer not in VALID_BACKEND_HINTS:
79
+ raise ValueError("prefer=%r is not a valid backend hint, "
80
+ "expected one of %r" % (prefer, VALID_BACKEND_HINTS))
81
+ if require not in VALID_BACKEND_CONSTRAINTS:
82
+ raise ValueError("require=%r is not a valid backend constraint, "
83
+ "expected one of %r"
84
+ % (require, VALID_BACKEND_CONSTRAINTS))
85
+
86
+ if prefer == 'processes' and require == 'sharedmem':
87
+ raise ValueError("prefer == 'processes' and require == 'sharedmem'"
88
+ " are inconsistent settings")
89
+ backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
90
+ if backend_and_jobs is not None:
91
+ # Try to use the backend set by the user with the context manager.
92
+ backend, n_jobs = backend_and_jobs
93
+ nesting_level = backend.nesting_level
94
+ supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
95
+ if require == 'sharedmem' and not supports_sharedmem:
96
+ # This backend does not match the shared memory constraint:
97
+ # fallback to the default thead-based backend.
98
+ sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](
99
+ nesting_level=nesting_level)
100
+ if verbose >= 10:
101
+ print("Using %s as joblib.Parallel backend instead of %s "
102
+ "as the latter does not provide shared memory semantics."
103
+ % (sharedmem_backend.__class__.__name__,
104
+ backend.__class__.__name__))
105
+ return sharedmem_backend, DEFAULT_N_JOBS
106
+ else:
107
+ return backend_and_jobs
108
+
109
+ # We are outside of the scope of any parallel_backend context manager,
110
+ # create the default backend instance now.
111
+ backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)
112
+ supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
113
+ uses_threads = getattr(backend, 'uses_threads', False)
114
+ if ((require == 'sharedmem' and not supports_sharedmem) or
115
+ (prefer == 'threads' and not uses_threads)):
116
+ # Make sure the selected default backend matches the soft hints and
117
+ # hard constraints:
118
+ backend = BACKENDS[DEFAULT_THREAD_BACKEND](nesting_level=0)
119
+ return backend, DEFAULT_N_JOBS
120
+
121
+
122
+ class parallel_backend(object):
123
+ """Change the default backend used by Parallel inside a with block.
124
+
125
+ If ``backend`` is a string it must match a previously registered
126
+ implementation using the ``register_parallel_backend`` function.
127
+
128
+ By default the following backends are available:
129
+
130
+ - 'loky': single-host, process-based parallelism (used by default),
131
+ - 'threading': single-host, thread-based parallelism,
132
+ - 'multiprocessing': legacy single-host, process-based parallelism.
133
+
134
+ 'loky' is recommended to run functions that manipulate Python objects.
135
+ 'threading' is a low-overhead alternative that is most efficient for
136
+ functions that release the Global Interpreter Lock: e.g. I/O-bound code or
137
+ CPU-bound code in a few calls to native code that explicitly releases the
138
+ GIL.
139
+
140
+ In addition, if the `dask` and `distributed` Python packages are installed,
141
+ it is possible to use the 'dask' backend for better scheduling of nested
142
+ parallel calls without over-subscription and potentially distribute
143
+ parallel calls over a networked cluster of several hosts.
144
+
145
+ It is also possible to use the distributed 'ray' backend for distributing
146
+ the workload to a cluster of nodes. To use the 'ray' joblib backend add
147
+ the following lines::
148
+
149
+ >>> from ray.util.joblib import register_ray # doctest: +SKIP
150
+ >>> register_ray() # doctest: +SKIP
151
+ >>> with parallel_backend("ray"): # doctest: +SKIP
152
+ ... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
153
+ [-1, -2, -3, -4, -5]
154
+
155
+ Alternatively the backend can be passed directly as an instance.
156
+
157
+ By default all available workers will be used (``n_jobs=-1``) unless the
158
+ caller passes an explicit value for the ``n_jobs`` parameter.
159
+
160
+ This is an alternative to passing a ``backend='backend_name'`` argument to
161
+ the ``Parallel`` class constructor. It is particularly useful when calling
162
+ into library code that uses joblib internally but does not expose the
163
+ backend argument in its own API.
164
+
165
+ >>> from operator import neg
166
+ >>> with parallel_backend('threading'):
167
+ ... print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
168
+ ...
169
+ [-1, -2, -3, -4, -5]
170
+
171
+ Warning: this function is experimental and subject to change in a future
172
+ version of joblib.
173
+
174
+ Joblib also tries to limit the oversubscription by limiting the number of
175
+ threads usable in some third-party library threadpools like OpenBLAS, MKL
176
+ or OpenMP. The default limit in each worker is set to
177
+ ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
178
+ overwritten with the ``inner_max_num_threads`` argument which will be used
179
+ to set this limit in the child processes.
180
+
181
+ .. versionadded:: 0.10
182
+
183
+ """
184
+ def __init__(self, backend, n_jobs=-1, inner_max_num_threads=None,
185
+ **backend_params):
186
+ if isinstance(backend, str):
187
+ if backend not in BACKENDS and backend in EXTERNAL_BACKENDS:
188
+ register = EXTERNAL_BACKENDS[backend]
189
+ register()
190
+
191
+ backend = BACKENDS[backend](**backend_params)
192
+
193
+ if inner_max_num_threads is not None:
194
+ msg = ("{} does not accept setting the inner_max_num_threads "
195
+ "argument.".format(backend.__class__.__name__))
196
+ assert backend.supports_inner_max_num_threads, msg
197
+ backend.inner_max_num_threads = inner_max_num_threads
198
+
199
+ # If the nesting_level of the backend is not set previously, use the
200
+ # nesting level from the previous active_backend to set it
201
+ current_backend_and_jobs = getattr(_backend, 'backend_and_jobs', None)
202
+ if backend.nesting_level is None:
203
+ if current_backend_and_jobs is None:
204
+ nesting_level = 0
205
+ else:
206
+ nesting_level = current_backend_and_jobs[0].nesting_level
207
+
208
+ backend.nesting_level = nesting_level
209
+
210
+ # Save the backends info and set the active backend
211
+ self.old_backend_and_jobs = current_backend_and_jobs
212
+ self.new_backend_and_jobs = (backend, n_jobs)
213
+
214
+ _backend.backend_and_jobs = (backend, n_jobs)
215
+
216
+ def __enter__(self):
217
+ return self.new_backend_and_jobs
218
+
219
+ def __exit__(self, type, value, traceback):
220
+ self.unregister()
221
+
222
+ def unregister(self):
223
+ if self.old_backend_and_jobs is None:
224
+ if getattr(_backend, 'backend_and_jobs', None) is not None:
225
+ del _backend.backend_and_jobs
226
+ else:
227
+ _backend.backend_and_jobs = self.old_backend_and_jobs
228
+
229
+
230
+ # Under Linux or OS X the default start method of multiprocessing
231
+ # can cause third party libraries to crash. Under Python 3.4+ it is possible
232
+ # to set an environment variable to switch the default start method from
233
+ # 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
234
+ # of causing semantic changes and some additional pool instantiation overhead.
235
+ DEFAULT_MP_CONTEXT = None
236
+ if hasattr(mp, 'get_context'):
237
+ method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
238
+ if method is not None:
239
+ DEFAULT_MP_CONTEXT = mp.get_context(method=method)
240
+
241
+
242
+ class BatchedCalls(object):
243
+ """Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
244
+
245
+ def __init__(self, iterator_slice, backend_and_jobs, reducer_callback=None,
246
+ pickle_cache=None):
247
+ self.items = list(iterator_slice)
248
+ self._size = len(self.items)
249
+ self._reducer_callback = reducer_callback
250
+ if isinstance(backend_and_jobs, tuple):
251
+ self._backend, self._n_jobs = backend_and_jobs
252
+ else:
253
+ # this is for backward compatibility purposes. Before 0.12.6,
254
+ # nested backends were returned without n_jobs indications.
255
+ self._backend, self._n_jobs = backend_and_jobs, None
256
+ self._pickle_cache = pickle_cache if pickle_cache is not None else {}
257
+
258
+ def __call__(self):
259
+ # Set the default nested backend to self._backend but do not
260
+ # change the default number of processes to -1.
261
+ with parallel_backend(self._backend, n_jobs=self._n_jobs):
262
+ return [func(*args, **kwargs)
263
+ for func, args, kwargs in self.items]
264
+
265
+ def __reduce__(self):
266
+ if self._reducer_callback is not None:
267
+ self._reducer_callback()
268
+ # no need to pickle the callback.
269
+ return (
270
+ BatchedCalls,
271
+ (self.items, (self._backend, self._n_jobs), None,
272
+ self._pickle_cache)
273
+ )
274
+
275
+ def __len__(self):
276
+ return self._size
277
+
278
+
279
+ ###############################################################################
280
+ # CPU count that works also when multiprocessing has been disabled via
281
+ # the JOBLIB_MULTIPROCESSING environment variable
282
+ def cpu_count(only_physical_cores=False):
283
+ """Return the number of CPUs.
284
+
285
+ This delegates to loky.cpu_count that takes into account additional
286
+ constraints such as Linux CFS scheduler quotas (typically set by container
287
+ runtimes such as docker) and CPU affinity (for instance using the taskset
288
+ command on Linux).
289
+
290
+ If only_physical_cores is True, do not take hyperthreading / SMT logical
291
+ cores into account.
292
+ """
293
+ if mp is None:
294
+ return 1
295
+
296
+ return loky.cpu_count(only_physical_cores=only_physical_cores)
297
+
298
+
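A small sketch comparing the two counts (results are machine dependent; both numbers match on hosts without SMT):

from joblib import cpu_count

print(cpu_count())                            # logical cores visible to joblib
print(cpu_count(only_physical_cores=True))    # physical cores only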
299
+ ###############################################################################
300
+ # For verbosity
301
+
302
+ def _verbosity_filter(index, verbose):
303
+ """ Returns False for indices increasingly apart, the distance
304
+ depending on the value of verbose.
305
+
306
+ We use a lag increasing as the square of index
307
+ """
308
+ if not verbose:
309
+ return True
310
+ elif verbose > 10:
311
+ return False
312
+ if index == 0:
313
+ return False
314
+ verbose = .5 * (11 - verbose) ** 2
315
+ scale = sqrt(index / verbose)
316
+ next_scale = sqrt((index + 1) / verbose)
317
+ return (int(next_scale) == int(scale))
318
+
319
+
320
+ ###############################################################################
321
+ def delayed(function):
322
+ """Decorator used to capture the arguments of a function."""
323
+
324
+ def delayed_function(*args, **kwargs):
325
+ return function, args, kwargs
326
+ try:
327
+ delayed_function = functools.wraps(function)(delayed_function)
328
+ except AttributeError:
329
+ " functools.wraps fails on some callable objects "
330
+ return delayed_function
331
+
332
+
333
+ ###############################################################################
334
+ class BatchCompletionCallBack(object):
335
+ """Callback used by joblib.Parallel's multiprocessing backend.
336
+
337
+ This callable is executed by the parent process whenever a worker process
338
+ has returned the results of a batch of tasks.
339
+
340
+ It is used for progress reporting, to update estimate of the batch
341
+ processing duration and to schedule the next batch of tasks to be
342
+ processed.
343
+
344
+ """
345
+ def __init__(self, dispatch_timestamp, batch_size, parallel):
346
+ self.dispatch_timestamp = dispatch_timestamp
347
+ self.batch_size = batch_size
348
+ self.parallel = parallel
349
+
350
+ def __call__(self, out):
351
+ self.parallel.n_completed_tasks += self.batch_size
352
+ this_batch_duration = time.time() - self.dispatch_timestamp
353
+
354
+ self.parallel._backend.batch_completed(self.batch_size,
355
+ this_batch_duration)
356
+ self.parallel.print_progress()
357
+ with self.parallel._lock:
358
+ if self.parallel._original_iterator is not None:
359
+ self.parallel.dispatch_next()
360
+
361
+
362
+ ###############################################################################
363
+ def register_parallel_backend(name, factory, make_default=False):
364
+ """Register a new Parallel backend factory.
365
+
366
+ The new backend can then be selected by passing its name as the backend
367
+ argument to the Parallel class. Moreover, the default backend can be
368
+ overwritten globally by setting make_default=True.
369
+
370
+ The factory can be any callable that takes no argument and return an
371
+ instance of ``ParallelBackendBase``.
372
+
373
+ Warning: this function is experimental and subject to change in a future
374
+ version of joblib.
375
+
376
+ .. versionadded:: 0.10
377
+
378
+ """
379
+ BACKENDS[name] = factory
380
+ if make_default:
381
+ global DEFAULT_BACKEND
382
+ DEFAULT_BACKEND = name
383
+
384
+
385
+ def effective_n_jobs(n_jobs=-1):
386
+ """Determine the number of jobs that can actually run in parallel
387
+
388
+ n_jobs is the number of workers requested by the callers. Passing n_jobs=-1
389
+ means requesting all available workers for instance matching the number of
390
+ CPU cores on the worker host(s).
391
+
392
+ This method should return a guesstimate of the number of workers that can
393
+ actually perform work concurrently with the currently enabled default
394
+ backend. The primary use case is to make it possible for the caller to know
395
+ in how many chunks to slice the work.
396
+
397
+ In general working on larger data chunks is more efficient (less scheduling
398
+ overhead and better use of CPU cache prefetching heuristics) as long as all
399
+ the workers have enough work to do.
400
+
401
+ Warning: this function is experimental and subject to change in a future
402
+ version of joblib.
403
+
404
+ .. versionadded:: 0.10
405
+
406
+ """
407
+ backend, backend_n_jobs = get_active_backend()
408
+ if n_jobs is None:
409
+ n_jobs = backend_n_jobs
410
+ return backend.effective_n_jobs(n_jobs=n_jobs)
411
+
412
+
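A sketch of the intended use: query the effective worker count first, then cut the work into that many chunks (the data here is illustrative).

from joblib import Parallel, delayed, effective_n_jobs

data = list(range(100))
n_chunks = effective_n_jobs(-1)                        # workers the active backend can really use
chunks = [data[i::n_chunks] for i in range(n_chunks)]
sums = Parallel(n_jobs=n_chunks)(delayed(sum)(c) for c in chunks)
print(sum(sums))                                       # 4950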
413
+ ###############################################################################
414
+ class Parallel(Logger):
415
+ ''' Helper class for readable parallel mapping.
416
+
417
+ Read more in the :ref:`User Guide <parallel>`.
418
+
419
+ Parameters
420
+ -----------
421
+ n_jobs: int, default: None
422
+ The maximum number of concurrently running jobs, such as the number
423
+ of Python worker processes when backend="multiprocessing"
424
+ or the size of the thread-pool when backend="threading".
425
+ If -1 all CPUs are used. If 1 is given, no parallel computing code
426
+ is used at all, which is useful for debugging. For n_jobs below -1,
427
+ (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
428
+ CPUs but one are used.
429
+ None is a marker for 'unset' that will be interpreted as n_jobs=1
430
+ (sequential execution) unless the call is performed under a
431
+ parallel_backend context manager that sets another value for
432
+ n_jobs.
433
+ backend: str, ParallelBackendBase instance or None, default: 'loky'
434
+ Specify the parallelization backend implementation.
435
+ Supported backends are:
436
+
437
+ - "loky" used by default, can induce some
438
+ communication and memory overhead when exchanging input and
439
+ output data with the worker Python processes.
440
+ - "multiprocessing" previous process-based backend based on
441
+ `multiprocessing.Pool`. Less robust than `loky`.
442
+ - "threading" is a very low-overhead backend but it suffers
443
+ from the Python Global Interpreter Lock if the called function
444
+ relies a lot on Python objects. "threading" is mostly useful
445
+ when the execution bottleneck is a compiled extension that
446
+ explicitly releases the GIL (for instance a Cython loop wrapped
447
+ in a "with nogil" block or an expensive call to a library such
448
+ as NumPy).
449
+ - finally, you can register backends by calling
450
+ register_parallel_backend. This will allow you to implement
451
+ a backend of your liking.
452
+
453
+ It is not recommended to hard-code the backend name in a call to
454
+ Parallel in a library. Instead it is recommended to set soft hints
455
+ (prefer) or hard constraints (require) so as to make it possible
456
+ for library users to change the backend from the outside using the
457
+ parallel_backend context manager.
458
+ prefer: str in {'processes', 'threads'} or None, default: None
459
+ Soft hint to choose the default backend if no specific backend
460
+ was selected with the parallel_backend context manager. The
461
+ default process-based backend is 'loky' and the default
462
+ thread-based backend is 'threading'. Ignored if the ``backend``
463
+ parameter is specified.
464
+ require: 'sharedmem' or None, default None
465
+ Hard constraint to select the backend. If set to 'sharedmem',
466
+ the selected backend will be single-host and thread-based even
467
+ if the user asked for a non-thread based backend with
468
+ parallel_backend.
469
+ verbose: int, optional
470
+ The verbosity level: if non zero, progress messages are
471
+ printed. Above 50, the output is sent to stdout.
472
+ The frequency of the messages increases with the verbosity level.
473
+ If it is more than 10, all iterations are reported.
474
+ timeout: float, optional
475
+ Timeout limit for each task to complete. If any task takes longer,
476
+ a TimeOutError will be raised. Only applied when n_jobs != 1.
477
+ pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
478
+ The number of batches (of tasks) to be pre-dispatched.
479
+ Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
480
+ default and the workers should never starve.
481
+ batch_size: int or 'auto', default: 'auto'
482
+ The number of atomic tasks to dispatch at once to each
483
+ worker. When individual evaluations are very fast, dispatching
484
+ calls to workers can be slower than sequential computation because
485
+ of the overhead. Batching fast computations together can mitigate
486
+ this.
487
+ The ``'auto'`` strategy keeps track of the time it takes for a batch
488
+ to complete, and dynamically adjusts the batch size to keep the time
489
+ on the order of half a second, using a heuristic. The initial batch
490
+ size is 1.
491
+ ``batch_size="auto"`` with ``backend="threading"`` will dispatch
492
+ batches of a single task at a time as the threading backend has
493
+ very little overhead and using larger batch size has not proved to
494
+ bring any gain in that case.
495
+ temp_folder: str, optional
496
+ Folder to be used by the pool for memmapping large arrays
497
+ for sharing memory with worker processes. If None, this will try in
498
+ order:
499
+
500
+ - a folder pointed by the JOBLIB_TEMP_FOLDER environment
501
+ variable,
502
+ - /dev/shm if the folder exists and is writable: this is a
503
+ RAM disk filesystem available by default on modern Linux
504
+ distributions,
505
+ - the default system temporary folder that can be
506
+ overridden with TMP, TMPDIR or TEMP environment
507
+ variables, typically /tmp under Unix operating systems.
508
+
509
+ Only active when backend="loky" or "multiprocessing".
510
+ max_nbytes: int, str, or None, optional, 1M by default
511
+ Threshold on the size of arrays passed to the workers that
512
+ triggers automated memory mapping in temp_folder. Can be an int
513
+ in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
514
+ Use None to disable memmapping of large arrays.
515
+ Only active when backend="loky" or "multiprocessing".
516
+ mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default: 'r'
517
+ Memmapping mode for numpy arrays passed to workers. None will
518
+ disable memmapping, other modes defined in the numpy.memmap doc:
519
+ https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
520
+ Also, see 'max_nbytes' parameter documentation for more details.
521
+
522
+ Notes
523
+ -----
524
+
525
+ This object uses workers to compute in parallel the application of a
526
+ function to many different arguments. The main functionality it brings
527
+ in addition to using the raw multiprocessing or concurrent.futures API
528
+ are (see examples for details):
529
+
530
+ * More readable code, in particular since it avoids
531
+ constructing lists of arguments.
532
+
533
+ * Easier debugging:
534
+ - informative tracebacks even when the error happens on
535
+ the client side
536
+ - using 'n_jobs=1' makes it possible to turn off parallel computing
537
+ for debugging without changing the codepath
538
+ - early capture of pickling errors
539
+
540
+ * An optional progress meter.
541
+
542
+ * Interruption of multiprocesses jobs with 'Ctrl-C'
543
+
544
+ * Flexible pickling control for the communication to and from
545
+ the worker processes.
546
+
547
+ * Ability to use shared memory efficiently with worker
548
+ processes for large numpy-based datastructures.
549
+
550
+ Examples
551
+ --------
552
+
553
+ A simple example:
554
+
555
+ >>> from math import sqrt
556
+ >>> from joblib import Parallel, delayed
557
+ >>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
558
+ [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
559
+
560
+ Reshaping the output when the function has several return
561
+ values:
562
+
563
+ >>> from math import modf
564
+ >>> from joblib import Parallel, delayed
565
+ >>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
566
+ >>> res, i = zip(*r)
567
+ >>> res
568
+ (0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
569
+ >>> i
570
+ (0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
571
+
572
+ The progress meter: the higher the value of `verbose`, the more
573
+ messages:
574
+
575
+ >>> from time import sleep
576
+ >>> from joblib import Parallel, delayed
577
+ >>> r = Parallel(n_jobs=2, verbose=10)(delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP
578
+ [Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s
579
+ [Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s
580
+ [Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished
581
+
582
+ Traceback example, note how the line of the error is indicated
583
+ as well as the values of the parameter passed to the function that
584
+ triggered the exception, even though the traceback happens in the
585
+ child process:
586
+
587
+ >>> from heapq import nlargest
588
+ >>> from joblib import Parallel, delayed
589
+ >>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
590
+ #...
591
+ ---------------------------------------------------------------------------
592
+ Sub-process traceback:
593
+ ---------------------------------------------------------------------------
594
+ TypeError Mon Nov 12 11:37:46 2012
595
+ PID: 12934 Python 2.7.3: /usr/bin/python
596
+ ...........................................................................
597
+ /usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
598
+ 419 if n >= size:
599
+ 420 return sorted(iterable, key=key, reverse=True)[:n]
600
+ 421
601
+ 422 # When key is none, use simpler decoration
602
+ 423 if key is None:
603
+ --> 424 it = izip(iterable, count(0,-1)) # decorate
604
+ 425 result = _nlargest(n, it)
605
+ 426 return map(itemgetter(0), result) # undecorate
606
+ 427
607
+ 428 # General case, slowest method
608
+ TypeError: izip argument #1 must support iteration
609
+ ___________________________________________________________________________
610
+
611
+
612
+ Using pre_dispatch in a producer/consumer situation, where the
613
+ data is generated on the fly. Note how the producer is first
614
+ called 3 times before the parallel loop is initiated, and then
615
+ called to generate new data on the fly:
616
+
617
+ >>> from math import sqrt
618
+ >>> from joblib import Parallel, delayed
619
+ >>> def producer():
620
+ ... for i in range(6):
621
+ ... print('Produced %s' % i)
622
+ ... yield i
623
+ >>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
624
+ ... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
625
+ Produced 0
626
+ Produced 1
627
+ Produced 2
628
+ [Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
629
+ Produced 3
630
+ [Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
631
+ Produced 4
632
+ [Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
633
+ Produced 5
634
+ [Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
635
+ [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s
636
+ [Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
637
+
638
+ '''
639
+ def __init__(self, n_jobs=None, backend=None, verbose=0, timeout=None,
640
+ pre_dispatch='2 * n_jobs', batch_size='auto',
641
+ temp_folder=None, max_nbytes='1M', mmap_mode='r',
642
+ prefer=None, require=None):
643
+ active_backend, context_n_jobs = get_active_backend(
644
+ prefer=prefer, require=require, verbose=verbose)
645
+ nesting_level = active_backend.nesting_level
646
+ if backend is None and n_jobs is None:
647
+ # If we are under a parallel_backend context manager, look up
648
+ # the default number of jobs and use that instead:
649
+ n_jobs = context_n_jobs
650
+ if n_jobs is None:
651
+ # No specific context override and no specific value request:
652
+ # default to 1.
653
+ n_jobs = 1
654
+ self.n_jobs = n_jobs
655
+ self.verbose = verbose
656
+ self.timeout = timeout
657
+ self.pre_dispatch = pre_dispatch
658
+ self._ready_batches = queue.Queue()
659
+ self._id = uuid4().hex
660
+ self._reducer_callback = None
661
+
662
+ if isinstance(max_nbytes, str):
663
+ max_nbytes = memstr_to_bytes(max_nbytes)
664
+
665
+ self._backend_args = dict(
666
+ max_nbytes=max_nbytes,
667
+ mmap_mode=mmap_mode,
668
+ temp_folder=temp_folder,
669
+ prefer=prefer,
670
+ require=require,
671
+ verbose=max(0, self.verbose - 50),
672
+ )
673
+ if DEFAULT_MP_CONTEXT is not None:
674
+ self._backend_args['context'] = DEFAULT_MP_CONTEXT
675
+ elif hasattr(mp, "get_context"):
676
+ self._backend_args['context'] = mp.get_context()
677
+
678
+ if backend is None:
679
+ backend = active_backend
680
+
681
+ elif isinstance(backend, ParallelBackendBase):
682
+ # Use provided backend as is, with the current nesting_level if it
683
+ # is not set yet.
684
+ if backend.nesting_level is None:
685
+ backend.nesting_level = nesting_level
686
+
687
+ elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
688
+ # Make it possible to pass a custom multiprocessing context as
689
+ # backend to change the start method to forkserver or spawn or
690
+ # preload modules on the forkserver helper process.
691
+ self._backend_args['context'] = backend
692
+ backend = MultiprocessingBackend(nesting_level=nesting_level)
693
+ else:
694
+ try:
695
+ backend_factory = BACKENDS[backend]
696
+ except KeyError as e:
697
+ raise ValueError("Invalid backend: %s, expected one of %r"
698
+ % (backend, sorted(BACKENDS.keys()))) from e
699
+ backend = backend_factory(nesting_level=nesting_level)
700
+
701
+ if (require == 'sharedmem' and
702
+ not getattr(backend, 'supports_sharedmem', False)):
703
+ raise ValueError("Backend %s does not support shared memory"
704
+ % backend)
705
+
706
+ if (batch_size == 'auto' or isinstance(batch_size, Integral) and
707
+ batch_size > 0):
708
+ self.batch_size = batch_size
709
+ else:
710
+ raise ValueError(
711
+ "batch_size must be 'auto' or a positive integer, got: %r"
712
+ % batch_size)
713
+
714
+ self._backend = backend
715
+ self._output = None
716
+ self._jobs = list()
717
+ self._managed_backend = False
718
+
719
+ # This lock is used to coordinate the main thread of this process with
720
+ # the async callback thread of the worker pool.
721
+ self._lock = threading.RLock()
722
+
723
+ def __enter__(self):
724
+ self._managed_backend = True
725
+ self._initialize_backend()
726
+ return self
727
+
728
+ def __exit__(self, exc_type, exc_value, traceback):
729
+ self._terminate_backend()
730
+ self._managed_backend = False
731
+
732
+ def _initialize_backend(self):
733
+ """Build a process or thread pool and return the number of workers"""
734
+ try:
735
+ n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
736
+ **self._backend_args)
737
+ if self.timeout is not None and not self._backend.supports_timeout:
738
+ warnings.warn(
739
+ 'The backend class {!r} does not support timeout. '
740
+ "You have set 'timeout={}' in Parallel but "
741
+ "the 'timeout' parameter will not be used.".format(
742
+ self._backend.__class__.__name__,
743
+ self.timeout))
744
+
745
+ except FallbackToBackend as e:
746
+ # Recursively initialize the backend in case of requested fallback.
747
+ self._backend = e.backend
748
+ n_jobs = self._initialize_backend()
749
+
750
+ return n_jobs
751
+
752
+ def _effective_n_jobs(self):
753
+ if self._backend:
754
+ return self._backend.effective_n_jobs(self.n_jobs)
755
+ return 1
756
+
757
+ def _terminate_backend(self):
758
+ if self._backend is not None:
759
+ self._backend.terminate()
760
+
761
+ def _dispatch(self, batch):
762
+ """Queue the batch for computing, with or without multiprocessing
763
+
764
+ WARNING: this method is not thread-safe: it should be only called
765
+ indirectly via dispatch_one_batch.
766
+
767
+ """
768
+ # If job.get() catches an exception, it closes the queue:
769
+ if self._aborting:
770
+ return
771
+
772
+ self.n_dispatched_tasks += len(batch)
773
+ self.n_dispatched_batches += 1
774
+
775
+ dispatch_timestamp = time.time()
776
+ cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
777
+ with self._lock:
778
+ job_idx = len(self._jobs)
779
+ job = self._backend.apply_async(batch, callback=cb)
780
+ # A job can complete so quickly that its callback is
781
+ # called before we get here, causing self._jobs to
782
+ # grow. To ensure correct results ordering, .insert is
783
+ # used (rather than .append) in the following line
784
+ self._jobs.insert(job_idx, job)
785
+
786
+ def dispatch_next(self):
787
+ """Dispatch more data for parallel processing
788
+
789
+ This method is meant to be called concurrently by the multiprocessing
790
+ callback. We rely on the thread-safety of dispatch_one_batch to protect
791
+ against concurrent consumption of the unprotected iterator.
792
+
793
+ """
794
+ if not self.dispatch_one_batch(self._original_iterator):
795
+ self._iterating = False
796
+ self._original_iterator = None
797
+
798
+ def dispatch_one_batch(self, iterator):
799
+ """Prefetch the tasks for the next batch and dispatch them.
800
+
801
+ The effective size of the batch is computed here.
802
+ If there are no more jobs to dispatch, return False, else return True.
803
+
804
+ The iterator consumption and dispatching is protected by the same
805
+ lock so calling this function should be thread safe.
806
+
807
+ """
808
+ if self.batch_size == 'auto':
809
+ batch_size = self._backend.compute_batch_size()
810
+ else:
811
+ # Fixed batch size strategy
812
+ batch_size = self.batch_size
813
+
814
+ with self._lock:
815
+ # To ensure an even distribution of the workload between workers,
816
+ # we look ahead in the original iterator by more than batch_size
817
+ # tasks. However, we keep consuming only one batch at each
818
+ # dispatch_one_batch call. The extra tasks are stored in a local
819
+ # queue, _ready_batches, that is looked-up prior to re-consuming
820
+ # tasks from the original iterator.
821
+ try:
822
+ tasks = self._ready_batches.get(block=False)
823
+ except queue.Empty:
824
+ # slice the iterator n_jobs * batchsize items at a time. If the
825
+ # slice returns less than that, then the current batchsize puts
826
+ # too much weight on a subset of workers, while other may end
827
+ # up starving. So in this case, re-scale the batch size
828
+ # accordingly to distribute evenly the last items between all
829
+ # workers.
830
+ n_jobs = self._cached_effective_n_jobs
831
+ big_batch_size = batch_size * n_jobs
832
+
833
+ islice = list(itertools.islice(iterator, big_batch_size))
834
+ if len(islice) == 0:
835
+ return False
836
+ elif (iterator is self._original_iterator
837
+ and len(islice) < big_batch_size):
838
+ # We reached the end of the original iterator (unless
839
+ # iterator is the ``pre_dispatch``-long initial slice of
840
+ # the original iterator) -- decrease the batch size to
841
+ # account for potential variance in the batches running
842
+ # time.
843
+ final_batch_size = max(1, len(islice) // (10 * n_jobs))
844
+ else:
845
+ final_batch_size = max(1, len(islice) // n_jobs)
846
+
847
+ # enqueue n_jobs batches in a local queue
848
+ for i in range(0, len(islice), final_batch_size):
849
+ tasks = BatchedCalls(islice[i:i + final_batch_size],
850
+ self._backend.get_nested_backend(),
851
+ self._reducer_callback,
852
+ self._pickle_cache)
853
+ self._ready_batches.put(tasks)
854
+
855
+ # finally, get one task.
856
+ tasks = self._ready_batches.get(block=False)
857
+ if len(tasks) == 0:
858
+ # No more tasks available in the iterator: tell caller to stop.
859
+ return False
860
+ else:
861
+ self._dispatch(tasks)
862
+ return True
863
+
864
+ def _print(self, msg, msg_args):
865
+ """Display the message on stdout or stderr depending on verbosity"""
866
+ # XXX: Not using the logger framework: need to
867
+ # learn to use logger better.
868
+ if not self.verbose:
869
+ return
870
+ if self.verbose < 50:
871
+ writer = sys.stderr.write
872
+ else:
873
+ writer = sys.stdout.write
874
+ msg = msg % msg_args
875
+ writer('[%s]: %s\n' % (self, msg))
876
+
877
+ def print_progress(self):
878
+ """Display the progress of the parallel execution only a fraction
879
+ of the time, controlled by self.verbose.
880
+ """
881
+ if not self.verbose:
882
+ return
883
+ elapsed_time = time.time() - self._start_time
884
+
885
+ # Original job iterator becomes None once it has been fully
886
+ # consumed : at this point we know the total number of jobs and we are
887
+ # able to display an estimation of the remaining time based on already
888
+ # completed jobs. Otherwise, we simply display the number of completed
889
+ # tasks.
890
+ if self._original_iterator is not None:
891
+ if _verbosity_filter(self.n_dispatched_batches, self.verbose):
892
+ return
893
+ self._print('Done %3i tasks | elapsed: %s',
894
+ (self.n_completed_tasks,
895
+ short_format_time(elapsed_time), ))
896
+ else:
897
+ index = self.n_completed_tasks
898
+ # We are finished dispatching
899
+ total_tasks = self.n_dispatched_tasks
900
+ # We always display the first loop
901
+ if not index == 0:
902
+ # Display depending on the number of remaining items
903
+ # A message as soon as we finish dispatching, cursor is 0
904
+ cursor = (total_tasks - index + 1 -
905
+ self._pre_dispatch_amount)
906
+ frequency = (total_tasks // self.verbose) + 1
907
+ is_last_item = (index + 1 == total_tasks)
908
+ if (is_last_item or cursor % frequency):
909
+ return
910
+ remaining_time = (elapsed_time / index) * \
911
+ (self.n_dispatched_tasks - index * 1.0)
912
+ # only display status if remaining time is greater or equal to 0
913
+ self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
914
+ (index,
915
+ total_tasks,
916
+ short_format_time(elapsed_time),
917
+ short_format_time(remaining_time),
918
+ ))
919
+
920
+ def retrieve(self):
921
+ self._output = list()
922
+ while self._iterating or len(self._jobs) > 0:
923
+ if len(self._jobs) == 0:
924
+ # Wait for an async callback to dispatch new jobs
925
+ time.sleep(0.01)
926
+ continue
927
+ # We need to be careful: the job list can be filling up as
928
+ # we empty it and Python list are not thread-safe by default hence
929
+ # the use of the lock
930
+ with self._lock:
931
+ job = self._jobs.pop(0)
932
+
933
+ try:
934
+ if getattr(self._backend, 'supports_timeout', False):
935
+ self._output.extend(job.get(timeout=self.timeout))
936
+ else:
937
+ self._output.extend(job.get())
938
+
939
+ except BaseException as exception:
940
+ # Note: we catch any BaseException instead of just Exception
941
+ # instances to also include KeyboardInterrupt.
942
+
943
+ # Stop dispatching any new job in the async callback thread
944
+ self._aborting = True
945
+
946
+ # If the backend allows it, cancel or kill remaining running
947
+ # tasks without waiting for the results as we will raise
948
+ # the exception we got back to the caller instead of returning
949
+ # any result.
950
+ backend = self._backend
951
+ if (backend is not None and
952
+ hasattr(backend, 'abort_everything')):
953
+ # If the backend is managed externally we need to make sure
954
+ # to leave it in a working state to allow for future jobs
955
+ # scheduling.
956
+ ensure_ready = self._managed_backend
957
+ backend.abort_everything(ensure_ready=ensure_ready)
958
+ raise
959
+
960
+ def __call__(self, iterable):
961
+ if self._jobs:
962
+ raise ValueError('This Parallel instance is already running')
963
+ # A flag used to abort the dispatching of jobs in case an
964
+ # exception is found
965
+ self._aborting = False
966
+
967
+ if not self._managed_backend:
968
+ n_jobs = self._initialize_backend()
969
+ else:
970
+ n_jobs = self._effective_n_jobs()
971
+
972
+ if isinstance(self._backend, LokyBackend):
973
+ # For the loky backend, we add a callback executed when reducing
974
+ # BatchCalls, that makes the loky executor use a temporary folder
975
+ # specific to this Parallel object when pickling temporary memmaps.
976
+ # This callback is necessary to ensure that several Parallel
977
+ # objects using the same resuable executor don't use the same
978
+ # temporary resources.
979
+
980
+ def _batched_calls_reducer_callback():
981
+ # Relevant implementation detail: the following lines, called
982
+ # when reducing BatchedCalls, are called in a thread-safe
983
+ # situation, meaning that the context of the temporary folder
984
+ # manager will not be changed in between the callback execution
985
+ # and the end of the BatchedCalls pickling. The reason is that
986
+ # pickling (the only place where set_current_context is used)
987
+ # is done from a single thread (the queue_feeder_thread).
988
+ self._backend._workers._temp_folder_manager.set_current_context( # noqa
989
+ self._id
990
+ )
991
+ self._reducer_callback = _batched_calls_reducer_callback
992
+
993
+ # self._effective_n_jobs should be called in the Parallel.__call__
994
+ # thread only -- store its value in an attribute for further queries.
995
+ self._cached_effective_n_jobs = n_jobs
996
+
997
+ backend_name = self._backend.__class__.__name__
998
+ if n_jobs == 0:
999
+ raise RuntimeError("%s has no active worker." % backend_name)
1000
+
1001
+ self._print("Using backend %s with %d concurrent workers.",
1002
+ (backend_name, n_jobs))
1003
+ if hasattr(self._backend, 'start_call'):
1004
+ self._backend.start_call()
1005
+ iterator = iter(iterable)
1006
+ pre_dispatch = self.pre_dispatch
1007
+
1008
+ if pre_dispatch == 'all' or n_jobs == 1:
1009
+ # prevent further dispatch via multiprocessing callback thread
1010
+ self._original_iterator = None
1011
+ self._pre_dispatch_amount = 0
1012
+ else:
1013
+ self._original_iterator = iterator
1014
+ if hasattr(pre_dispatch, 'endswith'):
1015
+ pre_dispatch = eval(pre_dispatch)
1016
+ self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
1017
+
1018
+ # The main thread will consume the first pre_dispatch items and
1019
+ # the remaining items will later be lazily dispatched by async
1020
+ # callbacks upon task completions.
1021
+
1022
+ # TODO: this iterator should be batch_size * n_jobs
1023
+ iterator = itertools.islice(iterator, self._pre_dispatch_amount)
1024
+
1025
+ self._start_time = time.time()
1026
+ self.n_dispatched_batches = 0
1027
+ self.n_dispatched_tasks = 0
1028
+ self.n_completed_tasks = 0
1029
+ # Use a caching dict for callables that are pickled with cloudpickle to
1030
+ # improve performances. This cache is used only in the case of
1031
+ # functions that are defined in the __main__ module, functions that are
1032
+ # defined locally (inside another function) and lambda expressions.
1033
+ self._pickle_cache = dict()
1034
+ try:
1035
+ # Only set self._iterating to True if at least a batch
1036
+ # was dispatched. In particular this covers the edge
1037
+ # case of Parallel used with an exhausted iterator. If
1038
+ # self._original_iterator is None, then this means either
1039
+ # that pre_dispatch == "all", n_jobs == 1 or that the first batch
1040
+ # was very quick and its callback already dispatched all the
1041
+ # remaining jobs.
1042
+ self._iterating = False
1043
+ if self.dispatch_one_batch(iterator):
1044
+ self._iterating = self._original_iterator is not None
1045
+
1046
+ while self.dispatch_one_batch(iterator):
1047
+ pass
1048
+
1049
+ if pre_dispatch == "all" or n_jobs == 1:
1050
+ # The iterable was consumed all at once by the above while loop.
1051
+ # No need to wait for async callbacks to trigger
1052
+ # consumption.
1053
+ self._iterating = False
1054
+
1055
+ with self._backend.retrieval_context():
1056
+ self.retrieve()
1057
+ # Make sure that we get a last message telling us we are done
1058
+ elapsed_time = time.time() - self._start_time
1059
+ self._print('Done %3i out of %3i | elapsed: %s finished',
1060
+ (len(self._output), len(self._output),
1061
+ short_format_time(elapsed_time)))
1062
+ finally:
1063
+ if hasattr(self._backend, 'stop_call'):
1064
+ self._backend.stop_call()
1065
+ if not self._managed_backend:
1066
+ self._terminate_backend()
1067
+ self._jobs = list()
1068
+ self._pickle_cache = None
1069
+ output = self._output
1070
+ self._output = None
1071
+ return output
1072
+
1073
+ def __repr__(self):
1074
+ return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/pool.py ADDED
@@ -0,0 +1,352 @@
1
+ """Custom implementation of multiprocessing.Pool with custom pickler.
2
+
3
+ This module provides efficient ways of working with data stored in
4
+ shared memory with numpy.memmap arrays without inducing any memory
5
+ copy between the parent and child processes.
6
+
7
+ This module should not be imported if multiprocessing is not
8
+ available as it implements subclasses of multiprocessing Pool
9
+ that use a custom alternative to SimpleQueue.
10
+
11
+ """
12
+ # Author: Olivier Grisel <olivier.grisel@ensta.org>
13
+ # Copyright: 2012, Olivier Grisel
14
+ # License: BSD 3 clause
15
+
16
+ import copyreg
17
+ import sys
18
+ import warnings
19
+ from time import sleep
20
+
21
+ try:
22
+ WindowsError
23
+ except NameError:
24
+ WindowsError = type(None)
25
+
26
+ from pickle import Pickler
27
+
28
+ from pickle import HIGHEST_PROTOCOL
29
+ from io import BytesIO
30
+
31
+ from ._memmapping_reducer import get_memmapping_reducers
32
+ from ._memmapping_reducer import TemporaryResourcesManager
33
+ from ._multiprocessing_helpers import mp, assert_spawning
34
+
35
+ # We need the class definition to derive from it, not the multiprocessing.Pool
36
+ # factory function
37
+ from multiprocessing.pool import Pool
38
+
39
+ try:
40
+ import numpy as np
41
+ except ImportError:
42
+ np = None
43
+
44
+
45
+ ###############################################################################
46
+ # Enable custom pickling in Pool queues
47
+
48
+ class CustomizablePickler(Pickler):
49
+ """Pickler that accepts custom reducers.
50
+
51
+ TODO python2_drop : can this be simplified ?
52
+
53
+ HIGHEST_PROTOCOL is selected by default as this pickler is used
54
+ to pickle ephemeral datastructures for interprocess communication
55
+ hence no backward compatibility is required.
56
+
57
+ `reducers` is expected to be a dictionary with key/values
58
+ being `(type, callable)` pairs where `callable` is a function that
59
+ give an instance of `type` will return a tuple `(constructor,
60
+ tuple_of_objects)` to rebuild an instance out of the pickled
61
+ `tuple_of_objects` as would return a `__reduce__` method. See the
62
+ standard library documentation on pickling for more details.
63
+
64
+ """
65
+
66
+ # We override the pure Python pickler as its the only way to be able to
67
+ # customize the dispatch table without side effects in Python 2.7
68
+ # to 3.2. For Python 3.3+ leverage the new dispatch_table
69
+ # feature from https://bugs.python.org/issue14166 that makes it possible
70
+ # to use the C implementation of the Pickler which is faster.
71
+
72
+ def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
73
+ Pickler.__init__(self, writer, protocol=protocol)
74
+ if reducers is None:
75
+ reducers = {}
76
+ if hasattr(Pickler, 'dispatch'):
77
+ # Make the dispatch registry an instance level attribute instead of
78
+ # a reference to the class dictionary under Python 2
79
+ self.dispatch = Pickler.dispatch.copy()
80
+ else:
81
+ # Under Python 3 initialize the dispatch table with a copy of the
82
+ # default registry
83
+ self.dispatch_table = copyreg.dispatch_table.copy()
84
+ for type, reduce_func in reducers.items():
85
+ self.register(type, reduce_func)
86
+
87
+ def register(self, type, reduce_func):
88
+ """Attach a reducer function to a given type in the dispatch table."""
89
+ if hasattr(Pickler, 'dispatch'):
90
+ # Python 2 pickler dispatching is not explicitly customizable.
91
+ # Let us use a closure to workaround this limitation.
92
+ def dispatcher(self, obj):
93
+ reduced = reduce_func(obj)
94
+ self.save_reduce(obj=obj, *reduced)
95
+ self.dispatch[type] = dispatcher
96
+ else:
97
+ self.dispatch_table[type] = reduce_func
98
+
99
+
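A hedged usage sketch for the pickler above: register a reducer for a made-up Point class so instances travel as a plain (constructor, args) pair, mirroring the __reduce__ contract.

import io
import pickle

class Point:                                  # hypothetical example type
    def __init__(self, x, y):
        self.x, self.y = x, y

def reduce_point(p):
    return (Point, (p.x, p.y))                # same contract as __reduce__

buf = io.BytesIO()
CustomizablePickler(buf, reducers={Point: reduce_point}).dump(Point(1, 2))
restored = pickle.loads(buf.getvalue())
print(restored.x, restored.y)                 # 1 2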
100
+ class CustomizablePicklingQueue(object):
101
+ """Locked Pipe implementation that uses a customizable pickler.
102
+
103
+ This class is an alternative to the multiprocessing implementation
104
+ of SimpleQueue in order to make it possible to pass custom
105
+ pickling reducers, for instance to avoid memory copy when passing
106
+ memory mapped datastructures.
107
+
108
+ `reducers` is expected to be a dict with key / values being
109
+ `(type, callable)` pairs where `callable` is a function that, given an
110
+ instance of `type`, will return a tuple `(constructor, tuple_of_objects)`
111
+ to rebuild an instance out of the pickled `tuple_of_objects` as would
112
+ return a `__reduce__` method.
113
+
114
+ See the standard library documentation on pickling for more details.
115
+ """
116
+
117
+ def __init__(self, context, reducers=None):
118
+ self._reducers = reducers
119
+ self._reader, self._writer = context.Pipe(duplex=False)
120
+ self._rlock = context.Lock()
121
+ if sys.platform == 'win32':
122
+ self._wlock = None
123
+ else:
124
+ self._wlock = context.Lock()
125
+ self._make_methods()
126
+
127
+ def __getstate__(self):
128
+ assert_spawning(self)
129
+ return (self._reader, self._writer, self._rlock, self._wlock,
130
+ self._reducers)
131
+
132
+ def __setstate__(self, state):
133
+ (self._reader, self._writer, self._rlock, self._wlock,
134
+ self._reducers) = state
135
+ self._make_methods()
136
+
137
+ def empty(self):
138
+ return not self._reader.poll()
139
+
140
+ def _make_methods(self):
141
+ self._recv = recv = self._reader.recv
142
+ racquire, rrelease = self._rlock.acquire, self._rlock.release
143
+
144
+ def get():
145
+ racquire()
146
+ try:
147
+ return recv()
148
+ finally:
149
+ rrelease()
150
+
151
+ self.get = get
152
+
153
+ if self._reducers:
154
+ def send(obj):
155
+ buffer = BytesIO()
156
+ CustomizablePickler(buffer, self._reducers).dump(obj)
157
+ self._writer.send_bytes(buffer.getvalue())
158
+ self._send = send
159
+ else:
160
+ self._send = send = self._writer.send
161
+ if self._wlock is None:
162
+ # writes to a message oriented win32 pipe are atomic
163
+ self.put = send
164
+ else:
165
+ wlock_acquire, wlock_release = (
166
+ self._wlock.acquire, self._wlock.release)
167
+
168
+ def put(obj):
169
+ wlock_acquire()
170
+ try:
171
+ return send(obj)
172
+ finally:
173
+ wlock_release()
174
+
175
+ self.put = put
176
+
177
+
178
+ class PicklingPool(Pool):
179
+ """Pool implementation with customizable pickling reducers.
180
+
181
+ This is useful to control how data is shipped between processes
182
+ and makes it possible to use shared memory without useless
183
+ copies induces by the default pickling methods of the original
184
+ objects passed as arguments to dispatch.
185
+
186
+ `forward_reducers` and `backward_reducers` are expected to be
187
+ dictionaries with key/values being `(type, callable)` pairs where
188
+ `callable` is a function that, given an instance of `type`, will return a
189
+ tuple `(constructor, tuple_of_objects)` to rebuild an instance out of the
190
+ pickled `tuple_of_objects` as would return a `__reduce__` method.
191
+ See the standard library documentation about pickling for more details.
192
+
193
+ """
194
+
195
+ def __init__(self, processes=None, forward_reducers=None,
196
+ backward_reducers=None, **kwargs):
197
+ if forward_reducers is None:
198
+ forward_reducers = dict()
199
+ if backward_reducers is None:
200
+ backward_reducers = dict()
201
+ self._forward_reducers = forward_reducers
202
+ self._backward_reducers = backward_reducers
203
+ poolargs = dict(processes=processes)
204
+ poolargs.update(kwargs)
205
+ super(PicklingPool, self).__init__(**poolargs)
206
+
207
+ def _setup_queues(self):
208
+ context = getattr(self, '_ctx', mp)
209
+ self._inqueue = CustomizablePicklingQueue(context,
210
+ self._forward_reducers)
211
+ self._outqueue = CustomizablePicklingQueue(context,
212
+ self._backward_reducers)
213
+ self._quick_put = self._inqueue._send
214
+ self._quick_get = self._outqueue._recv
215
+
216
+
217
+ class MemmappingPool(PicklingPool):
218
+ """Process pool that shares large arrays to avoid memory copy.
219
+
220
+ This drop-in replacement for `multiprocessing.pool.Pool` makes
221
+ it possible to work efficiently with shared memory in a numpy
222
+ context.
223
+
224
+ Existing instances of numpy.memmap are preserved: the child
225
+ subprocesses will have access to the same shared memory in the
226
+ original mode except for the 'w+' mode that is automatically
227
+ transformed to 'r+' to avoid zeroing the original data upon
228
+ instantiation.
229
+
230
+ Furthermore, large arrays from the parent process are automatically
231
+ dumped to a temporary folder on the filesystem so that child
232
+ processes can access their content via memmapping (file system
233
+ backed shared memory).
234
+
235
+ Note: it is important to call the terminate method to collect
236
+ the temporary folder used by the pool.
237
+
238
+ Parameters
239
+ ----------
240
+ processes: int, optional
241
+ Number of worker processes running concurrently in the pool.
242
+ initializer: callable, optional
243
+ Callable executed on worker process creation.
244
+ initargs: tuple, optional
245
+ Arguments passed to the initializer callable.
246
+ temp_folder: (str, callable) optional
247
+ If str:
248
+ Folder to be used by the pool for memmapping large arrays
249
+ for sharing memory with worker processes. If None, this will try in
250
+ order:
251
+ - a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
252
+ - /dev/shm if the folder exists and is writable: this is a RAMdisk
253
+ filesystem available by default on modern Linux distributions,
254
+ - the default system temporary folder that can be overridden
255
+ with TMP, TMPDIR or TEMP environment variables, typically /tmp
256
+ under Unix operating systems.
257
+ if callable:
258
+ A callable in charge of dynamically resolving a temporary folder
259
+ for memmapping large arrays.
260
+ max_nbytes: int or None, optional, 1e6 by default
261
+ Threshold on the size of arrays passed to the workers that
262
+ triggers automated memory mapping in temp_folder.
263
+ Use None to disable memmapping of large arrays.
264
+ mmap_mode: {'r+', 'r', 'w+', 'c'}
265
+ Memmapping mode for numpy arrays passed to workers.
266
+ See 'max_nbytes' parameter documentation for more details.
267
+ forward_reducers: dictionary, optional
268
+ Reducers used to pickle objects passed from master to worker
269
+ processes: see below.
270
+ backward_reducers: dictionary, optional
271
+ Reducers used to pickle return values from workers back to the
272
+ master process.
273
+ verbose: int, optional
274
+ Make it possible to monitor how the communication of numpy arrays
275
+ with the subprocesses is handled (pickling or memmapping).
276
+ prewarm: bool or str, optional, "auto" by default.
277
+ If True, force a read on the newly memmapped array to make sure that the OS
278
+ pre-caches it in memory. This can be useful to avoid concurrent disk
279
+ access when the same data array is passed to different worker
280
+ processes. If "auto" (by default), prewarm is set to True, unless the
281
+ Linux shared memory partition /dev/shm is available and used as temp
282
+ folder.
283
+
284
+ `forward_reducers` and `backward_reducers` are expected to be
285
+ dictionaries with key/values being `(type, callable)` pairs where
286
+ `callable` is a function that, given an instance of `type`, will return
287
+ a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
288
+ of the pickled `tuple_of_objects` as would return a `__reduce__`
289
+ method. See the standard library documentation on pickling for more
290
+ details.
291
+
292
+ """
293
+
294
+ def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
295
+ mmap_mode='r', forward_reducers=None, backward_reducers=None,
296
+ verbose=0, context_id=None, prewarm=False, **kwargs):
297
+
298
+ if context_id is not None:
299
+ warnings.warn('context_id is deprecated and ignored in joblib'
300
+ ' 0.9.4 and will be removed in 0.11',
301
+ DeprecationWarning)
302
+
303
+ manager = TemporaryResourcesManager(temp_folder)
304
+ self._temp_folder_manager = manager
305
+
306
+ # The usage of a temp_folder_resolver over a simple temp_folder is
307
+ # superfluous for multiprocessing pools, as they don't get reused, see
308
+ # get_memmapping_executor for more details. We still use it for code
309
+ # simplicity.
310
+ forward_reducers, backward_reducers = \
311
+ get_memmapping_reducers(
312
+ temp_folder_resolver=manager.resolve_temp_folder_name,
313
+ max_nbytes=max_nbytes, mmap_mode=mmap_mode,
314
+ forward_reducers=forward_reducers,
315
+ backward_reducers=backward_reducers, verbose=verbose,
316
+ unlink_on_gc_collect=False, prewarm=prewarm)
317
+
318
+ poolargs = dict(
319
+ processes=processes,
320
+ forward_reducers=forward_reducers,
321
+ backward_reducers=backward_reducers)
322
+ poolargs.update(kwargs)
323
+ super(MemmappingPool, self).__init__(**poolargs)
324
+
325
+ def terminate(self):
326
+ n_retries = 10
327
+ for i in range(n_retries):
328
+ try:
329
+ super(MemmappingPool, self).terminate()
330
+ break
331
+ except OSError as e:
332
+ if isinstance(e, WindowsError):
333
+ # Workaround occasional "[Error 5] Access is denied" issue
334
+ # when trying to terminate a process under windows.
335
+ sleep(0.1)
336
+ if i + 1 == n_retries:
337
+ warnings.warn("Failed to terminate worker processes in"
338
+ " multiprocessing pool: %r" % e)
339
+ self._temp_folder_manager._unlink_temporary_resources()
340
+
341
+ @property
342
+ def _temp_folder(self):
343
+ # Legacy property in tests. Could be removed if we refactored the
344
+ # memmapping tests. SHOULD ONLY BE USED IN TESTS!
345
+ # We cache this property because it is called late in the tests - at
346
+ # this point, all contexts have been unregistered, and
347
+ # resolve_temp_folder_name raises an error.
348
+ if getattr(self, '_cached_temp_folder', None) is not None:
349
+ return self._cached_temp_folder
350
+ else:
351
+ self._cached_temp_folder = self._temp_folder_manager.resolve_temp_folder_name() # noqa
352
+ return self._cached_temp_folder
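The reducer contract documented above (a `(type, callable)` pair whose callable returns a `(constructor, tuple_of_objects)` tuple, exactly as a `__reduce__` method would) can be illustrated with a minimal sketch. This example is editorial, not part of the diff: the reducer name `reduce_ndarray` and the toy data are hypothetical, and it assumes the file above is importable as `joblib.pool`.

import numpy as np
from joblib.pool import PicklingPool

def reduce_ndarray(arr):
    # A reducer returns (constructor, args), exactly as __reduce__ would.
    # A real reducer could return a memmap-backed constructor to avoid
    # copies; here we simply rebuild the array from a plain list.
    return (np.array, (arr.tolist(),))

if __name__ == '__main__':
    pool = PicklingPool(processes=2,
                        forward_reducers={np.ndarray: reduce_ndarray})
    try:
        print(pool.map(np.sum, [np.arange(5), np.arange(10)]))
    finally:
        pool.terminate()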
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/joblib/testing.py ADDED
@@ -0,0 +1,77 @@
1
+ """
2
+ Helper for testing.
3
+ """
4
+
5
+ import sys
6
+ import warnings
7
+ import os.path
8
+ import re
9
+ import subprocess
10
+ import threading
11
+
12
+ import pytest
13
+ import _pytest
14
+
15
+
16
+ raises = pytest.raises
17
+ warns = pytest.warns
18
+ SkipTest = _pytest.runner.Skipped
19
+ skipif = pytest.mark.skipif
20
+ fixture = pytest.fixture
21
+ parametrize = pytest.mark.parametrize
22
+ timeout = pytest.mark.timeout
23
+ xfail = pytest.mark.xfail
24
+ param = pytest.param
25
+
26
+
27
+ def warnings_to_stdout():
28
+ """ Redirect all warnings to stdout.
29
+ """
30
+ showwarning_orig = warnings.showwarning
31
+
32
+ def showwarning(msg, cat, fname, lno, file=None, line=0):
33
+ showwarning_orig(msg, cat, os.path.basename(fname), line, sys.stdout)
34
+
35
+ warnings.showwarning = showwarning
36
+ # warnings.simplefilter('always')
37
+
38
+
39
+ def check_subprocess_call(cmd, timeout=5, stdout_regex=None,
40
+ stderr_regex=None):
41
+ """Runs a command in a subprocess with timeout in seconds.
42
+
43
+ Also checks returncode is zero, stdout if stdout_regex is set, and
44
+ stderr if stderr_regex is set.
45
+ """
46
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
47
+ stderr=subprocess.PIPE)
48
+
49
+ def kill_process():
50
+ warnings.warn("Timeout running {}".format(cmd))
51
+ proc.kill()
52
+
53
+ timer = threading.Timer(timeout, kill_process)
54
+ try:
55
+ timer.start()
56
+ stdout, stderr = proc.communicate()
57
+ stdout, stderr = stdout.decode(), stderr.decode()
58
+ if proc.returncode != 0:
59
+ message = (
60
+ 'Non-zero return code: {}.\nStdout:\n{}\n'
61
+ 'Stderr:\n{}').format(
62
+ proc.returncode, stdout, stderr)
63
+ raise ValueError(message)
64
+
65
+ if (stdout_regex is not None and
66
+ not re.search(stdout_regex, stdout)):
67
+ raise ValueError(
68
+ "Unexpected stdout: {!r} does not match:\n{!r}".format(
69
+ stdout_regex, stdout))
70
+ if (stderr_regex is not None and
71
+ not re.search(stderr_regex, stderr)):
72
+ raise ValueError(
73
+ "Unexpected stderr: {!r} does not match:\n{!r}".format(
74
+ stderr_regex, stderr))
75
+
76
+ finally:
77
+ timer.cancel()
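A small usage sketch for `check_subprocess_call` defined above (illustrative only; the command and regex are made up, and the helper requires `pytest` to be installed since the module imports it):

import sys
from joblib.testing import check_subprocess_call

# Run a short Python snippet and check both the return code and stdout.
check_subprocess_call(
    [sys.executable, '-c', "print('hello from joblib')"],
    timeout=10,
    stdout_regex=r'hello from joblib',
)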
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/__init__.py ADDED
@@ -0,0 +1,92 @@
1
+ """
2
+ Machine Learning module for NeuroImaging in python
3
+ --------------------------------------------------
4
+
5
+ Documentation is available in the docstrings and online at
6
+ http://nilearn.github.io.
7
+
8
+ Contents
9
+ --------
10
+ Nilearn aims at simplifying the use of the scikit-learn package in the context of
11
+ neuroimaging. It provides specific input/output functions, algorithms and
12
+ visualization tools.
13
+
14
+ Submodules
15
+ ---------
16
+ datasets --- Utilities to download NeuroImaging datasets
17
+ decoding --- Decoding tools and algorithms
18
+ decomposition --- Includes a subject level variant of the ICA
19
+ algorithm called Canonical ICA
20
+ connectome --- Set of tools for computing functional connectivity matrices
21
+ and for sparse multi-subjects learning of Gaussian graphical models
22
+ image --- Set of functions defining mathematical operations
23
+ working on Niimg-like objects
24
+ maskers --- Includes scikit-learn transformers.
25
+ masking --- Utilities to compute and operate on brain masks
26
+ interfaces --- Includes tools to preprocess neuro-imaging data
27
+ from various common interfaces like fMRIPrep.
28
+ mass_univariate --- Defines a Massively Univariate Linear Model
29
+ estimated with OLS and permutation test
30
+ plotting --- Plotting code for nilearn
31
+ regions --- Set of functions for extracting region-defined
32
+ signals, clustering methods, connected regions extraction
33
+ signal --- Set of preprocessing functions for time series
34
+ """
35
+
36
+ import gzip
37
+ import os
38
+ import sys
39
+ import pkg_resources
40
+ import warnings
41
+
42
+ from .version import (
43
+ _check_module_dependencies, __version__, _compare_version
44
+ )
45
+
46
+ # Workaround issue discovered in intel-openmp 2019.5:
47
+ # https://github.com/ContinuumIO/anaconda-issues/issues/11294
48
+ #
49
+ # see also https://github.com/scikit-learn/scikit-learn/pull/15020
50
+ os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE")
51
+
52
+
53
+ def _py36_deprecation_warning():
54
+ py36_warning = ("Python 3.6 support is deprecated and will be removed in "
55
+ "release 0.10 of Nilearn. Consider switching to "
56
+ "Python 3.8 or 3.9.")
57
+ warnings.filterwarnings('once', message=py36_warning)
58
+ warnings.warn(message=py36_warning,
59
+ category=FutureWarning,
60
+ stacklevel=3)
61
+
62
+
63
+ def _python_deprecation_warnings():
64
+ if sys.version_info.major == 3 and sys.version_info.minor == 6:
65
+ _py36_deprecation_warning()
66
+
67
+
68
+ _check_module_dependencies()
69
+ _python_deprecation_warnings()
70
+
71
+
72
+ # Monkey-patch gzip to have faster reads on large gzip files
73
+ if hasattr(gzip.GzipFile, 'max_read_chunk'):
74
+ gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb
75
+
76
+ # Boolean controlling the default globbing technique when using check_niimg
77
+ # and the os.path.expanduser usage in CacheMixin.
78
+ # Default value is True, set it to False to completely deactivate this
79
+ # behavior.
80
+ EXPAND_PATH_WILDCARDS = True
81
+
82
+ # Boolean controlling whether the joblib caches should be
83
+ # flushed if the version of certain modules changes (eg nibabel, as it
84
+ # does not respect the backward compatibility in some of its internal
85
+ # structures)
86
+ # This is used in nilearn._utils.cache_mixin
87
+ CHECK_CACHE_VERSION = True
88
+
89
+ # list all submodules available in nilearn and version
90
+ __all__ = ['datasets', 'decoding', 'decomposition', 'connectome',
91
+ 'image', 'maskers', 'masking', 'interfaces', 'mass_univariate',
92
+ 'plotting', 'regions', 'signal', 'surface', '__version__']
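A minimal, hedged sketch of how the module-level switches defined in this `__init__.py` are typically used (assumes nilearn and its required dependencies are installed):

import nilearn

print(nilearn.__version__)

# The flag defined above defaults to True; setting it to False disables
# wildcard expansion and expanduser handling for path inputs.
nilearn.EXPAND_PATH_WILDCARDS = False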
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/conftest.py ADDED
@@ -0,0 +1,51 @@
1
+
2
+ import numpy as np
3
+ import pytest
4
+
5
+ from _pytest.doctest import DoctestItem
6
+
7
+ # we need to import these fixtures even if not used in this module
8
+ from nilearn.datasets._testing import request_mocker # noqa: F401
9
+ from nilearn.datasets._testing import temp_nilearn_data_dir # noqa: F401
10
+ from nilearn.version import _compare_version
11
+
12
+
13
+ collect_ignore = ["datasets/data/convert_templates.py"]
14
+
15
+
16
+ try:
17
+ import matplotlib # noqa: F401
18
+ except ImportError:
19
+ collect_ignore.extend(['plotting', 'reporting'])
20
+ matplotlib = None
21
+
22
+
23
+ def pytest_configure(config):
24
+ """Use Agg so that no figures pop up."""
25
+ if matplotlib is not None:
26
+ matplotlib.use('Agg', force=True)
27
+
28
+
29
+ @pytest.fixture(autouse=True)
30
+ def close_all():
31
+ """Close all matplotlib figures."""
32
+ yield
33
+ if matplotlib is not None:
34
+ import matplotlib.pyplot as plt
35
+ plt.close('all') # takes < 1 us so just always do it
36
+
37
+
38
+ def pytest_collection_modifyitems(items):
39
+ # numpy changed the str/repr formatting of numpy arrays in 1.14.
40
+ # We want to run doctests only for numpy >= 1.14. Adapted from scikit-learn.
41
+ if _compare_version(np.__version__, '<', '1.14'):
42
+ reason = 'doctests are only run for numpy >= 1.14'
43
+ skip_doctests = True
44
+ else:
45
+ skip_doctests = False
46
+
47
+ if skip_doctests:
48
+ skip_marker = pytest.mark.skip(reason=reason)
49
+ for item in items:
50
+ if isinstance(item, DoctestItem):
51
+ item.add_marker(skip_marker)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/masking.py ADDED
@@ -0,0 +1,940 @@
1
+ """
2
+ Utilities to compute and operate on brain masks
3
+ """
4
+ # Authors: Gael Varoquaux, Alexandre Abraham, Philippe Gervais, Ana Luisa Pinho
5
+ # License: simplified BSD
6
+ import warnings
7
+ import numbers
8
+
9
+ import numpy as np
10
+ from scipy import ndimage
11
+ from joblib import Parallel, delayed
12
+
13
+ from sklearn.utils import deprecated
14
+ from . import _utils
15
+ from .image import get_data, new_img_like, resampling
16
+ from ._utils import fill_doc
17
+ from ._utils.cache_mixin import cache
18
+ from ._utils.ndimage import largest_connected_component, get_border_data
19
+ from ._utils.niimg import _safe_get_data
20
+ from .datasets import (load_mni152_template, load_mni152_gm_template,
21
+ load_mni152_wm_template)
22
+
23
+
24
+ class MaskWarning(UserWarning):
25
+ "A class to always raise warnings"
26
+
27
+
28
+ warnings.simplefilter("always", MaskWarning)
29
+
30
+
31
+ def _load_mask_img(mask_img, allow_empty=False):
32
+ """Check that a mask is valid, i.e. with two values including 0, and load it.
33
+
34
+ Parameters
35
+ ----------
36
+ mask_img : Niimg-like object
37
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
38
+ The mask to check.
39
+
40
+ allow_empty : :obj:`bool`, optional
41
+ Allow loading an empty mask (full of 0 values).
42
+ Default=False.
43
+
44
+ Returns
45
+ -------
46
+ mask : :class:`numpy.ndarray`
47
+ Boolean version of the mask.
48
+
49
+ mask_affine: None or (4,4) array-like
50
+ Affine of the mask.
51
+ """
52
+ mask_img = _utils.check_niimg_3d(mask_img)
53
+ mask = _safe_get_data(mask_img, ensure_finite=True)
54
+ values = np.unique(mask)
55
+
56
+ if len(values) == 1:
57
+ # We accept a single value if it is not 0 (full true mask).
58
+ if values[0] == 0 and not allow_empty:
59
+ raise ValueError(
60
+ 'The mask is invalid as it is empty: it masks all data.')
61
+ elif len(values) == 2:
62
+ # If there are 2 different values, one of them must be 0 (background)
63
+ if 0 not in values:
64
+ raise ValueError('Background of the mask must be represented with '
65
+ '0. Given mask contains: %s.' % values)
66
+ elif len(values) != 2:
67
+ # If there are more than 2 values, the mask is invalid
68
+ raise ValueError('Given mask is not made of 2 values: %s'
69
+ '. Cannot interpret as true or false'
70
+ % values)
71
+
72
+ mask = _utils.as_ndarray(mask, dtype=bool)
73
+ return mask, mask_img.affine
74
+
75
+
76
+ def _extrapolate_out_mask(data, mask, iterations=1):
77
+ """Extrapolate values outside of the mask."""
78
+ if iterations > 1:
79
+ data, mask = _extrapolate_out_mask(data, mask,
80
+ iterations=iterations - 1)
81
+ new_mask = ndimage.binary_dilation(mask)
82
+ larger_mask = np.zeros(np.array(mask.shape) + 2, dtype=bool)
83
+ larger_mask[1:-1, 1:-1, 1:-1] = mask
84
+ # Use nans as missing value: ugly
85
+ masked_data = np.zeros(larger_mask.shape + data.shape[3:])
86
+ masked_data[1:-1, 1:-1, 1:-1] = data.copy()
87
+ masked_data[np.logical_not(larger_mask)] = np.nan
88
+ outer_shell = larger_mask.copy()
89
+ outer_shell[1:-1, 1:-1, 1:-1] = np.logical_xor(new_mask, mask)
90
+ outer_shell_x, outer_shell_y, outer_shell_z = np.where(outer_shell)
91
+ extrapolation = list()
92
+ for i, j, k in [(1, 0, 0), (-1, 0, 0),
93
+ (0, 1, 0), (0, -1, 0),
94
+ (0, 0, 1), (0, 0, -1)]:
95
+ this_x = outer_shell_x + i
96
+ this_y = outer_shell_y + j
97
+ this_z = outer_shell_z + k
98
+ extrapolation.append(masked_data[this_x, this_y, this_z])
99
+
100
+ extrapolation = np.array(extrapolation)
101
+ extrapolation = (np.nansum(extrapolation, axis=0) /
102
+ np.sum(np.isfinite(extrapolation), axis=0))
103
+ extrapolation[np.logical_not(np.isfinite(extrapolation))] = 0
104
+ new_data = np.zeros_like(masked_data)
105
+ new_data[outer_shell] = extrapolation
106
+ new_data[larger_mask] = masked_data[larger_mask]
107
+ return new_data[1:-1, 1:-1, 1:-1], new_mask
108
+
109
+
110
+ #
111
+ # Utilities to compute masks
112
+ #
113
+ @_utils.fill_doc
114
+ def intersect_masks(mask_imgs, threshold=0.5, connected=True):
115
+ """Compute intersection of several masks.
116
+
117
+ Given a list of input mask images, generate the output image which
118
+ is the threshold-level intersection of the inputs.
119
+
120
+ Parameters
121
+ ----------
122
+ mask_imgs : :obj:`list` of Niimg-like objects
123
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
124
+ 3D individual masks with same shape and affine.
125
+
126
+ threshold : :obj:`float`, optional
127
+ Gives the level of the intersection, must be within [0, 1].
128
+ threshold=1 corresponds to keeping the intersection of all
129
+ masks, whereas threshold=0 is the union of all masks.
130
+ Default=0.5.
131
+ %(connected)s
132
+ Default=True.
133
+
134
+ Returns
135
+ -------
136
+ grp_mask : 3D :class:`nibabel.nifti1.Nifti1Image`
137
+ Intersection of all masks.
138
+ """
139
+ if len(mask_imgs) == 0:
140
+ raise ValueError('No mask provided for intersection')
141
+ grp_mask = None
142
+ first_mask, ref_affine = _load_mask_img(mask_imgs[0], allow_empty=True)
143
+ ref_shape = first_mask.shape
144
+ if threshold > 1:
145
+ raise ValueError('The threshold should be smaller than 1')
146
+ if threshold < 0:
147
+ raise ValueError('The threshold should be greater than 0')
148
+ threshold = min(threshold, 1 - 1.e-7)
149
+
150
+ for this_mask in mask_imgs:
151
+ mask, affine = _load_mask_img(this_mask, allow_empty=True)
152
+ if np.any(affine != ref_affine):
153
+ raise ValueError("All masks should have the same affine")
154
+ if np.any(mask.shape != ref_shape):
155
+ raise ValueError("All masks should have the same shape")
156
+
157
+ if grp_mask is None:
158
+ # We use int here because there may be a lot of masks to merge
159
+ grp_mask = _utils.as_ndarray(mask, dtype=int)
160
+ else:
161
+ # If this_mask is floating point and grp_mask is integer, numpy 2
162
+ # casting rules raise an error for in-place addition. Hence we do
163
+ # it long-hand.
164
+ # XXX should the masks be coerced to int before addition?
165
+ grp_mask += mask
166
+
167
+ grp_mask = grp_mask > (threshold * len(list(mask_imgs)))
168
+
169
+ if np.any(grp_mask > 0) and connected:
170
+ grp_mask = largest_connected_component(grp_mask)
171
+ grp_mask = _utils.as_ndarray(grp_mask, dtype=np.int8)
172
+ return new_img_like(_utils.check_niimg_3d(mask_imgs[0]), grp_mask,
173
+ ref_affine)
174
+
175
+
176
+ def _post_process_mask(mask, affine, opening=2, connected=True,
177
+ warning_msg=""):
178
+ """Helper function for mask computing functions.
179
+
180
+ Performs opening and keeps only the largest connected component if
181
+ ``connected=True``.
182
+ """
183
+ if opening:
184
+ opening = int(opening)
185
+ mask = ndimage.binary_erosion(mask, iterations=opening)
186
+ mask_any = mask.any()
187
+ if not mask_any:
188
+ warnings.warn("Computed an empty mask. %s" % warning_msg,
189
+ MaskWarning, stacklevel=2)
190
+ if connected and mask_any:
191
+ mask = largest_connected_component(mask)
192
+ if opening:
193
+ mask = ndimage.binary_dilation(mask, iterations=2 * opening)
194
+ mask = ndimage.binary_erosion(mask, iterations=opening)
195
+ return mask, affine
196
+
197
+
198
+ @_utils.fill_doc
199
+ def compute_epi_mask(epi_img, lower_cutoff=0.2, upper_cutoff=0.85,
200
+ connected=True, opening=2, exclude_zeros=False,
201
+ ensure_finite=True,
202
+ target_affine=None, target_shape=None,
203
+ memory=None, verbose=0,):
204
+ """Compute a brain mask from :term:`fMRI` data in 3D or
205
+ 4D :class:`numpy.ndarray`.
206
+
207
+ This is based on a heuristic proposed by T. Nichols:
208
+ find the least dense point of the histogram, between fractions
209
+ ``lower_cutoff`` and ``upper_cutoff`` of the total image histogram.
210
+
211
+ .. note::
212
+
213
+ In case of failure, it is usually advisable to
214
+ increase ``lower_cutoff``.
215
+
216
+ Parameters
217
+ ----------
218
+ epi_img : Niimg-like object
219
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
220
+ :term:`EPI` image, used to compute the mask.
221
+ 3D and 4D images are accepted.
222
+
223
+ .. note::
224
+ If a 3D image is given, we suggest to use the mean image.
225
+
226
+ %(lower_cutoff)s
227
+ Default=0.2.
228
+ %(upper_cutoff)s
229
+ Default=0.85.
230
+ %(connected)s
231
+ Default=True.
232
+ %(opening)s
233
+ Default=2.
234
+ ensure_finite : :obj:`bool`
235
+ If ensure_finite is True, the non-finite values (NaNs and infs)
236
+ found in the images will be replaced by zeros.
237
+ Default=True.
238
+
239
+ exclude_zeros : :obj:`bool`, optional
240
+ Consider zeros as missing values for the computation of the
241
+ threshold. This option is useful if the images have been
242
+ resliced with a large padding of zeros.
243
+ Default=False.
244
+ %(target_affine)s
245
+
246
+ .. note::
247
+ This parameter is passed to :func:`nilearn.image.resample_img`.
248
+
249
+ %(target_shape)s
250
+
251
+ .. note::
252
+ This parameter is passed to :func:`nilearn.image.resample_img`.
253
+
254
+ %(memory)s
255
+ %(verbose0)s
256
+
257
+ Returns
258
+ -------
259
+ mask : :class:`nibabel.nifti1.Nifti1Image`
260
+ The brain mask (3D image).
261
+ """
262
+ if verbose > 0:
263
+ print("EPI mask computation")
264
+
265
+ # Delayed import to avoid circular imports
266
+ from .image.image import _compute_mean
267
+ mean_epi, affine = \
268
+ cache(_compute_mean, memory)(epi_img, target_affine=target_affine,
269
+ target_shape=target_shape,
270
+ smooth=(1 if opening else False))
271
+
272
+ if ensure_finite:
273
+ # Get rid of memmapping
274
+ mean_epi = _utils.as_ndarray(mean_epi)
275
+ # SPM tends to put NaNs in the data outside the brain
276
+ mean_epi[np.logical_not(np.isfinite(mean_epi))] = 0
277
+ sorted_input = np.sort(np.ravel(mean_epi))
278
+ if exclude_zeros:
279
+ sorted_input = sorted_input[sorted_input != 0]
280
+ lower_cutoff = int(np.floor(lower_cutoff * len(sorted_input)))
281
+ upper_cutoff = min(int(np.floor(upper_cutoff * len(sorted_input))),
282
+ len(sorted_input) - 1)
283
+
284
+ delta = sorted_input[lower_cutoff + 1:upper_cutoff + 1] \
285
+ - sorted_input[lower_cutoff:upper_cutoff]
286
+ ia = delta.argmax()
287
+ threshold = 0.5 * (sorted_input[ia + lower_cutoff] +
288
+ sorted_input[ia + lower_cutoff + 1])
289
+
290
+ mask = mean_epi >= threshold
291
+
292
+ mask, affine = _post_process_mask(mask, affine, opening=opening,
293
+ connected=connected,
294
+ warning_msg="Are you sure that input "
295
+ "data are EPI images and not already detrended?")
296
+ return new_img_like(epi_img, mask, affine)
297
+
298
+
299
+ @_utils.fill_doc
300
+ def compute_multi_epi_mask(epi_imgs, lower_cutoff=0.2, upper_cutoff=0.85,
301
+ connected=True, opening=2, threshold=0.5,
302
+ target_affine=None, target_shape=None,
303
+ exclude_zeros=False, n_jobs=1,
304
+ memory=None, verbose=0):
305
+ """Compute a common mask for several sessions or subjects
306
+ of :term:`fMRI` data.
307
+
308
+ Uses the mask-finding algorithms to extract masks for each session
309
+ or subject, and then keeps only the main connected component of
310
+ a given fraction of the intersection of all the masks.
311
+
312
+ Parameters
313
+ ----------
314
+ epi_imgs : :obj:`list` of Niimg-like objects
315
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
316
+ A list of arrays, each item being a subject or a session.
317
+ 3D and 4D images are accepted.
318
+
319
+ .. note::
320
+
321
+ If 3D images are given, we suggest to use the mean image
322
+ of each session.
323
+
324
+ threshold : :obj:`float`, optional
325
+ The inter-session threshold: the fraction of the
326
+ total number of sessions for which a :term:`voxel` must be
327
+ in the mask to be kept in the common mask.
328
+ threshold=1 corresponds to keeping the intersection of all
329
+ masks, whereas threshold=0 is the union of all masks.
330
+ %(lower_cutoff)s
331
+ Default=0.2.
332
+ %(upper_cutoff)s
333
+ Default=0.85.
334
+ %(connected)s
335
+ Default=True.
336
+ exclude_zeros : :obj:`bool`, optional
337
+ Consider zeros as missing values for the computation of the
338
+ threshold. This option is useful if the images have been
339
+ resliced with a large padding of zeros.
340
+ Default=False.
341
+ %(target_affine)s
342
+
343
+ .. note::
344
+ This parameter is passed to :func:`nilearn.image.resample_img`.
345
+
346
+ %(target_shape)s
347
+
348
+ .. note::
349
+ This parameter is passed to :func:`nilearn.image.resample_img`.
350
+
351
+ %(memory)s
352
+ %(n_jobs)s
353
+
354
+ Returns
355
+ -------
356
+ mask : 3D :class:`nibabel.nifti1.Nifti1Image`
357
+ The brain mask.
358
+ """
359
+ if len(epi_imgs) == 0:
360
+ raise TypeError('An empty object - %r - was passed instead of an '
361
+ 'image or a list of images' % epi_imgs)
362
+ masks = Parallel(n_jobs=n_jobs, verbose=verbose)(
363
+ delayed(compute_epi_mask)(epi_img,
364
+ lower_cutoff=lower_cutoff,
365
+ upper_cutoff=upper_cutoff,
366
+ connected=connected,
367
+ opening=opening,
368
+ exclude_zeros=exclude_zeros,
369
+ target_affine=target_affine,
370
+ target_shape=target_shape,
371
+ memory=memory)
372
+ for epi_img in epi_imgs)
373
+
374
+ mask = intersect_masks(masks, connected=connected, threshold=threshold)
375
+ return mask
376
+
377
+
378
+ @_utils.fill_doc
379
+ def compute_background_mask(data_imgs, border_size=2,
380
+ connected=False, opening=False,
381
+ target_affine=None, target_shape=None,
382
+ memory=None, verbose=0):
383
+ """Compute a brain mask for the images by guessing the value of the
384
+ background from the border of the image.
385
+
386
+ Parameters
387
+ ----------
388
+ data_imgs : Niimg-like object
389
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
390
+ Images used to compute the mask. 3D and 4D images are accepted.
391
+
392
+ .. note::
393
+
394
+ If a 3D image is given, we suggest to use the mean image.
395
+
396
+ %(border_size)s
397
+ Default=2.
398
+ %(connected)s
399
+ Default=False.
400
+ %(opening)s
401
+ Default=False.
402
+ %(target_affine)s
403
+
404
+ .. note::
405
+ This parameter is passed to :func:`nilearn.image.resample_img`.
406
+
407
+ %(target_shape)s
408
+
409
+ .. note::
410
+ This parameter is passed to :func:`nilearn.image.resample_img`.
411
+
412
+ %(memory)s
413
+ %(verbose0)s
414
+
415
+ Returns
416
+ -------
417
+ mask : :class:`nibabel.nifti1.Nifti1Image`
418
+ The brain mask (3D image).
419
+ """
420
+ if verbose > 0:
421
+ print("Background mask computation")
422
+
423
+ data_imgs = _utils.check_niimg(data_imgs)
424
+
425
+ # Delayed import to avoid circular imports
426
+ from .image.image import _compute_mean
427
+ data, affine = cache(_compute_mean, memory)(data_imgs,
428
+ target_affine=target_affine,
429
+ target_shape=target_shape,
430
+ smooth=False)
431
+
432
+ if np.isnan(get_border_data(data, border_size)).any():
433
+ # We absolutely need to cater for NaNs as a background:
434
+ # SPM does that by default
435
+ mask = np.logical_not(np.isnan(data))
436
+ else:
437
+ background = np.median(get_border_data(data, border_size))
438
+ mask = data != background
439
+
440
+ mask, affine = _post_process_mask(mask, affine, opening=opening,
441
+ connected=connected,
442
+ warning_msg="Are you sure that input "
443
+ "images have a homogeneous background?")
444
+ return new_img_like(data_imgs, mask, affine)
445
+
446
+
447
+ @_utils.fill_doc
448
+ def compute_multi_background_mask(data_imgs, border_size=2, upper_cutoff=0.85,
449
+ connected=True, opening=2, threshold=0.5,
450
+ target_affine=None, target_shape=None,
451
+ exclude_zeros=False, n_jobs=1,
452
+ memory=None, verbose=0):
453
+ """Compute a common mask for several sessions or subjects of data.
454
+
455
+ Uses the mask-finding algorithms to extract masks for each session
456
+ or subject, and then keeps only the main connected component of
457
+ a given fraction of the intersection of all the masks.
458
+
459
+ Parameters
460
+ ----------
461
+ data_imgs : :obj:`list` of Niimg-like objects
462
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
463
+ A list of arrays, each item being a subject or a session.
464
+ 3D and 4D images are accepted.
465
+
466
+ .. note::
467
+ If 3D images are given, we suggest to use the mean image
468
+ of each session.
469
+
470
+ threshold : :obj:`float`, optional
471
+ The inter-session threshold: the fraction of the
472
+ total number of sessions for which a :term:`voxel` must be
473
+ in the mask to be kept in the common mask.
474
+ threshold=1 corresponds to keeping the intersection of all
475
+ masks, whereas threshold=0 is the union of all masks.
476
+ %(border_size)s
477
+ Default=2.
478
+ %(connected)s
479
+ Default=True.
480
+ %(target_affine)s
481
+
482
+ .. note::
483
+ This parameter is passed to :func:`nilearn.image.resample_img`.
484
+
485
+ %(target_shape)s
486
+
487
+ .. note::
488
+ This parameter is passed to :func:`nilearn.image.resample_img`.
489
+
490
+ %(memory)s
491
+ %(n_jobs)s
492
+
493
+ Returns
494
+ -------
495
+ mask : 3D :class:`nibabel.nifti1.Nifti1Image`
496
+ The brain mask.
497
+ """
498
+ if len(data_imgs) == 0:
499
+ raise TypeError('An empty object - %r - was passed instead of an '
500
+ 'image or a list of images' % data_imgs)
501
+ masks = Parallel(n_jobs=n_jobs, verbose=verbose)(
502
+ delayed(compute_background_mask)(img,
503
+ border_size=border_size,
504
+ connected=connected,
505
+ opening=opening,
506
+ target_affine=target_affine,
507
+ target_shape=target_shape,
508
+ memory=memory)
509
+ for img in data_imgs)
510
+
511
+ mask = intersect_masks(masks, connected=connected, threshold=threshold)
512
+ return mask
513
+
514
+
515
+ @_utils.fill_doc
516
+ def compute_brain_mask(target_img, threshold=.5, connected=True, opening=2,
517
+ memory=None, verbose=0, mask_type='whole-brain'):
518
+ """Compute the whole-brain, grey-matter or white-matter mask.
519
+ This mask is calculated by resampling the MNI152 1mm-resolution template mask onto the
520
+ target image.
521
+
522
+ Parameters
523
+ ----------
524
+ target_img : Niimg-like object
525
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
526
+ Images used to compute the mask. 3D and 4D images are accepted.
527
+ Only the shape and affine of ``target_img`` will be used here.
528
+
529
+ threshold : :obj:`float`, optional
530
+ The value under which the :term:`MNI` template is cut off.
531
+ Default=0.5
532
+ %(connected)s
533
+ Default=True.
534
+ %(opening)s
535
+ Default=2.
536
+ %(memory)s
537
+ %(verbose0)s
538
+ %(mask_type)s
539
+
540
+ .. versionadded:: 0.8.1
541
+
542
+ Returns
543
+ -------
544
+ mask : :class:`nibabel.nifti1.Nifti1Image`
545
+ The whole-brain mask (3D image).
546
+ """
547
+ if verbose > 0:
548
+ print("Template", mask_type, "mask computation")
549
+
550
+ target_img = _utils.check_niimg(target_img)
551
+
552
+ if mask_type == 'whole-brain':
553
+ template = load_mni152_template(resolution=1)
554
+ elif mask_type == 'gm':
555
+ template = load_mni152_gm_template(resolution=1)
556
+ elif mask_type == 'wm':
557
+ template = load_mni152_wm_template(resolution=1)
558
+ else:
559
+ raise ValueError(f"Unknown mask type {mask_type}. "
560
+ "Only 'whole-brain', 'gm' or 'wm' are accepted.")
561
+
562
+ resampled_template = cache(resampling.resample_to_img, memory)(
563
+ template, target_img)
564
+
565
+ mask = (get_data(resampled_template) >= threshold).astype("int8")
566
+
567
+ warning_message = (f"{mask_type} mask is empty, "
568
+ "lower the threshold or check your input FOV")
569
+ mask, affine = _post_process_mask(mask, target_img.affine, opening=opening,
570
+ connected=connected,
571
+ warning_msg=warning_message)
572
+
573
+ return new_img_like(target_img, mask, affine)
574
+
575
+
576
+ @deprecated("Function 'compute_multi_gray_matter_mask' has been renamed to "
577
+ "'compute_multi_brain_mask' and 'compute_multi_gray_matter_mask' "
578
+ "will be removed in release 0.10.0")
579
+ @_utils.fill_doc
580
+ def compute_multi_gray_matter_mask(target_imgs, threshold=.5,
581
+ connected=True, opening=2,
582
+ memory=None, verbose=0, n_jobs=1, **kwargs):
583
+ """Compute a mask corresponding to the gray matter part of the brain for
584
+ a list of images.
585
+
586
+ The gray matter part is calculated through the resampling of the MNI152
587
+ template gray matter mask onto the target image.
588
+
589
+ Parameters
590
+ ----------
591
+ target_imgs : :obj:`list` of Niimg-like object
592
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
593
+ Images used to compute the mask. 3D and 4D images are accepted.
594
+
595
+ .. note::
596
+ The images in this list must be of same shape and affine.
597
+ The mask is calculated with the first element of the list
598
+ since only the shape/affine of the image is used for this
599
+ masking strategy.
600
+
601
+ threshold : :obj:`float`, optional
602
+ The value under which the :term:`MNI` template is cut off.
603
+ Default=0.5.
604
+ %(connected)s
605
+ Default=True.
606
+ %(opening)s
607
+ Default=2.
608
+ %(memory)s
609
+ %(verbose0)s
610
+ %(n_jobs)s
611
+
612
+ .. note::
613
+ Argument not used but kept to fit the API.
614
+
615
+ **kwargs : optional arguments
616
+ arguments such as 'target_affine' are used in the call of other
617
+ masking strategies, which then would raise an error for this function
618
+ which does not need such arguments.
619
+
620
+ Returns
621
+ -------
622
+ mask : :class:`nibabel.nifti1.Nifti1Image`
623
+ The brain mask (3D image).
624
+
625
+ See also
626
+ --------
627
+ nilearn.masking.compute_brain_mask
628
+ """
629
+ return compute_multi_brain_mask(target_imgs=target_imgs,
630
+ threshold=threshold, connected=connected,
631
+ opening=opening, memory=memory,
632
+ verbose=verbose, n_jobs=n_jobs,
633
+ mask_type='whole-brain', **kwargs)
634
+
635
+
636
+ @_utils.fill_doc
637
+ def compute_multi_brain_mask(target_imgs, threshold=.5, connected=True,
638
+ opening=2, memory=None, verbose=0, n_jobs=1,
639
+ mask_type='whole-brain', **kwargs):
640
+ """Compute the whole-brain, grey-matter or white-matter mask for a list of
641
+ images. The mask is calculated through the resampling of the corresponding
642
+ MNI152 template mask onto the target image.
643
+
644
+ .. versionadded:: 0.8.1
645
+
646
+ Parameters
647
+ ----------
648
+ target_imgs : :obj:`list` of Niimg-like object
649
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
650
+ Images used to compute the mask. 3D and 4D images are accepted.
651
+
652
+ .. note::
653
+ The images in this list must be of same shape and affine.
654
+ The mask is calculated with the first element of the list
655
+ for only the shape/affine of the image is used for this
656
+ masking strategy.
657
+
658
+ threshold : :obj:`float`, optional
659
+ The value under which the :term:`MNI` template is cut off.
660
+ Default=0.5.
661
+ %(connected)s
662
+ Default=True.
663
+ %(opening)s
664
+ Default=2.
665
+ %(mask_type)s
666
+ %(memory)s
667
+ %(verbose0)s
668
+ %(n_jobs)s
669
+
670
+ .. note::
671
+ Argument not used but kept to fit the API
672
+
673
+ **kwargs : optional arguments
674
+ Arguments such as 'target_affine' are used in the call of other
675
+ masking strategies, which then would raise an error for this function
676
+ which does not need such arguments.
677
+
678
+ Returns
679
+ -------
680
+ mask : :class:`nibabel.nifti1.Nifti1Image`
681
+ The brain mask (3D image).
682
+
683
+ See also
684
+ --------
685
+ nilearn.masking.compute_brain_mask
686
+ """
687
+ if len(target_imgs) == 0:
688
+ raise TypeError('An empty object - %r - was passed instead of an '
689
+ 'image or a list of images' % target_imgs)
690
+
691
+ # Check images in the list have the same FOV without loading them in memory
692
+ imgs_generator = _utils.check_niimg(target_imgs, return_iterator=True)
693
+ for _ in imgs_generator:
694
+ pass
695
+
696
+ mask = compute_brain_mask(target_imgs[0], threshold=threshold,
697
+ connected=connected, opening=opening,
698
+ memory=memory, verbose=verbose,
699
+ mask_type=mask_type)
700
+ return mask
701
+
702
+
703
+ #
704
+ # Time series extraction
705
+ #
706
+
707
+ @fill_doc
708
+ def apply_mask(imgs, mask_img, dtype='f',
709
+ smoothing_fwhm=None, ensure_finite=True):
710
+ """Extract signals from images using specified mask.
711
+
712
+ Read the time series from the given Niimg-like object, using the mask.
713
+
714
+ Parameters
715
+ -----------
716
+ imgs : :obj:`list` of 4D Niimg-like objects
717
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
718
+ Images to be masked. Lists of lists of 3D images are also accepted.
719
+
720
+ mask_img : Niimg-like object
721
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
722
+ 3D mask array: True where a :term:`voxel` should be used.
723
+
724
+ dtype: numpy dtype or 'f'
725
+ The dtype of the output. If 'f', any float output is acceptable,
726
+ and if the data is stored on the disk as floats the data type
727
+ will not be changed.
728
+ %(smoothing_fwhm)s
729
+
730
+ .. note::
731
+
732
+ Implies ensure_finite=True.
733
+
734
+ ensure_finite : :obj:`bool`
735
+ If ensure_finite is True, the non-finite values (NaNs and
736
+ infs) found in the images will be replaced by zeros.
737
+ Default=True.
738
+
739
+ Returns
740
+ --------
741
+ session_series : :class:`numpy.ndarray`
742
+ 2D array of series with shape (image number, :term:`voxel` number)
743
+
744
+ Notes
745
+ -----
746
+ When using smoothing, ``ensure_finite`` is set to True, as non-finite
747
+ values would spread across the image.
748
+ """
749
+ mask_img = _utils.check_niimg_3d(mask_img)
750
+ mask, mask_affine = _load_mask_img(mask_img)
751
+ mask_img = new_img_like(mask_img, mask, mask_affine)
752
+ return _apply_mask_fmri(imgs, mask_img, dtype=dtype,
753
+ smoothing_fwhm=smoothing_fwhm,
754
+ ensure_finite=ensure_finite)
755
+
756
+
757
+ def _apply_mask_fmri(imgs, mask_img, dtype='f',
758
+ smoothing_fwhm=None, ensure_finite=True):
759
+ """Same as :func:`nilearn.masking.apply_mask`.
760
+
761
+ The only difference with :func:`nilearn.masking.apply_mask` is that
762
+ some costly checks on ``mask_img`` are not performed: ``mask_img`` is
763
+ assumed to contain only two different values (this is checked for in
764
+ :func:`nilearn.masking.apply_mask`, not in this function).
765
+ """
766
+ mask_img = _utils.check_niimg_3d(mask_img)
767
+ mask_affine = mask_img.affine
768
+ mask_data = _utils.as_ndarray(get_data(mask_img),
769
+ dtype=bool)
770
+
771
+ if smoothing_fwhm is not None:
772
+ ensure_finite = True
773
+
774
+ imgs_img = _utils.check_niimg(imgs)
775
+ affine = imgs_img.affine[:3, :3]
776
+
777
+ if not np.allclose(mask_affine, imgs_img.affine):
778
+ raise ValueError('Mask affine: \n%s\n is different from img affine:'
779
+ '\n%s' % (str(mask_affine),
780
+ str(imgs_img.affine)))
781
+
782
+ if not mask_data.shape == imgs_img.shape[:3]:
783
+ raise ValueError('Mask shape: %s is different from img shape:%s'
784
+ % (str(mask_data.shape), str(imgs_img.shape[:3])))
785
+
786
+ # All the following has been optimized for C order.
787
+ # Time that may be lost in conversion here is regained multiple times
788
+ # afterward, especially if smoothing is applied.
789
+ series = _safe_get_data(imgs_img)
790
+
791
+ if dtype == 'f':
792
+ if series.dtype.kind == 'f':
793
+ dtype = series.dtype
794
+ else:
795
+ dtype = np.float32
796
+ series = _utils.as_ndarray(series, dtype=dtype, order="C",
797
+ copy=True)
798
+ del imgs_img # frees a lot of memory
799
+
800
+ # Delayed import to avoid circular imports
801
+ from .image.image import _smooth_array
802
+ _smooth_array(series, affine, fwhm=smoothing_fwhm,
803
+ ensure_finite=ensure_finite, copy=False)
804
+ return series[mask_data].T
805
+
806
+
807
+ def _unmask_3d(X, mask, order="C"):
808
+ """Take masked data and bring them back to 3D (space only).
809
+
810
+ Parameters
811
+ ----------
812
+ X : :class:`numpy.ndarray`
813
+ Masked data. shape: (features,)
814
+
815
+ mask : :class:`numpy.ndarray`
817
+ Mask. mask.ndim must be equal to 3, and dtype *must* be bool.
818
+ """
819
+ if mask.dtype != bool:
820
+ raise TypeError("mask must be a boolean array")
821
+ if X.ndim != 1:
822
+ raise TypeError("X must be a 1-dimensional array")
823
+ n_features = mask.sum()
824
+ if X.shape[0] != n_features:
825
+ raise TypeError('X must be of shape (samples, %d).' % n_features)
826
+
827
+ data = np.zeros(
828
+ (mask.shape[0], mask.shape[1], mask.shape[2]),
829
+ dtype=X.dtype, order=order)
830
+ data[mask] = X
831
+ return data
832
+
833
+
834
+ def _unmask_4d(X, mask, order="C"):
835
+ """Take masked data and bring them back to 4D.
836
+
837
+ Parameters
838
+ ----------
839
+ X : :class:`numpy.ndarray`
840
+ Masked data. shape: (samples, features)
841
+
842
+ mask : :class:`numpy.ndarray`
843
+ Mask. mask.ndim must be equal to 4, and dtype *must* be bool.
844
+
845
+ Returns
846
+ -------
847
+ data : :class:`numpy.ndarray`
848
+ Unmasked data.
849
+ Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[0])
850
+ """
851
+ if mask.dtype != bool:
852
+ raise TypeError("mask must be a boolean array")
853
+ if X.ndim != 2:
854
+ raise TypeError("X must be a 2-dimensional array")
855
+ n_features = mask.sum()
856
+ if X.shape[1] != n_features:
857
+ raise TypeError('X must be of shape (samples, %d).' % n_features)
858
+
859
+ data = np.zeros(mask.shape + (X.shape[0],), dtype=X.dtype, order=order)
860
+ data[mask, :] = X.T
861
+ return data
862
+
863
+
864
+ def unmask(X, mask_img, order="F"):
865
+ """Take masked data and bring them back into 3D/4D.
866
+
867
+ This function can be applied to a list of masked data.
868
+
869
+ Parameters
870
+ ----------
871
+ X : :class:`numpy.ndarray` (or :obj:`list` of)
872
+ Masked data. shape: (samples #, features #).
873
+ If X is one-dimensional, it is assumed that samples# == 1.
874
+
875
+ mask_img : Niimg-like object
876
+ See https://nilearn.github.io/stable/manipulating_images/input_output.html # noqa:E501
877
+ Must be 3-dimensional.
878
+
879
+ Returns
880
+ -------
881
+ data : :class:`nibabel.nifti1.Nifti1Image`
882
+ Unmasked data. Depending on the shape of X, data can have
883
+ different shapes:
884
+
885
+ - X.ndim == 2:
886
+ Shape: (mask.shape[0], mask.shape[1], mask.shape[2], X.shape[0])
887
+ - X.ndim == 1:
888
+ Shape: (mask.shape[0], mask.shape[1], mask.shape[2])
889
+ """
890
+ # Handle lists. This can be a list of other lists / arrays, or a list or
891
+ # numbers. In the latter case skip.
892
+ if isinstance(X, list) and not isinstance(X[0], numbers.Number):
893
+ ret = []
894
+ for x in X:
895
+ ret.append(unmask(x, mask_img, order=order)) # 1-level recursion
896
+ return ret
897
+
898
+ # The code after this block assumes that X is an ndarray; ensure this
899
+ X = np.asanyarray(X)
900
+
901
+ mask_img = _utils.check_niimg_3d(mask_img)
902
+ mask, affine = _load_mask_img(mask_img)
903
+
904
+ if np.ndim(X) == 2:
905
+ unmasked = _unmask_4d(X, mask, order=order)
906
+ elif np.ndim(X) == 1:
907
+ unmasked = _unmask_3d(X, mask, order=order)
908
+ else:
909
+ raise TypeError("Masked data X must be 2D or 1D array; "
910
+ "got shape: %s" % str(X.shape))
911
+
912
+ return new_img_like(mask_img, unmasked, affine)
913
+
914
+
915
+ def _unmask_from_to_3d_array(w, mask):
916
+ """Unmask an image into whole brain, with off-mask :term:`voxels<voxel>`
917
+ set to 0.
918
+
919
+ Used as a stand-alone function in low-level decoding (SpaceNet) and
920
+ clustering (ReNA) functions.
921
+
922
+ Parameters
923
+ ----------
924
+ w : :class:`numpy.ndarray`, shape (n_features,)
925
+ The image to be unmasked.
926
+
927
+ mask : :class:`numpy.ndarray`, shape (nx, ny, nz)
928
+ The mask used in the unmasking operation. It is required that
929
+ ``mask.sum() == n_features``.
930
+
931
+ Returns
932
+ -------
933
+ out : 3D :class:`numpy.ndarray` (same shape as `mask`)
934
+ The unmasked version of `w`.
935
+ """
936
+ if mask.sum() != len(w):
937
+ raise ValueError("Expecting mask.sum() == len(w).")
938
+ out = np.zeros(mask.shape, dtype=w.dtype)
939
+ out[mask] = w
940
+ return out
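The functions added above form a compute-mask / apply / unmask round trip. The following sketch is editorial and uses synthetic data (the array values and shapes are made up); it assumes `numpy` and `nibabel` are available, which nilearn itself requires:

import numpy as np
import nibabel as nib
from nilearn import masking

# Synthetic 4D "EPI": the upper half of the volume is bright, the rest is zero.
data = np.zeros((16, 16, 16, 5))
data[:, :, 8:, :] = 100 + np.random.rand(16, 16, 8, 5)
epi_img = nib.Nifti1Image(data, affine=np.eye(4))

mask_img = masking.compute_epi_mask(epi_img)      # 3D brain mask image
series = masking.apply_mask(epi_img, mask_img)    # (n_scans, n_voxels) array
restored = masking.unmask(series, mask_img)       # back to a 4D image
print(series.shape, restored.shape)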
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/signal.py ADDED
@@ -0,0 +1,870 @@
1
+ """
2
+ Preprocessing functions for time series.
3
+
4
+ All functions in this module should take X matrices with samples x
5
+ features
6
+ """
7
+ # Authors: Alexandre Abraham, Gael Varoquaux, Philippe Gervais
8
+ # License: simplified BSD
9
+
10
+ import warnings
11
+
12
+ import numpy as np
13
+ import pandas as pd
14
+ from scipy import linalg, signal as sp_signal
15
+ from sklearn.utils import gen_even_slices, as_float_array
16
+
17
+ from ._utils.numpy_conversions import csv_to_array, as_ndarray
18
+ from ._utils import fill_doc
19
+
20
+
21
+ availiable_filters = ['butterworth',
22
+ 'cosine'
23
+ ]
24
+
25
+
26
+ def _standardize(signals, detrend=False, standardize='zscore'):
27
+ """Center and standardize a given signal (time is along first axis).
28
+
29
+ Parameters
30
+ ----------
31
+ signals : :class:`numpy.ndarray`
32
+ Timeseries to standardize.
33
+
34
+ detrend : :obj:`bool`, optional
35
+ If detrending of timeseries is requested.
36
+ Default=False.
37
+
38
+ standardize : {'zscore', 'psc', True, False}, optional
39
+ Strategy to standardize the signal:
40
+
41
+ - 'zscore': The signal is z-scored. Timeseries are shifted
42
+ to zero mean and scaled to unit variance.
43
+ - 'psc': Timeseries are shifted to zero mean value and scaled
44
+ to percent signal change (as compared to original mean signal).
45
+ - True: The signal is z-scored (same as option `zscore`).
46
+ Timeseries are shifted to zero mean and scaled to unit variance.
47
+ - False: Do not standardize the data.
48
+
49
+ Default='zscore'.
50
+
51
+ Returns
52
+ -------
53
+ std_signals : :class:`numpy.ndarray`
54
+ Copy of signals, standardized.
55
+ """
56
+ if standardize not in [True, False, 'psc', 'zscore']:
57
+ raise ValueError('{} is not a valid standardize strategy.'
58
+ .format(standardize))
59
+
60
+ if detrend:
61
+ signals = _detrend(signals, inplace=False)
62
+ else:
63
+ signals = signals.copy()
64
+
65
+ if standardize:
66
+ if signals.shape[0] == 1:
67
+ warnings.warn('Standardization of 3D signal has been requested but '
68
+ 'would lead to zero values. Skipping.')
69
+ return signals
70
+
71
+ elif (standardize == 'zscore') or (standardize is True):
72
+ if not detrend:
73
+ # remove mean if not already detrended
74
+ signals = signals - signals.mean(axis=0)
75
+
76
+ std = signals.std(axis=0)
77
+ std[std < np.finfo(np.float64).eps] = 1. # avoid numerical problems
78
+ signals /= std
79
+
80
+ elif standardize == 'psc':
81
+ mean_signal = signals.mean(axis=0)
82
+ invalid_ix = np.absolute(mean_signal) < np.finfo(np.float64).eps
83
+ signals = (signals - mean_signal) / np.absolute(mean_signal)
84
+ signals *= 100
85
+
86
+ if np.any(invalid_ix):
87
+ warnings.warn('psc standardization strategy is meaningless '
88
+ 'for features that have a mean of 0. '
89
+ 'These time series are set to 0.')
90
+ signals[:, invalid_ix] = 0
91
+
92
+ return signals
93
+
94
+
95
+ def _mean_of_squares(signals, n_batches=20):
96
+ """Compute mean of squares for each signal.
97
+
98
+ This function is equivalent to:
99
+
100
+ .. code-block:: python
101
+
102
+ var = np.copy(signals)
103
+ var **= 2
104
+ var = var.mean(axis=0)
105
+
106
+ but uses a lot less memory.
107
+
108
+ Parameters
109
+ ----------
110
+ signals : :class:`numpy.ndarray`, shape (n_samples, n_features)
111
+ Signal whose mean of squares must be computed.
112
+
113
+ n_batches : :obj:`int`, optional
114
+ Number of batches to use in the computation.
115
+
116
+ .. note::
117
+ Tweaking this value can lead to variation of memory usage
118
+ and computation time. The higher the value, the lower the
119
+ memory consumption.
120
+
121
+ Default=20.
122
+
123
+ Returns
124
+ -------
125
+ var : :class:`numpy.ndarray`
126
+ 1D array holding the mean of squares.
127
+ """
128
+ # No batching for small arrays
129
+ if signals.shape[1] < 500:
130
+ n_batches = 1
131
+
132
+ # Fastest for C order
133
+ var = np.empty(signals.shape[1])
134
+ for batch in gen_even_slices(signals.shape[1], n_batches):
135
+ tvar = np.copy(signals[:, batch])
136
+ tvar **= 2
137
+ var[batch] = tvar.mean(axis=0)
138
+
139
+ return var
140
+
141
+
142
+ def _row_sum_of_squares(signals, n_batches=20):
143
+ """Compute sum of squares for each signal.
144
+
145
+ This function is equivalent to:
146
+
147
+ .. code-block:: python
148
+
149
+ signals **= 2
150
+ signals = signals.sum(axis=0)
151
+
152
+ but uses a lot less memory.
153
+
154
+ Parameters
155
+ ----------
156
+ signals : :class:`numpy.ndarray`, shape (n_samples, n_features)
157
+ Signal whose sum of squares must be computed.
158
+
159
+ n_batches : :obj:`int`, optional
160
+ Number of batches to use in the computation.
161
+
162
+ .. note::
163
+ Tweaking this value can lead to variation of memory usage
164
+ and computation time. The higher the value, the lower the
165
+ memory consumption.
166
+
167
+ Default=20.
168
+
169
+ Returns
170
+ -------
171
+ var : :class:`numpy.ndarray`
172
+ 1D array holding the sum of squares.
173
+ """
174
+ # No batching for small arrays
175
+ if signals.shape[1] < 500:
176
+ n_batches = 1
177
+
178
+ # Fastest for C order
179
+ var = np.empty(signals.shape[1])
180
+ for batch in gen_even_slices(signals.shape[1], n_batches):
181
+ var[batch] = np.sum(signals[:, batch] ** 2, 0)
182
+
183
+ return var
184
+
185
+
186
+ def _detrend(signals, inplace=False, type="linear", n_batches=10):
187
+ """Detrend columns of input array.
188
+
189
+ Signals are supposed to be columns of `signals`.
190
+ This function is significantly faster than :func:`scipy.signal.detrend`
191
+ in this case and uses a lot less memory.
192
+
193
+ Parameters
194
+ ----------
195
+ signals : :class:`numpy.ndarray`
196
+ This parameter must be two-dimensional.
197
+ Signals to detrend. A signal is a column.
198
+
199
+ inplace : :obj:`bool`, optional
200
+ Tells if the computation must be made inplace or not.
201
+ Default=False.
202
+
203
+ type : {"linear", "constant"}, optional
204
+ Detrending type, either "linear" or "constant".
205
+ See also :func:`scipy.signal.detrend`.
206
+ Default="linear".
207
+
208
+ n_batches : :obj:`int`, optional
209
+ Number of batches to use in the computation.
210
+
211
+ .. note::
212
+ Tweaking this value can lead to variation of memory usage
213
+ and computation time. The higher the value, the lower the
214
+ memory consumption.
215
+
216
+ Returns
217
+ -------
218
+ detrended_signals : :class:`numpy.ndarray`
219
+ Detrended signals. The shape is that of ``signals``.
220
+
221
+ Notes
222
+ -----
223
+ If a signal of length 1 is given, it is returned unchanged.
224
+ """
225
+ signals = as_float_array(signals, copy=not inplace)
226
+ if signals.shape[0] == 1:
227
+ warnings.warn('Detrending of 3D signal has been requested but '
228
+ 'would lead to zero values. Skipping.')
229
+ return signals
230
+
231
+ signals -= np.mean(signals, axis=0)
232
+ if type == "linear":
233
+ # Keeping "signals" dtype avoids some type conversion further down,
234
+ # and can save a lot of memory if dtype is single-precision.
235
+ regressor = np.arange(signals.shape[0], dtype=signals.dtype)
236
+ regressor -= regressor.mean()
237
+ std = np.sqrt((regressor ** 2).sum())
238
+ # avoid numerical problems
239
+ if not std < np.finfo(np.float64).eps:
240
+ regressor /= std
241
+ regressor = regressor[:, np.newaxis]
242
+
243
+ # No batching for small arrays
244
+ if signals.shape[1] < 500:
245
+ n_batches = 1
246
+
247
+ # This is fastest for C order.
248
+ for batch in gen_even_slices(signals.shape[1], n_batches):
249
+ signals[:, batch] -= np.dot(regressor[:, 0], signals[:, batch]
250
+ ) * regressor
251
+ return signals
252
+
253
+
254
+ def _check_wn(btype, freq, nyq):
255
+ wn = freq / float(nyq)
256
+ if wn >= 1.:
257
+ # Results looked unstable when the critical frequencies were
258
+ # exactly at the Nyquist frequency; see the SciPy issue at
259
+ # https://github.com/scipy/scipy/issues/6265. Before SciPy 1.0.0 the
260
+ # constraint was "wn should be between 0 and 1"; afterwards it became
261
+ # "0 < wn < 1". Because of the unstable results reported in that issue,
262
+ # we force the critical frequency to be slightly less than 1, never 1.
263
+ wn = 1 - 10 * np.finfo(1.).eps
264
+ warnings.warn(
265
+ 'The frequency specified for the %s pass filter is '
266
+ 'too high to be handled by a digital filter (above the '
267
+ 'Nyquist frequency). It has been lowered to %.2f (Nyquist '
268
+ 'frequency).' % (btype, wn))
269
+
270
+ if wn < 0.0: # equal to 0.0 is okay
271
+ wn = np.finfo(1.).eps
272
+ warnings.warn(
273
+ 'The frequency specified for the %s pass filter is '
274
+ 'too low to be handled by a digital filter (must be non-negative).'
275
+ ' It has been set to eps: %.5e' % (btype, wn))
276
+
277
+ return wn
278
+
279
+
280
+ @fill_doc
281
+ def butterworth(signals, sampling_rate, low_pass=None, high_pass=None,
282
+ order=5, copy=False):
283
+ """Apply a low-pass, high-pass or band-pass
284
+ `Butterworth filter <https://en.wikipedia.org/wiki/Butterworth_filter>`_.
285
+
286
+ Apply a filter to remove signal below the `low` frequency and above the
287
+ `high` frequency.
288
+
289
+ Parameters
290
+ ----------
291
+ signals : :class:`numpy.ndarray` (1D sequence or n_samples x n_sources)
292
+ Signals to be filtered. A signal is assumed to be a column
293
+ of `signals`.
294
+
295
+ sampling_rate : :obj:`float`
296
+ Number of samples per time unit (sample frequency).
297
+ %(low_pass)s
298
+ %(high_pass)s
299
+ order : :obj:`int`, optional
300
+ Order of the `Butterworth filter
301
+ <https://en.wikipedia.org/wiki/Butterworth_filter>`_.
302
+ When filtering signals, the filter has a decay to avoid ringing.
303
+ Increasing the order sharpens this decay. Be aware that very high
304
+ orders can lead to numerical instability.
305
+ Default=5.
306
+
307
+ copy : :obj:`bool`, optional
308
+ If False, `signals` is modified inplace, and memory consumption is
309
+ lower than for ``copy=True``, though computation time is higher.
310
+
311
+ Returns
312
+ -------
313
+ filtered_signals : :class:`numpy.ndarray`
314
+ Signals filtered according to the given parameters.
315
+ """
316
+ if low_pass is None and high_pass is None:
317
+ if copy:
318
+ return signals.copy()
319
+ else:
320
+ return signals
321
+
322
+ if low_pass is not None and high_pass is not None \
323
+ and high_pass >= low_pass:
324
+ raise ValueError(
325
+ "High pass cutoff frequency (%f) is greater or equal"
326
+ "to low pass filter frequency (%f). This case is not handled "
327
+ "by this function."
328
+ % (high_pass, low_pass))
329
+
330
+ nyq = sampling_rate * 0.5
331
+
332
+ critical_freq = []
333
+ if high_pass is not None:
334
+ btype = 'high'
335
+ critical_freq.append(_check_wn(btype, high_pass, nyq))
336
+
337
+ if low_pass is not None:
338
+ btype = 'low'
339
+ critical_freq.append(_check_wn(btype, low_pass, nyq))
340
+
341
+ if len(critical_freq) == 2:
342
+ btype = 'band'
343
+ else:
344
+ critical_freq = critical_freq[0]
345
+
346
+ b, a = sp_signal.butter(order, critical_freq, btype=btype, output='ba')
347
+ if signals.ndim == 1:
348
+ # 1D case
349
+ output = sp_signal.filtfilt(b, a, signals)
350
+ if copy: # filtfilt does a copy in all cases.
351
+ signals = output
352
+ else:
353
+ signals[...] = output
354
+ else:
355
+ if copy:
356
+ # No way to save memory when a copy has been requested,
357
+ # because filtfilt does out-of-place processing
358
+ signals = sp_signal.filtfilt(b, a, signals, axis=0)
359
+ else:
360
+ # Lesser memory consumption, slower.
361
+ for timeseries in signals.T:
362
+ timeseries[:] = sp_signal.filtfilt(b, a, timeseries)
363
+
364
+ # results returned in-place
365
+
366
+ return signals
367
+
368
+
369
+ @fill_doc
370
+ def high_variance_confounds(series, n_confounds=5, percentile=2.,
371
+ detrend=True):
372
+ """Return confounds time series extracted from series with highest
373
+ variance.
374
+
375
+ Parameters
376
+ ----------
377
+ series : :class:`numpy.ndarray`
378
+ Timeseries. A timeseries is a column in the "series" array.
379
+ shape (sample number, feature number)
380
+
381
+ n_confounds : :obj:`int`, optional
382
+ Number of confounds to return. Default=5.
383
+
384
+ percentile : :obj:`float`, optional
385
+ Highest-variance series percentile to keep before computing the
386
+ singular value decomposition, 0. <= `percentile` <= 100.
387
+ ``series.shape[0] * percentile / 100`` must be greater
388
+ than ``n_confounds``. Default=2.0.
389
+ %(detrend)s
390
+ Default=True.
391
+
392
+ Returns
393
+ -------
394
+ v : :class:`numpy.ndarray`
395
+ Highest variance confounds. Shape: (samples, n_confounds)
396
+
397
+ Notes
398
+ -----
399
+ This method is related to what has been published in the literature
400
+ as 'CompCor' :footcite:`BEHZADI200790`.
401
+
402
+ The implemented algorithm does the following:
403
+
404
+ - compute sum of squares for each time series (no mean removal)
405
+ - keep a given percentile of series with highest variances (percentile)
406
+ - compute an svd of the extracted series
407
+ - return a given number (n_confounds) of series from the svd with
408
+ highest singular values.
409
+
410
+ References
411
+ ----------
412
+ .. footbibliography::
413
+
414
+ See also
415
+ --------
416
+ nilearn.image.high_variance_confounds
417
+ """
418
+ if detrend:
419
+ series = _detrend(series) # copy
420
+
421
+ # Retrieve the voxels|features with highest variance
422
+
423
+ # Compute variance without mean removal.
424
+ var = _mean_of_squares(series)
425
+ var_thr = np.nanpercentile(var, 100. - percentile)
426
+ series = series[:, var > var_thr] # extract columns (i.e. features)
427
+ # Return the singular vectors with largest singular values
428
+ # We solve the symmetric eigenvalue problem here, increasing stability
429
+ s, u = linalg.eigh(series.dot(series.T) / series.shape[0])
430
+ ix_ = np.argsort(s)[::-1]
431
+ u = u[:, ix_[:n_confounds]].copy()
432
+ return u
433
+
434
+
435
+ def _ensure_float(data):
436
+ "Make sure that data is a float type"
437
+ if not data.dtype.kind == 'f':
438
+ if data.dtype.itemsize == 8:  # itemsize is an int (bytes per element)
439
+ data = data.astype(np.float64)
440
+ else:
441
+ data = data.astype(np.float32)
442
+ return data
443
+
444
+
445
+ @fill_doc
446
+ def clean(signals, runs=None, detrend=True, standardize='zscore',
447
+ sample_mask=None, confounds=None, standardize_confounds=True,
448
+ filter='butterworth', low_pass=None, high_pass=None, t_r=2.5,
449
+ ensure_finite=False):
450
+ """Improve :term:`SNR` on masked :term:`fMRI` signals.
451
+
452
+ This function can do several things on the input signals, in
453
+ the following order:
454
+
455
+ - detrend
456
+ - low- and high-pass filter
457
+ - remove confounds
458
+ - standardize
459
+
460
+ Low-pass filtering improves specificity.
461
+
462
+ High-pass filtering should be kept small, to keep some sensitivity.
463
+
464
+ Filtering is only meaningful on evenly-sampled signals.
465
+
466
+ According to :footcite:`Lindquist407676`, removal of confounds will be done
467
+ orthogonally to temporal filters (low- and/or high-pass filters), if both
468
+ are specified.
469
+
470
+ Parameters
471
+ ----------
472
+ signals : :class:`numpy.ndarray`
473
+ Timeseries. Must have shape (instant number, features number).
474
+ This array is not modified.
475
+
476
+ runs : :class:`numpy.ndarray`, optional
477
+ Add a run level to the cleaning process. Each run will be
478
+ cleaned independently. Must be a 1D array of n_samples elements.
479
+ Default is None.
480
+
481
+ confounds : :class:`numpy.ndarray`, :obj:`str`,\
482
+ :class:`pandas.DataFrame` or :obj:`list` of
483
+ Confounds timeseries. Shape must be
484
+ (instant number, confound number), or just (instant number,)
485
+ The number of time instants in ``signals`` and ``confounds`` must be
486
+ identical (i.e. ``signals.shape[0] == confounds.shape[0]``).
487
+ If a string is provided, it is assumed to be the name of a csv file
488
+ containing signals as columns, with an optional one-line header.
489
+ If a list is provided, all confounds are removed from the input
490
+ signal, as if all were in the same array.
491
+ Default is None.
492
+
493
+ sample_mask : None, :class:`numpy.ndarray`, :obj:`list`,\
494
+ :obj:`tuple`, or :obj:`list` of
495
+ shape: (number of scans - number of volumes removed, )
496
+ Masks the niimgs along time/fourth dimension to perform scrubbing
497
+ (remove volumes with high motion) and/or non-steady-state volumes.
498
+ This masking step is applied before signal cleaning. When supplying run
499
+ information, sample_mask must be a list containing sets of indexes for
500
+ each run.
501
+
502
+ .. versionadded:: 0.8.0
503
+
504
+ Default is None.
505
+ %(t_r)s
506
+ Default=2.5.
507
+ filter : {'butterworth', 'cosine', False}, optional
508
+ Filtering methods:
509
+
510
+ - 'butterworth': perform butterworth filtering.
511
+ - 'cosine': generate discrete cosine transformation drift terms.
512
+ - False: Do not perform filtering.
513
+
514
+ Default='butterworth'.
515
+ %(low_pass)s
516
+
517
+ .. note::
518
+ `low_pass` is not implemented for filter='cosine'.
519
+
520
+ %(high_pass)s
521
+ %(detrend)s
522
+ standardize : {'zscore', 'psc', False}, optional
523
+ Strategy to standardize the signal:
524
+
525
+ - 'zscore': The signal is z-scored. Timeseries are shifted
526
+ to zero mean and scaled to unit variance.
527
+ - 'psc': Timeseries are shifted to zero mean value and scaled
528
+ to percent signal change (as compared to original mean signal).
529
+ - True: The signal is z-scored (same as option `zscore`).
530
+ Timeseries are shifted to zero mean and scaled to unit variance.
531
+ - False: Do not standardize the data.
532
+
533
+ Default="zscore".
534
+ %(standardize_confounds)s
535
+ %(ensure_finite)s
536
+ Default=False.
537
+
538
+ Returns
539
+ -------
540
+ cleaned_signals : :class:`numpy.ndarray`
541
+ Input signals, cleaned. Same shape as `signals`.
542
+
543
+ Notes
544
+ -----
545
+ Confounds removal is based on a projection on the orthogonal
546
+ of the signal space. See :footcite:`Friston1994`.
547
+
548
+ Orthogonalization between temporal filters and confound removal is based on
549
+ suggestions in :footcite:`Lindquist407676`.
550
+
551
+ References
552
+ ----------
553
+ .. footbibliography::
554
+
555
+ See Also
556
+ --------
557
+ nilearn.image.clean_img
558
+ """
559
+ # Raise warning for some parameter combinations when confounds present
560
+ if confounds is not None:
561
+ _check_signal_parameters(detrend, standardize_confounds)
562
+
563
+ # Read confounds and signals
564
+ signals, runs, confounds = _sanitize_inputs(
565
+ signals, runs, confounds, sample_mask, ensure_finite
566
+ )
567
+ use_filter = _check_filter_parameters(filter, low_pass, high_pass, t_r)
568
+ # Restrict the signal to the orthogonal of the confounds
569
+ if runs is not None:
570
+ signals = _process_runs(signals, runs, detrend, standardize,
571
+ confounds, low_pass, high_pass, t_r)
572
+
573
+ # Detrend
574
+ # Detrending and filtering should also apply to confounds, if present;
575
+ # keep filters orthogonal (according to Lindquist et al. (2018))
576
+ if detrend:
577
+ mean_signals = signals.mean(axis=0)
578
+ signals = _standardize(signals, standardize=False, detrend=detrend)
579
+ if confounds is not None:
580
+ confounds = _standardize(confounds, standardize=False,
581
+ detrend=detrend)
582
+ if use_filter:
583
+ # check if filter parameters are satisfied and filter according to the strategy
584
+ signals, confounds = _filter_signal(signals, confounds, filter,
585
+ low_pass, high_pass, t_r)
586
+
587
+ # Remove confounds
588
+ if confounds is not None:
589
+ confounds = _standardize(confounds, standardize=standardize_confounds,
590
+ detrend=False)
591
+ if not standardize_confounds:
592
+ # Improve numerical stability by controlling the range of
593
+ # confounds. We don't rely on _standardize as it removes any
594
+ # constant contribution to confounds.
595
+ confound_max = np.max(np.abs(confounds), axis=0)
596
+ confound_max[confound_max == 0] = 1
597
+ confounds /= confound_max
598
+
599
+ # Pivoting in qr decomposition was added in scipy 0.10
600
+ Q, R, _ = linalg.qr(confounds, mode='economic', pivoting=True)
601
+ Q = Q[:, np.abs(np.diag(R)) > np.finfo(np.float64).eps * 100.]
602
+ signals -= Q.dot(Q.T).dot(signals)
603
+
604
+ # Standardize
605
+ if detrend and (standardize == 'psc'):
606
+ # If the signal is detrended, we have to know the original mean
607
+ # signal to calculate the psc.
608
+ signals = _standardize(signals + mean_signals, standardize=standardize,
609
+ detrend=False)
610
+ else:
611
+ signals = _standardize(signals, standardize=standardize,
612
+ detrend=False)
613
+
614
+ return signals
615
+
616
+
617
+ def _filter_signal(signals, confounds, filter, low_pass, high_pass, t_r):
618
+ '''Filter signal based on provided strategy.'''
619
+ if filter == 'butterworth':
620
+ signals = butterworth(signals, sampling_rate=1. / t_r,
621
+ low_pass=low_pass, high_pass=high_pass)
622
+ if confounds is not None:
623
+ # Apply low- and high-pass filters to keep filters orthogonal
624
+ # (according to Lindquist et al. (2018))
625
+ confounds = butterworth(confounds, sampling_rate=1. / t_r,
626
+ low_pass=low_pass, high_pass=high_pass)
627
+ elif filter == 'cosine':
628
+ from .glm.first_level.design_matrix import _cosine_drift
629
+ frame_times = np.arange(signals.shape[0]) * t_r
630
+ cosine_drift = _cosine_drift(high_pass, frame_times)
631
+ if confounds is None:
632
+ confounds = cosine_drift.copy()
633
+ else:
634
+ confounds = np.hstack((confounds, cosine_drift))
635
+ return signals, confounds
636
+
637
+
638
+ def _process_runs(signals, runs, detrend, standardize, confounds,
639
+ low_pass, high_pass, t_r):
640
+ """Process each run independently."""
641
+ if len(runs) != len(signals):
642
+ raise ValueError(
643
+ (
644
+ 'The length of the run vector (%i) '
645
+ 'does not match the length of the signals (%i)'
646
+ ) % (len(runs), len(signals))
647
+ )
648
+ for run in np.unique(runs):
649
+ run_confounds = None
650
+ if confounds is not None:
651
+ run_confounds = confounds[runs == run]
652
+ signals[runs == run, :] = \
653
+ clean(signals[runs == run],
654
+ detrend=detrend, standardize=standardize,
655
+ confounds=run_confounds, low_pass=low_pass,
656
+ high_pass=high_pass, t_r=t_r)
657
+ return signals
658
+
659
+
660
+ def _sanitize_inputs(signals, runs, confounds, sample_mask, ensure_finite):
661
+ """Clean up signals and confounds before processing."""
662
+ n_time = len(signals) # original length of the signal
663
+ n_runs, runs = _sanitize_runs(n_time, runs)
664
+ confounds = _sanitize_confounds(n_time, n_runs, confounds)
665
+ sample_mask = _sanitize_sample_mask(n_time, n_runs, runs, sample_mask)
666
+ signals = _sanitize_signals(signals, ensure_finite)
667
+
668
+ if sample_mask is None:
669
+ return signals, runs, confounds
670
+
671
+ if confounds is not None:
672
+ confounds = confounds[sample_mask, :]
673
+ if runs is not None:
674
+ runs = runs[sample_mask]
675
+ return signals[sample_mask, :], runs, confounds
676
+
677
+
678
+ def _sanitize_confounds(n_time, n_runs, confounds):
679
+ """Check confounds are the correct type. When passing multiple runs, ensure the
680
+ number of runs matches the sets of confound regressors.
681
+ """
682
+ if confounds is None:
683
+ return confounds
684
+
685
+ if not isinstance(confounds, (list, tuple, str, np.ndarray, pd.DataFrame)):
686
+ raise TypeError(
687
+ "confounds keyword has an unhandled type: %s" % confounds.__class__
688
+ )
689
+
690
+ if not isinstance(confounds, (list, tuple)):
691
+ confounds = (confounds,)
692
+
693
+ all_confounds = []
694
+ for confound in confounds:
695
+ confound = _sanitize_confound_dtype(n_time, confound)
696
+ all_confounds.append(confound)
697
+ confounds = np.hstack(all_confounds)
698
+ return _ensure_float(confounds)
699
+
700
+
701
+ def _sanitize_sample_mask(n_time, n_runs, runs, sample_mask):
702
+ """Check sample_mask is the right data type and matches the run index."""
703
+ if sample_mask is None:
704
+ return sample_mask
705
+ if not isinstance(sample_mask, (list, tuple, np.ndarray)):
706
+ raise TypeError(
707
+ "sample_mask has an unhandled type: %s" % sample_mask.__class__
708
+ )
709
+ if not isinstance(sample_mask, (list, tuple)):
710
+ sample_mask = (sample_mask, )
711
+
712
+ if len(sample_mask) != n_runs:
713
+ raise ValueError(
714
+ "Number of sample_mask ({}) not matching "
715
+ "number of runs ({}).".format(len(sample_mask), n_runs)
716
+ )
717
+
718
+ if runs is None:
719
+ runs = np.zeros(n_time)
720
+
721
+ # handle multiple runs
722
+ masks = []
723
+ starting_index = 0
724
+ for i, current_mask in enumerate(sample_mask):
725
+ _check_sample_mask_index(i, n_runs, runs, current_mask)
726
+ current_mask += starting_index
727
+ masks.append(current_mask)
728
+ starting_index = sum(i == runs)
729
+ sample_mask = np.hstack(masks)
730
+ return sample_mask
731
+
732
+
733
+ def _check_sample_mask_index(i, n_runs, runs, current_mask):
734
+ """Ensure the index in sample mask is valid."""
735
+ len_run = sum(i == runs)
736
+ len_current_mask = len(current_mask)
737
+ # sample_mask longer than signal
738
+ if len_current_mask > len_run:
739
+ raise IndexError(
740
+ "sample_mask {} of {} is has more timepoints than the current "
741
+ "run ;sample_mask contains {} index but the run has {} "
742
+ "timepoints.".format(
743
+ (i + 1), n_runs, len_current_mask, len_run
744
+ )
745
+ )
746
+ # sample_mask index exceed signal timepoints
747
+ invalid_index = current_mask[current_mask > len_run]
748
+ if invalid_index.size > 0:
749
+ raise IndexError(
750
+ "sample_mask {} of {} contains invalid index {}; "
751
+ "The signal contains {} time points.".format(
752
+ (i + 1), n_runs, invalid_index, len_run
753
+ )
754
+ )
755
+
756
+
757
+ def _sanitize_runs(n_time, runs):
758
+ """Check runs are supplied in the correct format and detect the number of
759
+ unique runs.
760
+ """
761
+ if runs is not None and len(runs) != n_time:
762
+ raise ValueError(
763
+ (
764
+ "The length of the run vector (%i) "
765
+ "does not match the length of the signals (%i)"
766
+ )
767
+ % (len(runs), n_time)
768
+ )
769
+ n_runs = 1 if runs is None else len(np.unique(runs))
770
+ return n_runs, runs
771
+
772
+
773
+ def _sanitize_confound_dtype(n_signal, confound):
774
+ """Check confound is the correct datatype."""
775
+ if isinstance(confound, pd.DataFrame):
776
+ confound = confound.values
777
+ if isinstance(confound, str):
778
+ filename = confound
779
+ confound = csv_to_array(filename)
780
+ if np.isnan(confound.flat[0]):
781
+ # There may be a header
782
+ confound = csv_to_array(filename, skip_header=1)
783
+ if confound.shape[0] != n_signal:
784
+ raise ValueError(
785
+ "Confound signal has an incorrect length"
786
+ "Signal length: {0}; confound length: {1}".format(
787
+ n_signal, confound.shape[0])
788
+ )
789
+ elif isinstance(confound, np.ndarray):
790
+ if confound.ndim == 1:
791
+ confound = np.atleast_2d(confound).T
792
+ elif confound.ndim != 2:
793
+ raise ValueError("confound array has an incorrect number "
794
+ "of dimensions: %d" % confound.ndim)
795
+ if confound.shape[0] != n_signal:
796
+ raise ValueError(
797
+ "Confound signal has an incorrect length"
798
+ "Signal length: {0}; confound length: {1}".format(
799
+ n_signal, confound.shape[0])
800
+ )
801
+
802
+ else:
803
+ raise TypeError("confound has an unhandled type: %s"
804
+ % confound.__class__)
805
+ return confound
806
+
807
+
808
+ def _check_filter_parameters(filter, low_pass, high_pass, t_r):
809
+ """Check all filter related parameters are set correctly."""
810
+ if not filter:
811
+ if any(isinstance(item, float) for item in [low_pass, high_pass]):
812
+ warnings.warn(
813
+ "No filter type selected but cutoff frequency provided."
814
+ "Will not perform filtering."
815
+ )
816
+ return False
817
+ elif filter in availiable_filters:
818
+ if filter == 'cosine' and not all(isinstance(item, float)
819
+ for item in [t_r, high_pass]):
820
+ raise ValueError(
821
+ "Repetition time (t_r) and low cutoff frequency "
822
+ "(high_pass) must be specified for cosine filtering."
823
+ "t_r='{0}', high_pass='{1}'".format(t_r, high_pass)
824
+ )
825
+ if filter == 'butterworth':
826
+ if all(item is None for item in [low_pass, high_pass, t_r]):
827
+ # Butterworth was switched off by passing
828
+ # None to all these parameters
829
+ return False
830
+ if t_r is None:
831
+ raise ValueError("Repetition time (t_r) must be specified for "
832
+ "butterworth filtering.")
833
+ if any(isinstance(item, bool) for item in [low_pass, high_pass]):
834
+ raise TypeError(
835
+ "high/low pass must be float or None but you provided "
836
+ "high_pass='{0}', low_pass='{1}'"
837
+ .format(high_pass, low_pass)
838
+ )
839
+ return True
840
+ else:
841
+ raise ValueError("Filter method {} not implemented.".format(filter))
842
+
843
+
844
+ def _sanitize_signals(signals, ensure_finite):
845
+ """Ensure signals are in the correct state."""
846
+ if not isinstance(ensure_finite, bool):
847
+ raise ValueError("'ensure_finite' must be boolean type True or False "
848
+ "but you provided ensure_finite={0}"
849
+ .format(ensure_finite))
850
+ signals = signals.copy()
851
+ if not isinstance(signals, np.ndarray):
852
+ signals = as_ndarray(signals)
853
+ if ensure_finite:
854
+ mask = np.logical_not(np.isfinite(signals))
855
+ if mask.any():
856
+ signals[mask] = 0
857
+ return _ensure_float(signals)
858
+
859
+
860
+ def _check_signal_parameters(detrend, standardize_confounds):
861
+ """Raise warning if the combination is illogical"""
862
+ if not detrend and not standardize_confounds:
863
+ warnings.warn("When confounds are provided, one must perform detrend "
864
+ "and/or standardize confounds. You provided "
865
+ "detrend={0}, standardize_confounds={1}. If confounds "
866
+ "were not standardized or demeaned before passing to "
867
+ "signal.clean signal will not be correctly "
868
+ "cleaned. ".format(
869
+ detrend, standardize_confounds)
870
+ )
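
For orientation, a minimal usage sketch of the public helpers defined in the vendored signal module above (`butterworth`, `high_variance_confounds`, `clean`). The synthetic array shape, cutoff frequencies, and repetition time are illustrative assumptions, not values taken from the packaged file:

```python
import numpy as np
from nilearn import signal

rng = np.random.RandomState(0)
# 200 time points x 100 features, with a slow linear drift added
raw = rng.standard_normal((200, 100)) + np.linspace(0, 5, 200)[:, np.newaxis]

# Band-pass filter with the Butterworth helper (t_r assumed to be 2.5 s)
filtered = signal.butterworth(raw.copy(), sampling_rate=1. / 2.5,
                              low_pass=0.1, high_pass=0.01)

# CompCor-style confounds followed by the full cleaning pipeline
confounds = signal.high_variance_confounds(raw, n_confounds=3)
cleaned = signal.clean(raw, detrend=True, standardize='zscore',
                       confounds=confounds, low_pass=0.1, high_pass=0.01,
                       t_r=2.5)
print(cleaned.shape)  # (200, 100), z-scored along the time axis
```
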
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/nilearn/version.py ADDED
@@ -0,0 +1,183 @@
1
+ # *- encoding: utf-8 -*-
2
+ """
3
+ nilearn version, required package versions, and utilities for checking them.
4
+ """
5
+ # Author: Loic Esteve, Ben Cipollini
6
+ # License: simplified BSD
7
+
8
+ # PEP0440 compatible formatted version, see:
9
+ # https://www.python.org/dev/peps/pep-0440/
10
+ #
11
+ # Generic release markers:
12
+ # X.Y
13
+ # X.Y.Z # For bugfix releases
14
+ #
15
+ # Admissible pre-release markers:
16
+ # X.YaN # Alpha release
17
+ # X.YbN # Beta release
18
+ # X.YrcN # Release Candidate
19
+ # X.Y # Final release
20
+ #
21
+ # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
22
+ # 'X.Y.dev0' is the canonical version of 'X.Y.dev'
23
+ #
24
+ __version__ = '0.9.1'
25
+
26
+ _NILEARN_INSTALL_MSG = 'See %s for installation information.' % (
27
+ 'http://nilearn.github.io/introduction.html#installation')
28
+
29
+ import operator
30
+
31
+ # This is a tuple to preserve order, so that dependencies are checked
32
+ # in some meaningful order (more => less 'core').
33
+ REQUIRED_MODULE_METADATA = (
34
+ ('numpy', {
35
+ 'min_version': '1.16',
36
+ 'required_at_installation': True,
37
+ 'install_info': _NILEARN_INSTALL_MSG}),
38
+ ('scipy', {
39
+ 'min_version': '1.2',
40
+ 'required_at_installation': True,
41
+ 'install_info': _NILEARN_INSTALL_MSG}),
42
+ ('sklearn', {
43
+ 'min_version': '0.21',
44
+ 'required_at_installation': True,
45
+ 'install_info': _NILEARN_INSTALL_MSG}),
46
+ ('joblib', {
47
+ 'min_version': '0.12',
48
+ 'required_at_installation': True,
49
+ 'install_info': _NILEARN_INSTALL_MSG}),
50
+ ('nibabel', {
51
+ 'min_version': '2.5',
52
+ 'required_at_installation': False}),
53
+ ('pandas', {
54
+ 'min_version': '0.24.0',
55
+ 'required_at_installation': True,
56
+ 'install_info': _NILEARN_INSTALL_MSG}),
57
+ ("requests", {
58
+ "min_version": "2",
59
+ "required_at_installation": False
60
+ })
61
+ )
62
+
63
+ OPTIONAL_MATPLOTLIB_MIN_VERSION = '2.0'
64
+
65
+
66
+ def _import_module_with_version_check(
67
+ module_name,
68
+ minimum_version,
69
+ install_info=None):
70
+ """Check that module is installed with a recent enough version."""
71
+ try:
72
+ module = __import__(module_name)
73
+ except ImportError as exc:
74
+ user_friendly_info = ('Module "{0}" could not be found. {1}').format(
75
+ module_name,
76
+ install_info or 'Please install it properly to use nilearn.')
77
+ exc.args += (user_friendly_info,)
78
+ # Necessary for Python 3 because the repr/str of ImportError
79
+ # objects was changed in Python 3
80
+ if hasattr(exc, 'msg'):
81
+ exc.msg += '. ' + user_friendly_info
82
+ raise
83
+
84
+ # Avoid choking on modules with no __version__ attribute
85
+ module_version = getattr(module, '__version__', '0.0.0')
86
+
87
+ version_too_old = (
88
+ not _compare_version(module_version, '>=', minimum_version)
89
+ )
90
+
91
+ if version_too_old:
92
+ message = (
93
+ 'A {module_name} version of at least {minimum_version} '
94
+ 'is required to use nilearn. {module_version} was found. '
95
+ 'Please upgrade {module_name}').format(
96
+ module_name=module_name,
97
+ minimum_version=minimum_version,
98
+ module_version=module_version)
99
+
100
+ raise ImportError(message)
101
+
102
+ return module
103
+
104
+
105
+ VERSION_OPERATORS = {
106
+ "==": operator.eq,
107
+ "!=": operator.ne,
108
+ ">": operator.gt,
109
+ ">=": operator.ge,
110
+ "<": operator.lt,
111
+ "<=": operator.le,
112
+ }
113
+
114
+
115
+ def _compare_version(version_a, operator, version_b):
116
+ """Compare two version strings via a user-specified operator.
117
+
118
+ ``distutils`` has been deprecated since Python 3.10 and is scheduled
119
+ for removal from the standard library with the release of Python 3.12.
120
+ For version comparisons, we use setuptools's `parse_version` if available.
121
+
122
+ Note: This function is inspired from MNE-Python.
123
+ See https://github.com/mne-tools/mne-python/blob/main/mne/fixes.py
124
+
125
+ Parameters
126
+ ----------
127
+ version_a : :obj:`str`
128
+ First version string.
129
+
130
+ operator : {'==', '!=','>', '<', '>=', '<='}
131
+ Operator to compare ``version_a`` and ``version_b`` in the form of
132
+ ``version_a operator version_b``.
133
+
134
+ version_b : :obj:`str`
135
+ Second version string.
136
+
137
+ Returns
138
+ -------
139
+ result : :obj:`bool`
140
+ The result of the version comparison.
141
+
142
+ """
143
+ # TODO:
144
+ # The setuptools doc encourages the use of importlib.metadata instead
145
+ # of pkg_resources. However, importlib.metadata is only part of the stdlib
146
+ # for Python >= 3.8. Once Nilearn only supports Python >= 3.8,
147
+ # please consider changing the following line to:
148
+ # from importlib.metadata import version as parse
149
+ try:
150
+ from pkg_resources import parse_version as parse # noqa:F401
151
+ except ImportError:
152
+ from distutils.version import LooseVersion as parse # noqa:F401
153
+ if operator not in VERSION_OPERATORS:
154
+ raise ValueError(
155
+ "'_compare_version' received an unexpected "
156
+ "operator {0}.".format(operator)
157
+ )
158
+ return VERSION_OPERATORS[operator](parse(version_a), parse(version_b))
159
+
160
+
161
+ def _check_module_dependencies(is_nilearn_installing=False):
162
+ """Throw an exception if nilearn dependencies are not installed.
163
+
164
+ Parameters
165
+ ----------
166
+ is_nilearn_installing: boolean
167
+ if True, only error on missing packages that cannot be auto-installed.
168
+ if False, error on any missing package.
169
+
170
+ Raises
171
+ ------
172
+ ImportError
173
+ """
174
+
175
+ for (module_name, module_metadata) in REQUIRED_MODULE_METADATA:
176
+ if not (is_nilearn_installing and
177
+ not module_metadata['required_at_installation']):
178
+ # Skip check only when installing and it's a module that
179
+ # will be auto-installed.
180
+ _import_module_with_version_check(
181
+ module_name=module_name,
182
+ minimum_version=module_metadata['min_version'],
183
+ install_info=module_metadata.get('install_info'))
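
A small, hypothetical sketch of how the version helpers above can be exercised; the version strings being compared are made-up examples:

```python
from nilearn.version import (__version__, _compare_version,
                             _check_module_dependencies)

print(__version__)                               # '0.9.1'
print(_compare_version('1.21.0', '>=', '1.16'))  # True
print(_compare_version('0.20', '<', '0.21'))     # True

# Raises ImportError if a required dependency is missing or too old.
_check_module_dependencies()
```
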
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/LICENSE ADDED
@@ -0,0 +1,27 @@
1
+ Copyright (c) 2020-2021, Jiri Borovec
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without modification,
5
+ are permitted provided that the following conditions are met:
6
+
7
+ 1. Redistributions of source code must retain the above copyright notice,
8
+ this list of conditions and the following disclaimer.
9
+
10
+ 2. Redistributions in binary form must reproduce the above copyright notice,
11
+ this list of conditions and the following disclaimer in the documentation
12
+ and/or other materials provided with the distribution.
13
+
14
+ 3. Neither the name of the copyright holder nor the names of its contributors
15
+ may be used to endorse or promote products derived from this software without
16
+ specific prior written permission.
17
+
18
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20
+ THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
22
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24
+ OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
25
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
26
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/METADATA ADDED
@@ -0,0 +1,335 @@
1
+ Metadata-Version: 2.1
2
+ Name: pyDeprecate
3
+ Version: 0.3.1
4
+ Summary: Deprecation tooling
5
+ Home-page: https://borda.github.io/pyDeprecate
6
+ Author: Jiri Borovec
7
+ Author-email: jiri.borovec@fel.cvut.cz
8
+ License: MIT
9
+ Project-URL: Source Code, https://github.com/Borda/pyDeprecate
10
+ Keywords: python,development,deprecation
11
+ Platform: UNKNOWN
12
+ Classifier: Environment :: Console
13
+ Classifier: Natural Language :: English
14
+ Classifier: Development Status :: 3 - Alpha
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.6
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Requires-Python: >=3.6
23
+ Description-Content-Type: text/markdown
24
+ License-File: LICENSE
25
+
26
+ # pyDeprecate
27
+
28
+ **Simple tooling for marking deprecated functions or classes and re-routing to the new successors' instance.**
29
+
30
+ [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pyDeprecate)](https://pypi.org/project/pyDeprecate/)
31
+ [![PyPI Status](https://badge.fury.io/py/pyDeprecate.svg)](https://badge.fury.io/py/pyDeprecate)
32
+ [![PyPI Status](https://pepy.tech/badge/pyDeprecate)](https://pepy.tech/project/pyDeprecate)
33
+ [![Conda](https://img.shields.io/conda/v/conda-forge/pyDeprecate?label=conda&color=success)](https://anaconda.org/conda-forge/pyDeprecate)
34
+ ![Conda](https://img.shields.io/conda/dn/conda-forge/pyDeprecate)
35
+ [![license](https://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/Borda/pyDeprecate/blob/master/LICENSE)
36
+
37
+ [![CI testing](https://github.com/Borda/pyDeprecate/actions/workflows/ci_testing.yml/badge.svg?tag=0.3.1)](https://github.com/Borda/pyDeprecate/actions/workflows/ci_testing.yml)
38
+ [![Code formatting](https://github.com/Borda/pyDeprecate/actions/workflows/code-format.yml/badge.svg?tag=0.3.1)](https://github.com/Borda/pyDeprecate/actions/workflows/code-format.yml)
39
+ [![codecov](https://codecov.io/gh/Borda/pyDeprecate/release/0.3.1/graph/badge.svg?token=BG7RQ86UJA)](https://codecov.io/gh/Borda/pyDeprecate)
40
+ [![CodeFactor](https://www.codefactor.io/repository/github/borda/pydeprecate/badge)](https://www.codefactor.io/repository/github/borda/pydeprecate)
41
+ [![pre-commit.ci status](https://results.pre-commit.ci/badge/github/Borda/pyDeprecate/main.svg)](https://results.pre-commit.ci/latest/github/Borda/pyDeprecate/main)
42
+
43
+ <!--
44
+ [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Borda/pyDeprecate.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Borda/pyDeprecate/context:python)
45
+ -->
46
+
47
+ ---
48
+
49
+ The common use-case is moving your functions across a codebase or outsourcing some functionality to new packages.
50
+ In most of these cases you want to retain some compatibility, so you cannot simply remove the old function; instead, for some time you want to warn users that the functionality they have been using has moved and is now deprecated in favor of another function (which should be used instead) and will soon be removed completely.
51
+
52
+ Another goal is to not overwhelm the user with too many warnings, so per function/class the warning is raised only N times in the preferred stream (warning, logger, etc.).
53
+
54
+ ## Installation
55
+
56
+ Simple installation from PyPI:
57
+ ```bash
58
+ pip install pyDeprecate
59
+ ```
60
+
61
+ <details>
62
+ <summary>Other installations</summary>
63
+
64
+ Simply install with pip from source:
65
+ ```bash
66
+ pip install https://github.com/Borda/pyDeprecate/archive/main.zip
67
+ ```
68
+
69
+ </details>
70
+
71
+ ## Use-cases
72
+
73
+ The functionality is kept simple and all defaults should be reasonable, but you can still do extra customization such as:
74
+
75
+ * define user warning message and preferable stream
76
+ * extended argument mapping to target function/method
77
+ * define deprecation logic for self arguments
78
+ * specify warning count per:
79
+ - called function (for func deprecation)
80
+ - used arguments (for argument deprecation)
81
+ * define conditional skip (e.g. depending on some package version)
82
+
83
+ In particular, the possible target values (cases) are:
84
+
85
+ - _None_ - raise only the warning message (ignore all argument mapping)
86
+ - _True_ - deprecate some arguments of the function itself (argument mapping must be specified)
87
+ - _Callable_ - forward the call to the new target (optionally with argument mapping or extras)
88
+
89
+ ### Simple function forwarding
90
+
91
+ It is very straightforward: you forward your function call to the new function and all arguments are mapped:
92
+
93
+ ```python
94
+ def base_sum(a: int = 0, b: int = 3) -> int:
95
+ """My new function anywhere in codebase or even other package."""
96
+ return a + b
97
+
98
+ # ---------------------------
99
+
100
+ from deprecate import deprecated
101
+
102
+ @deprecated(target=base_sum, deprecated_in="0.1", remove_in="0.5")
103
+ def depr_sum(a: int, b: int = 5) -> int:
104
+ """
105
+ My deprecated function which now has empty body
106
+ as all calls are routed to the new function.
107
+ """
108
+ pass # or you can just place docstring as one above
109
+
110
+ # call this function will raise deprecation warning:
111
+ # The `depr_sum` was deprecated since v0.1 in favor of `__main__.base_sum`.
112
+ # It will be removed in v0.5.
113
+ print(depr_sum(1, 2))
114
+ ```
115
+ <details>
116
+ <summary>sample output:</summary>
117
+ ```
118
+ 3
119
+ ```
120
+ </details>
121
+
122
+ ### Advanced target argument mapping
123
+
124
+ A more complex example uses argument mapping:
125
+
126
+
127
+ <details>
128
+ <summary>Advanced example</summary>
129
+
130
+ ```python
131
+ import logging
132
+ from sklearn.metrics import accuracy_score
133
+ from deprecate import deprecated, void
134
+
135
+ @deprecated(
136
+ # use standard sklearn accuracy implementation
137
+ target=accuracy_score,
138
+ # custom warning stream
139
+ stream=logging.warning,
140
+ # number of warnings per lifetime (use -1 for always)
141
+ num_warns=5,
142
+ # custom message template
143
+ template_mgs="`%(source_name)s` was deprecated, use `%(target_path)s`",
144
+ # as target args are different, define mapping from source to target func
145
+ args_mapping={'preds': 'y_pred', 'target': 'y_true', 'blabla': None}
146
+ )
147
+ def depr_accuracy(preds: list, target: list, blabla: float) -> float:
148
+ """My deprecated function which is mapping to sklearn accuracy."""
149
+ # to stop your IDE complaining about unused arguments you can use the void/empty function
150
+ return void(preds, target, blabla)
151
+
152
+ # call this function will raise deprecation warning:
153
+ # WARNING:root:`depr_accuracy` was deprecated, use `sklearn.metrics.accuracy_score`
154
+ print(depr_accuracy([1, 0, 1, 2], [0, 1, 1, 2], 1.23))
155
+ ```
156
+ sample output:
157
+ ```
158
+ 0.5
159
+ ```
160
+
161
+ </details>
162
+
163
+
164
+ ### Deprecation warning only
165
+
166
+ Base use-case with no forwarding, just raising a warning:
167
+
168
+ ```python
169
+ from deprecate import deprecated
170
+
171
+ @deprecated(target=None, deprecated_in="0.1", remove_in="0.5")
172
+ def my_sum(a: int, b: int = 5) -> int:
173
+ """My deprecated function which still has to have implementation."""
174
+ return a + b
175
+
176
+ # call this function will raise deprecation warning:
177
+ # The `my_sum` was deprecated since v0.1. It will be removed in v0.5.
178
+ print(my_sum(1, 2))
179
+ ```
180
+ <details>
181
+ <summary>sample output:</summary>
182
+ ```
183
+ 3
184
+ ```
185
+ </details>
186
+
187
+ ### Self argument mapping
188
+
189
+ We also support deprecation and argument mapping for the function itself:
190
+
191
+ ```python
192
+ from deprecate import deprecated
193
+
194
+ @deprecated(
195
+ # define deprecation of one of the function's own arguments - mapping
196
+ target=True, args_mapping={'coef': 'new_coef'},
197
+ # common version info
198
+ deprecated_in="0.2", remove_in="0.4",
199
+ )
200
+ def any_pow(base: float, coef: float = 0, new_coef: float = 0) -> float:
201
+ """My function with deprecated argument `coef` mapped to `new_coef`."""
202
+ return base ** new_coef
203
+
204
+ # call this function will raise deprecation warning:
205
+ # The `any_pow` uses deprecated arguments: `coef` -> `new_coef`.
206
+ # They were deprecated since v0.2 and will be removed in v0.4.
207
+ print(any_pow(2, 3))
208
+ ```
209
+ <details>
210
+ <summary>sample output:</summary>
211
+ ```
212
+ 8
213
+ ```
214
+ </details>
215
+
216
+ ### Multiple deprecation levels
217
+
218
+ You can also set multiple deprecation levels by chaining deprecation decorators, as each argument may be deprecated in a different version:
219
+
220
+ <details>
221
+ <summary>Multiple deprecation levels</summary>
222
+
223
+ ```python
224
+ from deprecate import deprecated
225
+
226
+ @deprecated(
227
+ True, "0.3", "0.6", args_mapping=dict(c1='nc1'),
228
+ template_mgs="Depr: v%(deprecated_in)s rm v%(remove_in)s for args: %(argument_map)s."
229
+ )
230
+ @deprecated(
231
+ True, "0.4", "0.7", args_mapping=dict(nc1='nc2'),
232
+ template_mgs="Depr: v%(deprecated_in)s rm v%(remove_in)s for args: %(argument_map)s."
233
+ )
234
+ def any_pow(base, c1: float = 0, nc1: float = 0, nc2: float = 2) -> float:
235
+ return base**nc2
236
+
237
+ # call this function will raise deprecation warning:
238
+ # DeprecationWarning('Depr: v0.3 rm v0.6 for args: `c1` -> `nc1`.')
239
+ # DeprecationWarning('Depr: v0.4 rm v0.7 for args: `nc1` -> `nc2`.')
240
+ print(any_pow(2, 3))
241
+ ```
242
+ sample output:
243
+ ```
244
+ 8
245
+ ```
246
+
247
+ </details>
248
+
249
+ ### Conditional skip
250
+
251
+ A conditional skip can be used to map between different target functions depending on additional input such as a package version:
252
+
253
+ ```python
254
+ from deprecate import deprecated
255
+
256
+ FAKE_VERSION = 1
257
+
258
+ def version_greater_1():
259
+ return FAKE_VERSION > 1
260
+
261
+ @deprecated(
262
+ True, "0.3", "0.6", args_mapping=dict(c1='nc1'), skip_if=version_greater_1
263
+ )
264
+ def skip_pow(base, c1: float = 1, nc1: float = 1) -> float:
265
+ return base**(c1 - nc1)
266
+
267
+ # call this function will raise deprecation warning
268
+ print(skip_pow(2, 3))
269
+
270
+ # change the fake versions
271
+ FAKE_VERSION = 2
272
+
273
+ # Will not raise any warning
274
+ print(skip_pow(2, 3))
275
+ ```
276
+ <details>
277
+ <summary>sample output:</summary>
278
+ ```
279
+ 0.25
280
+ 4
281
+ ```
282
+ </details>
283
+
284
+ This can be useful in combination with the multiple deprecation levels shown above...
285
+
286
+ ### Class deprecation
287
+
288
+ This case can be quite complex, as you may deprecate just some methods; here we show full class deprecation:
289
+
290
+ ```python
291
+ class NewCls:
292
+ """My new class anywhere in the codebase or other package."""
293
+
294
+ def __init__(self, c: float, d: str = "abc"):
295
+ self.my_c = c
296
+ self.my_d = d
297
+
298
+ # ---------------------------
299
+
300
+ from deprecate import deprecated, void
301
+
302
+ class PastCls(NewCls):
303
+ """
304
+ The deprecated class shall be inherited from the successor class
305
+ to hold all methods.
306
+ """
307
+
308
+ @deprecated(target=NewCls, deprecated_in="0.2", remove_in="0.4")
309
+ def __init__(self, c: int, d: str = "efg"):
310
+ """
311
+ You place the decorator around __init__ as you want
312
+ to warn user just at the time of creating object.
313
+ """
314
+ return void(c, d)
315
+
316
+ # call this function will raise deprecation warning:
317
+ # The `PastCls` was deprecated since v0.2 in favor of `__main__.NewCls`.
318
+ # It will be removed in v0.4.
319
+ inst = PastCls(7)
320
+ print(inst.my_c) # returns: 7
321
+ print(inst.my_d) # returns: "efg"
322
+ ```
323
+ <details>
324
+ <summary>sample output:</summary>
325
+ ```
326
+ 7
327
+ efg
328
+ ```
329
+ </details>
330
+
331
+ ## Contribution
332
+
333
+ Have you faced this in the past, or are you facing it now? Do you have good ideas for improvement? All are welcome!
334
+
335
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pyDeprecate-0.3.1.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
1
+ deprecate/__init__.py,sha256=37Y-nppyEVah9PsRW7D7MpOK3tcD0De-skCst_ajOdo,546
2
+ deprecate/__pycache__/__init__.cpython-38.pyc,,
3
+ deprecate/__pycache__/deprecation.cpython-38.pyc,,
4
+ deprecate/__pycache__/utils.cpython-38.pyc,,
5
+ deprecate/deprecation.py,sha256=FtZNg7SjB-ZTP2cs6NChzR6joIvfB8VWUWNdvDDicTk,12622
6
+ deprecate/utils.py,sha256=5nyZjgm8rxgpaQUwLWTebTt4cE9HE4nx2HpQDkIFmTU,1869
7
+ pyDeprecate-0.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
8
+ pyDeprecate-0.3.1.dist-info/LICENSE,sha256=5Ekt3qiROL3RpcpWxvJ20J4roFW1qjyfBndPq1rDmEw,1488
9
+ pyDeprecate-0.3.1.dist-info/METADATA,sha256=ET6d0pHoG_SezgJi1RLzqZDoXi8QLOvnvBBk_bjzo1U,10837
10
+ pyDeprecate-0.3.1.dist-info/RECORD,,
11
+ pyDeprecate-0.3.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
+ pyDeprecate-0.3.1.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
13
+ pyDeprecate-0.3.1.dist-info/top_level.txt,sha256=JoS3qR1D0-NtCnKrbT4roM65b8LE7ETaLj3AOdlTU1E,10