prasb committed on
Commit
7667c4d
·
verified ·
1 Parent(s): 45f263c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/_distutils_hack/__init__.py +220 -0
  3. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/_distutils_hack/override.py +1 -0
  4. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__init__.py +56 -0
  5. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/api.py +608 -0
  6. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/cd.py +340 -0
  7. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/legacy.py +95 -0
  8. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/models.py +392 -0
  9. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/utils.py +342 -0
  10. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/version.py +6 -0
  11. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/deprecate/__init__.py +18 -0
  12. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/deprecate/deprecation.py +306 -0
  13. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/deprecate/utils.py +53 -0
  14. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docs/conf.py +83 -0
  15. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ema_pytorch-0.0.8.dist-info/INSTALLER +1 -0
  16. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ema_pytorch-0.0.8.dist-info/LICENSE +21 -0
  17. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ema_pytorch-0.0.8.dist-info/REQUESTED +0 -0
  18. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/INSTALLER +1 -0
  19. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/METADATA +56 -0
  20. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/RECORD +22 -0
  21. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/WHEEL +4 -0
  22. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/INSTALLER +1 -0
  23. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/LICENSE +27 -0
  24. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/METADATA +17 -0
  25. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/RECORD +15 -0
  26. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/REQUESTED +0 -0
  27. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/WHEEL +6 -0
  28. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/top_level.txt +1 -0
  29. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/INSTALLER +1 -0
  30. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/LICENSE +24 -0
  31. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/RECORD +113 -0
  32. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/REQUESTED +0 -0
  33. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/WHEEL +5 -0
  34. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/entry_points.txt +4 -0
  35. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/__init__.py +65 -0
  36. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/_version.py +21 -0
  37. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/py.typed +0 -0
  38. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/LICENSE +13 -0
  39. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/METADATA +131 -0
  40. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/RECORD +20 -0
  41. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/REQUESTED +0 -0
  42. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/WHEEL +6 -0
  43. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/top_level.txt +1 -0
  44. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/__main__.py +24 -0
  45. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/py.typed +4 -0
  46. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/INSTALLER +1 -0
  47. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/METADATA +20 -0
  48. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/RECORD +100 -0
  49. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/REQUESTED +0 -0
  50. my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/WHEEL +6 -0
.gitattributes CHANGED
@@ -176,3 +176,6 @@ my_container_sandbox/workspace/anaconda3/pkgs/sqlite-3.36.0-hc218d9a_0.conda fil
176
  my_container_sandbox/workspace/anaconda3/pkgs/certifi-2021.5.30-py39h06a4308_0.conda filter=lfs diff=lfs merge=lfs -text
177
  my_container_sandbox/workspace/anaconda3/pkgs/ncurses-6.2-he6710b0_1.conda filter=lfs diff=lfs merge=lfs -text
178
  my_container_sandbox/workspace/anaconda3/pkgs/pip-21.1.3-py39h06a4308_0.conda filter=lfs diff=lfs merge=lfs -text
 
 
 
 
176
  my_container_sandbox/workspace/anaconda3/pkgs/certifi-2021.5.30-py39h06a4308_0.conda filter=lfs diff=lfs merge=lfs -text
177
  my_container_sandbox/workspace/anaconda3/pkgs/ncurses-6.2-he6710b0_1.conda filter=lfs diff=lfs merge=lfs -text
178
  my_container_sandbox/workspace/anaconda3/pkgs/pip-21.1.3-py39h06a4308_0.conda filter=lfs diff=lfs merge=lfs -text
179
+ my_container_sandbox/workspace/anaconda3/pkgs/certifi-2024.2.2-pyhd8ed1ab_0.conda filter=lfs diff=lfs merge=lfs -text
180
+ my_container_sandbox/workspace/anaconda3/pkgs/sqlite-3.41.2-h5eee18b_0.conda filter=lfs diff=lfs merge=lfs -text
181
+ my_container_sandbox/workspace/anaconda3/pkgs/brotlipy-0.7.0-py39h27cfd23_1003.conda filter=lfs diff=lfs merge=lfs -text
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/_distutils_hack/__init__.py ADDED
@@ -0,0 +1,220 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # don't import any costly modules
2
+ import sys
3
+ import os
4
+
5
+
6
def warn_distutils_present():
    """Warn when stdlib ``distutils`` was imported before Setuptools.

    Setuptools replaces ``distutils`` in ``sys.modules``; importing the
    stdlib copy first can therefore lead to surprising behavior.
    """
    if 'distutils' in sys.modules:
        # Deferred import: keeps this module cheap to import (see file header).
        import warnings

        msg = (
            "Distutils was imported before Setuptools, but importing Setuptools "
            "also replaces the `distutils` module in `sys.modules`. This may lead "
            "to undesirable behaviors or errors. To avoid these issues, avoid "
            "using distutils directly, ensure that setuptools is installed in the "
            "traditional way (e.g. not an editable install), and/or make sure "
            "that setuptools is always imported before distutils."
        )
        warnings.warn(msg)
19
+
20
+
21
def clear_distutils():
    """Remove ``distutils`` and every ``distutils.*`` submodule from ``sys.modules``."""
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn("Setuptools is replacing distutils.")
    # Snapshot the matching names first: sys.modules must not be mutated
    # while it is being iterated.
    doomed = [
        mod_name
        for mod_name in list(sys.modules)
        if mod_name == "distutils" or mod_name.startswith("distutils.")
    ]
    for mod_name in doomed:
        sys.modules.pop(mod_name)
34
+
35
+
36
def enabled():
    """Return True when setuptools' local distutils should be preferred.

    Selection is driven by the ``SETUPTOOLS_USE_DISTUTILS`` environment
    variable; any value other than ``'local'`` (e.g. ``'stdlib'``)
    disables the override. Defaults to ``'local'`` when unset.
    """
    return os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') == 'local'
42
+
43
+
44
def ensure_local_distutils():
    """Force ``distutils`` in ``sys.modules`` to be setuptools' bundled copy.

    Clears any previously imported distutils, re-imports it through the
    temporarily-installed meta-path shim, and sanity-checks the result.
    Raises AssertionError when the local copy did not take effect.
    """
    import importlib

    clear_distutils()

    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules
59
+
60
+
61
def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    # No-op unless SETUPTOOLS_USE_DISTUTILS selects the local copy.
    if not enabled():
        return
    warn_distutils_present()
    ensure_local_distutils()
71
+
72
+
73
+ class _TrivialRe:
74
+ def __init__(self, *patterns):
75
+ self._patterns = patterns
76
+
77
+ def match(self, string):
78
+ return all(pat in string for pat in self._patterns)
79
+
80
+
81
class DistutilsMetaFinder:
    """Meta-path finder that redirects ``import distutils`` to
    ``setuptools._distutils``.

    Dispatch is dynamic: ``find_spec`` looks up a ``spec_for_<fullname>``
    method on this class, so adding a method (or ``setattr``-ing one, as
    done below for ``sensitive_tests``) handles a new module name.
    """

    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return None

        # Dynamic dispatch: e.g. fullname 'distutils' -> spec_for_distutils().
        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        if self.is_cpython():
            return None

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return None

        # Loader that hands back the already-imported setuptools._distutils
        # module object, renamed to 'distutils'.
        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        # 'pybuilddir.txt' exists in a CPython source build directory.
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if sys.version_info >= (3, 12) or self.pip_imported_during_build():
            return
        clear_distutils()
        # Disable further distutils interception on this instance.
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback

        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        # Disable further distutils interception on this instance.
        self.spec_for_distutils = lambda: None

    # CPython test modules for which the stdlib distutils must be kept.
    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )
182
+
183
+
184
# Register a spec_for_<module> method for every sensitive CPython test,
# all sharing the spec_for_sensitive_tests behavior (see find_spec dispatch).
for name in DistutilsMetaFinder.sensitive_tests:
    setattr(
        DistutilsMetaFinder,
        f'spec_for_{name}',
        DistutilsMetaFinder.spec_for_sensitive_tests,
    )


# Single module-wide finder instance; its identity is what add/remove use.
DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    # Idempotent: only insert when the finder is not already registered.
    DISTUTILS_FINDER in sys.meta_path or insert_shim()


class shim:
    # Context manager: installs the finder on entry, removes it on exit.
    def __enter__(self):
        insert_shim()

    def __exit__(self, exc, value, tb):
        _remove_shim()


def insert_shim():
    # Prepend so this finder wins over the default path-based finders.
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def _remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        # Already removed (or never installed) -- nothing to do.
        pass


if sys.version_info < (3, 12):
    # DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632)
    remove_shim = _remove_shim
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/_distutils_hack/override.py ADDED
@@ -0,0 +1 @@
 
 
1
+ __import__('_distutils_hack').do_override()
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/__init__.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf_8 -*-
"""
Charset-Normalizer
~~~~~~~~~~~~~~
The Real First Universal Charset Detector.
A library that helps you read text from an unknown charset encoding.
Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
All IANA character set names for which the Python core library provides codecs are supported.

Basic usage:
>>> from charset_normalizer import from_bytes
>>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
>>> best_guess = results.best()
>>> str(best_guess)
'Bсеки човек има право на образование. Oбразованието!'

Other methods and usages are available - see the full documentation
at <https://github.com/Ousret/charset_normalizer>.
:copyright: (c) 2021 by Ahmed TAHRI
:license: MIT, see LICENSE for more details.
"""
import logging

# Public API re-exported from the package submodules.
from .api import from_bytes, from_fp, from_path, normalize
from .legacy import (
    CharsetDetector,
    CharsetDoctor,
    CharsetNormalizerMatch,
    CharsetNormalizerMatches,
    detect,
)
from .models import CharsetMatch, CharsetMatches
from .utils import set_logging_handler
from .version import VERSION, __version__

__all__ = (
    "from_fp",
    "from_path",
    "from_bytes",
    "normalize",
    "detect",
    "CharsetMatch",
    "CharsetMatches",
    "CharsetNormalizerMatch",
    "CharsetNormalizerMatches",
    "CharsetDetector",
    "CharsetDoctor",
    "__version__",
    "VERSION",
    "set_logging_handler",
)

# Attach a NullHandler to the top level logger by default
# https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library

logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/api.py ADDED
@@ -0,0 +1,608 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from os.path import basename, splitext
3
+ from typing import BinaryIO, List, Optional, Set
4
+
5
+ try:
6
+ from os import PathLike
7
+ except ImportError: # pragma: no cover
8
+ PathLike = str # type: ignore
9
+
10
+ from .cd import (
11
+ coherence_ratio,
12
+ encoding_languages,
13
+ mb_encoding_languages,
14
+ merge_coherence_ratios,
15
+ )
16
+ from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
17
+ from .md import mess_ratio
18
+ from .models import CharsetMatch, CharsetMatches
19
+ from .utils import (
20
+ any_specified_encoding,
21
+ iana_name,
22
+ identify_sig_or_bom,
23
+ is_cp_similar,
24
+ is_multi_byte_encoding,
25
+ should_strip_sig_or_bom,
26
+ )
27
+
28
# Will most likely be controversial
# logging.addLevelName(TRACE, "TRACE")
logger = logging.getLogger("charset_normalizer")
# Handler is attached/detached on demand by from_bytes() when explain=True;
# the package default is a NullHandler (set in __init__.py).
explain_handler = logging.StreamHandler()
explain_handler.setFormatter(
    logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
)
35
+
36
+
37
def from_bytes(
    sequences: bytes,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.2,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
) -> CharsetMatches:
    """
    Given a raw bytes sequence, return the best possibles charset usable to render str objects.
    If there is no results, it is a strong indicator that the source is binary/not text.
    By default, the process will extract 5 blocs of 512o each to assess the mess and coherence of a given sequence.
    And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.

    The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
    but never take it for granted. Can improve the performance.

    You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
    purpose.

    This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
    By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
    toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
    Custom logging format and handler can be set manually.
    """

    if not isinstance(sequences, (bytearray, bytes)):
        raise TypeError(
            "Expected object of type bytes or bytearray, got: {0}".format(
                type(sequences)
            )
        )

    if explain:
        previous_logger_level = logger.level  # type: int
        logger.addHandler(explain_handler)
        logger.setLevel(TRACE)

    length = len(sequences)  # type: int

    if length == 0:
        logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
        if explain:
            logger.removeHandler(explain_handler)
            logger.setLevel(previous_logger_level or logging.WARNING)
        return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])

    if cp_isolation is not None:
        logger.log(
            TRACE,
            "cp_isolation is set. use this flag for debugging purpose. "
            "limited list of encoding allowed : %s.",
            ", ".join(cp_isolation),
        )
        cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
    else:
        cp_isolation = []

    if cp_exclusion is not None:
        logger.log(
            TRACE,
            "cp_exclusion is set. use this flag for debugging purpose. "
            "limited list of encoding excluded : %s.",
            ", ".join(cp_exclusion),
        )
        cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
    else:
        cp_exclusion = []

    # Shrink the probing plan when the payload is smaller than steps*chunk_size.
    if length <= (chunk_size * steps):
        logger.log(
            TRACE,
            "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
            steps,
            chunk_size,
            length,
        )
        steps = 1
        chunk_size = length

    if steps > 1 and length / steps < chunk_size:
        chunk_size = int(length / steps)

    is_too_small_sequence = len(sequences) < TOO_SMALL_SEQUENCE  # type: bool
    is_too_large_sequence = len(sequences) >= TOO_BIG_SEQUENCE  # type: bool

    if is_too_small_sequence:
        logger.log(
            TRACE,
            "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
                length
            ),
        )
    elif is_too_large_sequence:
        logger.log(
            TRACE,
            "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
                length
            ),
        )

    prioritized_encodings = []  # type: List[str]

    specified_encoding = (
        any_specified_encoding(sequences) if preemptive_behaviour else None
    )  # type: Optional[str]

    if specified_encoding is not None:
        prioritized_encodings.append(specified_encoding)
        logger.log(
            TRACE,
            "Detected declarative mark in sequence. Priority +1 given for %s.",
            specified_encoding,
        )

    tested = set()  # type: Set[str]
    tested_but_hard_failure = []  # type: List[str]
    tested_but_soft_failure = []  # type: List[str]

    fallback_ascii = None  # type: Optional[CharsetMatch]
    fallback_u8 = None  # type: Optional[CharsetMatch]
    fallback_specified = None  # type: Optional[CharsetMatch]

    results = CharsetMatches()  # type: CharsetMatches

    sig_encoding, sig_payload = identify_sig_or_bom(sequences)

    if sig_encoding is not None:
        prioritized_encodings.append(sig_encoding)
        logger.log(
            TRACE,
            "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
            len(sig_payload),
            sig_encoding,
        )

    prioritized_encodings.append("ascii")

    if "utf_8" not in prioritized_encodings:
        prioritized_encodings.append("utf_8")

    # Probe each candidate code page: full/lazy decode, then chunked
    # mess-detection (MD), then coherence-detection (CD, language scoring).
    for encoding_iana in prioritized_encodings + IANA_SUPPORTED:

        if cp_isolation and encoding_iana not in cp_isolation:
            continue

        if cp_exclusion and encoding_iana in cp_exclusion:
            continue

        if encoding_iana in tested:
            continue

        tested.add(encoding_iana)

        decoded_payload = None  # type: Optional[str]
        bom_or_sig_available = sig_encoding == encoding_iana  # type: bool
        strip_sig_or_bom = bom_or_sig_available and should_strip_sig_or_bom(
            encoding_iana
        )  # type: bool

        if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
            logger.log(
                TRACE,
                "Encoding %s wont be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
                encoding_iana,
            )
            continue

        try:
            is_multi_byte_decoder = is_multi_byte_encoding(encoding_iana)  # type: bool
        except (ModuleNotFoundError, ImportError):
            logger.log(
                TRACE,
                "Encoding %s does not provide an IncrementalDecoder",
                encoding_iana,
            )
            continue

        try:
            if is_too_large_sequence and is_multi_byte_decoder is False:
                str(
                    sequences[: int(50e4)]
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) : int(50e4)],
                    encoding=encoding_iana,
                )
            else:
                decoded_payload = str(
                    sequences
                    if strip_sig_or_bom is False
                    else sequences[len(sig_payload) :],
                    encoding=encoding_iana,
                )
        except (UnicodeDecodeError, LookupError) as e:
            if not isinstance(e, LookupError):
                logger.log(
                    TRACE,
                    "Code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
            tested_but_hard_failure.append(encoding_iana)
            continue

        similar_soft_failure_test = False  # type: bool

        for encoding_soft_failed in tested_but_soft_failure:
            if is_cp_similar(encoding_iana, encoding_soft_failed):
                similar_soft_failure_test = True
                break

        if similar_soft_failure_test:
            logger.log(
                TRACE,
                "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
                encoding_iana,
                encoding_soft_failed,
            )
            continue

        r_ = range(
            0 if not bom_or_sig_available else len(sig_payload),
            length,
            int(length / steps),
        )

        multi_byte_bonus = (
            is_multi_byte_decoder
            and decoded_payload is not None
            and len(decoded_payload) < length
        )  # type: bool

        if multi_byte_bonus:
            logger.log(
                TRACE,
                "Code page %s is a multi byte encoding table and it appear that at least one character "
                "was encoded using n-bytes.",
                encoding_iana,
            )

        max_chunk_gave_up = int(len(r_) / 4)  # type: int

        max_chunk_gave_up = max(max_chunk_gave_up, 2)
        early_stop_count = 0  # type: int
        lazy_str_hard_failure = False

        md_chunks = []  # type: List[str]
        md_ratios = []

        for i in r_:
            if i + chunk_size > length + 8:
                continue

            cut_sequence = sequences[i : i + chunk_size]

            if bom_or_sig_available and strip_sig_or_bom is False:
                cut_sequence = sig_payload + cut_sequence

            try:
                chunk = cut_sequence.decode(
                    encoding_iana,
                    errors="ignore" if is_multi_byte_decoder else "strict",
                )  # type: str
            except UnicodeDecodeError as e:  # Lazy str loading may have missed something there
                logger.log(
                    TRACE,
                    "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
                early_stop_count = max_chunk_gave_up
                lazy_str_hard_failure = True
                break

            # multi-byte bad cutting detector and adjustment
            # not the cleanest way to perform that fix but clever enough for now.
            if is_multi_byte_decoder and i > 0 and sequences[i] >= 0x80:

                chunk_partial_size_chk = min(chunk_size, 16)  # type: int

                if (
                    decoded_payload
                    and chunk[:chunk_partial_size_chk] not in decoded_payload
                ):
                    # Walk back up to 3 bytes to realign on a character boundary.
                    for j in range(i, i - 4, -1):
                        cut_sequence = sequences[j : i + chunk_size]

                        if bom_or_sig_available and strip_sig_or_bom is False:
                            cut_sequence = sig_payload + cut_sequence

                        chunk = cut_sequence.decode(encoding_iana, errors="ignore")

                        if chunk[:chunk_partial_size_chk] in decoded_payload:
                            break

            md_chunks.append(chunk)

            md_ratios.append(mess_ratio(chunk, threshold))

            if md_ratios[-1] >= threshold:
                early_stop_count += 1

            if (early_stop_count >= max_chunk_gave_up) or (
                bom_or_sig_available and strip_sig_or_bom is False
            ):
                break

        # We might want to check the sequence again with the whole content
        # Only if initial MD tests passes
        if (
            not lazy_str_hard_failure
            and is_too_large_sequence
            and not is_multi_byte_decoder
        ):
            try:
                sequences[int(50e3) :].decode(encoding_iana, errors="strict")
            except UnicodeDecodeError as e:
                logger.log(
                    TRACE,
                    "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
                    encoding_iana,
                    str(e),
                )
                tested_but_hard_failure.append(encoding_iana)
                continue

        mean_mess_ratio = (
            sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
        )  # type: float
        if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
            tested_but_soft_failure.append(encoding_iana)
            logger.log(
                TRACE,
                "%s was excluded because of initial chaos probing. Gave up %i time(s). "
                "Computed mean chaos is %f %%.",
                encoding_iana,
                early_stop_count,
                round(mean_mess_ratio * 100, ndigits=3),
            )
            # Preparing those fallbacks in case we got nothing.
            if (
                encoding_iana in ["ascii", "utf_8", specified_encoding]
                and not lazy_str_hard_failure
            ):
                fallback_entry = CharsetMatch(
                    sequences, encoding_iana, threshold, False, [], decoded_payload
                )
                if encoding_iana == specified_encoding:
                    fallback_specified = fallback_entry
                elif encoding_iana == "ascii":
                    fallback_ascii = fallback_entry
                else:
                    fallback_u8 = fallback_entry
            continue

        logger.log(
            TRACE,
            "%s passed initial chaos probing. Mean measured chaos is %f %%",
            encoding_iana,
            round(mean_mess_ratio * 100, ndigits=3),
        )

        if not is_multi_byte_decoder:
            target_languages = encoding_languages(encoding_iana)  # type: List[str]
        else:
            target_languages = mb_encoding_languages(encoding_iana)

        if target_languages:
            logger.log(
                TRACE,
                "{} should target any language(s) of {}".format(
                    encoding_iana, str(target_languages)
                ),
            )

        cd_ratios = []

        # We shall skip the CD when its about ASCII
        # Most of the time its not relevant to run "language-detection" on it.
        if encoding_iana != "ascii":
            for chunk in md_chunks:
                chunk_languages = coherence_ratio(
                    chunk, 0.1, ",".join(target_languages) if target_languages else None
                )

                cd_ratios.append(chunk_languages)

        cd_ratios_merged = merge_coherence_ratios(cd_ratios)

        if cd_ratios_merged:
            logger.log(
                TRACE,
                "We detected language {} using {}".format(
                    cd_ratios_merged, encoding_iana
                ),
            )

        results.append(
            CharsetMatch(
                sequences,
                encoding_iana,
                mean_mess_ratio,
                bom_or_sig_available,
                cd_ratios_merged,
                decoded_payload,
            )
        )

        # Early exit: a near-clean match on the declared/ascii/utf_8 encoding
        # is considered decisive.
        if (
            encoding_iana in [specified_encoding, "ascii", "utf_8"]
            and mean_mess_ratio < 0.1
        ):
            logger.debug(
                "Encoding detection: %s is most likely the one.", encoding_iana
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

        if encoding_iana == sig_encoding:
            logger.debug(
                "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
                "the beginning of the sequence.",
                encoding_iana,
            )
            if explain:
                logger.removeHandler(explain_handler)
                logger.setLevel(previous_logger_level)
            return CharsetMatches([results[encoding_iana]])

    if len(results) == 0:
        if fallback_u8 or fallback_ascii or fallback_specified:
            logger.log(
                TRACE,
                "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
            )

        if fallback_specified:
            logger.debug(
                "Encoding detection: %s will be used as a fallback match",
                fallback_specified.encoding,
            )
            results.append(fallback_specified)
        elif (
            (fallback_u8 and fallback_ascii is None)
            or (
                fallback_u8
                and fallback_ascii
                and fallback_u8.fingerprint != fallback_ascii.fingerprint
            )
            or (fallback_u8 is not None)
        ):
            logger.debug("Encoding detection: utf_8 will be used as a fallback match")
            results.append(fallback_u8)
        elif fallback_ascii:
            logger.debug("Encoding detection: ascii will be used as a fallback match")
            results.append(fallback_ascii)

    if results:
        logger.debug(
            "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
            results.best().encoding,  # type: ignore
            len(results) - 1,
        )
    else:
        logger.debug("Encoding detection: Unable to determine any suitable charset.")

    if explain:
        logger.removeHandler(explain_handler)
        logger.setLevel(previous_logger_level)

    return results
512
+
513
+
514
def from_fp(
    fp: BinaryIO,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
) -> CharsetMatches:
    """
    Same as the function from_bytes but using a file pointer that is already ready.
    Will not close the file pointer.
    """
    # Reads the whole stream into memory; the caller keeps ownership of fp.
    return from_bytes(
        fp.read(),
        steps,
        chunk_size,
        threshold,
        cp_isolation,
        cp_exclusion,
        preemptive_behaviour,
        explain,
    )
538
+
539
+
540
def from_path(
    path: PathLike,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
    explain: bool = False,
) -> CharsetMatches:
    """
    Same as the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
    Can raise IOError.
    """
    with open(path, "rb") as fp:
        return from_fp(
            fp,
            steps,
            chunk_size,
            threshold,
            cp_isolation,
            cp_exclusion,
            preemptive_behaviour,
            explain,
        )
565
+
566
+
567
def normalize(
    path: PathLike,
    steps: int = 5,
    chunk_size: int = 512,
    threshold: float = 0.20,
    cp_isolation: Optional[List[str]] = None,
    cp_exclusion: Optional[List[str]] = None,
    preemptive_behaviour: bool = True,
) -> CharsetMatch:
    """
    Take a (text-based) file path and try to create another file next to it, this time using UTF-8.

    Raises IOError when no suitable encoding charset could be found for the file.
    """
    results = from_path(
        path,
        steps,
        chunk_size,
        threshold,
        cp_isolation,
        cp_exclusion,
        preemptive_behaviour,
    )

    filename = basename(path)
    target_extensions = list(splitext(filename))

    if len(results) == 0:
        raise IOError(
            'Unable to normalize "{}", no encoding charset seems to fit.'.format(
                filename
            )
        )

    result = results.best()

    # Embed the detected encoding in the output name: "file.txt" -> "file-cp1252.txt".
    target_extensions[0] += "-" + result.encoding  # type: ignore

    with open(
        "{}".format(str(path).replace(filename, "".join(target_extensions))), "wb"
    ) as fp:
        fp.write(result.output())  # type: ignore

    return result  # type: ignore
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/cd.py ADDED
@@ -0,0 +1,340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ from codecs import IncrementalDecoder
3
+ from collections import Counter, OrderedDict
4
+ from functools import lru_cache
5
+ from typing import Dict, List, Optional, Tuple
6
+
7
+ from .assets import FREQUENCIES
8
+ from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
9
+ from .md import is_suspiciously_successive_range
10
+ from .models import CoherenceMatches
11
+ from .utils import (
12
+ is_accentuated,
13
+ is_latin,
14
+ is_multi_byte_encoding,
15
+ is_unicode_range_secondary,
16
+ unicode_range,
17
+ )
18
+
19
+
20
def encoding_unicode_range(iana_name: str) -> List[str]:
    """
    Return associated unicode ranges in a single byte code page.
    Only primary (non secondary) ranges covering at least 15 % of the
    decodable upper byte values (0x40-0xFE) are reported, sorted by name.
    """
    if is_multi_byte_encoding(iana_name):
        raise IOError("Function not supported on multi-byte code page")

    decoder = importlib.import_module("encodings.{}".format(iana_name)).IncrementalDecoder  # type: ignore

    p = decoder(errors="ignore")  # type: IncrementalDecoder
    seen_ranges = {}  # type: Dict[str, int]
    character_count = 0  # type: int

    for code_point in range(0x40, 0xFF):
        symbol = p.decode(bytes([code_point]))  # type: str

        if not symbol:
            continue

        symbol_range = unicode_range(symbol)  # type: Optional[str]

        if symbol_range is None:
            continue

        # Secondary ranges (combining marks, punctuation blocks, ...)
        # do not identify a script; skip them entirely.
        if is_unicode_range_secondary(symbol_range) is False:
            seen_ranges[symbol_range] = seen_ranges.get(symbol_range, 0) + 1
            character_count += 1

    return sorted(
        symbol_range
        for symbol_range, occurrences in seen_ranges.items()
        if occurrences / character_count >= 0.15
    )
55
+
56
+
57
def unicode_range_languages(primary_range: str) -> List[str]:
    """
    Return inferred languages used with a unicode range: every language
    whose frequency table contains at least one character of that range.
    """
    return [
        language
        for language, characters in FREQUENCIES.items()
        if any(unicode_range(character) == primary_range for character in characters)
    ]
70
+
71
+
72
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
    """
    Single-byte encoding language association. Some code page are heavily
    linked to particular language(s); this function does the
    correspondence by looking at the first non Latin unicode range the
    code page can produce. Falls back to the generic "Latin Based" tag.
    """
    primary_range = next(
        (
            unicode_range_name
            for unicode_range_name in encoding_unicode_range(iana_name)
            if "Latin" not in unicode_range_name
        ),
        None,
    )  # type: Optional[str]

    if primary_range is None:
        return ["Latin Based"]

    return unicode_range_languages(primary_range)
90
+
91
+
92
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
    """
    Multi-byte encoding language association, derived from well known
    code page naming conventions (Shift-JIS family, GB family, ...).
    """
    if iana_name == "cp932" or iana_name.startswith(
        ("shift_", "iso2022_jp", "euc_j")
    ):
        return ["Japanese"]

    if iana_name in ZH_NAMES or iana_name.startswith("gb"):
        return ["Chinese", "Classical Chinese"]

    if iana_name in KO_NAMES or iana_name.startswith("iso2022_kr"):
        return ["Korean"]

    return []
111
+
112
+
113
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
    """
    Determine main aspects from a supported language: whether its
    frequency table contains accentuated characters and whether it is
    pure Latin. Returns (have_accents, pure_latin).
    """
    have_accents = any(
        is_accentuated(character) for character in FREQUENCIES[language]
    )  # type: bool
    pure_latin = all(
        is_latin(character) for character in FREQUENCIES[language]
    )  # type: bool

    return have_accents, pure_latin
128
+
129
+
130
def alphabet_languages(
    characters: List[str], ignore_non_latin: bool = False
) -> List[str]:
    """
    Return the languages plausibly written with the given characters,
    ordered from the best match to the worst (>= 20 % table coverage).
    """
    candidates = []  # type: List[Tuple[str, float]]

    source_have_accents = any(is_accentuated(character) for character in characters)

    for language, language_characters in FREQUENCIES.items():
        target_have_accents, target_pure_latin = get_target_features(language)

        # Caller may restrict the search to Latin-script languages.
        if ignore_non_latin and target_pure_latin is False:
            continue

        # An accent-free language cannot have produced accentuated input.
        if target_have_accents is False and source_have_accents:
            continue

        character_count = len(language_characters)  # type: int
        character_match_count = sum(
            1 for c in language_characters if c in characters
        )  # type: int

        ratio = character_match_count / character_count  # type: float

        if ratio >= 0.2:
            candidates.append((language, ratio))

    candidates.sort(key=lambda entry: entry[1], reverse=True)

    return [language for language, ratio in candidates]
164
+
165
+
166
def characters_popularity_compare(
    language: str, ordered_characters: List[str]
) -> float:
    """
    Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language.
    The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
    Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.)

    :param language: Key into FREQUENCIES; raises ValueError if unsupported.
    :param ordered_characters: Input characters sorted from most to least frequent.
    """
    if language not in FREQUENCIES:
        raise ValueError("{} not available".format(language))

    character_approved_count = 0  # type: int

    for character in ordered_characters:
        if character not in FREQUENCIES[language]:
            continue

        # Split the reference frequency table around this character:
        # everything more frequent than it, and it plus everything rarer.
        characters_before_source = FREQUENCIES[language][
            0 : FREQUENCIES[language].index(character)
        ]  # type: List[str]
        characters_after_source = FREQUENCIES[language][
            FREQUENCIES[language].index(character) :
        ]  # type: List[str]

        # Same split on the observed ordering.
        characters_before = ordered_characters[
            0 : ordered_characters.index(character)
        ]  # type: List[str]
        characters_after = ordered_characters[
            ordered_characters.index(character) :
        ]  # type: List[str]

        # How many of the reference "more frequent" characters also appear
        # before this character in the observed ordering (and likewise after).
        before_match_count = [
            e in characters_before for e in characters_before_source
        ].count(
            True
        )  # type: int
        after_match_count = [
            e in characters_after for e in characters_after_source
        ].count(
            True
        )  # type: int

        # Boundary cases: the character is the most (or least) frequent one
        # in the reference table, so one window is empty. The count is
        # bounded by the window length, hence <= 4 always holds here and the
        # character is approved unconditionally.
        if len(characters_before_source) == 0 and before_match_count <= 4:
            character_approved_count += 1
            continue

        if len(characters_after_source) == 0 and after_match_count <= 4:
            character_approved_count += 1
            continue

        # Loose agreement: at least 40 % of either window must line up.
        if (
            before_match_count / len(characters_before_source) >= 0.4
            or after_match_count / len(characters_after_source) >= 0.4
        ):
            character_approved_count += 1
            continue

    return character_approved_count / len(ordered_characters)
224
+
225
+
226
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
    """
    Given a decoded text sequence, return a list of str: one "layer" of
    lowercase alphabetic characters per unicode range / alphabet.
    e.g. text mixing Latin and Hebrew yields two entries, one per script.
    """
    layers = OrderedDict()  # type: Dict[str, str]

    for character in decoded_sequence:
        if not character.isalpha():
            continue

        character_range = unicode_range(character)  # type: Optional[str]

        if character_range is None:
            continue

        # Attach the character to the first existing layer whose range is
        # not a suspicious neighbour of this character's range.
        target_range = None  # type: Optional[str]
        for known_range in layers:
            if (
                is_suspiciously_successive_range(known_range, character_range)
                is False
            ):
                target_range = known_range
                break

        if target_range is None:
            target_range = character_range

        layers[target_range] = layers.get(target_range, "") + character.lower()

    return list(layers.values())
263
+
264
+
265
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
    """
    Merge several coherence_ratio outputs into one: for each language the
    merged score is the mean of every observed ratio, rounded to four
    decimals, sorted from the highest to the lowest score.
    """
    per_language_ratios = OrderedDict()  # type: Dict[str, List[float]]

    for result in results:
        for language, ratio in result:
            per_language_ratios.setdefault(language, []).append(ratio)

    merge = [
        (language, round(sum(ratios) / len(ratios), 4))
        for language, ratios in per_language_ratios.items()
    ]

    return sorted(merge, key=lambda x: x[1], reverse=True)
291
+
292
+
293
# Cached: identical chunks are frequently re-submitted during detection.
@lru_cache(maxsize=2048)
def coherence_ratio(
    decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
    """
    Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
    A layer = Character extraction by alphabets/ranges.

    :param threshold: Minimum ratio for a (language, ratio) pair to be kept.
    :param lg_inclusion: Optional comma-separated list restricting the languages tested.
    """

    results = []  # type: List[Tuple[str, float]]
    ignore_non_latin = False  # type: bool

    sufficient_match_count = 0  # type: int

    lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
    # "Latin Based" is a meta tag, not a real language: consume it as a flag.
    if "Latin Based" in lg_inclusion_list:
        ignore_non_latin = True
        lg_inclusion_list.remove("Latin Based")

    for layer in alpha_unicode_split(decoded_sequence):
        sequence_frequencies = Counter(layer)  # type: Counter
        most_common = sequence_frequencies.most_common()

        character_count = sum(o for c, o in most_common)  # type: int

        # Too little material in this layer to say anything meaningful.
        if character_count <= TOO_SMALL_SEQUENCE:
            continue

        popular_character_ordered = [c for c, o in most_common]  # type: List[str]

        # Use the caller-provided language list when given; otherwise infer
        # candidate languages from the layer's own alphabet.
        for language in lg_inclusion_list or alphabet_languages(
            popular_character_ordered, ignore_non_latin
        ):
            ratio = characters_popularity_compare(
                language, popular_character_ordered
            )  # type: float

            if ratio < threshold:
                continue
            elif ratio >= 0.8:
                sufficient_match_count += 1

            results.append((language, round(ratio, 4)))

            # Three strong matches overall are enough; stop scanning this
            # layer early (the counter carries over between layers).
            if sufficient_match_count >= 3:
                break

    return sorted(results, key=lambda x: x[1], reverse=True)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/legacy.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from typing import Dict, Optional, Union
3
+
4
+ from .api import from_bytes, from_fp, from_path, normalize
5
+ from .constant import CHARDET_CORRESPONDENCE
6
+ from .models import CharsetMatch, CharsetMatches
7
+
8
+
9
def detect(byte_str: bytes) -> Dict[str, Optional[Union[str, float]]]:
    """
    chardet legacy method
    Detect the encoding of the given byte string. It should be mostly backward-compatible.
    Encoding name will match Chardet own writing whenever possible. (Not on encoding name unsupported by it)
    This function is deprecated and should be used to migrate your project easily, consult the documentation for
    further information. Not planned for removal.

    :param byte_str: The byte sequence to examine.
    """
    if not isinstance(byte_str, (bytearray, bytes)):
        raise TypeError(  # pragma: nocover
            "Expected object of type bytes or bytearray, got: "
            "{0}".format(type(byte_str))
        )

    if isinstance(byte_str, bytearray):
        byte_str = bytes(byte_str)

    best_guess = from_bytes(byte_str).best()

    if best_guess is None:
        # Nothing fitted: mirror chardet's "unknown" shaped answer.
        return {"encoding": None, "language": "", "confidence": None}

    encoding = best_guess.encoding

    # Note: CharsetNormalizer does not return 'UTF-8-SIG' as the sig get stripped in the detection/normalization process
    # but chardet does return 'utf-8-sig' and it is a valid codec name.
    if encoding == "utf_8" and best_guess.bom:
        encoding += "_sig"

    return {
        "encoding": CHARDET_CORRESPONDENCE.get(encoding, encoding),
        "language": best_guess.language if best_guess.language != "Unknown" else "",
        "confidence": 1.0 - best_guess.chaos,
    }
46
+
47
+
48
class CharsetNormalizerMatch(CharsetMatch):
    # Backward-compatibility alias of CharsetMatch (pre-2.0 public name).
    pass
50
+
51
+
52
class CharsetNormalizerMatches(CharsetMatches):
    """
    Deprecated 1.x facade kept for backward compatibility. Each static
    method emits a DeprecationWarning then forwards to the module-level
    function of the same name.
    """

    @staticmethod
    def from_fp(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return from_fp(*args, **kwargs)  # pragma: nocover

    @staticmethod
    def from_bytes(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return from_bytes(*args, **kwargs)  # pragma: nocover

    @staticmethod
    def from_path(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return from_path(*args, **kwargs)  # pragma: nocover

    @staticmethod
    def normalize(*args, **kwargs):  # type: ignore
        warnings.warn(  # pragma: nocover
            "staticmethod from_fp, from_bytes, from_path and normalize are deprecated "
            "and scheduled to be removed in 3.0",
            DeprecationWarning,
        )
        return normalize(*args, **kwargs)  # pragma: nocover
88
+
89
+
90
class CharsetDetector(CharsetNormalizerMatches):
    # Historical alias kept for backward compatibility with 1.x imports.
    pass
92
+
93
+
94
class CharsetDoctor(CharsetNormalizerMatches):
    # Historical alias kept for backward compatibility with 1.x imports.
    pass
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/models.py ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from collections import Counter
3
+ from encodings.aliases import aliases
4
+ from hashlib import sha256
5
+ from json import dumps
6
+ from re import sub
7
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
8
+
9
+ from .constant import NOT_PRINTABLE_PATTERN, TOO_BIG_SEQUENCE
10
+ from .md import mess_ratio
11
+ from .utils import iana_name, is_multi_byte_encoding, unicode_range
12
+
13
+
14
class CharsetMatch:
    """
    A single detection result: a byte payload paired with one guessed
    encoding plus the chaos/coherence measurements backing that guess.
    Instances sort from most to least probable via __lt__.
    """

    def __init__(
        self,
        payload: bytes,
        guessed_encoding: str,
        mean_mess_ratio: float,
        has_sig_or_bom: bool,
        languages: "CoherenceMatches",
        decoded_payload: Optional[str] = None,
    ):
        self._payload = payload  # type: bytes

        self._encoding = guessed_encoding  # type: str
        self._mean_mess_ratio = mean_mess_ratio  # type: float
        self._languages = languages  # type: CoherenceMatches
        self._has_sig_or_bom = has_sig_or_bom  # type: bool
        self._unicode_ranges = None  # type: Optional[List[str]]

        # Alternative matches that decode to the exact same text (submatches).
        self._leaves = []  # type: List[CharsetMatch]
        self._mean_coherence_ratio = 0.0  # type: float

        # Cache used by output()/fingerprint for the re-encoded payload.
        self._output_payload = None  # type: Optional[bytes]
        self._output_encoding = None  # type: Optional[str]

        # Lazily-decoded text; may be pre-filled by the caller.
        self._string = decoded_payload  # type: Optional[str]

    def __eq__(self, other: object) -> bool:
        # NOTE(review): raises TypeError rather than returning
        # NotImplemented, so comparing against None or other types fails
        # loudly — confirm callers never rely on `match == None`.
        if not isinstance(other, CharsetMatch):
            raise TypeError(
                "__eq__ cannot be invoked on {} and {}.".format(
                    str(other.__class__), str(self.__class__)
                )
            )
        return self.encoding == other.encoding and self.fingerprint == other.fingerprint

    def __lt__(self, other: object) -> bool:
        """
        Implemented to make sorted available upon CharsetMatches items.
        """
        if not isinstance(other, CharsetMatch):
            raise ValueError

        chaos_difference = abs(self.chaos - other.chaos)  # type: float
        coherence_difference = abs(self.coherence - other.coherence)  # type: float

        # Below 1% chaos difference --> fall back to coherence comparison
        if chaos_difference < 0.01 and coherence_difference > 0.02:
            # When having a tough decision, use the result that decoded as many multi-byte as possible.
            if chaos_difference == 0.0 and self.coherence == other.coherence:
                return self.multi_byte_usage > other.multi_byte_usage
            return self.coherence > other.coherence

        return self.chaos < other.chaos

    @property
    def multi_byte_usage(self) -> float:
        # Fraction of raw bytes "absorbed" by multi-byte sequences:
        # 0.0 for pure single-byte content, approaching 1.0 otherwise.
        return 1.0 - len(str(self)) / len(self.raw)

    @property
    def chaos_secondary_pass(self) -> float:
        """
        Check once again chaos in decoded text, except this time, with full content.
        Use with caution, this can be very slow.
        Notice: Will be removed in 3.0
        """
        warnings.warn(
            "chaos_secondary_pass is deprecated and will be removed in 3.0",
            DeprecationWarning,
        )
        return mess_ratio(str(self), 1.0)

    @property
    def coherence_non_latin(self) -> float:
        """
        Coherence ratio on the first non-latin language detected if ANY.
        Notice: Will be removed in 3.0
        """
        warnings.warn(
            "coherence_non_latin is deprecated and will be removed in 3.0",
            DeprecationWarning,
        )
        return 0.0

    @property
    def w_counter(self) -> Counter:
        """
        Word counter instance on decoded text.
        Notice: Will be removed in 3.0
        """
        warnings.warn(
            "w_counter is deprecated and will be removed in 3.0", DeprecationWarning
        )

        string_printable_only = sub(NOT_PRINTABLE_PATTERN, " ", str(self).lower())

        return Counter(string_printable_only.split())

    def __str__(self) -> str:
        # Lazy Str Loading: decode only on first access, then memoize.
        if self._string is None:
            self._string = str(self._payload, self._encoding, "strict")
        return self._string

    def __repr__(self) -> str:
        return "<CharsetMatch '{}' bytes({})>".format(self.encoding, self.fingerprint)

    def add_submatch(self, other: "CharsetMatch") -> None:
        """
        Register another CharsetMatch producing the same decoded output
        as an alternative (leaf) of this one.
        """
        if not isinstance(other, CharsetMatch) or other == self:
            raise ValueError(
                "Unable to add instance <{}> as a submatch of a CharsetMatch".format(
                    other.__class__
                )
            )

        other._string = None  # Unload RAM usage; dirty trick.
        self._leaves.append(other)

    @property
    def encoding(self) -> str:
        return self._encoding

    @property
    def encoding_aliases(self) -> List[str]:
        """
        Encoding name are known by many name, using this could help when searching for IBM855 when it's listed as CP855.
        """
        also_known_as = []  # type: List[str]
        for u, p in aliases.items():
            if self.encoding == u:
                also_known_as.append(p)
            elif self.encoding == p:
                also_known_as.append(u)
        return also_known_as

    @property
    def bom(self) -> bool:
        return self._has_sig_or_bom

    @property
    def byte_order_mark(self) -> bool:
        # Verbose alias of `bom`.
        return self._has_sig_or_bom

    @property
    def languages(self) -> List[str]:
        """
        Return the complete list of possible languages found in decoded sequence.
        Usually not really useful. Returned list may be empty even if 'language' property return something != 'Unknown'.
        """
        return [e[0] for e in self._languages]

    @property
    def language(self) -> str:
        """
        Most probable language found in decoded sequence. If none were detected or inferred, the property will return
        "Unknown".
        """
        if not self._languages:
            # Trying to infer the language based on the given encoding
            # Its either English or we should not pronounce ourselves in certain cases.
            if "ascii" in self.could_be_from_charset:
                return "English"

            # doing it there to avoid circular import
            from charset_normalizer.cd import encoding_languages, mb_encoding_languages

            languages = (
                mb_encoding_languages(self.encoding)
                if is_multi_byte_encoding(self.encoding)
                else encoding_languages(self.encoding)
            )

            if len(languages) == 0 or "Latin Based" in languages:
                return "Unknown"

            return languages[0]

        return self._languages[0][0]

    @property
    def chaos(self) -> float:
        return self._mean_mess_ratio

    @property
    def coherence(self) -> float:
        # Best (highest) language coherence score, 0.0 when none detected.
        if not self._languages:
            return 0.0
        return self._languages[0][1]

    @property
    def percent_chaos(self) -> float:
        return round(self.chaos * 100, ndigits=3)

    @property
    def percent_coherence(self) -> float:
        return round(self.coherence * 100, ndigits=3)

    @property
    def raw(self) -> bytes:
        """
        Original untouched bytes.
        """
        return self._payload

    @property
    def submatch(self) -> List["CharsetMatch"]:
        return self._leaves

    @property
    def has_submatch(self) -> bool:
        return len(self._leaves) > 0

    @property
    def alphabets(self) -> List[str]:
        # Memoized: computing ranges walks the whole decoded string.
        if self._unicode_ranges is not None:
            return self._unicode_ranges
        # list detected ranges
        detected_ranges = [
            unicode_range(char) for char in str(self)
        ]  # type: List[Optional[str]]
        # filter and sort
        self._unicode_ranges = sorted(list({r for r in detected_ranges if r}))
        return self._unicode_ranges

    @property
    def could_be_from_charset(self) -> List[str]:
        """
        The complete list of encoding that output the exact SAME str result and therefore could be the originating
        encoding.
        This list does include the encoding available in property 'encoding'.
        """
        return [self._encoding] + [m.encoding for m in self._leaves]

    def first(self) -> "CharsetMatch":
        """
        Kept for BC reasons. Will be removed in 3.0.
        """
        return self

    def best(self) -> "CharsetMatch":
        """
        Kept for BC reasons. Will be removed in 3.0.
        """
        return self

    def output(self, encoding: str = "utf_8") -> bytes:
        """
        Method to get re-encoded bytes payload using given target encoding. Default to UTF-8.
        Any errors will be simply ignored by the encoder NOT replaced.
        """
        if self._output_encoding is None or self._output_encoding != encoding:
            self._output_encoding = encoding
            self._output_payload = str(self).encode(encoding, "replace")

        return self._output_payload  # type: ignore

    @property
    def fingerprint(self) -> str:
        """
        Retrieve the unique SHA256 computed using the transformed (re-encoded) payload. Not the original one.
        """
        return sha256(self.output()).hexdigest()
275
+
276
+
277
class CharsetMatches:
    """
    Container with every CharsetMatch items ordered by default from most probable to the less one.
    Act like a list(iterable) but does not implements all related methods.
    """

    def __init__(self, results: Optional[List[CharsetMatch]] = None):
        self._results = sorted(results) if results else []  # type: List[CharsetMatch]

    def __iter__(self) -> Iterator[CharsetMatch]:
        yield from self._results

    def __getitem__(self, item: Union[int, str]) -> CharsetMatch:
        """
        Retrieve a single item either by its position or encoding name (alias may be used here).
        Raise KeyError upon invalid index or encoding not present in results.
        """
        if isinstance(item, int):
            return self._results[item]
        if isinstance(item, str):
            # Normalize the requested name so aliases (e.g. CP855/IBM855) match.
            item = iana_name(item, False)
            for result in self._results:
                if item in result.could_be_from_charset:
                    return result
        raise KeyError

    def __len__(self) -> int:
        return len(self._results)

    def __bool__(self) -> bool:
        return len(self._results) > 0

    def append(self, item: CharsetMatch) -> None:
        """
        Insert a single match. Will be inserted accordingly to preserve sort.
        Can be inserted as a submatch.
        """
        if not isinstance(item, CharsetMatch):
            raise ValueError(
                "Cannot append instance '{}' to CharsetMatches".format(
                    str(item.__class__)
                )
            )
        # We should disable the submatch factoring when the input file is too heavy (conserve RAM usage)
        if len(item.raw) <= TOO_BIG_SEQUENCE:
            for match in self._results:
                # Identical decoded output + identical chaos -> submatch, not a new entry.
                if match.fingerprint == item.fingerprint and match.chaos == item.chaos:
                    match.add_submatch(item)
                    return
        self._results.append(item)
        self._results = sorted(self._results)

    def best(self) -> Optional["CharsetMatch"]:
        """
        Simply return the first match. Strict equivalent to matches[0].
        """
        if not self._results:
            return None
        return self._results[0]

    def first(self) -> Optional["CharsetMatch"]:
        """
        Redundant method, call the method best(). Kept for BC reasons.
        """
        return self.best()
342
+
343
+
344
# Shared type aliases: a coherence result is a (language, ratio) pair,
# and detection routines exchange lists of them.
CoherenceMatch = Tuple[str, float]
CoherenceMatches = List[CoherenceMatch]
346
+
347
+
348
class CliDetectionResult:
    """
    Plain data holder describing one CLI detection outcome for a file;
    serializable to JSON through to_json().
    """

    def __init__(
        self,
        path: str,
        encoding: Optional[str],
        encoding_aliases: List[str],
        alternative_encodings: List[str],
        language: str,
        alphabets: List[str],
        has_sig_or_bom: bool,
        chaos: float,
        coherence: float,
        unicode_path: Optional[str],
        is_preferred: bool,
    ):
        self.path = path  # type: str
        self.unicode_path = unicode_path  # type: Optional[str]
        self.encoding = encoding  # type: Optional[str]
        self.encoding_aliases = encoding_aliases  # type: List[str]
        self.alternative_encodings = alternative_encodings  # type: List[str]
        self.language = language  # type: str
        self.alphabets = alphabets  # type: List[str]
        self.has_sig_or_bom = has_sig_or_bom  # type: bool
        self.chaos = chaos  # type: float
        self.coherence = coherence  # type: float
        self.is_preferred = is_preferred  # type: bool

    # Overridden as a property so json.dumps(self.__dict__) emits the
    # fields in a stable, curated order.
    @property
    def __dict__(self) -> Dict[str, Any]:  # type: ignore
        return {
            "path": self.path,
            "encoding": self.encoding,
            "encoding_aliases": self.encoding_aliases,
            "alternative_encodings": self.alternative_encodings,
            "language": self.language,
            "alphabets": self.alphabets,
            "has_sig_or_bom": self.has_sig_or_bom,
            "chaos": self.chaos,
            "coherence": self.coherence,
            "unicode_path": self.unicode_path,
            "is_preferred": self.is_preferred,
        }

    def to_json(self) -> str:
        """Serialize this result to a pretty-printed, ASCII-safe JSON string."""
        return dumps(self.__dict__, ensure_ascii=True, indent=4)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/utils.py ADDED
@@ -0,0 +1,342 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ try:
2
+ import unicodedata2 as unicodedata
3
+ except ImportError:
4
+ import unicodedata # type: ignore[no-redef]
5
+
6
+ import importlib
7
+ import logging
8
+ from codecs import IncrementalDecoder
9
+ from encodings.aliases import aliases
10
+ from functools import lru_cache
11
+ from re import findall
12
+ from typing import List, Optional, Set, Tuple, Union
13
+
14
+ from _multibytecodec import MultibyteIncrementalDecoder # type: ignore
15
+
16
+ from .constant import (
17
+ ENCODING_MARKS,
18
+ IANA_SUPPORTED_SIMILAR,
19
+ RE_POSSIBLE_ENCODING_INDICATION,
20
+ UNICODE_RANGES_COMBINED,
21
+ UNICODE_SECONDARY_RANGE_KEYWORD,
22
+ UTF8_MAXIMAL_ALLOCATION,
23
+ )
24
+
25
+
26
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_accentuated(character: str) -> bool:
    """
    Tell if a character carries one of the common diacritics, based on
    its official unicode name.
    """
    try:
        description = unicodedata.name(character)  # type: str
    except ValueError:
        # Unassigned code points have no unicode name.
        return False
    return any(
        marker in description
        for marker in (
            "WITH GRAVE",
            "WITH ACUTE",
            "WITH CEDILLA",
            "WITH DIAERESIS",
            "WITH CIRCUMFLEX",
            "WITH TILDE",
        )
    )
40
+
41
+
42
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def remove_accent(character: str) -> str:
    """
    Return the base character of a precomposed letter (e.g. 'é' -> 'e');
    characters without a canonical decomposition come back untouched.
    """
    decomposed = unicodedata.decomposition(character)  # type: str
    if not decomposed:
        return character

    # The first entry of the decomposition is the base code point (hex).
    base_code_point = decomposed.split(" ")[0]
    return chr(int(base_code_point, 16))
51
+
52
+
53
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def unicode_range(character: str) -> Optional[str]:
    """
    Retrieve the Unicode range official name from a single character,
    or None when the code point falls in no known range.
    """
    code_point = ord(character)  # type: int

    return next(
        (
            range_name
            for range_name, ord_range in UNICODE_RANGES_COMBINED.items()
            if code_point in ord_range
        ),
        None,
    )
65
+
66
+
67
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_latin(character: str) -> bool:
    """
    Tell if a character belongs to a Latin script, per its unicode name.
    """
    try:
        return "LATIN" in unicodedata.name(character)
    except ValueError:
        # Code point without an official name (unassigned / control).
        return False
74
+
75
+
76
def is_ascii(character: str) -> bool:
    """Return True when the character can be encoded as plain ASCII."""
    try:
        character.encode("ascii")
        return True
    except UnicodeEncodeError:
        return False
82
+
83
+
84
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_punctuation(character: str) -> bool:
    """True when the character is punctuation (category P*) or lives in a Punctuation range."""
    if "P" in unicodedata.category(character):
        return True

    character_range = unicode_range(character)  # type: Optional[str]
    return character_range is not None and "Punctuation" in character_range
97
+
98
+
99
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_symbol(character: str) -> bool:
    """True when the character is a symbol/number (category S* or N*) or lives in a 'Forms' range."""
    category = unicodedata.category(character)  # type: str
    if "S" in category or "N" in category:
        return True

    character_range = unicode_range(character)  # type: Optional[str]
    return character_range is not None and "Forms" in character_range
112
+
113
+
114
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_emoticon(character: str) -> bool:
    """True when the character belongs to a Unicode 'Emoticons' range."""
    character_range = unicode_range(character)  # type: Optional[str]
    return character_range is not None and "Emoticons" in character_range
122
+
123
+
124
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_separator(character: str) -> bool:
    """True for whitespace, a small set of ad-hoc separator characters, or category Z*."""
    if character.isspace():
        return True
    if character in ("|", "+", ",", ";", "<", ">"):
        return True
    return "Z" in unicodedata.category(character)
132
+
133
+
134
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_case_variable(character: str) -> bool:
    """True when the character has a case (islower() and isupper() disagree)."""
    lower, upper = character.islower(), character.isupper()
    return lower != upper
137
+
138
+
139
def is_private_use_only(character: str) -> bool:
    """True for characters in a Private Use Area (Unicode general category 'Co')."""
    return unicodedata.category(character) == "Co"
143
+
144
+
145
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_cjk(character: str) -> bool:
    """True when the character's Unicode name mentions CJK."""
    try:
        return "CJK" in unicodedata.name(character)
    except ValueError:
        # unnamed code point
        return False
153
+
154
+
155
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hiragana(character: str) -> bool:
    """True when the character's Unicode name mentions HIRAGANA."""
    try:
        return "HIRAGANA" in unicodedata.name(character)
    except ValueError:
        return False
163
+
164
+
165
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_katakana(character: str) -> bool:
    """True when the character's Unicode name mentions KATAKANA."""
    try:
        return "KATAKANA" in unicodedata.name(character)
    except ValueError:
        return False
173
+
174
+
175
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_hangul(character: str) -> bool:
    """True when the character's Unicode name mentions HANGUL."""
    try:
        return "HANGUL" in unicodedata.name(character)
    except ValueError:
        return False
183
+
184
+
185
@lru_cache(maxsize=UTF8_MAXIMAL_ALLOCATION)
def is_thai(character: str) -> bool:
    """True when the character's Unicode name mentions THAI."""
    try:
        return "THAI" in unicodedata.name(character)
    except ValueError:
        return False
193
+
194
+
195
@lru_cache(maxsize=len(UNICODE_RANGES_COMBINED))
def is_unicode_range_secondary(range_name: str) -> bool:
    """True when the range name contains one of the 'secondary' range keywords."""
    for keyword in UNICODE_SECONDARY_RANGE_KEYWORD:
        if keyword in range_name:
            return True
    return False
198
+
199
+
200
def any_specified_encoding(sequence: bytes, search_zone: int = 4096) -> Optional[str]:
    """
    Extract using ASCII-only decoder any specified encoding in the first n-bytes.
    """
    if not isinstance(sequence, bytes):
        raise TypeError

    # only look at the beginning of the payload, decoded leniently as ASCII
    inspection_zone = sequence[: min(len(sequence), search_zone)].decode(
        "ascii", errors="ignore"
    )

    for candidate in findall(RE_POSSIBLE_ENCODING_INDICATION, inspection_zone):
        candidate = candidate.lower().replace("-", "_")

        # resolve either an alias or a canonical codec name to the canonical one
        for encoding_alias, encoding_iana in aliases.items():
            if candidate in (encoding_alias, encoding_iana):
                return encoding_iana

    return None
227
+
228
+
229
@lru_cache(maxsize=128)
def is_multi_byte_encoding(name: str) -> bool:
    """
    Verify is a specific encoding is a multi byte one based on it IANA name
    """
    # Unicode transformation formats are multi byte by definition.
    known_multi_byte = {
        "utf_8",
        "utf_8_sig",
        "utf_16",
        "utf_16_be",
        "utf_16_le",
        "utf_32",
        "utf_32_le",
        "utf_32_be",
        "utf_7",
    }
    if name in known_multi_byte:
        return True

    # Otherwise ask the codec itself: multi byte codecs derive their
    # incremental decoder from _multibytecodec.
    decoder = importlib.import_module("encodings.{}".format(name)).IncrementalDecoder  # type: ignore
    return issubclass(decoder, MultibyteIncrementalDecoder)
248
+
249
+
250
def identify_sig_or_bom(sequence: bytes) -> Tuple[Optional[str], bytes]:
    """
    Identify and extract SIG/BOM in given sequence.
    """
    for iana_encoding, candidate_marks in ENCODING_MARKS.items():
        # a codec may declare one mark or several alternatives
        if isinstance(candidate_marks, bytes):
            candidate_marks = [candidate_marks]

        for mark in candidate_marks:
            if sequence.startswith(mark):
                return iana_encoding, mark

    # no known signature found
    return None, b""
266
+
267
+
268
def should_strip_sig_or_bom(iana_encoding: str) -> bool:
    """True unless the codec consumes the BOM itself (utf_16 / utf_32)."""
    keeps_mark = iana_encoding in {"utf_16", "utf_32"}
    return not keeps_mark
270
+
271
+
272
def iana_name(cp_name: str, strict: bool = True) -> str:
    """Normalize a codec name to its canonical IANA form via encodings.aliases.

    Raises:
        ValueError: when strict is True and the name cannot be resolved.
    """
    normalized = cp_name.lower().replace("-", "_")

    for encoding_alias, encoding_iana in aliases.items():
        if normalized == encoding_alias or normalized == encoding_iana:
            return encoding_iana

    if strict:
        raise ValueError("Unable to retrieve IANA for '{}'".format(normalized))

    # best effort: hand back the normalized input
    return normalized
283
+
284
+
285
def range_scan(decoded_sequence: str) -> List[str]:
    """Return the distinct Unicode range names touched by the decoded text."""
    discovered = set()  # type: Set[str]

    for character in decoded_sequence:
        found_range = unicode_range(character)  # type: Optional[str]
        if found_range is not None:
            discovered.add(found_range)

    return list(discovered)
297
+
298
+
299
def cp_similarity(iana_name_a: str, iana_name_b: str) -> float:
    """Return the fraction of single byte values both code pages decode identically."""

    # Similarity is only defined between single-byte code pages.
    if is_multi_byte_encoding(iana_name_a) or is_multi_byte_encoding(iana_name_b):
        return 0.0

    decoder_a = importlib.import_module("encodings.{}".format(iana_name_a)).IncrementalDecoder  # type: ignore
    decoder_b = importlib.import_module("encodings.{}".format(iana_name_b)).IncrementalDecoder  # type: ignore

    # errors="ignore": undecodable bytes yield "" for both sides
    id_a = decoder_a(errors="ignore")  # type: IncrementalDecoder
    id_b = decoder_b(errors="ignore")  # type: IncrementalDecoder

    character_match_count = 0  # type: int

    # NOTE(review): range(255) probes byte values 0..254 (255 values) while the
    # ratio divides by 254 — looks like an off-by-one; confirm intended scale
    # before changing, since IANA_SUPPORTED_SIMILAR was generated with this code.
    for i in range(255):
        to_be_decoded = bytes([i])  # type: bytes
        if id_a.decode(to_be_decoded) == id_b.decode(to_be_decoded):
            character_match_count += 1

    return character_match_count / 254
318
+
319
+
320
def is_cp_similar(iana_name_a: str, iana_name_b: str) -> bool:
    """
    Determine if two code page are at least 80% similar. IANA_SUPPORTED_SIMILAR dict was generated using
    the function cp_similarity.
    """
    similar_to_a = IANA_SUPPORTED_SIMILAR.get(iana_name_a)
    return similar_to_a is not None and iana_name_b in similar_to_a
329
+
330
+
331
def set_logging_handler(
    name: str = "charset_normalizer",
    level: int = logging.INFO,
    format_string: str = "%(asctime)s | %(levelname)s | %(message)s",
) -> None:
    """Attach a StreamHandler with the given format to the named logger and set its level."""
    target_logger = logging.getLogger(name)
    target_logger.setLevel(level)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(format_string))
    target_logger.addHandler(stream_handler)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/charset_normalizer/version.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
"""
Expose version
"""

# Single source of truth for the package version string.
__version__ = "2.0.12"
# Version components as strings, e.g. ["2", "0", "12"].
VERSION = __version__.split(".")
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/deprecate/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Copyright (C) 2020-2021 Jiri Borovec <...>
"""
import os

# Package metadata.
__version__ = "0.3.1"
__docs__ = "Deprecation tooling"
__author__ = "Jiri Borovec"
__author_email__ = "jiri.borovec@fel.cvut.cz"
__homepage__ = "https://borda.github.io/pyDeprecate"
__source_code__ = "https://github.com/Borda/pyDeprecate"
__license__ = 'MIT'

# Absolute paths to the installed package directory and its parent project dir.
_PATH_PACKAGE = os.path.realpath(os.path.dirname(__file__))
_PATH_PROJECT = os.path.dirname(_PATH_PACKAGE)

# Public API re-exports; deliberately placed after the metadata above (E402).
from deprecate.deprecation import deprecated  # noqa: F401 E402
from deprecate.utils import void  # noqa: F401 E402
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/deprecate/deprecation.py ADDED
@@ -0,0 +1,306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Copyright (C) 2020-2021 Jiri Borovec <...>
3
+ """
4
+ import inspect
5
+ from functools import partial, wraps
6
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
7
+ from warnings import warn
8
+
9
+ #: Default template warning message fot redirecting callable
10
+ TEMPLATE_WARNING_CALLABLE = (
11
+ "The `%(source_name)s` was deprecated since v%(deprecated_in)s in favor of `%(target_path)s`."
12
+ " It will be removed in v%(remove_in)s."
13
+ )
14
+ #: Default template warning message for chnaging argument mapping
15
+ TEMPLATE_WARNING_ARGUMENTS = (
16
+ "The `%(source_name)s` uses deprecated arguments: %(argument_map)s."
17
+ " They were deprecated since v%(deprecated_in)s and will be removed in v%(remove_in)s."
18
+ )
19
+ #: Tempalte for mapping from old to new examples
20
+ TEMPLATE_ARGUMENT_MAPPING = "`%(old_arg)s` -> `%(new_arg)s`"
21
+ #: Default template warning message for no target func/method
22
+ TEMPLATE_WARNING_NO_TARGET = (
23
+ "The `%(source_name)s` was deprecated since v%(deprecated_in)s."
24
+ " It will be removed in v%(remove_in)s."
25
+ )
26
+
27
+ deprecation_warning = partial(warn, category=DeprecationWarning)
28
+
29
+
30
def get_func_arguments_types_defaults(func: Callable) -> List[Tuple[str, Tuple, Any]]:
    """
    Parse function arguments, types and default values.

    Args:
        func: a function to be examined

    Returns:
        sequence of (name, annotation, default) triples, one per parameter

    Example:
        >>> get_func_arguments_types_defaults(get_func_arguments_types_defaults)
        [('func', typing.Callable, <class 'inspect._empty'>)]

    """
    parameters = inspect.signature(func).parameters
    return [
        (name, parameters[name].annotation, parameters[name].default)
        for name in parameters
    ]
52
+
53
+
54
def _update_kwargs_with_args(func: Callable, fn_args: tuple, fn_kwargs: dict) -> dict:
    """Fold positional arguments into the keyword dict, keyed by parameter position.

    Args:
        func: particular function
        fn_args: function position arguments
        fn_kwargs: function keyword arguments

    Returns:
        extended dictionary with all args as keyword arguments

    """
    if not fn_args:
        # nothing positional to convert
        return fn_kwargs
    # parameter names in declaration order
    parameter_names = [details[0] for details in get_func_arguments_types_defaults(func)]
    # pair each positional value with its parameter name and merge in place
    fn_kwargs.update(zip(parameter_names, fn_args))
    return fn_kwargs
74
+
75
+
76
def _update_kwargs_with_defaults(func: Callable, fn_kwargs: dict) -> dict:
    """Fill in func's declared default values for any keys missing from fn_kwargs.

    Args:
        func: particular function
        fn_kwargs: function keyword arguments

    Returns:
        extended dictionary with all args as keyword arguments

    """
    defaults = {
        name: default
        for name, _, default in get_func_arguments_types_defaults(func)
        if default != inspect._empty  # type: ignore
    }
    # explicit kwargs take precedence over defaults
    return {**defaults, **fn_kwargs}
92
+
93
+
94
+ def _raise_warn(
95
+ stream: Callable,
96
+ source: Callable,
97
+ template_mgs: str,
98
+ **extras: str,
99
+ ) -> None:
100
+ """Raise deprecation warning with in given stream ...
101
+
102
+ Args:
103
+ stream: a function which takes message as the only position argument
104
+ source: function/methods which is wrapped
105
+ template_mgs: python formatted string message which has build-ins arguments
106
+ extras: string arguments used in the template message
107
+ """
108
+ source_name = source.__qualname__.split('.')[-2] if source.__name__ == "__init__" else source.__name__
109
+ source_path = f'{source.__module__}.{source_name}'
110
+ msg_args = dict(
111
+ source_name=source_name,
112
+ source_path=source_path,
113
+ **extras,
114
+ )
115
+ stream(template_mgs % msg_args)
116
+
117
+
118
def _raise_warn_callable(
    stream: Callable,
    source: Callable,
    target: Union[None, bool, Callable],
    deprecated_in: str,
    remove_in: str,
    template_mgs: Optional[str] = None,
) -> None:
    """
    Raise deprecation warning with in given stream, redirecting callables

    Args:
        stream: a function which takes message as the only position argument
        source: function/methods which is wrapped
        target: function/methods which is mapping target
        deprecated_in: set version when source is deprecated
        remove_in: set version when source will be removed
        template_mgs: python formatted string message which has build-ins arguments:

            - ``source_name`` just the functions name such as "my_source_func"
            - ``source_path`` pythonic path to the function such as "my_package.with_module.my_source_func"
            - ``target_name`` just the functions name such as "my_target_func"
            - ``target_path`` pythonic path to the function such as "any_package.with_module.my_target_func"
            - ``deprecated_in`` version passed to wrapper
            - ``remove_in`` version passed to wrapper

    """
    if callable(target):
        # redirecting to a concrete callable: name it in the message
        resolved_name = target.__name__
        resolved_path = f'{target.__module__}.{resolved_name}'
        template = template_mgs or TEMPLATE_WARNING_CALLABLE
    else:
        # plain deprecation with no replacement
        resolved_name = resolved_path = ""
        template = template_mgs or TEMPLATE_WARNING_NO_TARGET
    _raise_warn(
        stream,
        source,
        template,
        deprecated_in=deprecated_in,
        remove_in=remove_in,
        target_name=resolved_name,
        target_path=resolved_path,
    )
161
+
162
+
163
def _raise_warn_arguments(
    stream: Callable,
    source: Callable,
    arguments: Dict[str, str],
    deprecated_in: str,
    remove_in: str,
    template_mgs: Optional[str] = None,
) -> None:
    """
    Raise deprecation warning with in given stream, note about arguments

    Args:
        stream: a function which takes message as the only position argument
        source: function/methods which is wrapped
        arguments: mapping from deprecated to new arguments
        deprecated_in: set version when source is deprecated
        remove_in: set version when source will be removed
        template_mgs: python formatted string message which has build-ins arguments:

            - ``source_name`` just the functions name such as "my_source_func"
            - ``source_path`` pythonic path to the function such as "my_package.with_module.my_source_func"
            - ``argument_map`` mapping from deprecated to new argument "old_arg -> new_arg"
            - ``deprecated_in`` version passed to wrapper
            - ``remove_in`` version passed to wrapper

    """
    # render one "old -> new" fragment per remapped argument
    mapping_parts = [
        TEMPLATE_ARGUMENT_MAPPING % dict(old_arg=old, new_arg=new)
        for old, new in arguments.items()
    ]
    _raise_warn(
        stream,
        source,
        template_mgs or TEMPLATE_WARNING_ARGUMENTS,
        deprecated_in=deprecated_in,
        remove_in=remove_in,
        argument_map=', '.join(mapping_parts),
    )
192
+
193
+
194
def deprecated(
    target: Union[bool, None, Callable],
    deprecated_in: str = "",
    remove_in: str = "",
    stream: Optional[Callable] = deprecation_warning,
    num_warns: int = 1,
    template_mgs: Optional[str] = None,
    args_mapping: Optional[Dict[str, str]] = None,
    args_extra: Optional[Dict[str, Any]] = None,
    skip_if: Union[bool, Callable] = False,
) -> Callable:
    """
    Decorate a function or class ``__init__`` with warning message
    and pass all arguments directly to the target class/method.

    Args:
        target: Function or method to forward the call. If set ``None``, no forwarding is applied and only warn.
        deprecated_in: Define version when the wrapped function is deprecated.
        remove_in: Define version when the wrapped function will be removed.
        stream: Set stream for printing warning messages, by default is deprecation warning.
            Setting ``None``, no warning is shown to user.
        num_warns: Custom define number or warning raised. Negative value (-1) means no limit.
        template_mgs: python formatted string message which has build-ins arguments:
            ``source_name``, ``source_path``, ``target_name``, ``target_path``, ``deprecated_in``, ``remove_in``
            Example of a custom message is::

                "v%(deprecated_in)s: `%(source_name)s` was deprecated in favor of `%(target_path)s`."

        args_mapping: Custom argument mapping argument between source and target and options to suppress some,
            for example ``{'my_arg': 'their_arg`}`` passes "my_arg" from source as "their_arg" in target
            or ``{'my_arg': None}`` ignores the "my_arg" from source function.
        args_extra: Custom filling extra argument in target function, mostly if they are required
            or your needed default is different from target one, for example ``{'their_arg': 42}``
        skip_if: Conditional skip for this wrapper, e.g. in case of versions

    Returns:
        wrapped function pointing to the target implementation with source arguments

    Raises:
        TypeError: if there are some argument in source function which are missing in target function

    """

    def packing(source: Callable) -> Callable:

        @wraps(source)
        def wrapped_fn(*args: Any, **kwargs: Any) -> Any:
            # check if user requested a skip
            shall_skip = skip_if() if callable(skip_if) else bool(skip_if)
            assert isinstance(shall_skip, bool), "function shall return bool"
            if shall_skip:
                return source(*args, **kwargs)

            # bookkeeping: total call count is stored on the wrapper itself
            nb_called = getattr(wrapped_fn, '_called', 0)
            setattr(wrapped_fn, "_called", nb_called + 1)
            # convert args to kwargs
            kwargs = _update_kwargs_with_args(source, args, kwargs)

            # two possible reasons to warn/redirect: a callable target, or remapped args
            reason_callable = target is None or callable(target)
            reason_argument = {}
            if args_mapping and target:
                # only arguments actually passed by the caller trigger a warning
                reason_argument = {a: b for a, b in args_mapping.items() if a in kwargs}
            # short cycle with no reason for redirect
            if not (reason_callable or reason_argument):
                # todo: eventually warn that there is no reason to use wrapper, e.g. mapping args does not exist
                return source(**kwargs)

            # warning per argument
            if reason_argument:
                # per-argument counters; warn while the least-warned one is under the limit
                arg_warns = [getattr(wrapped_fn, f'_warned_{arg}', 0) for arg in reason_argument]
                nb_warned = min(arg_warns)
            else:
                nb_warned = getattr(wrapped_fn, '_warned', 0)

            # warn user only N times in lifetime or infinitely...
            if stream and (num_warns < 0 or nb_warned < num_warns):
                if reason_callable:
                    _raise_warn_callable(stream, source, target, deprecated_in, remove_in, template_mgs)
                    setattr(wrapped_fn, "_warned", nb_warned + 1)
                elif reason_argument:
                    _raise_warn_arguments(stream, source, reason_argument, deprecated_in, remove_in, template_mgs)
                    attrib_names = [f'_warned_{arg}' for arg in reason_argument]
                    for n in attrib_names:
                        setattr(wrapped_fn, n, getattr(wrapped_fn, n, 0) + 1)

            if reason_callable:
                # redirecting: complete kwargs with the source's own defaults
                kwargs = _update_kwargs_with_defaults(source, kwargs)
            if args_mapping and target:  # covers target as True and callable
                # filter args which shall be skipped
                args_skip = [arg for arg in args_mapping if not args_mapping[arg]]
                # Look-Up-table mapping
                kwargs = {args_mapping.get(arg, arg): val for arg, val in kwargs.items() if arg not in args_skip}

            if args_extra and target:  # covers target as True and callable
                # update target argument by extra arguments
                kwargs.update(args_extra)

            if not callable(target):
                return source(**kwargs)

            # forward to the target; for a class, call its __init__
            target_is_class = inspect.isclass(target)
            target_func = target.__init__ if target_is_class else target  # type: ignore
            target_args = [arg[0] for arg in get_func_arguments_types_defaults(target_func)]

            missed = [arg for arg in kwargs if arg not in target_args]
            if missed:
                raise TypeError("Failed mapping, arguments missing in target source: %s" % missed)
            # all args were already moved to kwargs
            return target_func(**kwargs)

        return wrapped_fn

    return packing
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/deprecate/utils.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Copyright (C) 2020-2021 Jiri Borovec <...>
3
+ """
4
+ import warnings
5
+ from contextlib import contextmanager
6
+ from typing import Any, Generator, List, Optional, Type, Union
7
+
8
+
9
+ def _warns_repr(warns: List[warnings.WarningMessage]) -> List[Union[Warning, str]]:
10
+ return [w.message for w in warns]
11
+
12
+
13
@contextmanager
def no_warning_call(warning_type: Optional[Type[Warning]] = None, match: Optional[str] = None) -> Generator:
    """Assert that the wrapped code emits no (matching) warnings.

    Args:
        warning_type: specify catching warning, if None catching all
        match: match message, containing following string, if None catches all

    Raises:
        AssertionError: if specified warning was called
    """
    with warnings.catch_warnings(record=True) as captured:
        # record every warning, even ones normally filtered out
        warnings.simplefilter("always")
        yield
        if not captured:
            # nothing was raised at all
            return
        if not warning_type:
            raise AssertionError(f'While catching all warnings, these were found: {_warns_repr(captured)}')
        # narrow down to the requested warning class
        matching = [w for w in captured if issubclass(w.category, warning_type)]
        if not matching:
            return
        if not match:
            raise AssertionError(
                f'While catching `{warning_type.__name__}` warnings, these were found: {_warns_repr(matching)}'
            )
        # finally narrow by message substring
        offending = [w for w in matching if match in w.message.__str__()]
        if offending:
            raise AssertionError(
                f'While catching `{warning_type.__name__}` warnings with "{match}",'
                f' these were found: {_warns_repr(offending)}'
            )
50
+
51
+ def void(*args: Any, **kwrgs: Any) -> Any:
52
+ """Empty function which does nothing, just let your IDE stop complaining about unused arguments."""
53
+ _, _ = args, kwrgs
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/docs/conf.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 The ML Collections Authors.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Configuration file for the Sphinx documentation builder.
16
+ #
17
+ # This file only contains a selection of the most common options. For a full
18
+ # list see the documentation:
19
+ # https://www.sphinx-doc.org/en/master/usage/configuration.html
20
+
21
+ # -- Path setup --------------------------------------------------------------
22
+
23
+ # If extensions (or modules to document with autodoc) are in another directory,
24
+ # add these directories to sys.path here. If the directory is relative to the
25
+ # documentation root, use os.path.abspath to make it absolute, like shown here.
26
+ #
27
+ # import os
28
+ # import sys
29
+ # sys.path.insert(0, os.path.abspath('.'))
30
+
31
+ import os
32
+ import sys
33
+ sys.path.insert(0, os.path.abspath('..'))
34
+
35
+ # -- Project information -----------------------------------------------------
36
+
37
+ project = 'ml_collections'
38
+ copyright = '2020, The ML Collection Authors'
39
+ author = 'The ML Collection Authors'
40
+
41
+ # The full version, including alpha/beta/rc tags
42
+ release = '0.1.0'
43
+
44
+
45
+ # -- General configuration ---------------------------------------------------
46
+
47
+ # Add any Sphinx extension module names here, as strings. They can be
48
+ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
49
+ # ones.
50
+ extensions = [
51
+ 'sphinx.ext.autodoc',
52
+ 'sphinx.ext.autosummary',
53
+ 'sphinx.ext.intersphinx',
54
+ 'sphinx.ext.mathjax',
55
+ 'sphinx.ext.napoleon',
56
+ 'sphinx.ext.viewcode',
57
+ 'nbsphinx',
58
+ 'recommonmark',
59
+ ]
60
+
61
+ # Add any paths that contain templates here, relative to this directory.
62
+ templates_path = ['_templates']
63
+
64
+ # List of patterns, relative to source directory, that match files and
65
+ # directories to ignore when looking for source files.
66
+ # This pattern also affects html_static_path and html_extra_path.
67
+ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
68
+
69
+ autosummary_generate = True
70
+
71
+ master_doc = 'index'
72
+
73
+ # -- Options for HTML output -------------------------------------------------
74
+
75
+ # The theme to use for HTML and HTML Help pages. See the documentation for
76
+ # a list of builtin themes.
77
+ #
78
+ html_theme = 'sphinx_rtd_theme'
79
+
80
+ # Add any paths that contain custom static files (such as style sheets) here,
81
+ # relative to this directory. They are copied after the builtin static files,
82
+ # so a file named "default.css" will overwrite the builtin "default.css".
83
+ html_static_path = ['_static']
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ema_pytorch-0.0.8.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ema_pytorch-0.0.8.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2022 Phil Wang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/ema_pytorch-0.0.8.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/METADATA ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: filelock
3
+ Version: 3.13.1
4
+ Summary: A platform independent file lock.
5
+ Project-URL: Documentation, https://py-filelock.readthedocs.io
6
+ Project-URL: Homepage, https://github.com/tox-dev/py-filelock
7
+ Project-URL: Source, https://github.com/tox-dev/py-filelock
8
+ Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues
9
+ Maintainer-email: Bernát Gábor <gaborjbernat@gmail.com>
10
+ License-Expression: Unlicense
11
+ License-File: LICENSE
12
+ Keywords: application,cache,directory,log,user
13
+ Classifier: Development Status :: 5 - Production/Stable
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: The Unlicense (Unlicense)
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Programming Language :: Python :: 3 :: Only
19
+ Classifier: Programming Language :: Python :: 3.8
20
+ Classifier: Programming Language :: Python :: 3.9
21
+ Classifier: Programming Language :: Python :: 3.10
22
+ Classifier: Programming Language :: Python :: 3.11
23
+ Classifier: Programming Language :: Python :: 3.12
24
+ Classifier: Topic :: Internet
25
+ Classifier: Topic :: Software Development :: Libraries
26
+ Classifier: Topic :: System
27
+ Requires-Python: >=3.8
28
+ Provides-Extra: docs
29
+ Requires-Dist: furo>=2023.9.10; extra == 'docs'
30
+ Requires-Dist: sphinx-autodoc-typehints!=1.23.4,>=1.24; extra == 'docs'
31
+ Requires-Dist: sphinx>=7.2.6; extra == 'docs'
32
+ Provides-Extra: testing
33
+ Requires-Dist: covdefaults>=2.3; extra == 'testing'
34
+ Requires-Dist: coverage>=7.3.2; extra == 'testing'
35
+ Requires-Dist: diff-cover>=8; extra == 'testing'
36
+ Requires-Dist: pytest-cov>=4.1; extra == 'testing'
37
+ Requires-Dist: pytest-mock>=3.12; extra == 'testing'
38
+ Requires-Dist: pytest-timeout>=2.2; extra == 'testing'
39
+ Requires-Dist: pytest>=7.4.3; extra == 'testing'
40
+ Provides-Extra: typing
41
+ Requires-Dist: typing-extensions>=4.8; python_version < '3.11' and extra == 'typing'
42
+ Description-Content-Type: text/markdown
43
+
44
+ # filelock
45
+
46
+ [![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/)
47
+ [![Supported Python
48
+ versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/)
49
+ [![Documentation
50
+ status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest)
51
+ [![Code style:
52
+ black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
53
+ [![Downloads](https://static.pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock)
54
+ [![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yml)
55
+
56
+ For more information checkout the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html).
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/RECORD ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filelock-3.13.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ filelock-3.13.1.dist-info/METADATA,sha256=gi7LyG-dEuOBZC32wie-OOG0OkPZHABsn9rXvxuQlcA,2784
3
+ filelock-3.13.1.dist-info/RECORD,,
4
+ filelock-3.13.1.dist-info/WHEEL,sha256=9QBuHhg6FNW7lppboF2vKVbCGTVzsFykgRQjjlajrhA,87
5
+ filelock-3.13.1.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210
6
+ filelock/__init__.py,sha256=wAVZ_9_-3Y14xzzupRk5BTTRewFJekR2vf9oIx4M750,1213
7
+ filelock/__pycache__/__init__.cpython-38.pyc,,
8
+ filelock/__pycache__/_api.cpython-38.pyc,,
9
+ filelock/__pycache__/_error.cpython-38.pyc,,
10
+ filelock/__pycache__/_soft.cpython-38.pyc,,
11
+ filelock/__pycache__/_unix.cpython-38.pyc,,
12
+ filelock/__pycache__/_util.cpython-38.pyc,,
13
+ filelock/__pycache__/_windows.cpython-38.pyc,,
14
+ filelock/__pycache__/version.cpython-38.pyc,,
15
+ filelock/_api.py,sha256=UsVWPEOOgFH1pR_6WMk2b5hWZ7nWhUPT5GZX9WuYaC8,11860
16
+ filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787
17
+ filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711
18
+ filelock/_unix.py,sha256=ViG38PgJsIhT3xaArugvw0TPP6VWoP2VJj7FEIWypkg,2157
19
+ filelock/_util.py,sha256=dBDlIj1dHL_juXX0Qqq6bZtyE53YZTN8GFhtyTV043o,1708
20
+ filelock/_windows.py,sha256=eMKL8dZKrgekf5VYVGR14an29JGEInRtUO8ui9ABywg,2177
21
+ filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
22
+ filelock/version.py,sha256=fmajg3X8ZdOn-UpUewARwK5cfYf4wP4Xa0DcHjigFYo,413
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/filelock-3.13.1.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.18.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/LICENSE ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2008, Michael Elsdörfer <http://elsdoerfer.name>
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions
6
+ are met:
7
+
8
+ 1. Redistributions of source code must retain the above copyright
9
+ notice, this list of conditions and the following disclaimer.
10
+
11
+ 2. Redistributions in binary form must reproduce the above
12
+ copyright notice, this list of conditions and the following
13
+ disclaimer in the documentation and/or other materials
14
+ provided with the distribution.
15
+
16
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
19
+ FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
20
+ COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
21
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
22
+ BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
23
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
24
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
26
+ ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27
+ POSSIBILITY OF SUCH DAMAGE.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/METADATA ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: glob2
3
+ Version: 0.7
4
+ Summary: Version of the glob module that can capture patterns and supports recursive wildcards
5
+ Home-page: http://github.com/miracle2k/python-glob2/
6
+ Author: Michael Elsdoerfer
7
+ Author-email: michael@elsdoerfer.com
8
+ License: BSD
9
+ Classifier: Development Status :: 3 - Alpha
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: License :: OSI Approved :: BSD License
12
+ Classifier: Operating System :: OS Independent
13
+ Classifier: Programming Language :: Python
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Topic :: Software Development :: Libraries
16
+ License-File: LICENSE
17
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/RECORD ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ glob2-0.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ glob2-0.7.dist-info/LICENSE,sha256=mfyVVvpQ7TUZmWlxvy7Bq_n0tj8PP07RXN7oTjBhNLc,1359
3
+ glob2-0.7.dist-info/METADATA,sha256=x0sgtwGrFtBnmOZD6uGMbiiNmWkWDQzaqR1OIoQlv2g,627
4
+ glob2-0.7.dist-info/RECORD,,
5
+ glob2-0.7.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ glob2-0.7.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
7
+ glob2-0.7.dist-info/top_level.txt,sha256=LmVNT8jZb84cpRTpWn9CBoPunhq4nawIWlwXjJi2s68,6
8
+ glob2/__init__.py,sha256=SWkdfrrElFQVNPEPy3YCNaK0Nb95Wcyb-4XADiQskqA,82
9
+ glob2/__pycache__/__init__.cpython-38.pyc,,
10
+ glob2/__pycache__/compat.cpython-38.pyc,,
11
+ glob2/__pycache__/fnmatch.cpython-38.pyc,,
12
+ glob2/__pycache__/impl.cpython-38.pyc,,
13
+ glob2/compat.py,sha256=jRLW2AMBM4OATSUdfE3D6tpvf8Oexwiw2c0r4_npU6c,6859
14
+ glob2/fnmatch.py,sha256=6wv-SO-Sm9MG9w95IM5wr3Zt-U_vFFHBNtjhIzO1RH0,4463
15
+ glob2/impl.py,sha256=4paYLj3fVdJ4wR--iRUW0fUzDXYlkTSTXRV3uJEYc9Q,8304
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.36.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/glob2-0.7.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ glob2
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/LICENSE ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright (c) 2014-2022, imageio developers
2
+ All rights reserved.
3
+
4
+ Redistribution and use in source and binary forms, with or without
5
+ modification, are permitted provided that the following conditions are met:
6
+
7
+ * Redistributions of source code must retain the above copyright notice, this
8
+ list of conditions and the following disclaimer.
9
+
10
+ * Redistributions in binary form must reproduce the above copyright notice,
11
+ this list of conditions and the following disclaimer in the documentation
12
+ and/or other materials provided with the distribution.
13
+
14
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
15
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
18
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
20
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
21
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
22
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/RECORD ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/imageio_download_bin,sha256=UUAW50PLPWt4jQrKugwzR1q7LYxViUNpQHNZctfXnss,259
2
+ ../../../bin/imageio_remove_bin,sha256=wyw25HOufznctDRLJjogj37UhB9JWZYyuKUNOI9jlgk,255
3
+ imageio-2.19.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
4
+ imageio-2.19.3.dist-info/LICENSE,sha256=rlmepQpJTvtyXkIKqzXR91kgDP5BhrbGSjC6Sds_0GQ,1307
5
+ imageio-2.19.3.dist-info/METADATA,sha256=yRnqA9nRW7j1uNZjdZuAkyxS-7avpekBmLfaVybb20k,4930
6
+ imageio-2.19.3.dist-info/RECORD,,
7
+ imageio-2.19.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
8
+ imageio-2.19.3.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
9
+ imageio-2.19.3.dist-info/entry_points.txt,sha256=wIv0WLZA9V-h0NF4ozbsQHo8Ym9-tj4lfOG6J9Pv13c,131
10
+ imageio-2.19.3.dist-info/top_level.txt,sha256=iSUjc-wEw-xbMTvMOSKg85n0-E7Ms--Mo4FLMC-J2YM,8
11
+ imageio/__init__.py,sha256=8IKfa2UXtNJmE8u7BY0huDJ91_8wqKwLMrwxXqP8uWA,3272
12
+ imageio/__main__.py,sha256=s5nidb9wRZ6AbimHTPHULt3sTXPx4mqNil67KJHZvd4,5393
13
+ imageio/__pycache__/__init__.cpython-38.pyc,,
14
+ imageio/__pycache__/__main__.cpython-38.pyc,,
15
+ imageio/__pycache__/freeze.cpython-38.pyc,,
16
+ imageio/__pycache__/testing.cpython-38.pyc,,
17
+ imageio/__pycache__/typing.cpython-38.pyc,,
18
+ imageio/__pycache__/v2.cpython-38.pyc,,
19
+ imageio/__pycache__/v3.cpython-38.pyc,,
20
+ imageio/config/__init__.py,sha256=8NOpL5ePrkiioJb9hRBw3rydc4iNZkMwp7VdQlP4jDc,307
21
+ imageio/config/__pycache__/__init__.cpython-38.pyc,,
22
+ imageio/config/__pycache__/extensions.cpython-38.pyc,,
23
+ imageio/config/__pycache__/plugins.cpython-38.pyc,,
24
+ imageio/config/extensions.py,sha256=GdmyD2XXj--NXurv07wb9K93tJt5YxNU9YQfMJRW9sE,44975
25
+ imageio/config/plugins.py,sha256=QRD4jIKRyGVn1QDBKAtECbqFwf6YfAGO_4xID78O2lk,20181
26
+ imageio/core/__init__.py,sha256=PSkGH8K76ntSWhwM4j7W49UmCSZf_OGaSl9fNbQP7uQ,639
27
+ imageio/core/__pycache__/__init__.cpython-38.pyc,,
28
+ imageio/core/__pycache__/fetching.cpython-38.pyc,,
29
+ imageio/core/__pycache__/findlib.cpython-38.pyc,,
30
+ imageio/core/__pycache__/format.cpython-38.pyc,,
31
+ imageio/core/__pycache__/imopen.cpython-38.pyc,,
32
+ imageio/core/__pycache__/legacy_plugin_wrapper.cpython-38.pyc,,
33
+ imageio/core/__pycache__/request.cpython-38.pyc,,
34
+ imageio/core/__pycache__/util.cpython-38.pyc,,
35
+ imageio/core/__pycache__/v3_plugin_api.cpython-38.pyc,,
36
+ imageio/core/fetching.py,sha256=r81yBsJMqkwAXeVAuQuAzbk9etWxQUEUe4__UUjpQpc,9176
37
+ imageio/core/findlib.py,sha256=Zrhs0rEyp8p8iSIuCoBco0dCaB5dxJVZ4lRgv82Sqm0,5552
38
+ imageio/core/format.py,sha256=P8juRQqIRO1sPInRV9F7LpBNzKv6kGbLVFBi-XLsBCI,29975
39
+ imageio/core/imopen.py,sha256=vWyQyEn65JuvlUxy0898O_r37TqjB3H_PRyTEvJkhEk,10717
40
+ imageio/core/legacy_plugin_wrapper.py,sha256=FogsZ5wUltLxBDGyILY4WIEe3OgfAaboTHdlDUgZjwQ,10798
41
+ imageio/core/request.py,sha256=t1BTuwhqDBu2xGfmEuhQtpkLy9TvW506-1P8KCE5Eko,26762
42
+ imageio/core/util.py,sha256=3-TvMyWV6c67j6FnOztPaTVfoFjHn5z_mKb4IXjFUQM,18655
43
+ imageio/core/v3_plugin_api.py,sha256=lycFyzUj2DUVuGJZD34kDu1RNyeWYoxdf1irupv09eo,15427
44
+ imageio/freeze.py,sha256=hi9MNZz-ridgQBWcAqnd92sULek2lgmBSTmuott5lus,170
45
+ imageio/plugins/__init__.py,sha256=e1-9CjZ5HRnirnY_iBT26xXxDo4hfDmavOdiwUgDzUA,4289
46
+ imageio/plugins/__pycache__/__init__.cpython-38.pyc,,
47
+ imageio/plugins/__pycache__/_bsdf.cpython-38.pyc,,
48
+ imageio/plugins/__pycache__/_dicom.cpython-38.pyc,,
49
+ imageio/plugins/__pycache__/_freeimage.cpython-38.pyc,,
50
+ imageio/plugins/__pycache__/_swf.cpython-38.pyc,,
51
+ imageio/plugins/__pycache__/_tifffile.cpython-38.pyc,,
52
+ imageio/plugins/__pycache__/bsdf.cpython-38.pyc,,
53
+ imageio/plugins/__pycache__/dicom.cpython-38.pyc,,
54
+ imageio/plugins/__pycache__/example.cpython-38.pyc,,
55
+ imageio/plugins/__pycache__/feisem.cpython-38.pyc,,
56
+ imageio/plugins/__pycache__/ffmpeg.cpython-38.pyc,,
57
+ imageio/plugins/__pycache__/fits.cpython-38.pyc,,
58
+ imageio/plugins/__pycache__/freeimage.cpython-38.pyc,,
59
+ imageio/plugins/__pycache__/freeimagemulti.cpython-38.pyc,,
60
+ imageio/plugins/__pycache__/gdal.cpython-38.pyc,,
61
+ imageio/plugins/__pycache__/grab.cpython-38.pyc,,
62
+ imageio/plugins/__pycache__/lytro.cpython-38.pyc,,
63
+ imageio/plugins/__pycache__/npz.cpython-38.pyc,,
64
+ imageio/plugins/__pycache__/opencv.cpython-38.pyc,,
65
+ imageio/plugins/__pycache__/pillow.cpython-38.pyc,,
66
+ imageio/plugins/__pycache__/pillow_info.cpython-38.pyc,,
67
+ imageio/plugins/__pycache__/pillow_legacy.cpython-38.pyc,,
68
+ imageio/plugins/__pycache__/pillowmulti.cpython-38.pyc,,
69
+ imageio/plugins/__pycache__/pyav.cpython-38.pyc,,
70
+ imageio/plugins/__pycache__/simpleitk.cpython-38.pyc,,
71
+ imageio/plugins/__pycache__/spe.cpython-38.pyc,,
72
+ imageio/plugins/__pycache__/swf.cpython-38.pyc,,
73
+ imageio/plugins/__pycache__/tifffile.cpython-38.pyc,,
74
+ imageio/plugins/_bsdf.py,sha256=F2of0kjrhnGVWbrrgI7lkNJbAvfCrxF3jSu0GWZb_lQ,32757
75
+ imageio/plugins/_dicom.py,sha256=-IrYSnUNgeRvvm_lXtZbwteFiVWJ6UXKorzX94JEX9o,33859
76
+ imageio/plugins/_freeimage.py,sha256=7BmrZZoC_CbLjnULnfmUnSIXJQz2yKDF9tmWMQG5QQo,51755
77
+ imageio/plugins/_swf.py,sha256=q-QR-_8itteClrh94aVyjk5f5bvveElwUGrS9BRzLKM,25763
78
+ imageio/plugins/_tifffile.py,sha256=mQkxDuW_ir0Mg2rknFskoN5AcYLSfAJhcFotU0u9fs4,371589
79
+ imageio/plugins/bsdf.py,sha256=yZNfHwGlVnFWiQsSMy8QZocUsasUYHQUhIDRZEvnKCM,12854
80
+ imageio/plugins/dicom.py,sha256=iSb1QgDIZjbaQRJLtVD51Zry5GamZ02YRscerzBihng,11873
81
+ imageio/plugins/example.py,sha256=5nNT3f7O0M9GkBzEx6Jy0Vf2pYEP4V7j6vYCzKgBhos,5695
82
+ imageio/plugins/feisem.py,sha256=AKwZv7Zac0_grnr-wnzU7R0Zf2KSUe91k06evPa1NI8,3360
83
+ imageio/plugins/ffmpeg.py,sha256=0psArn9N58SZ-keIwyE4mb23qrvealGsu6M9UIo_-CI,29120
84
+ imageio/plugins/fits.py,sha256=XnlmeC79sIiIPd_7IDx05-p3-b2unO4CVR0nWAA4ph0,4531
85
+ imageio/plugins/freeimage.py,sha256=g7EDxJJrm_gM3ESIV0eBQWIuCHJA4ZdT5vFUn2K8_Yk,14646
86
+ imageio/plugins/freeimagemulti.py,sha256=hQjH18oGR5VaBw6vyrVz0MIH2nu9LbiithFYxPynfhA,11422
87
+ imageio/plugins/gdal.py,sha256=r2Ux7MQeHCUsmdk0aGENzGX8M5hCBU7NJomcf6G8FCU,1653
88
+ imageio/plugins/grab.py,sha256=wnDY-ly32gjY2ypQzlN_djBh4BC6cUFH5t9jA4LA65Q,2906
89
+ imageio/plugins/lytro.py,sha256=UQfVTsuTOpa0zx2v7KcyxaVBLTFdfiT9PqmAvNOYJeQ,25542
90
+ imageio/plugins/npz.py,sha256=7ZQr-4lQEKbfjaF6rOmpq9pQgDTUHvkZa_NHZkJWBQo,2670
91
+ imageio/plugins/opencv.py,sha256=VubaalArEUbO2Lt_zeNvh8DfeygBbI63CwZ8OHc_UU4,11109
92
+ imageio/plugins/pillow.py,sha256=l994prHdtMinPaduMhJSfibaysSLFq5hU7ErmeiMdmg,16100
93
+ imageio/plugins/pillow_info.py,sha256=Bt5iJtQnAh6mGViPIxhxRQPNidqay9-6BleAJZkhN1w,36624
94
+ imageio/plugins/pillow_legacy.py,sha256=zHQaXh2n-QYI8omk_Se7zKDK49hmDZQ62B8UU_RDfM8,31863
95
+ imageio/plugins/pillowmulti.py,sha256=IveZ5_QRAHMPdP4weiRa7zlkg7LpOyv7o3IgcYgwgO8,11296
96
+ imageio/plugins/pyav.py,sha256=bANtErft2UhHYpCwgOiMOMit5H8lEiXMfLqNVjf-w5c,36394
97
+ imageio/plugins/simpleitk.py,sha256=ldQWjkiCSZPoUnN87MtUqRIMMcIKmk8ZUeyDCQhnpG0,4107
98
+ imageio/plugins/spe.py,sha256=sg9dkZLUOS7tuVS-g5QopiLbBB_5bE_HwzrfvYb6uQc,24792
99
+ imageio/plugins/swf.py,sha256=QreAl1pdTVRsC8iD8x4pPS1C6LzklSXQ4RexafQupP8,11876
100
+ imageio/plugins/tifffile.py,sha256=jYj0JCjkNr0z8Ijelv1nCPN7zRkf9Jr37uR1GB3xO2U,19771
101
+ imageio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
102
+ imageio/resources/images/astronaut.png,sha256=iEMc2WU8zVOXQbVV-wpGthVYswHUEQQStbwotePqbLU,791555
103
+ imageio/resources/images/chelsea.png,sha256=l0A8nBcdGu3SAmx1FeICCO-GXqq1bUYsPC7vrem313k,221294
104
+ imageio/resources/images/chelsea.zip,sha256=ieIbNItsviHa0hRghW_MBOgCXdnr1Sp7MvC_vXEDGJo,221318
105
+ imageio/resources/images/cockatoo.mp4,sha256=X9419aKIyobiFtLcKBiKtktFYNMCHyc_rv3w3oDziqU,728751
106
+ imageio/resources/images/newtonscradle.gif,sha256=pmPE4Ha1xI4KrFjHd30rsxk8swU8CY0I2ieKYtAv8xQ,583374
107
+ imageio/resources/images/realshort.mp4,sha256=qLNcLCEwRTueoRcq1K9orAJ7wkg-8FRXaWhHIhJ7_hg,96822
108
+ imageio/resources/images/stent.npz,sha256=YKg9Ipa1HualMVPpupa6kCA5GwyJUoldnWCgpimsa7Y,824612
109
+ imageio/resources/shipped_resources_go_here,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
110
+ imageio/testing.py,sha256=tkRPxZZpG68q_MAIux8WE8QeKbhbq6rDPVfCDsof1Ms,1597
111
+ imageio/typing.py,sha256=GiWD3Muonws8wZv3SDsuP_5s6eZtYHouEAshCo-5bW0,342
112
+ imageio/v2.py,sha256=OIvZXgso8d0GSynaF0Mra_j2BbOdqtzCErMoW9u13Qc,17232
113
+ imageio/v3.py,sha256=iG2UEJkePrkqp3jr_6EGktj7WFGuddcHuld__M-5iyE,10217
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/imageio-2.19.3.dist-info/entry_points.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ [console_scripts]
2
+ imageio_download_bin = imageio.__main__:download_bin_main
3
+ imageio_remove_bin = imageio.__main__:remove_bin_main
4
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/__init__.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) MONAI Consortium
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ # http://www.apache.org/licenses/LICENSE-2.0
6
+ # Unless required by applicable law or agreed to in writing, software
7
+ # distributed under the License is distributed on an "AS IS" BASIS,
8
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9
+ # See the License for the specific language governing permissions and
10
+ # limitations under the License.
11
+
12
+ import os
13
+ import sys
14
+
15
+ from ._version import get_versions
16
+
17
+ PY_REQUIRED_MAJOR = 3
18
+ PY_REQUIRED_MINOR = 7
19
+
20
+ version_dict = get_versions()
21
+ __version__: str = version_dict.get("version", "0+unknown")
22
+ __revision_id__: str = version_dict.get("full-revisionid")
23
+ del get_versions, version_dict
24
+
25
+ __copyright__ = "(c) MONAI Consortium"
26
+
27
+ __basedir__ = os.path.dirname(__file__)
28
+
29
+ if sys.version_info.major != PY_REQUIRED_MAJOR or sys.version_info.minor < PY_REQUIRED_MINOR:
30
+ import warnings
31
+
32
+ warnings.warn(
33
+ f"MONAI requires Python {PY_REQUIRED_MAJOR}.{PY_REQUIRED_MINOR} or higher. "
34
+ f"But the current Python is: {sys.version}",
35
+ category=RuntimeWarning,
36
+ )
37
+
38
+ from .utils.module import load_submodules # noqa: E402
39
+
40
+ # handlers_* have some external decorators the users may not have installed
41
+ # *.so files and folder "_C" may not exist when the cpp extensions are not compiled
42
+ excludes = "(^(monai.handlers))|(^(monai.bundle))|((\\.so)$)|(^(monai._C))"
43
+
44
+ # load directory modules only, skip loading individual files
45
+ load_submodules(sys.modules[__name__], False, exclude_pattern=excludes)
46
+
47
+ # load all modules, this will trigger all export decorations
48
+ load_submodules(sys.modules[__name__], True, exclude_pattern=excludes)
49
+
50
+ __all__ = [
51
+ "apps",
52
+ "bundle",
53
+ "config",
54
+ "data",
55
+ "engines",
56
+ "handlers",
57
+ "inferers",
58
+ "losses",
59
+ "metrics",
60
+ "networks",
61
+ "optimizers",
62
+ "transforms",
63
+ "utils",
64
+ "visualize",
65
+ ]
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/_version.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ # This file was generated by 'versioneer.py' (0.19) from
3
+ # revision-control system data, or from the parent directory name of an
4
+ # unpacked source archive. Distribution tarballs contain a pre-generated copy
5
+ # of this file.
6
+
7
+ import json
8
+
9
+ version_json = '''
10
+ {
11
+ "date": "2022-06-13T15:14:10+0000",
12
+ "dirty": false,
13
+ "error": null,
14
+ "full-revisionid": "af0e0e9f757558d144b655c63afcea3a4e0a06f5",
15
+ "version": "0.9.0"
16
+ }
17
+ ''' # END VERSION_JSON
18
+
19
+
20
+ def get_versions():
21
+ return json.loads(version_json)
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/monai/py.typed ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/LICENSE ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Copyright 2016-2021 Andrew Svetlov and aio-libs team
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/METADATA ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: multidict
3
+ Version: 6.0.2
4
+ Summary: multidict implementation
5
+ Home-page: https://github.com/aio-libs/multidict
6
+ Author: Andrew Svetlov
7
+ Author-email: andrew.svetlov@gmail.com
8
+ License: Apache 2
9
+ Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
10
+ Project-URL: CI: GitHub, https://github.com/aio-libs/multidict/actions
11
+ Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/multidict
12
+ Project-URL: Docs: RTD, https://multidict.readthedocs.io
13
+ Project-URL: GitHub: issues, https://github.com/aio-libs/multidict/issues
14
+ Project-URL: GitHub: repo, https://github.com/aio-libs/multidict
15
+ Platform: UNKNOWN
16
+ Classifier: License :: OSI Approved :: Apache Software License
17
+ Classifier: Intended Audience :: Developers
18
+ Classifier: Programming Language :: Python
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.7
21
+ Classifier: Programming Language :: Python :: 3.8
22
+ Classifier: Programming Language :: Python :: 3.9
23
+ Classifier: Programming Language :: Python :: 3.10
24
+ Classifier: Development Status :: 5 - Production/Stable
25
+ Requires-Python: >=3.7
26
+ License-File: LICENSE
27
+
28
+ =========
29
+ multidict
30
+ =========
31
+
32
+ .. image:: https://github.com/aio-libs/multidict/workflows/CI/badge.svg
33
+ :target: https://github.com/aio-libs/multidict/actions?query=workflow%3ACI
34
+ :alt: GitHub status for master branch
35
+
36
+ .. image:: https://codecov.io/gh/aio-libs/multidict/branch/master/graph/badge.svg
37
+ :target: https://codecov.io/gh/aio-libs/multidict
38
+ :alt: Coverage metrics
39
+
40
+ .. image:: https://img.shields.io/pypi/v/multidict.svg
41
+ :target: https://pypi.org/project/multidict
42
+ :alt: PyPI
43
+
44
+ .. image:: https://readthedocs.org/projects/multidict/badge/?version=latest
45
+ :target: http://multidict.readthedocs.org/en/latest/?badge=latest
46
+ :alt: Documentationb
47
+
48
+ .. image:: https://img.shields.io/pypi/pyversions/multidict.svg
49
+ :target: https://pypi.org/project/multidict
50
+ :alt: Python versions
51
+
52
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
53
+ :target: https://gitter.im/aio-libs/Lobby
54
+ :alt: Chat on Gitter
55
+
56
+ Multidict is dict-like collection of *key-value pairs* where key
57
+ might be occurred more than once in the container.
58
+
59
+ Introduction
60
+ ------------
61
+
62
+ *HTTP Headers* and *URL query string* require specific data structure:
63
+ *multidict*. It behaves mostly like a regular ``dict`` but it may have
64
+ several *values* for the same *key* and *preserves insertion ordering*.
65
+
66
+ The *key* is ``str`` (or ``istr`` for case-insensitive dictionaries).
67
+
68
+ ``multidict`` has four multidict classes:
69
+ ``MultiDict``, ``MultiDictProxy``, ``CIMultiDict``
70
+ and ``CIMultiDictProxy``.
71
+
72
+ Immutable proxies (``MultiDictProxy`` and
73
+ ``CIMultiDictProxy``) provide a dynamic view for the
74
+ proxied multidict, the view reflects underlying collection changes. They
75
+ implement the ``collections.abc.Mapping`` interface.
76
+
77
+ Regular mutable (``MultiDict`` and ``CIMultiDict``) classes
78
+ implement ``collections.abc.MutableMapping`` and allows to change
79
+ their own content.
80
+
81
+
82
+ *Case insensitive* (``CIMultiDict`` and
83
+ ``CIMultiDictProxy``) ones assume the *keys* are case
84
+ insensitive, e.g.::
85
+
86
+ >>> dct = CIMultiDict(key='val')
87
+ >>> 'Key' in dct
88
+ True
89
+ >>> dct['Key']
90
+ 'val'
91
+
92
+ *Keys* should be ``str`` or ``istr`` instances.
93
+
94
+ The library has optional C Extensions for sake of speed.
95
+
96
+
97
+ License
98
+ -------
99
+
100
+ Apache 2
101
+
102
+ Library Installation
103
+ --------------------
104
+
105
+ .. code-block:: bash
106
+
107
+ $ pip install multidict
108
+
109
+ The library is Python 3 only!
110
+
111
+ PyPI contains binary wheels for Linux, Windows and MacOS. If you want to install
112
+ ``multidict`` on another operation system (or *Alpine Linux* inside a Docker) the
113
+ Tarball will be used to compile the library from sources. It requires C compiler and
114
+ Python headers installed.
115
+
116
+ To skip the compilation please use `MULTIDICT_NO_EXTENSIONS` environment variable,
117
+ e.g.:
118
+
119
+ .. code-block:: bash
120
+
121
+ $ MULTIDICT_NO_EXTENSIONS=1 pip install multidict
122
+
123
+ Please note, Pure Python (uncompiled) version is about 20-50 times slower depending on
124
+ the usage scenario!!!
125
+
126
+
127
+
128
+ Changelog
129
+ ---------
130
+ See `RTD page <http://multidict.readthedocs.org/en/latest/changes.html>`_.
131
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ multidict-6.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ multidict-6.0.2.dist-info/LICENSE,sha256=BqJA6hC6ho_aLeWN-FmIaWHfhzqnS7qx4PE-r5n5K3s,608
3
+ multidict-6.0.2.dist-info/METADATA,sha256=iNMvE8dk8rtpvHO0DbdsVPatveZlkoIlBI3YZvw-Q0Y,4106
4
+ multidict-6.0.2.dist-info/RECORD,,
5
+ multidict-6.0.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ multidict-6.0.2.dist-info/WHEEL,sha256=-ijGDuALlPxm3HbhKntps0QzHsi-DPlXqgerYTTJkFE,148
7
+ multidict-6.0.2.dist-info/top_level.txt,sha256=-euDElkk5_qkmfIJ7WiqCab02ZlSFZWynejKg59qZQQ,10
8
+ multidict/__init__.py,sha256=IoPxk53SsLhHykEQC4N5gxZWPZf72KueDKUOqBc7cH0,928
9
+ multidict/__init__.pyi,sha256=jLQkZwqRJYl_MOMGSavmzwzwefTEH_Tjk3oTKV7c6HY,5035
10
+ multidict/__pycache__/__init__.cpython-38.pyc,,
11
+ multidict/__pycache__/_abc.cpython-38.pyc,,
12
+ multidict/__pycache__/_compat.cpython-38.pyc,,
13
+ multidict/__pycache__/_multidict_base.cpython-38.pyc,,
14
+ multidict/__pycache__/_multidict_py.cpython-38.pyc,,
15
+ multidict/_abc.py,sha256=Zvnrn4SBkrv4QTD7-ZzqNcoxw0f8KStLMPzGvBuGT2w,1190
16
+ multidict/_compat.py,sha256=tjUGdP9ooiH6c2KJrvUbPRwcvjWerKlKU6InIviwh7w,316
17
+ multidict/_multidict.cpython-38-x86_64-linux-gnu.so,sha256=ZkT3hCEbGN2vvbWCgI9B8ydrTPoHP4dw1peUHtgqe6A,384936
18
+ multidict/_multidict_base.py,sha256=XugkE78fXBmtzDdg2Yi9TrEhDexmL-6qJbFIG0viLMg,3791
19
+ multidict/_multidict_py.py,sha256=kG9sxY0_E2e3B1qzDmFzgZvZtu8qmEhR5nhnvH4xatc,14864
20
+ multidict/py.typed,sha256=e9bmbH3UFxsabQrnNFPG9qxIXztwbcM6IKDYnvZwprY,15
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: false
4
+ Tag: cp38-cp38-manylinux_2_17_x86_64
5
+ Tag: cp38-cp38-manylinux2014_x86_64
6
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/multidict-6.0.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ multidict
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/__main__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+
4
+ # Remove '' and current working directory from the first entry
5
+ # of sys.path, if present to avoid using current directory
6
+ # in pip commands check, freeze, install, list and show,
7
+ # when invoked as python -m pip <command>
8
+ if sys.path[0] in ("", os.getcwd()):
9
+ sys.path.pop(0)
10
+
11
+ # If we are running from a wheel, add the wheel to sys.path
12
+ # This allows the usage python pip-*.whl/pip install pip-*.whl
13
+ if __package__ == "":
14
+ # __file__ is pip-*.whl/pip/__main__.py
15
+ # first dirname call strips of '/__main__.py', second strips off '/pip'
16
+ # Resulting path is the name of the wheel itself
17
+ # Add that to sys.path so we can import pip
18
+ path = os.path.dirname(os.path.dirname(__file__))
19
+ sys.path.insert(0, path)
20
+
21
+ if __name__ == "__main__":
22
+ from pip._internal.cli.main import main as _main
23
+
24
+ sys.exit(_main())
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/pip/py.typed ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ pip is a command line program. While it is implemented in Python, and so is
2
+ available for import, you must not use pip's internal APIs in this way. Typing
3
+ information is provided as a convenience only and is not a guarantee. Expect
4
+ unannounced changes to the API and types in releases.
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/METADATA ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: protobuf
3
+ Version: 3.20.1
4
+ Summary: Protocol Buffers
5
+ Home-page: https://developers.google.com/protocol-buffers/
6
+ Download-URL: https://github.com/protocolbuffers/protobuf/releases
7
+ Maintainer: protobuf@googlegroups.com
8
+ Maintainer-email: protobuf@googlegroups.com
9
+ License: BSD-3-Clause
10
+ Platform: UNKNOWN
11
+ Classifier: Programming Language :: Python
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.7
14
+ Classifier: Programming Language :: Python :: 3.8
15
+ Classifier: Programming Language :: Python :: 3.9
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Requires-Python: >=3.7
18
+
19
+ Protocol Buffers are Google's data interchange format
20
+
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/RECORD ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ google/protobuf/__init__.py,sha256=QmlumOi-Q0lfJS91GeA8GTtpEnUb2YRcta57NBc9-fc,1705
2
+ google/protobuf/__pycache__/__init__.cpython-38.pyc,,
3
+ google/protobuf/__pycache__/any_pb2.cpython-38.pyc,,
4
+ google/protobuf/__pycache__/api_pb2.cpython-38.pyc,,
5
+ google/protobuf/__pycache__/descriptor.cpython-38.pyc,,
6
+ google/protobuf/__pycache__/descriptor_database.cpython-38.pyc,,
7
+ google/protobuf/__pycache__/descriptor_pb2.cpython-38.pyc,,
8
+ google/protobuf/__pycache__/descriptor_pool.cpython-38.pyc,,
9
+ google/protobuf/__pycache__/duration_pb2.cpython-38.pyc,,
10
+ google/protobuf/__pycache__/empty_pb2.cpython-38.pyc,,
11
+ google/protobuf/__pycache__/field_mask_pb2.cpython-38.pyc,,
12
+ google/protobuf/__pycache__/json_format.cpython-38.pyc,,
13
+ google/protobuf/__pycache__/message.cpython-38.pyc,,
14
+ google/protobuf/__pycache__/message_factory.cpython-38.pyc,,
15
+ google/protobuf/__pycache__/proto_builder.cpython-38.pyc,,
16
+ google/protobuf/__pycache__/reflection.cpython-38.pyc,,
17
+ google/protobuf/__pycache__/service.cpython-38.pyc,,
18
+ google/protobuf/__pycache__/service_reflection.cpython-38.pyc,,
19
+ google/protobuf/__pycache__/source_context_pb2.cpython-38.pyc,,
20
+ google/protobuf/__pycache__/struct_pb2.cpython-38.pyc,,
21
+ google/protobuf/__pycache__/symbol_database.cpython-38.pyc,,
22
+ google/protobuf/__pycache__/text_encoding.cpython-38.pyc,,
23
+ google/protobuf/__pycache__/text_format.cpython-38.pyc,,
24
+ google/protobuf/__pycache__/timestamp_pb2.cpython-38.pyc,,
25
+ google/protobuf/__pycache__/type_pb2.cpython-38.pyc,,
26
+ google/protobuf/__pycache__/wrappers_pb2.cpython-38.pyc,,
27
+ google/protobuf/any_pb2.py,sha256=TdTaU8MPj7tqjilhMbIK8m3AIP7Yvd08R2LoXojwYaE,1355
28
+ google/protobuf/api_pb2.py,sha256=PMh7xH6vsLCW-y1f_A_0Qnx3PtSx-g2UsS4AIswXrcM,2539
29
+ google/protobuf/compiler/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
30
+ google/protobuf/compiler/__pycache__/__init__.cpython-38.pyc,,
31
+ google/protobuf/compiler/__pycache__/plugin_pb2.cpython-38.pyc,,
32
+ google/protobuf/compiler/plugin_pb2.py,sha256=Bv73ahQkWnOx9XH8YF5TrrzSPjksbqelnDTl63q17v0,2740
33
+ google/protobuf/descriptor.py,sha256=DiDxSej4W4dt3Y_bvv0uCa9YwdCaMCe-WYFigii3VaA,46474
34
+ google/protobuf/descriptor_database.py,sha256=2hBUBbzWjTdyq0nLZ9HYKbqhMpouzZVk9srurERnLVo,6819
35
+ google/protobuf/descriptor_pb2.py,sha256=o5c8FFMBHDxryibe_JCEYO5xi4AAm_Te4xZeWlJ8hlI,109072
36
+ google/protobuf/descriptor_pool.py,sha256=yHiZzzFTuh_LGp-WNHzGe4MVDpThNI3mtjV1bpkSAoY,47281
37
+ google/protobuf/duration_pb2.py,sha256=KmfAu5bQ4GhoeqH06nJ7tjRbtov3b0ktUHohhNIl2p0,1430
38
+ google/protobuf/empty_pb2.py,sha256=d6CTe50gpFNlRuXXyL6R1PU8WuLg8qqLsye7tElunFU,1319
39
+ google/protobuf/field_mask_pb2.py,sha256=nNXqeAZhmPOsez6D7V5eA9VQICbB5mXNe1um1jmH-tA,1401
40
+ google/protobuf/internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
41
+ google/protobuf/internal/__pycache__/__init__.cpython-38.pyc,,
42
+ google/protobuf/internal/__pycache__/api_implementation.cpython-38.pyc,,
43
+ google/protobuf/internal/__pycache__/builder.cpython-38.pyc,,
44
+ google/protobuf/internal/__pycache__/containers.cpython-38.pyc,,
45
+ google/protobuf/internal/__pycache__/decoder.cpython-38.pyc,,
46
+ google/protobuf/internal/__pycache__/encoder.cpython-38.pyc,,
47
+ google/protobuf/internal/__pycache__/enum_type_wrapper.cpython-38.pyc,,
48
+ google/protobuf/internal/__pycache__/extension_dict.cpython-38.pyc,,
49
+ google/protobuf/internal/__pycache__/message_listener.cpython-38.pyc,,
50
+ google/protobuf/internal/__pycache__/python_message.cpython-38.pyc,,
51
+ google/protobuf/internal/__pycache__/type_checkers.cpython-38.pyc,,
52
+ google/protobuf/internal/__pycache__/well_known_types.cpython-38.pyc,,
53
+ google/protobuf/internal/__pycache__/wire_format.cpython-38.pyc,,
54
+ google/protobuf/internal/_api_implementation.cpython-38-x86_64-linux-gnu.so,sha256=ZVZ-zAKr-zEzoo9ICWeshNyERCjUH9o5eeQSgncSmtk,5504
55
+ google/protobuf/internal/api_implementation.py,sha256=rma5XlGOY6x35S55AS5bSOv0vq_211gjnL4Q9X74lpY,4562
56
+ google/protobuf/internal/builder.py,sha256=wtugRgYbIMeo4txvGUlfFLD8nKZEDCxH3lkRtyVndbY,5188
57
+ google/protobuf/internal/containers.py,sha256=RH6NkwSCLzQ5qTgsvM04jkRjgCDNHFRWZyfSCvvv_rk,23328
58
+ google/protobuf/internal/decoder.py,sha256=XDqpaEzqavV4Ka7jx2jonxCEyuKClxzbWPS2M4OTe0I,37567
59
+ google/protobuf/internal/encoder.py,sha256=6hXWsTHCB-cumgbAMi5Z3JIxab8E5LD9p_iPS2HohiA,28656
60
+ google/protobuf/internal/enum_type_wrapper.py,sha256=PKWYYZRexjkl4KrMnGa6Csq2xbKFXoqsWbwYHvJ0yiM,4821
61
+ google/protobuf/internal/extension_dict.py,sha256=3DbWhlrpGybuur1bjfGKhx2d8IVo7tVQUEcF8tPLTyo,8443
62
+ google/protobuf/internal/message_listener.py,sha256=Qwc5gkifAvWzhm3b0v-nXJkozNTgL-L92XAslngFaow,3367
63
+ google/protobuf/internal/python_message.py,sha256=MEDGdNsrBo8OKk92s87J9qjJCQN_lkZCJHJXaA1th8U,58146
64
+ google/protobuf/internal/type_checkers.py,sha256=a3o2y-S9XSFEiPUau5faEz2fu2OIxYhTM9ZGiLPCXlM,16912
65
+ google/protobuf/internal/well_known_types.py,sha256=yLtyfrZ3svShTNgMW-U0TLt77pHsewi6xILDgabd-BY,30014
66
+ google/protobuf/internal/wire_format.py,sha256=7Wz8gV7QOvoTzLMWrwlWSg7hIJ_T8Pm1w8_WLhpieVw,8444
67
+ google/protobuf/json_format.py,sha256=egKnvgSRn62HI6UMWw-COTPfheBFERSqMNixp2iJZF0,35664
68
+ google/protobuf/message.py,sha256=Gyj0Yb6eWiI47QO4DnA2W2J0WlDiRVm83FlKfO_Isf8,14523
69
+ google/protobuf/message_factory.py,sha256=LD18eAKZ_tZnDzIUc_gDmrkxuwiYkUh-f-BkfVW7Wko,7482
70
+ google/protobuf/proto_builder.py,sha256=WcEmUDU26k_JSiUzXJ7bgthgR7jlTiOecV1np0zGyA8,5506
71
+ google/protobuf/pyext/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
72
+ google/protobuf/pyext/__pycache__/__init__.cpython-38.pyc,,
73
+ google/protobuf/pyext/__pycache__/cpp_message.cpython-38.pyc,,
74
+ google/protobuf/pyext/_message.cpython-38-x86_64-linux-gnu.so,sha256=41tJIVp4nvcNndk4MDiDldwaRpAGBCEPNAiToQLjXg4,2478152
75
+ google/protobuf/pyext/cpp_message.py,sha256=D0-bxtjf1Ri8b0GubL5xgkkEB_z_mIf847yrRvVqDBU,2851
76
+ google/protobuf/reflection.py,sha256=f61wP6k-HMShRwLsfRomScGzG0ZpWULpyhYwvjuZMKQ,3779
77
+ google/protobuf/service.py,sha256=MGWgoxTrSlmqWsgXvp1XaP5Sg-_pq8Sw2XJuY1m6MVM,9146
78
+ google/protobuf/service_reflection.py,sha256=5hBr8Q4gTgg3MT4NZoTxRSjTaxzLtNSG-8cXa5nHXaQ,11417
79
+ google/protobuf/source_context_pb2.py,sha256=9sFLqhUhkTHkdKMZCQPQQ3GClbDMtOSlAy4P9LjPEvg,1416
80
+ google/protobuf/struct_pb2.py,sha256=J16zp6HU5P2TyHpmAOzTvPDN_nih9uLg-z18-3bnFp0,2477
81
+ google/protobuf/symbol_database.py,sha256=aCPGE4N2slb6HFB4cHFJDA8zehgMy16XY8BMH_ebfhc,6944
82
+ google/protobuf/text_encoding.py,sha256=IrfncP112lKMLnWhhjXoczxEv2RZ9kzlinzAzHstrlY,4728
83
+ google/protobuf/text_format.py,sha256=6aYyfB-htl2za_waO6LV9JVTPbx5Qj2vf0uE-cZdC6M,60006
84
+ google/protobuf/timestamp_pb2.py,sha256=PTClFsyHjuwKHv4h6Ho1-GcMOfU3Rhd3edANjTQEbJI,1439
85
+ google/protobuf/type_pb2.py,sha256=Iifx3dIukGbRBdYaJPQJADJ-ZcBdjztB1JvplT7EiJo,4425
86
+ google/protobuf/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
87
+ google/protobuf/util/__pycache__/__init__.cpython-38.pyc,,
88
+ google/protobuf/util/__pycache__/json_format_pb2.cpython-38.pyc,,
89
+ google/protobuf/util/__pycache__/json_format_proto3_pb2.cpython-38.pyc,,
90
+ google/protobuf/util/json_format_pb2.py,sha256=NR9GMe0hgwdbDEW5PyquvwAYcsHkPsobrnGV4sIyiis,6124
91
+ google/protobuf/util/json_format_proto3_pb2.py,sha256=Gy7gqXLUPfSQkhmP6epX0-xODDGdE6pY57Mn93f4EmA,14095
92
+ google/protobuf/wrappers_pb2.py,sha256=7g8cp-WcEg0HWzx53KagbAr9a4cjXJHGMraSM2i4Kc4,2410
93
+ protobuf-3.20.1-py3.8-nspkg.pth,sha256=xH5gTxc4UipYP3qrbP-4CCHNGBV97eBR4QqhheCvBl4,539
94
+ protobuf-3.20.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
95
+ protobuf-3.20.1.dist-info/METADATA,sha256=Nm_OXnXJP9NViOodFARMR9MGLGHJ81WH-fnWlnOlcuY,698
96
+ protobuf-3.20.1.dist-info/RECORD,,
97
+ protobuf-3.20.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
98
+ protobuf-3.20.1.dist-info/WHEEL,sha256=U9CYjdvnyMM6M9rFbVg1rC7FvR0cX-CcV6tdgX3Vy0E,144
99
+ protobuf-3.20.1.dist-info/namespace_packages.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
100
+ protobuf-3.20.1.dist-info/top_level.txt,sha256=_1QvSJIhFAGfxb79D6DhB7SUw2X6T4rwnz_LLrbcD3c,7
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/REQUESTED ADDED
File without changes
my_container_sandbox/workspace/anaconda3/lib/python3.8/site-packages/protobuf-3.20.1.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.37.1)
3
+ Root-Is-Purelib: false
4
+ Tag: cp38-cp38-manylinux_2_5_x86_64
5
+ Tag: cp38-cp38-manylinux1_x86_64
6
+