ZTWHHH committed
Commit 2c1b5ee · verified · 1 Parent(s): be9343d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2017-10-30/endpoint-rule-set-1.json.gz +3 -0
  2. wemm/lib/python3.10/site-packages/botocore/data/waf-regional/2016-11-28/endpoint-rule-set-1.json.gz +3 -0
  3. wemm/lib/python3.10/site-packages/charset_normalizer/__init__.py +48 -0
  4. wemm/lib/python3.10/site-packages/charset_normalizer/api.py +668 -0
  5. wemm/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so +0 -0
  6. wemm/lib/python3.10/site-packages/charset_normalizer/md.py +630 -0
  7. wemm/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc +0 -0
  8. wemm/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc +0 -0
  9. wemm/lib/python3.10/site-packages/lightning_utilities/core/apply_func.py +291 -0
  10. wemm/lib/python3.10/site-packages/lightning_utilities/docs/__init__.py +6 -0
  11. wemm/lib/python3.10/site-packages/lightning_utilities/docs/__pycache__/__init__.cpython-310.pyc +0 -0
  12. wemm/lib/python3.10/site-packages/networkx/exception.py +131 -0
  13. wemm/lib/python3.10/site-packages/pillow.libs/libXau-154567c4.so.6.0.0 +0 -0
  14. wemm/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/requires.txt +85 -0
  15. wemm/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/top_level.txt +3 -0
  16. wemm/lib/python3.10/site-packages/torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc +0 -0
  17. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  18. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/_stereo_matching.cpython-310.pyc +0 -0
  19. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/cifar.cpython-310.pyc +0 -0
  20. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/cityscapes.cpython-310.pyc +0 -0
  21. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/clevr.cpython-310.pyc +0 -0
  22. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/coco.cpython-310.pyc +0 -0
  23. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/country211.cpython-310.pyc +0 -0
  24. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/dtd.cpython-310.pyc +0 -0
  25. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/eurosat.cpython-310.pyc +0 -0
  26. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/fer2013.cpython-310.pyc +0 -0
  27. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/fgvc_aircraft.cpython-310.pyc +0 -0
  28. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/flickr.cpython-310.pyc +0 -0
  29. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/food101.cpython-310.pyc +0 -0
  30. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/gtsrb.cpython-310.pyc +0 -0
  31. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/imagenet.cpython-310.pyc +0 -0
  32. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/kinetics.cpython-310.pyc +0 -0
  33. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/kitti.cpython-310.pyc +0 -0
  34. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/lfw.cpython-310.pyc +0 -0
  35. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/lsun.cpython-310.pyc +0 -0
  36. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/mnist.cpython-310.pyc +0 -0
  37. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/oxford_iiit_pet.cpython-310.pyc +0 -0
  38. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/phototour.cpython-310.pyc +0 -0
  39. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/places365.cpython-310.pyc +0 -0
  40. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sbu.cpython-310.pyc +0 -0
  41. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/semeion.cpython-310.pyc +0 -0
  42. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/stanford_cars.cpython-310.pyc +0 -0
  43. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/stl10.cpython-310.pyc +0 -0
  44. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/svhn.cpython-310.pyc +0 -0
  45. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/video_utils.cpython-310.pyc +0 -0
  46. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/vision.cpython-310.pyc +0 -0
  47. wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/voc.cpython-310.pyc +0 -0
  48. wemm/lib/python3.10/site-packages/torchvision/datasets/_optical_flow.py +491 -0
  49. wemm/lib/python3.10/site-packages/torchvision/datasets/_stereo_matching.py +1224 -0
  50. wemm/lib/python3.10/site-packages/torchvision/datasets/caltech.py +237 -0
wemm/lib/python3.10/site-packages/botocore/data/cloudfront/2017-10-30/endpoint-rule-set-1.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e91ddb21316f7400642cbf0078ae107bdae9b6daf96f89c9e74ca89c2c63dedd
+ size 1839
wemm/lib/python3.10/site-packages/botocore/data/waf-regional/2016-11-28/endpoint-rule-set-1.json.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b703ad4f938a1ed7424ba2ff11c650c4ca79096bdb929fbd87431e953a586471
+ size 1145
wemm/lib/python3.10/site-packages/charset_normalizer/__init__.py ADDED
@@ -0,0 +1,48 @@
+ """
+ Charset-Normalizer
+ ~~~~~~~~~~~~~~
+ The Real First Universal Charset Detector.
+ A library that helps you read text from an unknown charset encoding.
+ Motivated by chardet, This package is trying to resolve the issue by taking a new approach.
+ All IANA character set names for which the Python core library provides codecs are supported.
+
+ Basic usage:
+ >>> from charset_normalizer import from_bytes
+ >>> results = from_bytes('Bсеки човек има право на образование. Oбразованието!'.encode('utf_8'))
+ >>> best_guess = results.best()
+ >>> str(best_guess)
+ 'Bсеки човек има право на образование. Oбразованието!'
+
+ Others methods and usages are available - see the full documentation
+ at <https://github.com/Ousret/charset_normalizer>.
+ :copyright: (c) 2021 by Ahmed TAHRI
+ :license: MIT, see LICENSE for more details.
+ """
+
+ from __future__ import annotations
+
+ import logging
+
+ from .api import from_bytes, from_fp, from_path, is_binary
+ from .legacy import detect
+ from .models import CharsetMatch, CharsetMatches
+ from .utils import set_logging_handler
+ from .version import VERSION, __version__
+
+ __all__ = (
+     "from_fp",
+     "from_path",
+     "from_bytes",
+     "is_binary",
+     "detect",
+     "CharsetMatch",
+     "CharsetMatches",
+     "__version__",
+     "VERSION",
+     "set_logging_handler",
+ )
+
+ # Attach a NullHandler to the top level logger by default
+ # https://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
+
+ logging.getLogger("charset_normalizer").addHandler(logging.NullHandler())
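A minimal usage sketch of the chardet-compatible detect helper re-exported by this __init__.py (illustrative only, not part of the committed file; the sample text is an assumption):

from charset_normalizer import detect

# detect() mirrors chardet's API: it returns a dict with 'encoding',
# 'language' and 'confidence' keys for the given bytes payload.
result = detect("Bсеки човек има право на образование.".encode("utf_8"))
print(result["encoding"], result["confidence"])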
wemm/lib/python3.10/site-packages/charset_normalizer/api.py ADDED
@@ -0,0 +1,668 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from os import PathLike
5
+ from typing import BinaryIO
6
+
7
+ from .cd import (
8
+ coherence_ratio,
9
+ encoding_languages,
10
+ mb_encoding_languages,
11
+ merge_coherence_ratios,
12
+ )
13
+ from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
14
+ from .md import mess_ratio
15
+ from .models import CharsetMatch, CharsetMatches
16
+ from .utils import (
17
+ any_specified_encoding,
18
+ cut_sequence_chunks,
19
+ iana_name,
20
+ identify_sig_or_bom,
21
+ is_cp_similar,
22
+ is_multi_byte_encoding,
23
+ should_strip_sig_or_bom,
24
+ )
25
+
26
+ logger = logging.getLogger("charset_normalizer")
27
+ explain_handler = logging.StreamHandler()
28
+ explain_handler.setFormatter(
29
+ logging.Formatter("%(asctime)s | %(levelname)s | %(message)s")
30
+ )
31
+
32
+
33
+ def from_bytes(
34
+ sequences: bytes | bytearray,
35
+ steps: int = 5,
36
+ chunk_size: int = 512,
37
+ threshold: float = 0.2,
38
+ cp_isolation: list[str] | None = None,
39
+ cp_exclusion: list[str] | None = None,
40
+ preemptive_behaviour: bool = True,
41
+ explain: bool = False,
42
+ language_threshold: float = 0.1,
43
+ enable_fallback: bool = True,
44
+ ) -> CharsetMatches:
45
+ """
46
+ Given a raw bytes sequence, return the best possibles charset usable to render str objects.
47
+ If there is no results, it is a strong indicator that the source is binary/not text.
48
+ By default, the process will extract 5 blocks of 512o each to assess the mess and coherence of a given sequence.
49
+ And will give up a particular code page after 20% of measured mess. Those criteria are customizable at will.
50
+
51
+ The preemptive behavior DOES NOT replace the traditional detection workflow, it prioritize a particular code page
52
+ but never take it for granted. Can improve the performance.
53
+
54
+ You may want to focus your attention to some code page or/and not others, use cp_isolation and cp_exclusion for that
55
+ purpose.
56
+
57
+ This function will strip the SIG in the payload/sequence every time except on UTF-16, UTF-32.
58
+ By default the library does not setup any handler other than the NullHandler, if you choose to set the 'explain'
59
+ toggle to True it will alter the logger configuration to add a StreamHandler that is suitable for debugging.
60
+ Custom logging format and handler can be set manually.
61
+ """
62
+
63
+ if not isinstance(sequences, (bytearray, bytes)):
64
+ raise TypeError(
65
+ "Expected object of type bytes or bytearray, got: {}".format(
66
+ type(sequences)
67
+ )
68
+ )
69
+
70
+ if explain:
71
+ previous_logger_level: int = logger.level
72
+ logger.addHandler(explain_handler)
73
+ logger.setLevel(TRACE)
74
+
75
+ length: int = len(sequences)
76
+
77
+ if length == 0:
78
+ logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
79
+ if explain: # Defensive: ensure exit path clean handler
80
+ logger.removeHandler(explain_handler)
81
+ logger.setLevel(previous_logger_level or logging.WARNING)
82
+ return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])
83
+
84
+ if cp_isolation is not None:
85
+ logger.log(
86
+ TRACE,
87
+ "cp_isolation is set. use this flag for debugging purpose. "
88
+ "limited list of encoding allowed : %s.",
89
+ ", ".join(cp_isolation),
90
+ )
91
+ cp_isolation = [iana_name(cp, False) for cp in cp_isolation]
92
+ else:
93
+ cp_isolation = []
94
+
95
+ if cp_exclusion is not None:
96
+ logger.log(
97
+ TRACE,
98
+ "cp_exclusion is set. use this flag for debugging purpose. "
99
+ "limited list of encoding excluded : %s.",
100
+ ", ".join(cp_exclusion),
101
+ )
102
+ cp_exclusion = [iana_name(cp, False) for cp in cp_exclusion]
103
+ else:
104
+ cp_exclusion = []
105
+
106
+ if length <= (chunk_size * steps):
107
+ logger.log(
108
+ TRACE,
109
+ "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
110
+ steps,
111
+ chunk_size,
112
+ length,
113
+ )
114
+ steps = 1
115
+ chunk_size = length
116
+
117
+ if steps > 1 and length / steps < chunk_size:
118
+ chunk_size = int(length / steps)
119
+
120
+ is_too_small_sequence: bool = len(sequences) < TOO_SMALL_SEQUENCE
121
+ is_too_large_sequence: bool = len(sequences) >= TOO_BIG_SEQUENCE
122
+
123
+ if is_too_small_sequence:
124
+ logger.log(
125
+ TRACE,
126
+ "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
127
+ length
128
+ ),
129
+ )
130
+ elif is_too_large_sequence:
131
+ logger.log(
132
+ TRACE,
133
+ "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
134
+ length
135
+ ),
136
+ )
137
+
138
+ prioritized_encodings: list[str] = []
139
+
140
+ specified_encoding: str | None = (
141
+ any_specified_encoding(sequences) if preemptive_behaviour else None
142
+ )
143
+
144
+ if specified_encoding is not None:
145
+ prioritized_encodings.append(specified_encoding)
146
+ logger.log(
147
+ TRACE,
148
+ "Detected declarative mark in sequence. Priority +1 given for %s.",
149
+ specified_encoding,
150
+ )
151
+
152
+ tested: set[str] = set()
153
+ tested_but_hard_failure: list[str] = []
154
+ tested_but_soft_failure: list[str] = []
155
+
156
+ fallback_ascii: CharsetMatch | None = None
157
+ fallback_u8: CharsetMatch | None = None
158
+ fallback_specified: CharsetMatch | None = None
159
+
160
+ results: CharsetMatches = CharsetMatches()
161
+
162
+ early_stop_results: CharsetMatches = CharsetMatches()
163
+
164
+ sig_encoding, sig_payload = identify_sig_or_bom(sequences)
165
+
166
+ if sig_encoding is not None:
167
+ prioritized_encodings.append(sig_encoding)
168
+ logger.log(
169
+ TRACE,
170
+ "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
171
+ len(sig_payload),
172
+ sig_encoding,
173
+ )
174
+
175
+ prioritized_encodings.append("ascii")
176
+
177
+ if "utf_8" not in prioritized_encodings:
178
+ prioritized_encodings.append("utf_8")
179
+
180
+ for encoding_iana in prioritized_encodings + IANA_SUPPORTED:
181
+ if cp_isolation and encoding_iana not in cp_isolation:
182
+ continue
183
+
184
+ if cp_exclusion and encoding_iana in cp_exclusion:
185
+ continue
186
+
187
+ if encoding_iana in tested:
188
+ continue
189
+
190
+ tested.add(encoding_iana)
191
+
192
+ decoded_payload: str | None = None
193
+ bom_or_sig_available: bool = sig_encoding == encoding_iana
194
+ strip_sig_or_bom: bool = bom_or_sig_available and should_strip_sig_or_bom(
195
+ encoding_iana
196
+ )
197
+
198
+ if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
199
+ logger.log(
200
+ TRACE,
201
+ "Encoding %s won't be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
202
+ encoding_iana,
203
+ )
204
+ continue
205
+ if encoding_iana in {"utf_7"} and not bom_or_sig_available:
206
+ logger.log(
207
+ TRACE,
208
+ "Encoding %s won't be tested as-is because detection is unreliable without BOM/SIG.",
209
+ encoding_iana,
210
+ )
211
+ continue
212
+
213
+ try:
214
+ is_multi_byte_decoder: bool = is_multi_byte_encoding(encoding_iana)
215
+ except (ModuleNotFoundError, ImportError):
216
+ logger.log(
217
+ TRACE,
218
+ "Encoding %s does not provide an IncrementalDecoder",
219
+ encoding_iana,
220
+ )
221
+ continue
222
+
223
+ try:
224
+ if is_too_large_sequence and is_multi_byte_decoder is False:
225
+ str(
226
+ (
227
+ sequences[: int(50e4)]
228
+ if strip_sig_or_bom is False
229
+ else sequences[len(sig_payload) : int(50e4)]
230
+ ),
231
+ encoding=encoding_iana,
232
+ )
233
+ else:
234
+ decoded_payload = str(
235
+ (
236
+ sequences
237
+ if strip_sig_or_bom is False
238
+ else sequences[len(sig_payload) :]
239
+ ),
240
+ encoding=encoding_iana,
241
+ )
242
+ except (UnicodeDecodeError, LookupError) as e:
243
+ if not isinstance(e, LookupError):
244
+ logger.log(
245
+ TRACE,
246
+ "Code page %s does not fit given bytes sequence at ALL. %s",
247
+ encoding_iana,
248
+ str(e),
249
+ )
250
+ tested_but_hard_failure.append(encoding_iana)
251
+ continue
252
+
253
+ similar_soft_failure_test: bool = False
254
+
255
+ for encoding_soft_failed in tested_but_soft_failure:
256
+ if is_cp_similar(encoding_iana, encoding_soft_failed):
257
+ similar_soft_failure_test = True
258
+ break
259
+
260
+ if similar_soft_failure_test:
261
+ logger.log(
262
+ TRACE,
263
+ "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
264
+ encoding_iana,
265
+ encoding_soft_failed,
266
+ )
267
+ continue
268
+
269
+ r_ = range(
270
+ 0 if not bom_or_sig_available else len(sig_payload),
271
+ length,
272
+ int(length / steps),
273
+ )
274
+
275
+ multi_byte_bonus: bool = (
276
+ is_multi_byte_decoder
277
+ and decoded_payload is not None
278
+ and len(decoded_payload) < length
279
+ )
280
+
281
+ if multi_byte_bonus:
282
+ logger.log(
283
+ TRACE,
284
+ "Code page %s is a multi byte encoding table and it appear that at least one character "
285
+ "was encoded using n-bytes.",
286
+ encoding_iana,
287
+ )
288
+
289
+ max_chunk_gave_up: int = int(len(r_) / 4)
290
+
291
+ max_chunk_gave_up = max(max_chunk_gave_up, 2)
292
+ early_stop_count: int = 0
293
+ lazy_str_hard_failure = False
294
+
295
+ md_chunks: list[str] = []
296
+ md_ratios = []
297
+
298
+ try:
299
+ for chunk in cut_sequence_chunks(
300
+ sequences,
301
+ encoding_iana,
302
+ r_,
303
+ chunk_size,
304
+ bom_or_sig_available,
305
+ strip_sig_or_bom,
306
+ sig_payload,
307
+ is_multi_byte_decoder,
308
+ decoded_payload,
309
+ ):
310
+ md_chunks.append(chunk)
311
+
312
+ md_ratios.append(
313
+ mess_ratio(
314
+ chunk,
315
+ threshold,
316
+ explain is True and 1 <= len(cp_isolation) <= 2,
317
+ )
318
+ )
319
+
320
+ if md_ratios[-1] >= threshold:
321
+ early_stop_count += 1
322
+
323
+ if (early_stop_count >= max_chunk_gave_up) or (
324
+ bom_or_sig_available and strip_sig_or_bom is False
325
+ ):
326
+ break
327
+ except (
328
+ UnicodeDecodeError
329
+ ) as e: # Lazy str loading may have missed something there
330
+ logger.log(
331
+ TRACE,
332
+ "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
333
+ encoding_iana,
334
+ str(e),
335
+ )
336
+ early_stop_count = max_chunk_gave_up
337
+ lazy_str_hard_failure = True
338
+
339
+ # We might want to check the sequence again with the whole content
340
+ # Only if initial MD tests passes
341
+ if (
342
+ not lazy_str_hard_failure
343
+ and is_too_large_sequence
344
+ and not is_multi_byte_decoder
345
+ ):
346
+ try:
347
+ sequences[int(50e3) :].decode(encoding_iana, errors="strict")
348
+ except UnicodeDecodeError as e:
349
+ logger.log(
350
+ TRACE,
351
+ "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
352
+ encoding_iana,
353
+ str(e),
354
+ )
355
+ tested_but_hard_failure.append(encoding_iana)
356
+ continue
357
+
358
+ mean_mess_ratio: float = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
359
+ if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
360
+ tested_but_soft_failure.append(encoding_iana)
361
+ logger.log(
362
+ TRACE,
363
+ "%s was excluded because of initial chaos probing. Gave up %i time(s). "
364
+ "Computed mean chaos is %f %%.",
365
+ encoding_iana,
366
+ early_stop_count,
367
+ round(mean_mess_ratio * 100, ndigits=3),
368
+ )
369
+ # Preparing those fallbacks in case we got nothing.
370
+ if (
371
+ enable_fallback
372
+ and encoding_iana in ["ascii", "utf_8", specified_encoding]
373
+ and not lazy_str_hard_failure
374
+ ):
375
+ fallback_entry = CharsetMatch(
376
+ sequences,
377
+ encoding_iana,
378
+ threshold,
379
+ False,
380
+ [],
381
+ decoded_payload,
382
+ preemptive_declaration=specified_encoding,
383
+ )
384
+ if encoding_iana == specified_encoding:
385
+ fallback_specified = fallback_entry
386
+ elif encoding_iana == "ascii":
387
+ fallback_ascii = fallback_entry
388
+ else:
389
+ fallback_u8 = fallback_entry
390
+ continue
391
+
392
+ logger.log(
393
+ TRACE,
394
+ "%s passed initial chaos probing. Mean measured chaos is %f %%",
395
+ encoding_iana,
396
+ round(mean_mess_ratio * 100, ndigits=3),
397
+ )
398
+
399
+ if not is_multi_byte_decoder:
400
+ target_languages: list[str] = encoding_languages(encoding_iana)
401
+ else:
402
+ target_languages = mb_encoding_languages(encoding_iana)
403
+
404
+ if target_languages:
405
+ logger.log(
406
+ TRACE,
407
+ "{} should target any language(s) of {}".format(
408
+ encoding_iana, str(target_languages)
409
+ ),
410
+ )
411
+
412
+ cd_ratios = []
413
+
414
+ # We shall skip the CD when its about ASCII
415
+ # Most of the time its not relevant to run "language-detection" on it.
416
+ if encoding_iana != "ascii":
417
+ for chunk in md_chunks:
418
+ chunk_languages = coherence_ratio(
419
+ chunk,
420
+ language_threshold,
421
+ ",".join(target_languages) if target_languages else None,
422
+ )
423
+
424
+ cd_ratios.append(chunk_languages)
425
+
426
+ cd_ratios_merged = merge_coherence_ratios(cd_ratios)
427
+
428
+ if cd_ratios_merged:
429
+ logger.log(
430
+ TRACE,
431
+ "We detected language {} using {}".format(
432
+ cd_ratios_merged, encoding_iana
433
+ ),
434
+ )
435
+
436
+ current_match = CharsetMatch(
437
+ sequences,
438
+ encoding_iana,
439
+ mean_mess_ratio,
440
+ bom_or_sig_available,
441
+ cd_ratios_merged,
442
+ (
443
+ decoded_payload
444
+ if (
445
+ is_too_large_sequence is False
446
+ or encoding_iana in [specified_encoding, "ascii", "utf_8"]
447
+ )
448
+ else None
449
+ ),
450
+ preemptive_declaration=specified_encoding,
451
+ )
452
+
453
+ results.append(current_match)
454
+
455
+ if (
456
+ encoding_iana in [specified_encoding, "ascii", "utf_8"]
457
+ and mean_mess_ratio < 0.1
458
+ ):
459
+ # If md says nothing to worry about, then... stop immediately!
460
+ if mean_mess_ratio == 0.0:
461
+ logger.debug(
462
+ "Encoding detection: %s is most likely the one.",
463
+ current_match.encoding,
464
+ )
465
+ if explain: # Defensive: ensure exit path clean handler
466
+ logger.removeHandler(explain_handler)
467
+ logger.setLevel(previous_logger_level)
468
+ return CharsetMatches([current_match])
469
+
470
+ early_stop_results.append(current_match)
471
+
472
+ if (
473
+ len(early_stop_results)
474
+ and (specified_encoding is None or specified_encoding in tested)
475
+ and "ascii" in tested
476
+ and "utf_8" in tested
477
+ ):
478
+ probable_result: CharsetMatch = early_stop_results.best() # type: ignore[assignment]
479
+ logger.debug(
480
+ "Encoding detection: %s is most likely the one.",
481
+ probable_result.encoding,
482
+ )
483
+ if explain: # Defensive: ensure exit path clean handler
484
+ logger.removeHandler(explain_handler)
485
+ logger.setLevel(previous_logger_level)
486
+
487
+ return CharsetMatches([probable_result])
488
+
489
+ if encoding_iana == sig_encoding:
490
+ logger.debug(
491
+ "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
492
+ "the beginning of the sequence.",
493
+ encoding_iana,
494
+ )
495
+ if explain: # Defensive: ensure exit path clean handler
496
+ logger.removeHandler(explain_handler)
497
+ logger.setLevel(previous_logger_level)
498
+ return CharsetMatches([results[encoding_iana]])
499
+
500
+ if len(results) == 0:
501
+ if fallback_u8 or fallback_ascii or fallback_specified:
502
+ logger.log(
503
+ TRACE,
504
+ "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
505
+ )
506
+
507
+ if fallback_specified:
508
+ logger.debug(
509
+ "Encoding detection: %s will be used as a fallback match",
510
+ fallback_specified.encoding,
511
+ )
512
+ results.append(fallback_specified)
513
+ elif (
514
+ (fallback_u8 and fallback_ascii is None)
515
+ or (
516
+ fallback_u8
517
+ and fallback_ascii
518
+ and fallback_u8.fingerprint != fallback_ascii.fingerprint
519
+ )
520
+ or (fallback_u8 is not None)
521
+ ):
522
+ logger.debug("Encoding detection: utf_8 will be used as a fallback match")
523
+ results.append(fallback_u8)
524
+ elif fallback_ascii:
525
+ logger.debug("Encoding detection: ascii will be used as a fallback match")
526
+ results.append(fallback_ascii)
527
+
528
+ if results:
529
+ logger.debug(
530
+ "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
531
+ results.best().encoding, # type: ignore
532
+ len(results) - 1,
533
+ )
534
+ else:
535
+ logger.debug("Encoding detection: Unable to determine any suitable charset.")
536
+
537
+ if explain:
538
+ logger.removeHandler(explain_handler)
539
+ logger.setLevel(previous_logger_level)
540
+
541
+ return results
542
+
543
+
544
+ def from_fp(
545
+ fp: BinaryIO,
546
+ steps: int = 5,
547
+ chunk_size: int = 512,
548
+ threshold: float = 0.20,
549
+ cp_isolation: list[str] | None = None,
550
+ cp_exclusion: list[str] | None = None,
551
+ preemptive_behaviour: bool = True,
552
+ explain: bool = False,
553
+ language_threshold: float = 0.1,
554
+ enable_fallback: bool = True,
555
+ ) -> CharsetMatches:
556
+ """
557
+ Same thing than the function from_bytes but using a file pointer that is already ready.
558
+ Will not close the file pointer.
559
+ """
560
+ return from_bytes(
561
+ fp.read(),
562
+ steps,
563
+ chunk_size,
564
+ threshold,
565
+ cp_isolation,
566
+ cp_exclusion,
567
+ preemptive_behaviour,
568
+ explain,
569
+ language_threshold,
570
+ enable_fallback,
571
+ )
572
+
573
+
574
+ def from_path(
575
+ path: str | bytes | PathLike, # type: ignore[type-arg]
576
+ steps: int = 5,
577
+ chunk_size: int = 512,
578
+ threshold: float = 0.20,
579
+ cp_isolation: list[str] | None = None,
580
+ cp_exclusion: list[str] | None = None,
581
+ preemptive_behaviour: bool = True,
582
+ explain: bool = False,
583
+ language_threshold: float = 0.1,
584
+ enable_fallback: bool = True,
585
+ ) -> CharsetMatches:
586
+ """
587
+ Same thing than the function from_bytes but with one extra step. Opening and reading given file path in binary mode.
588
+ Can raise IOError.
589
+ """
590
+ with open(path, "rb") as fp:
591
+ return from_fp(
592
+ fp,
593
+ steps,
594
+ chunk_size,
595
+ threshold,
596
+ cp_isolation,
597
+ cp_exclusion,
598
+ preemptive_behaviour,
599
+ explain,
600
+ language_threshold,
601
+ enable_fallback,
602
+ )
603
+
604
+
605
+ def is_binary(
606
+ fp_or_path_or_payload: PathLike | str | BinaryIO | bytes, # type: ignore[type-arg]
607
+ steps: int = 5,
608
+ chunk_size: int = 512,
609
+ threshold: float = 0.20,
610
+ cp_isolation: list[str] | None = None,
611
+ cp_exclusion: list[str] | None = None,
612
+ preemptive_behaviour: bool = True,
613
+ explain: bool = False,
614
+ language_threshold: float = 0.1,
615
+ enable_fallback: bool = False,
616
+ ) -> bool:
617
+ """
618
+ Detect if the given input (file, bytes, or path) points to a binary file. aka. not a string.
619
+ Based on the same main heuristic algorithms and default kwargs at the sole exception that fallbacks match
620
+ are disabled to be stricter around ASCII-compatible but unlikely to be a string.
621
+ """
622
+ if isinstance(fp_or_path_or_payload, (str, PathLike)):
623
+ guesses = from_path(
624
+ fp_or_path_or_payload,
625
+ steps=steps,
626
+ chunk_size=chunk_size,
627
+ threshold=threshold,
628
+ cp_isolation=cp_isolation,
629
+ cp_exclusion=cp_exclusion,
630
+ preemptive_behaviour=preemptive_behaviour,
631
+ explain=explain,
632
+ language_threshold=language_threshold,
633
+ enable_fallback=enable_fallback,
634
+ )
635
+ elif isinstance(
636
+ fp_or_path_or_payload,
637
+ (
638
+ bytes,
639
+ bytearray,
640
+ ),
641
+ ):
642
+ guesses = from_bytes(
643
+ fp_or_path_or_payload,
644
+ steps=steps,
645
+ chunk_size=chunk_size,
646
+ threshold=threshold,
647
+ cp_isolation=cp_isolation,
648
+ cp_exclusion=cp_exclusion,
649
+ preemptive_behaviour=preemptive_behaviour,
650
+ explain=explain,
651
+ language_threshold=language_threshold,
652
+ enable_fallback=enable_fallback,
653
+ )
654
+ else:
655
+ guesses = from_fp(
656
+ fp_or_path_or_payload,
657
+ steps=steps,
658
+ chunk_size=chunk_size,
659
+ threshold=threshold,
660
+ cp_isolation=cp_isolation,
661
+ cp_exclusion=cp_exclusion,
662
+ preemptive_behaviour=preemptive_behaviour,
663
+ explain=explain,
664
+ language_threshold=language_threshold,
665
+ enable_fallback=enable_fallback,
666
+ )
667
+
668
+ return not guesses
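A minimal usage sketch for the from_bytes / from_path / is_binary helpers defined in the file above (illustrative only, not part of the committed file; the file name in the commented line is hypothetical):

from charset_normalizer import from_bytes, is_binary

payload = "Bсеки човек има право на образование.".encode("cp1251")
best_guess = from_bytes(payload).best()  # CharsetMatch, or None when nothing plausible is found
if best_guess is not None:
    print(best_guess.encoding, str(best_guess))  # detected code page and the decoded text

# from_path("some_document.txt") would open a (hypothetical) file in binary mode and run the same pipeline;
# is_binary() returns True when no plausible text charset comes out of the detection process.
print(is_binary(b"\x00\xff\x00\xff"))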
wemm/lib/python3.10/site-packages/charset_normalizer/md.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (16.1 kB).
 
wemm/lib/python3.10/site-packages/charset_normalizer/md.py ADDED
@@ -0,0 +1,630 @@
1
+ from __future__ import annotations
2
+
3
+ from functools import lru_cache
4
+ from logging import getLogger
5
+
6
+ from .constant import (
7
+ COMMON_SAFE_ASCII_CHARACTERS,
8
+ TRACE,
9
+ UNICODE_SECONDARY_RANGE_KEYWORD,
10
+ )
11
+ from .utils import (
12
+ is_accentuated,
13
+ is_arabic,
14
+ is_arabic_isolated_form,
15
+ is_case_variable,
16
+ is_cjk,
17
+ is_emoticon,
18
+ is_hangul,
19
+ is_hiragana,
20
+ is_katakana,
21
+ is_latin,
22
+ is_punctuation,
23
+ is_separator,
24
+ is_symbol,
25
+ is_thai,
26
+ is_unprintable,
27
+ remove_accent,
28
+ unicode_range,
29
+ )
30
+
31
+
32
+ class MessDetectorPlugin:
33
+ """
34
+ Base abstract class used for mess detection plugins.
35
+ All detectors MUST extend and implement given methods.
36
+ """
37
+
38
+ def eligible(self, character: str) -> bool:
39
+ """
40
+ Determine if given character should be fed in.
41
+ """
42
+ raise NotImplementedError # pragma: nocover
43
+
44
+ def feed(self, character: str) -> None:
45
+ """
46
+ The main routine to be executed upon character.
47
+ Insert the logic in witch the text would be considered chaotic.
48
+ """
49
+ raise NotImplementedError # pragma: nocover
50
+
51
+ def reset(self) -> None: # pragma: no cover
52
+ """
53
+ Permit to reset the plugin to the initial state.
54
+ """
55
+ raise NotImplementedError
56
+
57
+ @property
58
+ def ratio(self) -> float:
59
+ """
60
+ Compute the chaos ratio based on what your feed() has seen.
61
+ Must NOT be lower than 0.; No restriction gt 0.
62
+ """
63
+ raise NotImplementedError # pragma: nocover
64
+
65
+
66
+ class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
67
+ def __init__(self) -> None:
68
+ self._punctuation_count: int = 0
69
+ self._symbol_count: int = 0
70
+ self._character_count: int = 0
71
+
72
+ self._last_printable_char: str | None = None
73
+ self._frenzy_symbol_in_word: bool = False
74
+
75
+ def eligible(self, character: str) -> bool:
76
+ return character.isprintable()
77
+
78
+ def feed(self, character: str) -> None:
79
+ self._character_count += 1
80
+
81
+ if (
82
+ character != self._last_printable_char
83
+ and character not in COMMON_SAFE_ASCII_CHARACTERS
84
+ ):
85
+ if is_punctuation(character):
86
+ self._punctuation_count += 1
87
+ elif (
88
+ character.isdigit() is False
89
+ and is_symbol(character)
90
+ and is_emoticon(character) is False
91
+ ):
92
+ self._symbol_count += 2
93
+
94
+ self._last_printable_char = character
95
+
96
+ def reset(self) -> None: # Abstract
97
+ self._punctuation_count = 0
98
+ self._character_count = 0
99
+ self._symbol_count = 0
100
+
101
+ @property
102
+ def ratio(self) -> float:
103
+ if self._character_count == 0:
104
+ return 0.0
105
+
106
+ ratio_of_punctuation: float = (
107
+ self._punctuation_count + self._symbol_count
108
+ ) / self._character_count
109
+
110
+ return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.0
111
+
112
+
113
+ class TooManyAccentuatedPlugin(MessDetectorPlugin):
114
+ def __init__(self) -> None:
115
+ self._character_count: int = 0
116
+ self._accentuated_count: int = 0
117
+
118
+ def eligible(self, character: str) -> bool:
119
+ return character.isalpha()
120
+
121
+ def feed(self, character: str) -> None:
122
+ self._character_count += 1
123
+
124
+ if is_accentuated(character):
125
+ self._accentuated_count += 1
126
+
127
+ def reset(self) -> None: # Abstract
128
+ self._character_count = 0
129
+ self._accentuated_count = 0
130
+
131
+ @property
132
+ def ratio(self) -> float:
133
+ if self._character_count < 8:
134
+ return 0.0
135
+
136
+ ratio_of_accentuation: float = self._accentuated_count / self._character_count
137
+ return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.0
138
+
139
+
140
+ class UnprintablePlugin(MessDetectorPlugin):
141
+ def __init__(self) -> None:
142
+ self._unprintable_count: int = 0
143
+ self._character_count: int = 0
144
+
145
+ def eligible(self, character: str) -> bool:
146
+ return True
147
+
148
+ def feed(self, character: str) -> None:
149
+ if is_unprintable(character):
150
+ self._unprintable_count += 1
151
+ self._character_count += 1
152
+
153
+ def reset(self) -> None: # Abstract
154
+ self._unprintable_count = 0
155
+
156
+ @property
157
+ def ratio(self) -> float:
158
+ if self._character_count == 0:
159
+ return 0.0
160
+
161
+ return (self._unprintable_count * 8) / self._character_count
162
+
163
+
164
+ class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
165
+ def __init__(self) -> None:
166
+ self._successive_count: int = 0
167
+ self._character_count: int = 0
168
+
169
+ self._last_latin_character: str | None = None
170
+
171
+ def eligible(self, character: str) -> bool:
172
+ return character.isalpha() and is_latin(character)
173
+
174
+ def feed(self, character: str) -> None:
175
+ self._character_count += 1
176
+ if (
177
+ self._last_latin_character is not None
178
+ and is_accentuated(character)
179
+ and is_accentuated(self._last_latin_character)
180
+ ):
181
+ if character.isupper() and self._last_latin_character.isupper():
182
+ self._successive_count += 1
183
+ # Worse if its the same char duplicated with different accent.
184
+ if remove_accent(character) == remove_accent(self._last_latin_character):
185
+ self._successive_count += 1
186
+ self._last_latin_character = character
187
+
188
+ def reset(self) -> None: # Abstract
189
+ self._successive_count = 0
190
+ self._character_count = 0
191
+ self._last_latin_character = None
192
+
193
+ @property
194
+ def ratio(self) -> float:
195
+ if self._character_count == 0:
196
+ return 0.0
197
+
198
+ return (self._successive_count * 2) / self._character_count
199
+
200
+
201
+ class SuspiciousRange(MessDetectorPlugin):
202
+ def __init__(self) -> None:
203
+ self._suspicious_successive_range_count: int = 0
204
+ self._character_count: int = 0
205
+ self._last_printable_seen: str | None = None
206
+
207
+ def eligible(self, character: str) -> bool:
208
+ return character.isprintable()
209
+
210
+ def feed(self, character: str) -> None:
211
+ self._character_count += 1
212
+
213
+ if (
214
+ character.isspace()
215
+ or is_punctuation(character)
216
+ or character in COMMON_SAFE_ASCII_CHARACTERS
217
+ ):
218
+ self._last_printable_seen = None
219
+ return
220
+
221
+ if self._last_printable_seen is None:
222
+ self._last_printable_seen = character
223
+ return
224
+
225
+ unicode_range_a: str | None = unicode_range(self._last_printable_seen)
226
+ unicode_range_b: str | None = unicode_range(character)
227
+
228
+ if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
229
+ self._suspicious_successive_range_count += 1
230
+
231
+ self._last_printable_seen = character
232
+
233
+ def reset(self) -> None: # Abstract
234
+ self._character_count = 0
235
+ self._suspicious_successive_range_count = 0
236
+ self._last_printable_seen = None
237
+
238
+ @property
239
+ def ratio(self) -> float:
240
+ if self._character_count <= 13:
241
+ return 0.0
242
+
243
+ ratio_of_suspicious_range_usage: float = (
244
+ self._suspicious_successive_range_count * 2
245
+ ) / self._character_count
246
+
247
+ return ratio_of_suspicious_range_usage
248
+
249
+
250
+ class SuperWeirdWordPlugin(MessDetectorPlugin):
251
+ def __init__(self) -> None:
252
+ self._word_count: int = 0
253
+ self._bad_word_count: int = 0
254
+ self._foreign_long_count: int = 0
255
+
256
+ self._is_current_word_bad: bool = False
257
+ self._foreign_long_watch: bool = False
258
+
259
+ self._character_count: int = 0
260
+ self._bad_character_count: int = 0
261
+
262
+ self._buffer: str = ""
263
+ self._buffer_accent_count: int = 0
264
+ self._buffer_glyph_count: int = 0
265
+
266
+ def eligible(self, character: str) -> bool:
267
+ return True
268
+
269
+ def feed(self, character: str) -> None:
270
+ if character.isalpha():
271
+ self._buffer += character
272
+ if is_accentuated(character):
273
+ self._buffer_accent_count += 1
274
+ if (
275
+ self._foreign_long_watch is False
276
+ and (is_latin(character) is False or is_accentuated(character))
277
+ and is_cjk(character) is False
278
+ and is_hangul(character) is False
279
+ and is_katakana(character) is False
280
+ and is_hiragana(character) is False
281
+ and is_thai(character) is False
282
+ ):
283
+ self._foreign_long_watch = True
284
+ if (
285
+ is_cjk(character)
286
+ or is_hangul(character)
287
+ or is_katakana(character)
288
+ or is_hiragana(character)
289
+ or is_thai(character)
290
+ ):
291
+ self._buffer_glyph_count += 1
292
+ return
293
+ if not self._buffer:
294
+ return
295
+ if (
296
+ character.isspace() or is_punctuation(character) or is_separator(character)
297
+ ) and self._buffer:
298
+ self._word_count += 1
299
+ buffer_length: int = len(self._buffer)
300
+
301
+ self._character_count += buffer_length
302
+
303
+ if buffer_length >= 4:
304
+ if self._buffer_accent_count / buffer_length >= 0.5:
305
+ self._is_current_word_bad = True
306
+ # Word/Buffer ending with an upper case accentuated letter are so rare,
307
+ # that we will consider them all as suspicious. Same weight as foreign_long suspicious.
308
+ elif (
309
+ is_accentuated(self._buffer[-1])
310
+ and self._buffer[-1].isupper()
311
+ and all(_.isupper() for _ in self._buffer) is False
312
+ ):
313
+ self._foreign_long_count += 1
314
+ self._is_current_word_bad = True
315
+ elif self._buffer_glyph_count == 1:
316
+ self._is_current_word_bad = True
317
+ self._foreign_long_count += 1
318
+ if buffer_length >= 24 and self._foreign_long_watch:
319
+ camel_case_dst = [
320
+ i
321
+ for c, i in zip(self._buffer, range(0, buffer_length))
322
+ if c.isupper()
323
+ ]
324
+ probable_camel_cased: bool = False
325
+
326
+ if camel_case_dst and (len(camel_case_dst) / buffer_length <= 0.3):
327
+ probable_camel_cased = True
328
+
329
+ if not probable_camel_cased:
330
+ self._foreign_long_count += 1
331
+ self._is_current_word_bad = True
332
+
333
+ if self._is_current_word_bad:
334
+ self._bad_word_count += 1
335
+ self._bad_character_count += len(self._buffer)
336
+ self._is_current_word_bad = False
337
+
338
+ self._foreign_long_watch = False
339
+ self._buffer = ""
340
+ self._buffer_accent_count = 0
341
+ self._buffer_glyph_count = 0
342
+ elif (
343
+ character not in {"<", ">", "-", "=", "~", "|", "_"}
344
+ and character.isdigit() is False
345
+ and is_symbol(character)
346
+ ):
347
+ self._is_current_word_bad = True
348
+ self._buffer += character
349
+
350
+ def reset(self) -> None: # Abstract
351
+ self._buffer = ""
352
+ self._is_current_word_bad = False
353
+ self._foreign_long_watch = False
354
+ self._bad_word_count = 0
355
+ self._word_count = 0
356
+ self._character_count = 0
357
+ self._bad_character_count = 0
358
+ self._foreign_long_count = 0
359
+
360
+ @property
361
+ def ratio(self) -> float:
362
+ if self._word_count <= 10 and self._foreign_long_count == 0:
363
+ return 0.0
364
+
365
+ return self._bad_character_count / self._character_count
366
+
367
+
368
+ class CjkInvalidStopPlugin(MessDetectorPlugin):
369
+ """
370
+ GB(Chinese) based encoding often render the stop incorrectly when the content does not fit and
371
+ can be easily detected. Searching for the overuse of '丅' and '丄'.
372
+ """
373
+
374
+ def __init__(self) -> None:
375
+ self._wrong_stop_count: int = 0
376
+ self._cjk_character_count: int = 0
377
+
378
+ def eligible(self, character: str) -> bool:
379
+ return True
380
+
381
+ def feed(self, character: str) -> None:
382
+ if character in {"丅", "丄"}:
383
+ self._wrong_stop_count += 1
384
+ return
385
+ if is_cjk(character):
386
+ self._cjk_character_count += 1
387
+
388
+ def reset(self) -> None: # Abstract
389
+ self._wrong_stop_count = 0
390
+ self._cjk_character_count = 0
391
+
392
+ @property
393
+ def ratio(self) -> float:
394
+ if self._cjk_character_count < 16:
395
+ return 0.0
396
+ return self._wrong_stop_count / self._cjk_character_count
397
+
398
+
399
+ class ArchaicUpperLowerPlugin(MessDetectorPlugin):
400
+ def __init__(self) -> None:
401
+ self._buf: bool = False
402
+
403
+ self._character_count_since_last_sep: int = 0
404
+
405
+ self._successive_upper_lower_count: int = 0
406
+ self._successive_upper_lower_count_final: int = 0
407
+
408
+ self._character_count: int = 0
409
+
410
+ self._last_alpha_seen: str | None = None
411
+ self._current_ascii_only: bool = True
412
+
413
+ def eligible(self, character: str) -> bool:
414
+ return True
415
+
416
+ def feed(self, character: str) -> None:
417
+ is_concerned = character.isalpha() and is_case_variable(character)
418
+ chunk_sep = is_concerned is False
419
+
420
+ if chunk_sep and self._character_count_since_last_sep > 0:
421
+ if (
422
+ self._character_count_since_last_sep <= 64
423
+ and character.isdigit() is False
424
+ and self._current_ascii_only is False
425
+ ):
426
+ self._successive_upper_lower_count_final += (
427
+ self._successive_upper_lower_count
428
+ )
429
+
430
+ self._successive_upper_lower_count = 0
431
+ self._character_count_since_last_sep = 0
432
+ self._last_alpha_seen = None
433
+ self._buf = False
434
+ self._character_count += 1
435
+ self._current_ascii_only = True
436
+
437
+ return
438
+
439
+ if self._current_ascii_only is True and character.isascii() is False:
440
+ self._current_ascii_only = False
441
+
442
+ if self._last_alpha_seen is not None:
443
+ if (character.isupper() and self._last_alpha_seen.islower()) or (
444
+ character.islower() and self._last_alpha_seen.isupper()
445
+ ):
446
+ if self._buf is True:
447
+ self._successive_upper_lower_count += 2
448
+ self._buf = False
449
+ else:
450
+ self._buf = True
451
+ else:
452
+ self._buf = False
453
+
454
+ self._character_count += 1
455
+ self._character_count_since_last_sep += 1
456
+ self._last_alpha_seen = character
457
+
458
+ def reset(self) -> None: # Abstract
459
+ self._character_count = 0
460
+ self._character_count_since_last_sep = 0
461
+ self._successive_upper_lower_count = 0
462
+ self._successive_upper_lower_count_final = 0
463
+ self._last_alpha_seen = None
464
+ self._buf = False
465
+ self._current_ascii_only = True
466
+
467
+ @property
468
+ def ratio(self) -> float:
469
+ if self._character_count == 0:
470
+ return 0.0
471
+
472
+ return self._successive_upper_lower_count_final / self._character_count
473
+
474
+
475
+ class ArabicIsolatedFormPlugin(MessDetectorPlugin):
476
+ def __init__(self) -> None:
477
+ self._character_count: int = 0
478
+ self._isolated_form_count: int = 0
479
+
480
+ def reset(self) -> None: # Abstract
481
+ self._character_count = 0
482
+ self._isolated_form_count = 0
483
+
484
+ def eligible(self, character: str) -> bool:
485
+ return is_arabic(character)
486
+
487
+ def feed(self, character: str) -> None:
488
+ self._character_count += 1
489
+
490
+ if is_arabic_isolated_form(character):
491
+ self._isolated_form_count += 1
492
+
493
+ @property
494
+ def ratio(self) -> float:
495
+ if self._character_count < 8:
496
+ return 0.0
497
+
498
+ isolated_form_usage: float = self._isolated_form_count / self._character_count
499
+
500
+ return isolated_form_usage
501
+
502
+
503
+ @lru_cache(maxsize=1024)
504
+ def is_suspiciously_successive_range(
505
+ unicode_range_a: str | None, unicode_range_b: str | None
506
+ ) -> bool:
507
+ """
508
+ Determine if two Unicode range seen next to each other can be considered as suspicious.
509
+ """
510
+ if unicode_range_a is None or unicode_range_b is None:
511
+ return True
512
+
513
+ if unicode_range_a == unicode_range_b:
514
+ return False
515
+
516
+ if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
517
+ return False
518
+
519
+ if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
520
+ return False
521
+
522
+ # Latin characters can be accompanied with a combining diacritical mark
523
+ # eg. Vietnamese.
524
+ if ("Latin" in unicode_range_a or "Latin" in unicode_range_b) and (
525
+ "Combining" in unicode_range_a or "Combining" in unicode_range_b
526
+ ):
527
+ return False
528
+
529
+ keywords_range_a, keywords_range_b = (
530
+ unicode_range_a.split(" "),
531
+ unicode_range_b.split(" "),
532
+ )
533
+
534
+ for el in keywords_range_a:
535
+ if el in UNICODE_SECONDARY_RANGE_KEYWORD:
536
+ continue
537
+ if el in keywords_range_b:
538
+ return False
539
+
540
+ # Japanese Exception
541
+ range_a_jp_chars, range_b_jp_chars = (
542
+ unicode_range_a
543
+ in (
544
+ "Hiragana",
545
+ "Katakana",
546
+ ),
547
+ unicode_range_b in ("Hiragana", "Katakana"),
548
+ )
549
+ if (range_a_jp_chars or range_b_jp_chars) and (
550
+ "CJK" in unicode_range_a or "CJK" in unicode_range_b
551
+ ):
552
+ return False
553
+ if range_a_jp_chars and range_b_jp_chars:
554
+ return False
555
+
556
+ if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
557
+ if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
558
+ return False
559
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
560
+ return False
561
+
562
+ # Chinese/Japanese use dedicated range for punctuation and/or separators.
563
+ if ("CJK" in unicode_range_a or "CJK" in unicode_range_b) or (
564
+ unicode_range_a in ["Katakana", "Hiragana"]
565
+ and unicode_range_b in ["Katakana", "Hiragana"]
566
+ ):
567
+ if "Punctuation" in unicode_range_a or "Punctuation" in unicode_range_b:
568
+ return False
569
+ if "Forms" in unicode_range_a or "Forms" in unicode_range_b:
570
+ return False
571
+ if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
572
+ return False
573
+
574
+ return True
575
+
576
+
577
+ @lru_cache(maxsize=2048)
578
+ def mess_ratio(
579
+ decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False
580
+ ) -> float:
581
+ """
582
+ Compute a mess ratio given a decoded bytes sequence. The maximum threshold does stop the computation earlier.
583
+ """
584
+
585
+ detectors: list[MessDetectorPlugin] = [
586
+ md_class() for md_class in MessDetectorPlugin.__subclasses__()
587
+ ]
588
+
589
+ length: int = len(decoded_sequence) + 1
590
+
591
+ mean_mess_ratio: float = 0.0
592
+
593
+ if length < 512:
594
+ intermediary_mean_mess_ratio_calc: int = 32
595
+ elif length <= 1024:
596
+ intermediary_mean_mess_ratio_calc = 64
597
+ else:
598
+ intermediary_mean_mess_ratio_calc = 128
599
+
600
+ for character, index in zip(decoded_sequence + "\n", range(length)):
601
+ for detector in detectors:
602
+ if detector.eligible(character):
603
+ detector.feed(character)
604
+
605
+ if (
606
+ index > 0 and index % intermediary_mean_mess_ratio_calc == 0
607
+ ) or index == length - 1:
608
+ mean_mess_ratio = sum(dt.ratio for dt in detectors)
609
+
610
+ if mean_mess_ratio >= maximum_threshold:
611
+ break
612
+
613
+ if debug:
614
+ logger = getLogger("charset_normalizer")
615
+
616
+ logger.log(
617
+ TRACE,
618
+ "Mess-detector extended-analysis start. "
619
+ f"intermediary_mean_mess_ratio_calc={intermediary_mean_mess_ratio_calc} mean_mess_ratio={mean_mess_ratio} "
620
+ f"maximum_threshold={maximum_threshold}",
621
+ )
622
+
623
+ if len(decoded_sequence) > 16:
624
+ logger.log(TRACE, f"Starting with: {decoded_sequence[:16]}")
625
+ logger.log(TRACE, f"Ending with: {decoded_sequence[-16::]}")
626
+
627
+ for dt in detectors:
628
+ logger.log(TRACE, f"{dt.__class__}: {dt.ratio}")
629
+
630
+ return round(mean_mess_ratio, 3)
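A small sketch of how the mess_ratio helper above can be exercised directly (illustrative only, not part of the committed file; the sample strings are assumptions):

from charset_normalizer.md import mess_ratio

# Clean prose should stay near 0.0, while mojibake-looking input tends to score higher
# and the computation stops early once maximum_threshold (default 0.2) is crossed.
print(mess_ratio("This is a perfectly ordinary English sentence."))
print(mess_ratio("ÃƒÂ©ÃƒÂ¨Ã‚Â£Ã‚Â¤ÃƒÂ¹", maximum_threshold=0.5))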
wemm/lib/python3.10/site-packages/idna/__pycache__/compat.cpython-310.pyc ADDED
Binary file (721 Bytes).
 
wemm/lib/python3.10/site-packages/idna/__pycache__/package_data.cpython-310.pyc ADDED
Binary file (178 Bytes).
 
wemm/lib/python3.10/site-packages/lightning_utilities/core/apply_func.py ADDED
@@ -0,0 +1,291 @@
1
+ # Copyright The Lightning AI team.
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # http://www.apache.org/licenses/LICENSE-2.0
4
+ #
5
+ import dataclasses
6
+ from collections import OrderedDict, defaultdict
7
+ from copy import deepcopy
8
+ from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Union
9
+
10
+
11
+ def is_namedtuple(obj: object) -> bool:
12
+ """Check if object is type nametuple."""
13
+ # https://github.com/pytorch/pytorch/blob/v1.8.1/torch/nn/parallel/scatter_gather.py#L4-L8
14
+ return isinstance(obj, tuple) and hasattr(obj, "_asdict") and hasattr(obj, "_fields")
15
+
16
+
17
+ def is_dataclass_instance(obj: object) -> bool:
18
+ """Check if object is dataclass."""
19
+ # https://docs.python.org/3/library/dataclasses.html#module-level-decorators-classes-and-functions
20
+ return dataclasses.is_dataclass(obj) and not isinstance(obj, type)
21
+
22
+
23
+ def apply_to_collection(
24
+ data: Any,
25
+ dtype: Union[type, Any, Tuple[Union[type, Any]]],
26
+ function: Callable,
27
+ *args: Any,
28
+ wrong_dtype: Optional[Union[type, Tuple[type, ...]]] = None,
29
+ include_none: bool = True,
30
+ allow_frozen: bool = False,
31
+ **kwargs: Any,
32
+ ) -> Any:
33
+ """Recursively applies a function to all elements of a certain dtype.
34
+
35
+ Args:
36
+ data: the collection to apply the function to
37
+ dtype: the given function will be applied to all elements of this dtype
38
+ function: the function to apply
39
+ *args: positional arguments (will be forwarded to calls of ``function``)
40
+ wrong_dtype: the given function won't be applied if this type is specified and the given collections
41
+ is of the ``wrong_dtype`` even if it is of type ``dtype``
42
+ include_none: Whether to include an element if the output of ``function`` is ``None``.
43
+ allow_frozen: Whether not to error upon encountering a frozen dataclass instance.
44
+ **kwargs: keyword arguments (will be forwarded to calls of ``function``)
45
+
46
+ Returns:
47
+ The resulting collection
48
+
49
+ """
50
+ if include_none is False or wrong_dtype is not None or allow_frozen is True:
51
+ # not worth implementing these on the fast path: go with the slower option
52
+ return _apply_to_collection_slow(
53
+ data,
54
+ dtype,
55
+ function,
56
+ *args,
57
+ wrong_dtype=wrong_dtype,
58
+ include_none=include_none,
59
+ allow_frozen=allow_frozen,
60
+ **kwargs,
61
+ )
62
+ # fast path for the most common cases:
63
+ if isinstance(data, dtype): # single element
64
+ return function(data, *args, **kwargs)
65
+ if data.__class__ is list and all(isinstance(x, dtype) for x in data): # 1d homogeneous list
66
+ return [function(x, *args, **kwargs) for x in data]
67
+ if data.__class__ is tuple and all(isinstance(x, dtype) for x in data): # 1d homogeneous tuple
68
+ return tuple(function(x, *args, **kwargs) for x in data)
69
+ if data.__class__ is dict and all(isinstance(x, dtype) for x in data.values()): # 1d homogeneous dict
70
+ return {k: function(v, *args, **kwargs) for k, v in data.items()}
71
+ # slow path for everything else
72
+ return _apply_to_collection_slow(
73
+ data,
74
+ dtype,
75
+ function,
76
+ *args,
77
+ wrong_dtype=wrong_dtype,
78
+ include_none=include_none,
79
+ allow_frozen=allow_frozen,
80
+ **kwargs,
81
+ )
82
+
83
+
84
+ def _apply_to_collection_slow(
85
+ data: Any,
86
+ dtype: Union[type, Any, Tuple[Union[type, Any]]],
87
+ function: Callable,
88
+ *args: Any,
89
+ wrong_dtype: Optional[Union[type, Tuple[type, ...]]] = None,
90
+ include_none: bool = True,
91
+ allow_frozen: bool = False,
92
+ **kwargs: Any,
93
+ ) -> Any:
94
+ # Breaking condition
95
+ if isinstance(data, dtype) and (wrong_dtype is None or not isinstance(data, wrong_dtype)):
96
+ return function(data, *args, **kwargs)
97
+
98
+ elem_type = type(data)
99
+
100
+ # Recursively apply to collection items
101
+ if isinstance(data, Mapping):
102
+ out = []
103
+ for k, v in data.items():
104
+ v = _apply_to_collection_slow(
105
+ v,
106
+ dtype,
107
+ function,
108
+ *args,
109
+ wrong_dtype=wrong_dtype,
110
+ include_none=include_none,
111
+ allow_frozen=allow_frozen,
112
+ **kwargs,
113
+ )
114
+ if include_none or v is not None:
115
+ out.append((k, v))
116
+ if isinstance(data, defaultdict):
117
+ return elem_type(data.default_factory, OrderedDict(out))
118
+ return elem_type(OrderedDict(out))
119
+
120
+ is_namedtuple_ = is_namedtuple(data)
121
+ is_sequence = isinstance(data, Sequence) and not isinstance(data, str)
122
+ if is_namedtuple_ or is_sequence:
123
+ out = []
124
+ for d in data:
125
+ v = _apply_to_collection_slow(
126
+ d,
127
+ dtype,
128
+ function,
129
+ *args,
130
+ wrong_dtype=wrong_dtype,
131
+ include_none=include_none,
132
+ allow_frozen=allow_frozen,
133
+ **kwargs,
134
+ )
135
+ if include_none or v is not None:
136
+ out.append(v)
137
+ return elem_type(*out) if is_namedtuple_ else elem_type(out)
138
+
139
+ if is_dataclass_instance(data):
140
+ # make a deepcopy of the data,
141
+ # but do not deepcopy mapped fields since the computation would
142
+ # be wasted on values that likely get immediately overwritten
143
+ fields = {}
144
+ memo = {}
145
+ for field in dataclasses.fields(data):
146
+ field_value = getattr(data, field.name)
147
+ fields[field.name] = (field_value, field.init)
148
+ memo[id(field_value)] = field_value
149
+ result = deepcopy(data, memo=memo)
150
+ # apply function to each field
151
+ for field_name, (field_value, field_init) in fields.items():
152
+ v = None
153
+ if field_init:
154
+ v = _apply_to_collection_slow(
155
+ field_value,
156
+ dtype,
157
+ function,
158
+ *args,
159
+ wrong_dtype=wrong_dtype,
160
+ include_none=include_none,
161
+ allow_frozen=allow_frozen,
162
+ **kwargs,
163
+ )
164
+ if not field_init or (not include_none and v is None): # retain old value
165
+ v = getattr(data, field_name)
166
+ try:
167
+ setattr(result, field_name, v)
168
+ except dataclasses.FrozenInstanceError as e:
169
+ if allow_frozen:
170
+ # Quit early if we encounter a frozen data class; return `result` as is.
171
+ break
172
+ raise ValueError(
173
+ "A frozen dataclass was passed to `apply_to_collection` but this is not allowed."
174
+ ) from e
175
+ return result
176
+
177
+ # data is neither of dtype, nor a collection
178
+ return data
179
+
180
+
181
+ def apply_to_collections(
182
+ data1: Optional[Any],
183
+ data2: Optional[Any],
184
+ dtype: Union[type, Any, Tuple[Union[type, Any]]],
185
+ function: Callable,
186
+ *args: Any,
187
+ wrong_dtype: Optional[Union[type, Tuple[type]]] = None,
188
+ **kwargs: Any,
189
+ ) -> Any:
190
+ """Zips two collections and applies a function to their items of a certain dtype.
191
+
192
+ Args:
193
+ data1: The first collection
194
+ data2: The second collection
195
+ dtype: the given function will be applied to all elements of this dtype
196
+ function: the function to apply
197
+ *args: positional arguments (will be forwarded to calls of ``function``)
198
+ wrong_dtype: the given function won't be applied if this type is specified and the given collection
199
+ is of the ``wrong_dtype`` even if it is of type ``dtype``
200
+ **kwargs: keyword arguments (will be forwarded to calls of ``function``)
201
+
202
+ Returns:
203
+ The resulting collection
204
+
205
+ Raises:
206
+ ValueError:
207
+ If sequence collections have different data sizes.
208
+
209
+ """
210
+ if data1 is None:
211
+ if data2 is None:
212
+ return None
213
+ # in case they were passed reversed
214
+ data1, data2 = data2, None
215
+
216
+ elem_type = type(data1)
217
+
218
+ if isinstance(data1, dtype) and data2 is not None and (wrong_dtype is None or not isinstance(data1, wrong_dtype)):
219
+ return function(data1, data2, *args, **kwargs)
220
+
221
+ if isinstance(data1, Mapping) and data2 is not None:
222
+ # use union because we want to fail if a key does not exist in both
223
+ zipped = {k: (data1[k], data2[k]) for k in data1.keys() | data2.keys()}
224
+ return elem_type({
225
+ k: apply_to_collections(*v, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)
226
+ for k, v in zipped.items()
227
+ })
228
+
229
+ is_namedtuple_ = is_namedtuple(data1)
230
+ is_sequence = isinstance(data1, Sequence) and not isinstance(data1, str)
231
+ if (is_namedtuple_ or is_sequence) and data2 is not None:
232
+ if len(data1) != len(data2):
233
+ raise ValueError("Sequence collections have different sizes.")
234
+ out = [
235
+ apply_to_collections(v1, v2, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)
236
+ for v1, v2 in zip(data1, data2)
237
+ ]
238
+ return elem_type(*out) if is_namedtuple_ else elem_type(out)
239
+
240
+ if is_dataclass_instance(data1) and data2 is not None:
241
+ if not is_dataclass_instance(data2):
242
+ raise TypeError(
243
+ "Expected inputs to be dataclasses of the same type or to have identical fields"
244
+ f" but got input 1 of type {type(data1)} and input 2 of type {type(data2)}."
245
+ )
246
+ if not (
247
+ len(dataclasses.fields(data1)) == len(dataclasses.fields(data2))
248
+ and all(map(lambda f1, f2: isinstance(f1, type(f2)), dataclasses.fields(data1), dataclasses.fields(data2)))
249
+ ):
250
+ raise TypeError("Dataclasses fields do not match.")
251
+ # make a deepcopy of the data,
252
+ # but do not deepcopy mapped fields since the computation would
253
+ # be wasted on values that likely get immediately overwritten
254
+ data = [data1, data2]
255
+ fields: List[dict] = [{}, {}]
256
+ memo: dict = {}
257
+ for i in range(len(data)):
258
+ for field in dataclasses.fields(data[i]):
259
+ field_value = getattr(data[i], field.name)
260
+ fields[i][field.name] = (field_value, field.init)
261
+ if i == 0:
262
+ memo[id(field_value)] = field_value
263
+
264
+ result = deepcopy(data1, memo=memo)
265
+
266
+ # apply function to each field
267
+ for (field_name, (field_value1, field_init1)), (_, (field_value2, field_init2)) in zip(
268
+ fields[0].items(), fields[1].items()
269
+ ):
270
+ v = None
271
+ if field_init1 and field_init2:
272
+ v = apply_to_collections(
273
+ field_value1,
274
+ field_value2,
275
+ dtype,
276
+ function,
277
+ *args,
278
+ wrong_dtype=wrong_dtype,
279
+ **kwargs,
280
+ )
281
+ if not field_init1 or not field_init2 or v is None: # retain old value
282
+ return apply_to_collection(data1, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)
283
+ try:
284
+ setattr(result, field_name, v)
285
+ except dataclasses.FrozenInstanceError as e:
286
+ raise ValueError(
287
+ "A frozen dataclass was passed to `apply_to_collections` but this is not allowed."
288
+ ) from e
289
+ return result
290
+
291
+ return apply_to_collection(data1, dtype, function, *args, wrong_dtype=wrong_dtype, **kwargs)
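For reference, a minimal usage sketch of the two helpers defined above, with made-up sample data; the behaviour shown follows the fast and slow paths implemented in this module:

    from lightning_utilities.core.apply_func import apply_to_collection, apply_to_collections

    # apply a function to every int/float leaf of a nested collection; other leaves pass through
    batch = {"x": [1, 2, 3], "y": (4.0, 5.0), "name": "sample"}
    doubled = apply_to_collection(batch, (int, float), lambda v: v * 2)
    # -> {"x": [2, 4, 6], "y": (8.0, 10.0), "name": "sample"}

    # zip two structurally identical collections and combine matching leaves
    a = {"x": [1, 2], "y": 3}
    b = {"x": [10, 20], "y": 30}
    summed = apply_to_collections(a, b, int, lambda u, v: u + v)
    # -> {"x": [11, 22], "y": 33}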
wemm/lib/python3.10/site-packages/lightning_utilities/docs/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ """General tools for Docs."""
2
+
3
+ from lightning_utilities.docs.formatting import adjust_linked_external_docs
4
+ from lightning_utilities.docs.retriever import fetch_external_assets
5
+
6
+ __all__ = ["adjust_linked_external_docs", "fetch_external_assets"]
wemm/lib/python3.10/site-packages/lightning_utilities/docs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (410 Bytes). View file
 
wemm/lib/python3.10/site-packages/networkx/exception.py ADDED
@@ -0,0 +1,131 @@
1
+ """
2
+ **********
3
+ Exceptions
4
+ **********
5
+
6
+ Base exceptions and errors for NetworkX.
7
+ """
8
+
9
+ __all__ = [
10
+ "HasACycle",
11
+ "NodeNotFound",
12
+ "PowerIterationFailedConvergence",
13
+ "ExceededMaxIterations",
14
+ "AmbiguousSolution",
15
+ "NetworkXAlgorithmError",
16
+ "NetworkXException",
17
+ "NetworkXError",
18
+ "NetworkXNoCycle",
19
+ "NetworkXNoPath",
20
+ "NetworkXNotImplemented",
21
+ "NetworkXPointlessConcept",
22
+ "NetworkXUnbounded",
23
+ "NetworkXUnfeasible",
24
+ ]
25
+
26
+
27
+ class NetworkXException(Exception):
28
+ """Base class for exceptions in NetworkX."""
29
+
30
+
31
+ class NetworkXError(NetworkXException):
32
+ """Exception for a serious error in NetworkX"""
33
+
34
+
35
+ class NetworkXPointlessConcept(NetworkXException):
36
+ """Raised when a null graph is provided as input to an algorithm
37
+ that cannot use it.
38
+
39
+ The null graph is sometimes considered a pointless concept [1]_,
40
+ thus the name of the exception.
41
+
42
+ Notes
43
+ -----
44
+ Null graphs and empty graphs are often used interchangeably but they
45
+ are well defined in NetworkX. An ``empty_graph`` is a graph with ``n`` nodes
46
+ and 0 edges, and a ``null_graph`` is a graph with 0 nodes and 0 edges.
47
+
48
+ References
49
+ ----------
50
+ .. [1] Harary, F. and Read, R. "Is the Null Graph a Pointless
51
+ Concept?" In Graphs and Combinatorics Conference, George
52
+ Washington University. New York: Springer-Verlag, 1973.
53
+
54
+ """
55
+
56
+
57
+ class NetworkXAlgorithmError(NetworkXException):
58
+ """Exception for unexpected termination of algorithms."""
59
+
60
+
61
+ class NetworkXUnfeasible(NetworkXAlgorithmError):
62
+ """Exception raised by algorithms trying to solve a problem
63
+ instance that has no feasible solution."""
64
+
65
+
66
+ class NetworkXNoPath(NetworkXUnfeasible):
67
+ """Exception for algorithms that should return a path when running
68
+ on graphs where such a path does not exist."""
69
+
70
+
71
+ class NetworkXNoCycle(NetworkXUnfeasible):
72
+ """Exception for algorithms that should return a cycle when running
73
+ on graphs where such a cycle does not exist."""
74
+
75
+
76
+ class HasACycle(NetworkXException):
77
+ """Raised if a graph has a cycle when an algorithm expects that it
78
+ will have no cycles.
79
+
80
+ """
81
+
82
+
83
+ class NetworkXUnbounded(NetworkXAlgorithmError):
84
+ """Exception raised by algorithms trying to solve a maximization
85
+ or a minimization problem instance that is unbounded."""
86
+
87
+
88
+ class NetworkXNotImplemented(NetworkXException):
89
+ """Exception raised by algorithms not implemented for a type of graph."""
90
+
91
+
92
+ class NodeNotFound(NetworkXException):
93
+ """Exception raised if requested node is not present in the graph"""
94
+
95
+
96
+ class AmbiguousSolution(NetworkXException):
97
+ """Raised if more than one valid solution exists for an intermediary step
98
+ of an algorithm.
99
+
100
+ In the face of ambiguity, refuse the temptation to guess.
101
+ This may occur, for example, when trying to determine the
102
+ bipartite node sets in a disconnected bipartite graph when
103
+ computing bipartite matchings.
104
+
105
+ """
106
+
107
+
108
+ class ExceededMaxIterations(NetworkXException):
109
+ """Raised if a loop iterates too many times without breaking.
110
+
111
+ This may occur, for example, in an algorithm that computes
112
+ progressively better approximations to a value but exceeds an
113
+ iteration bound specified by the user.
114
+
115
+ """
116
+
117
+
118
+ class PowerIterationFailedConvergence(ExceededMaxIterations):
119
+ """Raised when the power iteration method fails to converge within a
120
+ specified iteration limit.
121
+
122
+ `num_iterations` is the number of iterations that have been
123
+ completed when this exception was raised.
124
+
125
+ """
126
+
127
+ def __init__(self, num_iterations, *args, **kw):
128
+ msg = f"power iteration failed to converge within {num_iterations} iterations"
129
+ exception_message = msg
130
+ superinit = super().__init__
131
+ superinit(self, exception_message, *args, **kw)
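For reference, a minimal sketch of how these exception classes typically surface when calling NetworkX algorithms; the two-component graph below is a made-up example:

    import networkx as nx

    G = nx.Graph([(1, 2), (3, 4)])  # two disconnected components
    try:
        nx.shortest_path(G, source=1, target=4)
    except nx.NetworkXNoPath:
        print("no path between nodes 1 and 4")
    except nx.NodeNotFound:
        print("source or target is missing from the graph")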
wemm/lib/python3.10/site-packages/pillow.libs/libXau-154567c4.so.6.0.0 ADDED
Binary file (22.1 kB). View file
 
wemm/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/requires.txt ADDED
@@ -0,0 +1,85 @@
1
+
2
+ [certs]
3
+
4
+ [check]
5
+ pytest-checkdocs>=2.4
6
+
7
+ [check:sys_platform != "cygwin"]
8
+ pytest-ruff>=0.2.1
9
+ ruff>=0.8.0
10
+
11
+ [core]
12
+ packaging>=24.2
13
+ more_itertools>=8.8
14
+ jaraco.text>=3.7
15
+ wheel>=0.43.0
16
+ platformdirs>=4.2.2
17
+ jaraco.collections
18
+ jaraco.functools>=4
19
+ packaging
20
+ more_itertools
21
+
22
+ [core:python_version < "3.10"]
23
+ importlib_metadata>=6
24
+
25
+ [core:python_version < "3.11"]
26
+ tomli>=2.0.1
27
+
28
+ [cover]
29
+ pytest-cov
30
+
31
+ [doc]
32
+ sphinx>=3.5
33
+ jaraco.packaging>=9.3
34
+ rst.linker>=1.9
35
+ furo
36
+ sphinx-lint
37
+ jaraco.tidelift>=1.4
38
+ pygments-github-lexers==0.0.5
39
+ sphinx-favicon
40
+ sphinx-inline-tabs
41
+ sphinx-reredirects
42
+ sphinxcontrib-towncrier
43
+ sphinx-notfound-page<2,>=1
44
+ pyproject-hooks!=1.1
45
+ towncrier<24.7
46
+
47
+ [enabler]
48
+ pytest-enabler>=2.2
49
+
50
+ [ssl]
51
+
52
+ [test]
53
+ pytest!=8.1.*,>=6
54
+ virtualenv>=13.0.0
55
+ wheel>=0.44.0
56
+ pip>=19.1
57
+ packaging>=24.2
58
+ jaraco.envs>=2.2
59
+ pytest-xdist>=3
60
+ jaraco.path>=3.7.2
61
+ build[virtualenv]>=1.0.3
62
+ filelock>=3.4.0
63
+ ini2toml[lite]>=0.14
64
+ tomli-w>=1.0.0
65
+ pytest-timeout
66
+ pytest-home>=0.5
67
+ pytest-subprocess
68
+ pyproject-hooks!=1.1
69
+ jaraco.test>=5.5
70
+
71
+ [test:python_version >= "3.9" and sys_platform != "cygwin"]
72
+ jaraco.develop>=7.21
73
+
74
+ [test:sys_platform != "cygwin"]
75
+ pytest-perf
76
+
77
+ [type]
78
+ pytest-mypy
79
+ mypy==1.14.*
80
+
81
+ [type:python_version < "3.10"]
82
+ importlib_metadata>=7.0.2
83
+
84
+ [type:sys_platform != "cygwin"]
85
+ jaraco.develop>=7.21
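The bracketed headings above are setuptools extras groups, optionally combined with environment markers such as :python_version < "3.10". A small sketch, assuming setuptools is installed, of listing the recorded requirements at runtime with the standard library:

    from importlib.metadata import requires

    # each entry is a PEP 508 requirement string; extras appear as 'extra == "..."' markers
    for req in requires("setuptools") or []:
        print(req)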
wemm/lib/python3.10/site-packages/setuptools-75.8.0-py3.10.egg-info/top_level.txt ADDED
@@ -0,0 +1,3 @@
1
+ _distutils_hack
2
+ pkg_resources
3
+ setuptools
wemm/lib/python3.10/site-packages/torchvision/__pycache__/_internally_replaced_utils.cpython-310.pyc ADDED
Binary file (1.73 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.26 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/_stereo_matching.cpython-310.pyc ADDED
Binary file (39.7 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/cifar.cpython-310.pyc ADDED
Binary file (5.85 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/cityscapes.cpython-310.pyc ADDED
Binary file (8.55 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/clevr.cpython-310.pyc ADDED
Binary file (4.13 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/coco.cpython-310.pyc ADDED
Binary file (5.02 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/country211.cpython-310.pyc ADDED
Binary file (2.79 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/dtd.cpython-310.pyc ADDED
Binary file (4.36 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/eurosat.cpython-310.pyc ADDED
Binary file (2.46 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/fer2013.cpython-310.pyc ADDED
Binary file (3.31 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/fgvc_aircraft.cpython-310.pyc ADDED
Binary file (4.83 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/flickr.cpython-310.pyc ADDED
Binary file (5.23 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/food101.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/gtsrb.cpython-310.pyc ADDED
Binary file (3.76 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/imagenet.cpython-310.pyc ADDED
Binary file (9.65 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/kinetics.cpython-310.pyc ADDED
Binary file (9.77 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/kitti.cpython-310.pyc ADDED
Binary file (5.91 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/lfw.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/lsun.cpython-310.pyc ADDED
Binary file (5.87 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/mnist.cpython-310.pyc ADDED
Binary file (21 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/oxford_iiit_pet.cpython-310.pyc ADDED
Binary file (5.65 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/phototour.cpython-310.pyc ADDED
Binary file (7.86 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/places365.cpython-310.pyc ADDED
Binary file (7.9 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/sbu.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/semeion.cpython-310.pyc ADDED
Binary file (3.33 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/stanford_cars.cpython-310.pyc ADDED
Binary file (4.76 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/stl10.cpython-310.pyc ADDED
Binary file (6.58 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/svhn.cpython-310.pyc ADDED
Binary file (4.37 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/video_utils.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/vision.cpython-310.pyc ADDED
Binary file (4.85 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/__pycache__/voc.cpython-310.pyc ADDED
Binary file (8.85 kB). View file
 
wemm/lib/python3.10/site-packages/torchvision/datasets/_optical_flow.py ADDED
@@ -0,0 +1,491 @@
1
+ import itertools
2
+ import os
3
+ from abc import ABC, abstractmethod
4
+ from glob import glob
5
+ from pathlib import Path
6
+ from typing import Callable, List, Optional, Tuple, Union
7
+
8
+ import numpy as np
9
+ import torch
10
+ from PIL import Image
11
+
12
+ from ..io.image import _read_png_16
13
+ from .utils import _read_pfm, verify_str_arg
14
+ from .vision import VisionDataset
15
+
16
+
17
+ T1 = Tuple[Image.Image, Image.Image, Optional[np.ndarray], Optional[np.ndarray]]
18
+ T2 = Tuple[Image.Image, Image.Image, Optional[np.ndarray]]
19
+
20
+
21
+ __all__ = (
22
+ "KittiFlow",
23
+ "Sintel",
24
+ "FlyingThings3D",
25
+ "FlyingChairs",
26
+ "HD1K",
27
+ )
28
+
29
+
30
+ class FlowDataset(ABC, VisionDataset):
31
+ # Some datasets like Kitti have a built-in valid_flow_mask, indicating which flow values are valid
32
+ # For those we return (img1, img2, flow, valid_flow_mask), and for the rest we return (img1, img2, flow),
33
+ # and it's up to whatever consumes the dataset to decide what valid_flow_mask should be.
34
+ _has_builtin_flow_mask = False
35
+
36
+ def __init__(self, root: str, transforms: Optional[Callable] = None) -> None:
37
+
38
+ super().__init__(root=root)
39
+ self.transforms = transforms
40
+
41
+ self._flow_list: List[str] = []
42
+ self._image_list: List[List[str]] = []
43
+
44
+ def _read_img(self, file_name: str) -> Image.Image:
45
+ img = Image.open(file_name)
46
+ if img.mode != "RGB":
47
+ img = img.convert("RGB")
48
+ return img
49
+
50
+ @abstractmethod
51
+ def _read_flow(self, file_name: str):
52
+ # Return the flow or a tuple with the flow and the valid_flow_mask if _has_builtin_flow_mask is True
53
+ pass
54
+
55
+ def __getitem__(self, index: int) -> Union[T1, T2]:
56
+
57
+ img1 = self._read_img(self._image_list[index][0])
58
+ img2 = self._read_img(self._image_list[index][1])
59
+
60
+ if self._flow_list: # it will be empty for some datasets when split="test"
61
+ flow = self._read_flow(self._flow_list[index])
62
+ if self._has_builtin_flow_mask:
63
+ flow, valid_flow_mask = flow
64
+ else:
65
+ valid_flow_mask = None
66
+ else:
67
+ flow = valid_flow_mask = None
68
+
69
+ if self.transforms is not None:
70
+ img1, img2, flow, valid_flow_mask = self.transforms(img1, img2, flow, valid_flow_mask)
71
+
72
+ if self._has_builtin_flow_mask or valid_flow_mask is not None:
73
+ # The `or valid_flow_mask is not None` part is here because the mask can be generated within a transform
74
+ return img1, img2, flow, valid_flow_mask
75
+ else:
76
+ return img1, img2, flow
77
+
78
+ def __len__(self) -> int:
79
+ return len(self._image_list)
80
+
81
+ def __rmul__(self, v: int) -> torch.utils.data.ConcatDataset:
82
+ return torch.utils.data.ConcatDataset([self] * v)
83
+
84
+
85
+ class Sintel(FlowDataset):
86
+ """`Sintel <http://sintel.is.tue.mpg.de/>`_ Dataset for optical flow.
87
+
88
+ The dataset is expected to have the following structure: ::
89
+
90
+ root
91
+ Sintel
92
+ testing
93
+ clean
94
+ scene_1
95
+ scene_2
96
+ ...
97
+ final
98
+ scene_1
99
+ scene_2
100
+ ...
101
+ training
102
+ clean
103
+ scene_1
104
+ scene_2
105
+ ...
106
+ final
107
+ scene_1
108
+ scene_2
109
+ ...
110
+ flow
111
+ scene_1
112
+ scene_2
113
+ ...
114
+
115
+ Args:
116
+ root (string): Root directory of the Sintel Dataset.
117
+ split (string, optional): The dataset split, either "train" (default) or "test"
118
+ pass_name (string, optional): The pass to use, either "clean" (default), "final", or "both". See link above for
119
+ details on the different passes.
120
+ transforms (callable, optional): A function/transform that takes in
121
+ ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
122
+ ``valid_flow_mask`` is expected for consistency with other datasets which
123
+ return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
124
+ """
125
+
126
+ def __init__(
127
+ self,
128
+ root: str,
129
+ split: str = "train",
130
+ pass_name: str = "clean",
131
+ transforms: Optional[Callable] = None,
132
+ ) -> None:
133
+ super().__init__(root=root, transforms=transforms)
134
+
135
+ verify_str_arg(split, "split", valid_values=("train", "test"))
136
+ verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
137
+ passes = ["clean", "final"] if pass_name == "both" else [pass_name]
138
+
139
+ root = Path(root) / "Sintel"
140
+ flow_root = root / "training" / "flow"
141
+
142
+ for pass_name in passes:
143
+ split_dir = "training" if split == "train" else split
144
+ image_root = root / split_dir / pass_name
145
+ for scene in os.listdir(image_root):
146
+ image_list = sorted(glob(str(image_root / scene / "*.png")))
147
+ for i in range(len(image_list) - 1):
148
+ self._image_list += [[image_list[i], image_list[i + 1]]]
149
+
150
+ if split == "train":
151
+ self._flow_list += sorted(glob(str(flow_root / scene / "*.flo")))
152
+
153
+ def __getitem__(self, index: int) -> Union[T1, T2]:
154
+ """Return example at given index.
155
+
156
+ Args:
157
+ index(int): The index of the example to retrieve
158
+
159
+ Returns:
160
+ tuple: A 3-tuple with ``(img1, img2, flow)``.
161
+ The flow is a numpy array of shape (2, H, W) and the images are PIL images.
162
+ ``flow`` is None if ``split="test"``.
163
+ If a valid flow mask is generated within the ``transforms`` parameter,
164
+ a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
165
+ """
166
+ return super().__getitem__(index)
167
+
168
+ def _read_flow(self, file_name: str) -> np.ndarray:
169
+ return _read_flo(file_name)
170
+
171
+
172
+ class KittiFlow(FlowDataset):
173
+ """`KITTI <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php?benchmark=flow>`__ dataset for optical flow (2015).
174
+
175
+ The dataset is expected to have the following structure: ::
176
+
177
+ root
178
+ KittiFlow
179
+ testing
180
+ image_2
181
+ training
182
+ image_2
183
+ flow_occ
184
+
185
+ Args:
186
+ root (string): Root directory of the KittiFlow Dataset.
187
+ split (string, optional): The dataset split, either "train" (default) or "test"
188
+ transforms (callable, optional): A function/transform that takes in
189
+ ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
190
+ """
191
+
192
+ _has_builtin_flow_mask = True
193
+
194
+ def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
195
+ super().__init__(root=root, transforms=transforms)
196
+
197
+ verify_str_arg(split, "split", valid_values=("train", "test"))
198
+
199
+ root = Path(root) / "KittiFlow" / (split + "ing")
200
+ images1 = sorted(glob(str(root / "image_2" / "*_10.png")))
201
+ images2 = sorted(glob(str(root / "image_2" / "*_11.png")))
202
+
203
+ if not images1 or not images2:
204
+ raise FileNotFoundError(
205
+ "Could not find the Kitti flow images. Please make sure the directory structure is correct."
206
+ )
207
+
208
+ for img1, img2 in zip(images1, images2):
209
+ self._image_list += [[img1, img2]]
210
+
211
+ if split == "train":
212
+ self._flow_list = sorted(glob(str(root / "flow_occ" / "*_10.png")))
213
+
214
+ def __getitem__(self, index: int) -> Union[T1, T2]:
215
+ """Return example at given index.
216
+
217
+ Args:
218
+ index(int): The index of the example to retrieve
219
+
220
+ Returns:
221
+ tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)``
222
+ where ``valid_flow_mask`` is a numpy boolean mask of shape (H, W)
223
+ indicating which flow values are valid. The flow is a numpy array of
224
+ shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
225
+ ``split="test"``.
226
+ """
227
+ return super().__getitem__(index)
228
+
229
+ def _read_flow(self, file_name: str) -> Tuple[np.ndarray, np.ndarray]:
230
+ return _read_16bits_png_with_flow_and_valid_mask(file_name)
231
+
232
+
233
+ class FlyingChairs(FlowDataset):
234
+ """`FlyingChairs <https://lmb.informatik.uni-freiburg.de/resources/datasets/FlyingChairs.en.html#flyingchairs>`_ Dataset for optical flow.
235
+
236
+ You will also need to download the FlyingChairs_train_val.txt file from the dataset page.
237
+
238
+ The dataset is expected to have the following structure: ::
239
+
240
+ root
241
+ FlyingChairs
242
+ data
243
+ 00001_flow.flo
244
+ 00001_img1.ppm
245
+ 00001_img2.ppm
246
+ ...
247
+ FlyingChairs_train_val.txt
248
+
249
+
250
+ Args:
251
+ root (string): Root directory of the FlyingChairs Dataset.
252
+ split (string, optional): The dataset split, either "train" (default) or "val"
253
+ transforms (callable, optional): A function/transform that takes in
254
+ ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
255
+ ``valid_flow_mask`` is expected for consistency with other datasets which
256
+ return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
257
+ """
258
+
259
+ def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
260
+ super().__init__(root=root, transforms=transforms)
261
+
262
+ verify_str_arg(split, "split", valid_values=("train", "val"))
263
+
264
+ root = Path(root) / "FlyingChairs"
265
+ images = sorted(glob(str(root / "data" / "*.ppm")))
266
+ flows = sorted(glob(str(root / "data" / "*.flo")))
267
+
268
+ split_file_name = "FlyingChairs_train_val.txt"
269
+
270
+ if not os.path.exists(root / split_file_name):
271
+ raise FileNotFoundError(
272
+ "The FlyingChairs_train_val.txt file was not found - please download it from the dataset page (see docstring)."
273
+ )
274
+
275
+ split_list = np.loadtxt(str(root / split_file_name), dtype=np.int32)
276
+ for i in range(len(flows)):
277
+ split_id = split_list[i]
278
+ if (split == "train" and split_id == 1) or (split == "val" and split_id == 2):
279
+ self._flow_list += [flows[i]]
280
+ self._image_list += [[images[2 * i], images[2 * i + 1]]]
281
+
282
+ def __getitem__(self, index: int) -> Union[T1, T2]:
283
+ """Return example at given index.
284
+
285
+ Args:
286
+ index(int): The index of the example to retrieve
287
+
288
+ Returns:
289
+ tuple: A 3-tuple with ``(img1, img2, flow)``.
290
+ The flow is a numpy array of shape (2, H, W) and the images are PIL images.
291
+ ``flow`` is None if ``split="val"``.
292
+ If a valid flow mask is generated within the ``transforms`` parameter,
293
+ a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
294
+ """
295
+ return super().__getitem__(index)
296
+
297
+ def _read_flow(self, file_name: str) -> np.ndarray:
298
+ return _read_flo(file_name)
299
+
300
+
301
+ class FlyingThings3D(FlowDataset):
302
+ """`FlyingThings3D <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ dataset for optical flow.
303
+
304
+ The dataset is expected to have the following structure: ::
305
+
306
+ root
307
+ FlyingThings3D
308
+ frames_cleanpass
309
+ TEST
310
+ TRAIN
311
+ frames_finalpass
312
+ TEST
313
+ TRAIN
314
+ optical_flow
315
+ TEST
316
+ TRAIN
317
+
318
+ Args:
319
+ root (string): Root directory of the FlyingThings3D Dataset.
320
+ split (string, optional): The dataset split, either "train" (default) or "test"
321
+ pass_name (string, optional): The pass to use, either "clean" (default) or "final" or "both". See link above for
322
+ details on the different passes.
323
+ camera (string, optional): Which camera to return images from. Can be either "left" (default) or "right" or "both".
324
+ transforms (callable, optional): A function/transform that takes in
325
+ ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
326
+ ``valid_flow_mask`` is expected for consistency with other datasets which
327
+ return a built-in valid mask, such as :class:`~torchvision.datasets.KittiFlow`.
328
+ """
329
+
330
+ def __init__(
331
+ self,
332
+ root: str,
333
+ split: str = "train",
334
+ pass_name: str = "clean",
335
+ camera: str = "left",
336
+ transforms: Optional[Callable] = None,
337
+ ) -> None:
338
+ super().__init__(root=root, transforms=transforms)
339
+
340
+ verify_str_arg(split, "split", valid_values=("train", "test"))
341
+ split = split.upper()
342
+
343
+ verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
344
+ passes = {
345
+ "clean": ["frames_cleanpass"],
346
+ "final": ["frames_finalpass"],
347
+ "both": ["frames_cleanpass", "frames_finalpass"],
348
+ }[pass_name]
349
+
350
+ verify_str_arg(camera, "camera", valid_values=("left", "right", "both"))
351
+ cameras = ["left", "right"] if camera == "both" else [camera]
352
+
353
+ root = Path(root) / "FlyingThings3D"
354
+
355
+ directions = ("into_future", "into_past")
356
+ for pass_name, camera, direction in itertools.product(passes, cameras, directions):
357
+ image_dirs = sorted(glob(str(root / pass_name / split / "*/*")))
358
+ image_dirs = sorted(Path(image_dir) / camera for image_dir in image_dirs)
359
+
360
+ flow_dirs = sorted(glob(str(root / "optical_flow" / split / "*/*")))
361
+ flow_dirs = sorted(Path(flow_dir) / direction / camera for flow_dir in flow_dirs)
362
+
363
+ if not image_dirs or not flow_dirs:
364
+ raise FileNotFoundError(
365
+ "Could not find the FlyingThings3D flow images. "
366
+ "Please make sure the directory structure is correct."
367
+ )
368
+
369
+ for image_dir, flow_dir in zip(image_dirs, flow_dirs):
370
+ images = sorted(glob(str(image_dir / "*.png")))
371
+ flows = sorted(glob(str(flow_dir / "*.pfm")))
372
+ for i in range(len(flows) - 1):
373
+ if direction == "into_future":
374
+ self._image_list += [[images[i], images[i + 1]]]
375
+ self._flow_list += [flows[i]]
376
+ elif direction == "into_past":
377
+ self._image_list += [[images[i + 1], images[i]]]
378
+ self._flow_list += [flows[i + 1]]
379
+
380
+ def __getitem__(self, index: int) -> Union[T1, T2]:
381
+ """Return example at given index.
382
+
383
+ Args:
384
+ index(int): The index of the example to retrieve
385
+
386
+ Returns:
387
+ tuple: A 3-tuple with ``(img1, img2, flow)``.
388
+ The flow is a numpy array of shape (2, H, W) and the images are PIL images.
389
+ ``flow`` is None if ``split="test"``.
390
+ If a valid flow mask is generated within the ``transforms`` parameter,
391
+ a 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` is returned.
392
+ """
393
+ return super().__getitem__(index)
394
+
395
+ def _read_flow(self, file_name: str) -> np.ndarray:
396
+ return _read_pfm(file_name)
397
+
398
+
399
+ class HD1K(FlowDataset):
400
+ """`HD1K <http://hci-benchmark.iwr.uni-heidelberg.de/>`__ dataset for optical flow.
401
+
402
+ The dataset is expected to have the following structure: ::
403
+
404
+ root
405
+ hd1k
406
+ hd1k_challenge
407
+ image_2
408
+ hd1k_flow_gt
409
+ flow_occ
410
+ hd1k_input
411
+ image_2
412
+
413
+ Args:
414
+ root (string): Root directory of the HD1K Dataset.
415
+ split (string, optional): The dataset split, either "train" (default) or "test"
416
+ transforms (callable, optional): A function/transform that takes in
417
+ ``img1, img2, flow, valid_flow_mask`` and returns a transformed version.
418
+ """
419
+
420
+ _has_builtin_flow_mask = True
421
+
422
+ def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
423
+ super().__init__(root=root, transforms=transforms)
424
+
425
+ verify_str_arg(split, "split", valid_values=("train", "test"))
426
+
427
+ root = Path(root) / "hd1k"
428
+ if split == "train":
429
+ # There are 36 "sequences" and we don't want seq i to overlap with seq i + 1, so we need this for loop
430
+ for seq_idx in range(36):
431
+ flows = sorted(glob(str(root / "hd1k_flow_gt" / "flow_occ" / f"{seq_idx:06d}_*.png")))
432
+ images = sorted(glob(str(root / "hd1k_input" / "image_2" / f"{seq_idx:06d}_*.png")))
433
+ for i in range(len(flows) - 1):
434
+ self._flow_list += [flows[i]]
435
+ self._image_list += [[images[i], images[i + 1]]]
436
+ else:
437
+ images1 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*10.png")))
438
+ images2 = sorted(glob(str(root / "hd1k_challenge" / "image_2" / "*11.png")))
439
+ for image1, image2 in zip(images1, images2):
440
+ self._image_list += [[image1, image2]]
441
+
442
+ if not self._image_list:
443
+ raise FileNotFoundError(
444
+ "Could not find the HD1K images. Please make sure the directory structure is correct."
445
+ )
446
+
447
+ def _read_flow(self, file_name: str) -> Tuple[np.ndarray, np.ndarray]:
448
+ return _read_16bits_png_with_flow_and_valid_mask(file_name)
449
+
450
+ def __getitem__(self, index: int) -> Union[T1, T2]:
451
+ """Return example at given index.
452
+
453
+ Args:
454
+ index(int): The index of the example to retrieve
455
+
456
+ Returns:
457
+ tuple: A 4-tuple with ``(img1, img2, flow, valid_flow_mask)`` where ``valid_flow_mask``
458
+ is a numpy boolean mask of shape (H, W)
459
+ indicating which flow values are valid. The flow is a numpy array of
460
+ shape (2, H, W) and the images are PIL images. ``flow`` and ``valid_flow_mask`` are None if
461
+ ``split="test"``.
462
+ """
463
+ return super().__getitem__(index)
464
+
465
+
466
+ def _read_flo(file_name: str) -> np.ndarray:
467
+ """Read .flo file in Middlebury format"""
468
+ # Code adapted from:
469
+ # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
470
+ # Everything needs to be in little Endian according to
471
+ # https://vision.middlebury.edu/flow/code/flow-code/README.txt
472
+ with open(file_name, "rb") as f:
473
+ magic = np.fromfile(f, "c", count=4).tobytes()
474
+ if magic != b"PIEH":
475
+ raise ValueError("Magic number incorrect. Invalid .flo file")
476
+
477
+ w = int(np.fromfile(f, "<i4", count=1))
478
+ h = int(np.fromfile(f, "<i4", count=1))
479
+ data = np.fromfile(f, "<f4", count=2 * w * h)
480
+ return data.reshape(h, w, 2).transpose(2, 0, 1)
481
+
482
+
483
+ def _read_16bits_png_with_flow_and_valid_mask(file_name: str) -> Tuple[np.ndarray, np.ndarray]:
484
+
485
+ flow_and_valid = _read_png_16(file_name).to(torch.float32)
486
+ flow, valid_flow_mask = flow_and_valid[:2, :, :], flow_and_valid[2, :, :]
487
+ flow = (flow - 2**15) / 64 # This conversion is explained somewhere on the kitti archive
488
+ valid_flow_mask = valid_flow_mask.bool()
489
+
490
+ # For consistency with other datasets, we convert to numpy
491
+ return flow.numpy(), valid_flow_mask.numpy()
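For reference, a short usage sketch of the flow datasets defined above; the ./data root is a placeholder and assumes the directory layouts described in the docstrings:

    from torchvision.datasets import KittiFlow, Sintel

    # Sintel returns (img1, img2, flow); flow is a (2, H, W) numpy array, or None for split="test"
    sintel = Sintel(root="./data", split="train", pass_name="clean")
    img1, img2, flow = sintel[0]

    # KittiFlow additionally returns a built-in (H, W) boolean valid_flow_mask
    kitti = KittiFlow(root="./data", split="train")
    img1, img2, flow, valid_flow_mask = kitti[0]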
wemm/lib/python3.10/site-packages/torchvision/datasets/_stereo_matching.py ADDED
@@ -0,0 +1,1224 @@
1
+ import functools
2
+ import json
3
+ import os
4
+ import random
5
+ import shutil
6
+ from abc import ABC, abstractmethod
7
+ from glob import glob
8
+ from pathlib import Path
9
+ from typing import Callable, cast, List, Optional, Tuple, Union
10
+
11
+ import numpy as np
12
+ from PIL import Image
13
+
14
+ from .utils import _read_pfm, download_and_extract_archive, verify_str_arg
15
+ from .vision import VisionDataset
16
+
17
+ T1 = Tuple[Image.Image, Image.Image, Optional[np.ndarray], np.ndarray]
18
+ T2 = Tuple[Image.Image, Image.Image, Optional[np.ndarray]]
19
+
20
+ __all__ = ()
21
+
22
+ _read_pfm_file = functools.partial(_read_pfm, slice_channels=1)
23
+
24
+
25
+ class StereoMatchingDataset(ABC, VisionDataset):
26
+ """Base interface for Stereo matching datasets"""
27
+
28
+ _has_built_in_disparity_mask = False
29
+
30
+ def __init__(self, root: str, transforms: Optional[Callable] = None) -> None:
31
+ """
32
+ Args:
33
+ root(str): Root directory of the dataset.
34
+ transforms(callable, optional): A function/transform that takes in Tuples of
35
+ (images, disparities, valid_masks) and returns a transformed version of each of them.
36
+ images is a Tuple of (``PIL.Image``, ``PIL.Image``)
37
+ disparities is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (1, H, W)
38
+ valid_masks is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (H, W)
39
+ In some cases, when a dataset does not provide disparities, the ``disparities`` and
40
+ ``valid_masks`` can be Tuples containing None values.
41
+ For training splits generally the datasets provide a minimal guarantee of
42
+ images: (``PIL.Image``, ``PIL.Image``)
43
+ disparities: (``np.ndarray``, ``None``) with shape (1, H, W)
44
+ Optionally, based on the dataset, it can return a ``mask`` as well:
45
+ valid_masks: (``np.ndarray | None``, ``None``) with shape (H, W)
46
+ For some test splits, the datasets provide outputs that look like:
47
+ images: (``PIL.Image``, ``PIL.Image``)
48
+ disparities: (``None``, ``None``)
49
+ Optionally, based on the dataset, it can return a ``mask`` as well:
50
+ valid_masks: (``None``, ``None``)
51
+ """
52
+ super().__init__(root=root)
53
+ self.transforms = transforms
54
+
55
+ self._images = [] # type: ignore
56
+ self._disparities = [] # type: ignore
57
+
58
+ def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
59
+ img = Image.open(file_path)
60
+ if img.mode != "RGB":
61
+ img = img.convert("RGB")
62
+ return img
63
+
64
+ def _scan_pairs(
65
+ self,
66
+ paths_left_pattern: str,
67
+ paths_right_pattern: Optional[str] = None,
68
+ ) -> List[Tuple[str, Optional[str]]]:
69
+
70
+ left_paths = list(sorted(glob(paths_left_pattern)))
71
+
72
+ right_paths: List[Union[None, str]]
73
+ if paths_right_pattern:
74
+ right_paths = list(sorted(glob(paths_right_pattern)))
75
+ else:
76
+ right_paths = list(None for _ in left_paths)
77
+
78
+ if not left_paths:
79
+ raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_left_pattern}")
80
+
81
+ if not right_paths:
82
+ raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_right_pattern}")
83
+
84
+ if len(left_paths) != len(right_paths):
85
+ raise ValueError(
86
+ f"Found {len(left_paths)} left files but {len(right_paths)} right files using:\n "
87
+ f"left pattern: {paths_left_pattern}\n"
88
+ f"right pattern: {paths_right_pattern}\n"
89
+ )
90
+
91
+ paths = list((left, right) for left, right in zip(left_paths, right_paths))
92
+ return paths
93
+
94
+ @abstractmethod
95
+ def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
96
+ # function that returns a disparity map and an occlusion map
97
+ pass
98
+
99
+ def __getitem__(self, index: int) -> Union[T1, T2]:
100
+ """Return example at given index.
101
+
102
+ Args:
103
+ index(int): The index of the example to retrieve
104
+
105
+ Returns:
106
+ tuple: A 3 or 4-tuple with ``(img_left, img_right, disparity, Optional[valid_mask])`` where ``valid_mask``
107
+ can be a numpy boolean mask of shape (H, W) if the dataset provides a file
108
+ indicating which disparity pixels are valid. The disparity is a numpy array of
109
+ shape (1, H, W) and the images are PIL images. ``disparity`` is None for
110
+ datasets on which for ``split="test"`` the authors did not provide annotations.
111
+ """
112
+ img_left = self._read_img(self._images[index][0])
113
+ img_right = self._read_img(self._images[index][1])
114
+
115
+ dsp_map_left, valid_mask_left = self._read_disparity(self._disparities[index][0])
116
+ dsp_map_right, valid_mask_right = self._read_disparity(self._disparities[index][1])
117
+
118
+ imgs = (img_left, img_right)
119
+ dsp_maps = (dsp_map_left, dsp_map_right)
120
+ valid_masks = (valid_mask_left, valid_mask_right)
121
+
122
+ if self.transforms is not None:
123
+ (
124
+ imgs,
125
+ dsp_maps,
126
+ valid_masks,
127
+ ) = self.transforms(imgs, dsp_maps, valid_masks)
128
+
129
+ if self._has_built_in_disparity_mask or valid_masks[0] is not None:
130
+ return imgs[0], imgs[1], dsp_maps[0], cast(np.ndarray, valid_masks[0])
131
+ else:
132
+ return imgs[0], imgs[1], dsp_maps[0]
133
+
134
+ def __len__(self) -> int:
135
+ return len(self._images)
136
+
137
+
138
+ class CarlaStereo(StereoMatchingDataset):
139
+ """
140
+ Carla simulator data linked in the `CREStereo github repo <https://github.com/megvii-research/CREStereo>`_.
141
+
142
+ The dataset is expected to have the following structure: ::
143
+
144
+ root
145
+ carla-highres
146
+ trainingF
147
+ scene1
148
+ img0.png
149
+ img1.png
150
+ disp0GT.pfm
151
+ disp1GT.pfm
152
+ calib.txt
153
+ scene2
154
+ img0.png
155
+ img1.png
156
+ disp0GT.pfm
157
+ disp1GT.pfm
158
+ calib.txt
159
+ ...
160
+
161
+ Args:
162
+ root (string): Root directory where `carla-highres` is located.
163
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
164
+ """
165
+
166
+ def __init__(self, root: str, transforms: Optional[Callable] = None) -> None:
167
+ super().__init__(root, transforms)
168
+
169
+ root = Path(root) / "carla-highres"
170
+
171
+ left_image_pattern = str(root / "trainingF" / "*" / "im0.png")
172
+ right_image_pattern = str(root / "trainingF" / "*" / "im1.png")
173
+ imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
174
+ self._images = imgs
175
+
176
+ left_disparity_pattern = str(root / "trainingF" / "*" / "disp0GT.pfm")
177
+ right_disparity_pattern = str(root / "trainingF" / "*" / "disp1GT.pfm")
178
+ disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
179
+ self._disparities = disparities
180
+
181
+ def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
182
+ disparity_map = _read_pfm_file(file_path)
183
+ disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
184
+ valid_mask = None
185
+ return disparity_map, valid_mask
186
+
187
+ def __getitem__(self, index: int) -> T1:
188
+ """Return example at given index.
189
+
190
+ Args:
191
+ index(int): The index of the example to retrieve
192
+
193
+ Returns:
194
+ tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
195
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
196
+ If a ``valid_mask`` is generated within the ``transforms`` parameter,
197
+ a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
198
+ """
199
+ return cast(T1, super().__getitem__(index))
200
+
201
+
202
+ class Kitti2012Stereo(StereoMatchingDataset):
203
+ """
204
+ KITTI dataset from the `2012 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_stereo_flow.php>`_.
205
+ Uses the RGB images for consistency with KITTI 2015.
206
+
207
+ The dataset is expected to have the following structure: ::
208
+
209
+ root
210
+ Kitti2012
211
+ testing
212
+ colored_0
213
+ 1_10.png
214
+ 2_10.png
215
+ ...
216
+ colored_1
217
+ 1_10.png
218
+ 2_10.png
219
+ ...
220
+ training
221
+ colored_0
222
+ 1_10.png
223
+ 2_10.png
224
+ ...
225
+ colored_1
226
+ 1_10.png
227
+ 2_10.png
228
+ ...
229
+ disp_noc
230
+ 1.png
231
+ 2.png
232
+ ...
233
+ calib
234
+
235
+ Args:
236
+ root (string): Root directory where `Kitti2012` is located.
237
+ split (string, optional): The dataset split of scenes, either "train" (default) or "test".
238
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
239
+ """
240
+
241
+ _has_built_in_disparity_mask = True
242
+
243
+ def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
244
+ super().__init__(root, transforms)
245
+
246
+ verify_str_arg(split, "split", valid_values=("train", "test"))
247
+
248
+ root = Path(root) / "Kitti2012" / (split + "ing")
249
+
250
+ left_img_pattern = str(root / "colored_0" / "*_10.png")
251
+ right_img_pattern = str(root / "colored_1" / "*_10.png")
252
+ self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
253
+
254
+ if split == "train":
255
+ disparity_pattern = str(root / "disp_noc" / "*.png")
256
+ self._disparities = self._scan_pairs(disparity_pattern, None)
257
+ else:
258
+ self._disparities = list((None, None) for _ in self._images)
259
+
260
+ def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
261
+ # test split has no disparity maps
262
+ if file_path is None:
263
+ return None, None
264
+
265
+ disparity_map = np.asarray(Image.open(file_path)) / 256.0
266
+ # unsqueeze the disparity map into (C, H, W) format
267
+ disparity_map = disparity_map[None, :, :]
268
+ valid_mask = None
269
+ return disparity_map, valid_mask
270
+
271
+ def __getitem__(self, index: int) -> T1:
272
+ """Return example at given index.
273
+
274
+ Args:
275
+ index(int): The index of the example to retrieve
276
+
277
+ Returns:
278
+ tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
279
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
280
+ ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
281
+ generate a valid mask.
282
+ Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
283
+ """
284
+ return cast(T1, super().__getitem__(index))
285
+
286
+
287
+ class Kitti2015Stereo(StereoMatchingDataset):
288
+ """
289
+ KITTI dataset from the `2015 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php>`_.
290
+
291
+ The dataset is expected to have the following structure: ::
292
+
293
+ root
294
+ Kitti2015
295
+ testing
296
+ image_2
297
+ img1.png
298
+ img2.png
299
+ ...
300
+ image_3
301
+ img1.png
302
+ img2.png
303
+ ...
304
+ training
305
+ image_2
306
+ img1.png
307
+ img2.png
308
+ ...
309
+ image_3
310
+ img1.png
311
+ img2.png
312
+ ...
313
+ disp_occ_0
314
+ img1.png
315
+ img2.png
316
+ ...
317
+ disp_occ_1
318
+ img1.png
319
+ img2.png
320
+ ...
321
+ calib
322
+
323
+ Args:
324
+ root (string): Root directory where `Kitti2015` is located.
325
+ split (string, optional): The dataset split of scenes, either "train" (default) or "test".
326
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
327
+ """
328
+
329
+ _has_built_in_disparity_mask = True
330
+
331
+ def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
332
+ super().__init__(root, transforms)
333
+
334
+ verify_str_arg(split, "split", valid_values=("train", "test"))
335
+
336
+ root = Path(root) / "Kitti2015" / (split + "ing")
337
+ left_img_pattern = str(root / "image_2" / "*.png")
338
+ right_img_pattern = str(root / "image_3" / "*.png")
339
+ self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
340
+
341
+ if split == "train":
342
+ left_disparity_pattern = str(root / "disp_occ_0" / "*.png")
343
+ right_disparity_pattern = str(root / "disp_occ_1" / "*.png")
344
+ self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
345
+ else:
346
+ self._disparities = list((None, None) for _ in self._images)
347
+
348
+ def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
349
+ # test split has no disparity maps
350
+ if file_path is None:
351
+ return None, None
352
+
353
+ disparity_map = np.asarray(Image.open(file_path)) / 256.0
354
+ # unsqueeze the disparity map into (C, H, W) format
355
+ disparity_map = disparity_map[None, :, :]
356
+ valid_mask = None
357
+ return disparity_map, valid_mask
358
+
359
+ def __getitem__(self, index: int) -> T1:
360
+ """Return example at given index.
361
+
362
+ Args:
363
+ index(int): The index of the example to retrieve
364
+
365
+ Returns:
366
+ tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
367
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
368
+ ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
369
+ generate a valid mask.
370
+ Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
371
+ """
372
+ return cast(T1, super().__getitem__(index))
373
+
374
+
375
+ class Middlebury2014Stereo(StereoMatchingDataset):
376
+ """Publicly available scenes from the Middlebury dataset `2014 version <https://vision.middlebury.edu/stereo/data/scenes2014/>`.
377
+
378
+ The dataset mostly follows the original format, without containing the ambient subdirectories: ::
379
+
380
+ root
381
+ Middlebury2014
382
+ train
383
+ scene1-{perfect,imperfect}
384
+ calib.txt
385
+ im{0,1}.png
386
+ im1E.png
387
+ im1L.png
388
+ disp{0,1}.pfm
389
+ disp{0,1}-n.png
390
+ disp{0,1}-sd.pfm
391
+ disp{0,1}y.pfm
392
+ scene2-{perfect,imperfect}
393
+ calib.txt
394
+ im{0,1}.png
395
+ im1E.png
396
+ im1L.png
397
+ disp{0,1}.pfm
398
+ disp{0,1}-n.png
399
+ disp{0,1}-sd.pfm
400
+ disp{0,1}y.pfm
401
+ ...
402
+ additional
403
+ scene1-{perfect,imperfect}
404
+ calib.txt
405
+ im{0,1}.png
406
+ im1E.png
407
+ im1L.png
408
+ disp{0,1}.pfm
409
+ disp{0,1}-n.png
410
+ disp{0,1}-sd.pfm
411
+ disp{0,1}y.pfm
412
+ ...
413
+ test
414
+ scene1
415
+ calib.txt
416
+ im{0,1}.png
417
+ scene2
418
+ calib.txt
419
+ im{0,1}.png
420
+ ...
421
+
422
+ Args:
423
+ root (string): Root directory of the Middlebury 2014 Dataset.
424
+ split (string, optional): The dataset split of scenes, either "train" (default), "test", or "additional"
425
+ use_ambient_views (boolean, optional): Whether to use different exposure or lighting views when possible.
426
+ The dataset samples with equal probability between ``[im1.png, im1E.png, im1L.png]``.
427
+ calibration (string, optional): Whether or not to use the calibrated (default) or uncalibrated scenes.
428
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
429
+ download (boolean, optional): Whether or not to download the dataset in the ``root`` directory.
430
+ """
431
+
432
+ splits = {
433
+ "train": [
434
+ "Adirondack",
435
+ "Jadeplant",
436
+ "Motorcycle",
437
+ "Piano",
438
+ "Pipes",
439
+ "Playroom",
440
+ "Playtable",
441
+ "Recycle",
442
+ "Shelves",
443
+ "Vintage",
444
+ ],
445
+ "additional": [
446
+ "Backpack",
447
+ "Bicycle1",
448
+ "Cable",
449
+ "Classroom1",
450
+ "Couch",
451
+ "Flowers",
452
+ "Mask",
453
+ "Shopvac",
454
+ "Sticks",
455
+ "Storage",
456
+ "Sword1",
457
+ "Sword2",
458
+ "Umbrella",
459
+ ],
460
+ "test": [
461
+ "Plants",
462
+ "Classroom2E",
463
+ "Classroom2",
464
+ "Australia",
465
+ "DjembeL",
466
+ "CrusadeP",
467
+ "Crusade",
468
+ "Hoops",
469
+ "Bicycle2",
470
+ "Staircase",
471
+ "Newkuba",
472
+ "AustraliaP",
473
+ "Djembe",
474
+ "Livingroom",
475
+ "Computer",
476
+ ],
477
+ }
478
+
479
+ _has_built_in_disparity_mask = True
480
+
481
+ def __init__(
482
+ self,
483
+ root: str,
484
+ split: str = "train",
485
+ calibration: Optional[str] = "perfect",
486
+ use_ambient_views: bool = False,
487
+ transforms: Optional[Callable] = None,
488
+ download: bool = False,
489
+ ) -> None:
490
+ super().__init__(root, transforms)
491
+
492
+ verify_str_arg(split, "split", valid_values=("train", "test", "additional"))
493
+ self.split = split
494
+
495
+ if calibration:
496
+ verify_str_arg(calibration, "calibration", valid_values=("perfect", "imperfect", "both", None)) # type: ignore
497
+ if split == "test":
498
+ raise ValueError("Split 'test' has only no calibration settings, please set `calibration=None`.")
499
+ else:
500
+ if split != "test":
501
+ raise ValueError(
502
+ f"Split '{split}' has calibration settings, however None was provided as an argument."
503
+ f"\nSetting calibration to 'perfect' for split '{split}'. Available calibration settings are: 'perfect', 'imperfect', 'both'.",
504
+ )
505
+
506
+ if download:
507
+ self._download_dataset(root)
508
+
509
+ root = Path(root) / "Middlebury2014"
510
+
511
+ if not os.path.exists(root / split):
512
+ raise FileNotFoundError(f"The {split} directory was not found in the provided root directory")
513
+
514
+ split_scenes = self.splits[split]
515
+ # check that the provided root folder contains the scene splits
516
+ if not any(
517
+ # using startswith to account for perfect / imperfect calibration
518
+ scene.startswith(s)
519
+ for scene in os.listdir(root / split)
520
+ for s in split_scenes
521
+ ):
522
+ raise FileNotFoundError(f"Provided root folder does not contain any scenes from the {split} split.")
523
+
524
+ calibrartion_suffixes = {
525
+ None: [""],
526
+ "perfect": ["-perfect"],
527
+ "imperfect": ["-imperfect"],
528
+ "both": ["-perfect", "-imperfect"],
529
+ }[calibration]
530
+
531
+ for calibration_suffix in calibrartion_suffixes:
532
+ scene_pattern = "*" + calibration_suffix
533
+ left_img_pattern = str(root / split / scene_pattern / "im0.png")
534
+ right_img_pattern = str(root / split / scene_pattern / "im1.png")
535
+ self._images += self._scan_pairs(left_img_pattern, right_img_pattern)
536
+
537
+ if split == "test":
538
+ self._disparities = list((None, None) for _ in self._images)
539
+ else:
540
+ left_dispartity_pattern = str(root / split / scene_pattern / "disp0.pfm")
541
+ right_dispartity_pattern = str(root / split / scene_pattern / "disp1.pfm")
542
+ self._disparities += self._scan_pairs(left_dispartity_pattern, right_dispartity_pattern)
543
+
544
+ self.use_ambient_views = use_ambient_views
545
+
546
+ def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
547
+ """
548
+ Function that reads either the original right image or an augmented view when ``use_ambient_views`` is True.
549
+ When ``use_ambient_views`` is True, the dataset will return at random one of ``[im1.png, im1E.png, im1L.png]``
550
+ as the right image.
551
+ """
552
+ ambient_file_paths: List[Union[str, Path]] # make mypy happy
553
+
554
+ if not isinstance(file_path, Path):
555
+ file_path = Path(file_path)
556
+
557
+ if file_path.name == "im1.png" and self.use_ambient_views:
558
+ base_path = file_path.parent
559
+ # initialize sampleable container
560
+ ambient_file_paths = list(base_path / view_name for view_name in ["im1E.png", "im1L.png"])
561
+ # double check that we're not going to try to read from an invalid file path
562
+ ambient_file_paths = list(filter(lambda p: os.path.exists(p), ambient_file_paths))
563
+ # keep the original image as an option as well for uniform sampling between base views
564
+ ambient_file_paths.append(file_path)
565
+ file_path = random.choice(ambient_file_paths) # type: ignore
566
+ return super()._read_img(file_path)
567
+
568
+ def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
569
+ # test split has no disparity maps
570
+ if file_path is None:
571
+ return None, None
572
+
573
+ disparity_map = _read_pfm_file(file_path)
574
+ disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
575
+ disparity_map[disparity_map == np.inf] = 0 # remove infinite disparities
576
+ valid_mask = (disparity_map > 0).squeeze(0) # mask out invalid disparities
577
+ return disparity_map, valid_mask
578
+
579
+ def _download_dataset(self, root: str) -> None:
580
+ base_url = "https://vision.middlebury.edu/stereo/data/scenes2014/zip"
581
+ # train and additional splits have 2 different calibration settings
582
+ root = Path(root) / "Middlebury2014"
583
+ split_name = self.split
584
+
585
+ if split_name != "test":
586
+ for split_scene in self.splits[split_name]:
587
+ split_root = root / split_name
588
+ for calibration in ["perfect", "imperfect"]:
589
+ scene_name = f"{split_scene}-{calibration}"
590
+ scene_url = f"{base_url}/{scene_name}.zip"
591
+ print(f"Downloading {scene_url}")
592
+ # download the scene only if it doesn't exist
593
+ if not (split_root / scene_name).exists():
594
+ download_and_extract_archive(
595
+ url=scene_url,
596
+ filename=f"{scene_name}.zip",
597
+ download_root=str(split_root),
598
+ remove_finished=True,
599
+ )
600
+ else:
601
+ os.makedirs(root / "test")
602
+ if any(s not in os.listdir(root / "test") for s in self.splits["test"]):
603
+ # test split is downloaded from a different location
604
+ test_set_url = "https://vision.middlebury.edu/stereo/submit3/zip/MiddEval3-data-F.zip"
605
+ # the unzip is going to produce a directory MiddEval3 with two subdirectories trainingF and testF
606
+ # we want to move the contents from testF into the directory
607
+ download_and_extract_archive(url=test_set_url, download_root=str(root), remove_finished=True)
608
+ for scene_dir, scene_names, _ in os.walk(str(root / "MiddEval3/testF")):
609
+ for scene in scene_names:
610
+ scene_dst_dir = root / "test"
611
+ scene_src_dir = Path(scene_dir) / scene
612
+ os.makedirs(scene_dst_dir, exist_ok=True)
613
+ shutil.move(str(scene_src_dir), str(scene_dst_dir))
614
+
615
+ # cleanup MiddEval3 directory
616
+ shutil.rmtree(str(root / "MiddEval3"))
617
+
618
+ def __getitem__(self, index: int) -> T2:
619
+ """Return example at given index.
620
+
621
+ Args:
622
+ index(int): The index of the example to retrieve
623
+
624
+ Returns:
625
+ tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
626
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
627
+ ``valid_mask`` is implicitly ``None`` for `split=test`.
628
+ """
629
+ return cast(T2, super().__getitem__(index))
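A minimal usage sketch for the class above (illustrative only, not part of the file in this commit). The "./data" root is a placeholder, and the import assumes Middlebury2014Stereo is re-exported from torchvision.datasets as in recent torchvision releases; with download=True the training scenes listed in the splits mapping are fetched automatically.

from torchvision.datasets import Middlebury2014Stereo

# Hypothetical root; the class creates/expects <root>/Middlebury2014/<split>/<scene>-perfect/...
dataset = Middlebury2014Stereo(root="./data", split="train", calibration="perfect", download=True)
img_left, img_right, disparity, valid_mask = dataset[0]  # 4-tuple, per the __getitem__ docstring above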
630
+
631
+
632
+ class CREStereo(StereoMatchingDataset):
633
+ """Synthetic dataset used in training the `CREStereo <https://arxiv.org/pdf/2203.11483.pdf>`_ architecture.
634
+ Dataset details are available in the official paper `repo <https://github.com/megvii-research/CREStereo>`_.
635
+
636
+ The dataset is expected to have the following structure: ::
637
+
638
+ root
639
+ CREStereo
640
+ tree
641
+ img1_left.jpg
642
+ img1_right.jpg
643
+ img1_left.disp.png
644
+ img1_right.disp.png
645
+ img2_left.jpg
646
+ img2_right.jpg
647
+ img2_left.disp.png
648
+ img2_right.disp.png
649
+ ...
650
+ shapenet
651
+ img1_left.jpg
652
+ img1_right.jpg
653
+ img1_left.disp.png
654
+ img1_right.disp.png
655
+ ...
656
+ reflective
657
+ img1_left.jpg
658
+ img1_right.jpg
659
+ img1_left.disp.png
660
+ img1_right.disp.png
661
+ ...
662
+ hole
663
+ img1_left.jpg
664
+ img1_right.jpg
665
+ img1_left.disp.png
666
+ img1_right.disp.png
667
+ ...
668
+
669
+ Args:
670
+ root (str): Root directory of the dataset.
671
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
672
+ """
673
+
674
+ _has_built_in_disparity_mask = True
675
+
676
+ def __init__(
677
+ self,
678
+ root: str,
679
+ transforms: Optional[Callable] = None,
680
+ ) -> None:
681
+ super().__init__(root, transforms)
682
+
683
+ root = Path(root) / "CREStereo"
684
+
685
+ dirs = ["shapenet", "reflective", "tree", "hole"]
686
+
687
+ for s in dirs:
688
+ left_image_pattern = str(root / s / "*_left.jpg")
689
+ right_image_pattern = str(root / s / "*_right.jpg")
690
+ imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
691
+ self._images += imgs
692
+
693
+ left_disparity_pattern = str(root / s / "*_left.disp.png")
694
+ right_disparity_pattern = str(root / s / "*_right.disp.png")
695
+ disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
696
+ self._disparities += disparities
697
+
698
+ def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
699
+ disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
700
+ # unsqueeze the disparity map into (C, H, W) format
701
+ disparity_map = disparity_map[None, :, :] / 32.0
702
+ valid_mask = None
703
+ return disparity_map, valid_mask
704
+
705
+ def __getitem__(self, index: int) -> T1:
706
+ """Return example at given index.
707
+
708
+ Args:
709
+ index(int): The index of the example to retrieve
710
+
711
+ Returns:
712
+ tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
713
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
714
+ ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
715
+ generate a valid mask.
716
+ """
717
+ return cast(T1, super().__getitem__(index))
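A usage sketch under the same caveats (placeholder root, class assumed re-exported from torchvision.datasets). There is no download flag here, so the four folders shown in the structure above must already sit under <root>/CREStereo; note that _read_disparity divides the stored PNG values by 32 before returning them.

from torchvision.datasets import CREStereo

dataset = CREStereo(root="./data")  # expects ./data/CREStereo/{tree,shapenet,reflective,hole}
img_left, img_right, disparity, valid_mask = dataset[0]  # disparity is the stored value divided by 32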
718
+
719
+
720
+ class FallingThingsStereo(StereoMatchingDataset):
721
+ """`FallingThings <https://research.nvidia.com/publication/2018-06_falling-things-synthetic-dataset-3d-object-detection-and-pose-estimation>`_ dataset.
722
+
723
+ The dataset is expected to have the following structure: ::
724
+
725
+ root
726
+ FallingThings
727
+ single
728
+ dir1
729
+ scene1
730
+ _object_settings.json
731
+ _camera_settings.json
732
+ image1.left.depth.png
733
+ image1.right.depth.png
734
+ image1.left.jpg
735
+ image1.right.jpg
736
+ image2.left.depth.png
737
+ image2.right.depth.png
738
+ image2.left.jpg
739
+ image2.right.jpg
740
+ ...
741
+ scene2
742
+ ...
743
+ mixed
744
+ scene1
745
+ _object_settings.json
746
+ _camera_settings.json
747
+ image1.left.depth.png
748
+ image1.right.depth.png
749
+ image1.left.jpg
750
+ image1.right.jpg
751
+ image2.left.depth.png
752
+ image2.right.depth.png
753
+ image2.left.jpg
754
+ image2.right.jpg
755
+ ...
756
+ scene2
757
+ ...
758
+
759
+ Args:
760
+ root (string): Root directory where FallingThings is located.
761
+ variant (string): Which variant to use. Either "single", "mixed", or "both".
762
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
763
+ """
764
+
765
+ def __init__(self, root: str, variant: str = "single", transforms: Optional[Callable] = None) -> None:
766
+ super().__init__(root, transforms)
767
+
768
+ root = Path(root) / "FallingThings"
769
+
770
+ verify_str_arg(variant, "variant", valid_values=("single", "mixed", "both"))
771
+
772
+ variants = {
773
+ "single": ["single"],
774
+ "mixed": ["mixed"],
775
+ "both": ["single", "mixed"],
776
+ }[variant]
777
+
778
+ split_prefix = {
779
+ "single": Path("*") / "*",
780
+ "mixed": Path("*"),
781
+ }
782
+
783
+ for s in variants:
784
+ left_img_pattern = str(root / s / split_prefix[s] / "*.left.jpg")
785
+ right_img_pattern = str(root / s / split_prefix[s] / "*.right.jpg")
786
+ self._images += self._scan_pairs(left_img_pattern, right_img_pattern)
787
+
788
+ left_disparity_pattern = str(root / s / split_prefix[s] / "*.left.depth.png")
789
+ right_disparity_pattern = str(root / s / split_prefix[s] / "*.right.depth.png")
790
+ self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
791
+
792
+ def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
793
+ # (H, W) image
794
+ depth = np.asarray(Image.open(file_path))
795
+ # as per https://research.nvidia.com/sites/default/files/pubs/2018-06_Falling-Things/readme_0.txt
796
+ # in order to extract disparity from depth maps
797
+ camera_settings_path = Path(file_path).parent / "_camera_settings.json"
798
+ with open(camera_settings_path, "r") as f:
799
+ # inverse of depth-from-disparity equation: depth = (baseline * focal) / (disparity * pixel_constant)
800
+ intrinsics = json.load(f)
801
+ focal = intrinsics["camera_settings"][0]["intrinsic_settings"]["fx"]
802
+ baseline, pixel_constant = 6, 100 # pixel constant is inverted
803
+ disparity_map = (baseline * focal * pixel_constant) / depth.astype(np.float32)
804
+ # unsqueeze disparity to (C, H, W)
805
+ disparity_map = disparity_map[None, :, :]
806
+ valid_mask = None
807
+ return disparity_map, valid_mask
808
+
809
+ def __getitem__(self, index: int) -> T1:
810
+ """Return example at given index.
811
+
812
+ Args:
813
+ index(int): The index of the example to retrieve
814
+
815
+ Returns:
816
+ tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
817
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
818
+ If a ``valid_mask`` is generated within the ``transforms`` parameter,
819
+ a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
820
+ """
821
+ return cast(T1, super().__getitem__(index))
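The depth-to-disparity conversion in _read_disparity above is compact; the sketch below restates it as a standalone helper (illustrative only, with hypothetical file paths) so the formula disparity = baseline * fx * pixel_constant / depth is easier to follow.

import json

import numpy as np
from PIL import Image


def falling_things_disparity(depth_png: str, camera_settings_json: str) -> np.ndarray:
    # Same arithmetic as FallingThingsStereo._read_disparity: read the (H, W) depth image,
    # take fx from _camera_settings.json and apply disparity = baseline * fx * pixel_constant / depth.
    depth = np.asarray(Image.open(depth_png)).astype(np.float32)
    with open(camera_settings_json) as f:
        fx = json.load(f)["camera_settings"][0]["intrinsic_settings"]["fx"]
    baseline, pixel_constant = 6, 100  # constants used by the dataset class above
    disparity = (baseline * fx * pixel_constant) / depth
    return disparity[None, :, :]  # (1, H, W), matching the dataset output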
822
+
823
+
824
+ class SceneFlowStereo(StereoMatchingDataset):
825
+ """Dataset interface for `Scene Flow <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ datasets.
826
+ This interface provides access to the `FlyingThings3D`, `Monkaa` and `Driving` datasets.
827
+
828
+ The dataset is expected to have the following structure: ::
829
+
830
+ root
831
+ SceneFlow
832
+ Monkaa
833
+ frames_cleanpass
834
+ scene1
835
+ left
836
+ img1.png
837
+ img2.png
838
+ right
839
+ img1.png
840
+ img2.png
841
+ scene2
842
+ left
843
+ img1.png
844
+ img2.png
845
+ right
846
+ img1.png
847
+ img2.png
848
+ frames_finalpass
849
+ scene1
850
+ left
851
+ img1.png
852
+ img2.png
853
+ right
854
+ img1.png
855
+ img2.png
856
+ ...
857
+ ...
858
+ disparity
859
+ scene1
860
+ left
861
+ img1.pfm
862
+ img2.pfm
863
+ right
864
+ img1.pfm
865
+ img2.pfm
866
+ FlyingThings3D
867
+ ...
868
+ ...
869
+
870
+ Args:
871
+ root (string): Root directory where SceneFlow is located.
872
+ variant (string): Which dataset variant to use, "FlyingThings3D" (default), "Monkaa" or "Driving".
873
+ pass_name (string): Which pass to use, "clean" (default), "final" or "both".
874
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
875
+
876
+ """
877
+
878
+ def __init__(
879
+ self,
880
+ root: str,
881
+ variant: str = "FlyingThings3D",
882
+ pass_name: str = "clean",
883
+ transforms: Optional[Callable] = None,
884
+ ) -> None:
885
+ super().__init__(root, transforms)
886
+
887
+ root = Path(root) / "SceneFlow"
888
+
889
+ verify_str_arg(variant, "variant", valid_values=("FlyingThings3D", "Driving", "Monkaa"))
890
+ verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))
891
+
892
+ passes = {
893
+ "clean": ["frames_cleanpass"],
894
+ "final": ["frames_finalpass"],
895
+ "both": ["frames_cleanpass", "frames_finalpass"],
896
+ }[pass_name]
897
+
898
+ root = root / variant
899
+
900
+ prefix_directories = {
901
+ "Monkaa": Path("*"),
902
+ "FlyingThings3D": Path("*") / "*" / "*",
903
+ "Driving": Path("*") / "*" / "*",
904
+ }
905
+
906
+ for p in passes:
907
+ left_image_pattern = str(root / p / prefix_directories[variant] / "left" / "*.png")
908
+ right_image_pattern = str(root / p / prefix_directories[variant] / "right" / "*.png")
909
+ self._images += self._scan_pairs(left_image_pattern, right_image_pattern)
910
+
911
+ left_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "left" / "*.pfm")
912
+ right_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "right" / "*.pfm")
913
+ self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
914
+
915
+ def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
916
+ disparity_map = _read_pfm_file(file_path)
917
+ disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
918
+ valid_mask = None
919
+ return disparity_map, valid_mask
920
+
921
+ def __getitem__(self, index: int) -> T1:
922
+ """Return example at given index.
923
+
924
+ Args:
925
+ index(int): The index of the example to retrieve
926
+
927
+ Returns:
928
+ tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
929
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
930
+ If a ``valid_mask`` is generated within the ``transforms`` parameter,
931
+ a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
932
+ """
933
+ return cast(T1, super().__getitem__(index))
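A usage sketch with the same caveats as above (placeholder root, class assumed re-exported from torchvision.datasets): variant picks the sub-dataset and pass_name selects the clean and/or final render passes scanned in the loop above.

from torchvision.datasets import SceneFlowStereo

dataset = SceneFlowStereo(root="./data", variant="Monkaa", pass_name="both")
img_left, img_right, disparity = dataset[0][:3]  # 3-tuple unless the transforms add a valid_mask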
934
+
935
+
936
+ class SintelStereo(StereoMatchingDataset):
937
+ """Sintel `Stereo Dataset <http://sintel.is.tue.mpg.de/stereo>`_.
938
+
939
+ The dataset is expected to have the following structure: ::
940
+
941
+ root
942
+ Sintel
943
+ training
944
+ final_left
945
+ scene1
946
+ img1.png
947
+ img2.png
948
+ ...
949
+ ...
950
+ final_right
951
+ scene2
952
+ img1.png
953
+ img2.png
954
+ ...
955
+ ...
956
+ disparities
957
+ scene1
958
+ img1.png
959
+ img2.png
960
+ ...
961
+ ...
962
+ occlusions
963
+ scene1
964
+ img1.png
965
+ img2.png
966
+ ...
967
+ ...
968
+ outofframe
969
+ scene1
970
+ img1.png
971
+ img2.png
972
+ ...
973
+ ...
974
+
975
+ Args:
976
+ root (string): Root directory where Sintel Stereo is located.
977
+ pass_name (string): The name of the pass to use, either "final", "clean" or "both".
978
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
979
+ """
980
+
981
+ _has_built_in_disparity_mask = True
982
+
983
+ def __init__(self, root: str, pass_name: str = "final", transforms: Optional[Callable] = None) -> None:
984
+ super().__init__(root, transforms)
985
+
986
+ verify_str_arg(pass_name, "pass_name", valid_values=("final", "clean", "both"))
987
+
988
+ root = Path(root) / "Sintel"
989
+ pass_names = {
990
+ "final": ["final"],
991
+ "clean": ["clean"],
992
+ "both": ["final", "clean"],
993
+ }[pass_name]
994
+
995
+ for p in pass_names:
996
+ left_img_pattern = str(root / "training" / f"{p}_left" / "*" / "*.png")
997
+ right_img_pattern = str(root / "training" / f"{p}_right" / "*" / "*.png")
998
+ self._images += self._scan_pairs(left_img_pattern, right_img_pattern)
999
+
1000
+ disparity_pattern = str(root / "training" / "disparities" / "*" / "*.png")
1001
+ self._disparities += self._scan_pairs(disparity_pattern, None)
1002
+
1003
+ def _get_occlusion_mask_paths(self, file_path: str) -> Tuple[str, str]:
1004
+ # helper function to get the occlusion mask paths
1005
+ # a path will look like .../.../.../training/disparities/scene1/img1.png
1006
+ # we want to get something like .../.../.../training/occlusions/scene1/img1.png
1007
+ fpath = Path(file_path)
1008
+ basename = fpath.name
1009
+ scenedir = fpath.parent
1010
+ # the parent of the scenedir is actually the disparity dir
1011
+ sampledir = scenedir.parent.parent
1012
+
1013
+ occlusion_path = str(sampledir / "occlusions" / scenedir.name / basename)
1014
+ outofframe_path = str(sampledir / "outofframe" / scenedir.name / basename)
1015
+
1016
+ if not os.path.exists(occlusion_path):
1017
+ raise FileNotFoundError(f"Occlusion mask {occlusion_path} does not exist")
1018
+
1019
+ if not os.path.exists(outofframe_path):
1020
+ raise FileNotFoundError(f"Out of frame mask {outofframe_path} does not exist")
1021
+
1022
+ return occlusion_path, outofframe_path
1023
+
1024
+ def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
1025
+ if file_path is None:
1026
+ return None, None
1027
+
1028
+ # disparity decoding as per Sintel instructions in the README provided with the dataset
1029
+ disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
1030
+ r, g, b = np.split(disparity_map, 3, axis=-1)
1031
+ disparity_map = r * 4 + g / (2**6) + b / (2**14)
1032
+ # reshape into (C, H, W) format
1033
+ disparity_map = np.transpose(disparity_map, (2, 0, 1))
1034
+ # find the appropriate file paths
1035
+ occluded_mask_path, out_of_frame_mask_path = self._get_occlusion_mask_paths(file_path)
1036
+ # occlusion masks
1037
+ valid_mask = np.asarray(Image.open(occluded_mask_path)) == 0
1038
+ # out of frame masks
1039
+ off_mask = np.asarray(Image.open(out_of_frame_mask_path)) == 0
1040
+ # combine the masks together
1041
+ valid_mask = np.logical_and(off_mask, valid_mask)
1042
+ return disparity_map, valid_mask
1043
+
1044
+ def __getitem__(self, index: int) -> T2:
1045
+ """Return example at given index.
1046
+
1047
+ Args:
1048
+ index(int): The index of the example to retrieve
1049
+
1050
+ Returns:
1051
+ tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
1052
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images whilst
1053
+ the valid_mask is a numpy array of shape (H, W).
1054
+ """
1055
+ return cast(T2, super().__getitem__(index))
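The RGB packing used by Sintel disparity PNGs is decoded inline in _read_disparity above; a standalone restatement of that decoding (illustrative only, with a hypothetical path) is:

import numpy as np
from PIL import Image


def decode_sintel_disparity(png_path: str) -> np.ndarray:
    # Same decoding as SintelStereo._read_disparity: disparity = R * 4 + G / 2**6 + B / 2**14,
    # returned in (1, H, W) layout.
    rgb = np.asarray(Image.open(png_path), dtype=np.float32)
    r, g, b = np.split(rgb, 3, axis=-1)
    disparity = r * 4 + g / (2**6) + b / (2**14)
    return np.transpose(disparity, (2, 0, 1))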
1056
+
1057
+
1058
+ class InStereo2k(StereoMatchingDataset):
1059
+ """`InStereo2k <https://github.com/YuhuaXu/StereoDataset>`_ dataset.
1060
+
1061
+ The dataset is expected to have the following structure: ::
1062
+
1063
+ root
1064
+ InStereo2k
1065
+ train
1066
+ scene1
1067
+ left.png
1068
+ right.png
1069
+ left_disp.png
1070
+ right_disp.png
1071
+ ...
1072
+ scene2
1073
+ ...
1074
+ test
1075
+ scene1
1076
+ left.png
1077
+ right.png
1078
+ left_disp.png
1079
+ right_disp.png
1080
+ ...
1081
+ scene2
1082
+ ...
1083
+
1084
+ Args:
1085
+ root (string): Root directory where InStereo2k is located.
1086
+ split (string): Either "train" or "test".
1087
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
1088
+ """
1089
+
1090
+ def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
1091
+ super().__init__(root, transforms)
1092
+
1093
+ root = Path(root) / "InStereo2k" / split
1094
+
1095
+ verify_str_arg(split, "split", valid_values=("train", "test"))
1096
+
1097
+ left_img_pattern = str(root / "*" / "left.png")
1098
+ right_img_pattern = str(root / "*" / "right.png")
1099
+ self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
1100
+
1101
+ left_disparity_pattern = str(root / "*" / "left_disp.png")
1102
+ right_disparity_pattern = str(root / "*" / "right_disp.png")
1103
+ self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
1104
+
1105
+ def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
1106
+ disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
1107
+ # unsqueeze disparity to (C, H, W)
1108
+ disparity_map = disparity_map[None, :, :] / 1024.0
1109
+ valid_mask = None
1110
+ return disparity_map, valid_mask
1111
+
1112
+ def __getitem__(self, index: int) -> T1:
1113
+ """Return example at given index.
1114
+
1115
+ Args:
1116
+ index(int): The index of the example to retrieve
1117
+
1118
+ Returns:
1119
+ tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
1120
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
1121
+ If a ``valid_mask`` is generated within the ``transforms`` parameter,
1122
+ a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
1123
+ """
1124
+ return cast(T1, super().__getitem__(index))
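A short usage sketch (placeholder root, class assumed re-exported from torchvision.datasets); the stored disparity PNGs are divided by 1024 in _read_disparity above.

from torchvision.datasets import InStereo2k

dataset = InStereo2k(root="./data", split="train")  # expects ./data/InStereo2k/train/<scene>/...
img_left, img_right, disparity = dataset[0][:3]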
1125
+
1126
+
1127
+ class ETH3DStereo(StereoMatchingDataset):
1128
+ """ETH3D `Low-Res Two-View <https://www.eth3d.net/datasets>`_ dataset.
1129
+
1130
+ The dataset is expected to have the following structure: ::
1131
+
1132
+ root
1133
+ ETH3D
1134
+ two_view_training
1135
+ scene1
1136
+ im1.png
1137
+ im0.png
1138
+ images.txt
1139
+ cameras.txt
1140
+ calib.txt
1141
+ scene2
1142
+ im1.png
1143
+ im0.png
1144
+ images.txt
1145
+ cameras.txt
1146
+ calib.txt
1147
+ ...
1148
+ two_view_training_gt
1149
+ scene1
1150
+ disp0GT.pfm
1151
+ mask0nocc.png
1152
+ scene2
1153
+ disp0GT.pfm
1154
+ mask0nocc.png
1155
+ ...
1156
+ two_view_testing
1157
+ scene1
1158
+ im1.png
1159
+ im0.png
1160
+ images.txt
1161
+ cameras.txt
1162
+ calib.txt
1163
+ scene2
1164
+ im1.png
1165
+ im0.png
1166
+ images.txt
1167
+ cameras.txt
1168
+ calib.txt
1169
+ ...
1170
+
1171
+ Args:
1172
+ root (string): Root directory of the ETH3D Dataset.
1173
+ split (string, optional): The dataset split of scenes, either "train" (default) or "test".
1174
+ transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
1175
+ """
1176
+
1177
+ _has_built_in_disparity_mask = True
1178
+
1179
+ def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
1180
+ super().__init__(root, transforms)
1181
+
1182
+ verify_str_arg(split, "split", valid_values=("train", "test"))
1183
+
1184
+ root = Path(root) / "ETH3D"
1185
+
1186
+ img_dir = "two_view_training" if split == "train" else "two_view_test"
1187
+ anot_dir = "two_view_training_gt"
1188
+
1189
+ left_img_pattern = str(root / img_dir / "*" / "im0.png")
1190
+ right_img_pattern = str(root / img_dir / "*" / "im1.png")
1191
+ self._images = self._scan_pairs(left_img_pattern, right_img_pattern)
1192
+
1193
+ if split == "test":
1194
+ self._disparities = list((None, None) for _ in self._images)
1195
+ else:
1196
+ disparity_pattern = str(root / anot_dir / "*" / "disp0GT.pfm")
1197
+ self._disparities = self._scan_pairs(disparity_pattern, None)
1198
+
1199
+ def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
1200
+ # test split has no disparity maps
1201
+ if file_path is None:
1202
+ return None, None
1203
+
1204
+ disparity_map = _read_pfm_file(file_path)
1205
+ disparity_map = np.abs(disparity_map) # ensure that the disparity is positive
1206
+ mask_path = Path(file_path).parent / "mask0nocc.png"
1207
+ valid_mask = Image.open(mask_path)
1208
+ valid_mask = np.asarray(valid_mask).astype(bool)
1209
+ return disparity_map, valid_mask
1210
+
1211
+ def __getitem__(self, index: int) -> T2:
1212
+ """Return example at given index.
1213
+
1214
+ Args:
1215
+ index(int): The index of the example to retrieve
1216
+
1217
+ Returns:
1218
+ tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
1219
+ The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
1220
+ ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
1221
+ generate a valid mask.
1222
+ Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
1223
+ """
1224
+ return cast(T2, super().__getitem__(index))
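A usage sketch with the same caveats: for the train split the valid mask is read from mask0nocc.png, while for the test split both disparity and valid_mask are None.

from torchvision.datasets import ETH3DStereo

dataset = ETH3DStereo(root="./data", split="train")
img_left, img_right, disparity, valid_mask = dataset[0]  # disparity and valid_mask are None for split="test"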
wemm/lib/python3.10/site-packages/torchvision/datasets/caltech.py ADDED
@@ -0,0 +1,237 @@
1
+ import os
2
+ import os.path
3
+ from typing import Any, Callable, List, Optional, Tuple, Union
4
+
5
+ from PIL import Image
6
+
7
+ from .utils import download_and_extract_archive, verify_str_arg
8
+ from .vision import VisionDataset
9
+
10
+
11
+ class Caltech101(VisionDataset):
12
+ """`Caltech 101 <https://data.caltech.edu/records/20086>`_ Dataset.
13
+
14
+ .. warning::
15
+
16
+ This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
17
+
18
+ Args:
19
+ root (string): Root directory of dataset where directory
20
+ ``caltech101`` exists or will be saved to if download is set to True.
21
+ target_type (string or list, optional): Type of target to use, ``category`` or
22
+ ``annotation``. Can also be a list to output a tuple with all specified
23
+ target types. ``category`` represents the target class, and
24
+ ``annotation`` is a list of points from a hand-generated outline.
25
+ Defaults to ``category``.
26
+ transform (callable, optional): A function/transform that takes in a PIL image
27
+ and returns a transformed version. E.g, ``transforms.RandomCrop``
28
+ target_transform (callable, optional): A function/transform that takes in the
29
+ target and transforms it.
30
+ download (bool, optional): If true, downloads the dataset from the internet and
31
+ puts it in root directory. If dataset is already downloaded, it is not
32
+ downloaded again.
33
+ """
34
+
35
+ def __init__(
36
+ self,
37
+ root: str,
38
+ target_type: Union[List[str], str] = "category",
39
+ transform: Optional[Callable] = None,
40
+ target_transform: Optional[Callable] = None,
41
+ download: bool = False,
42
+ ) -> None:
43
+ super().__init__(os.path.join(root, "caltech101"), transform=transform, target_transform=target_transform)
44
+ os.makedirs(self.root, exist_ok=True)
45
+ if isinstance(target_type, str):
46
+ target_type = [target_type]
47
+ self.target_type = [verify_str_arg(t, "target_type", ("category", "annotation")) for t in target_type]
48
+
49
+ if download:
50
+ self.download()
51
+
52
+ if not self._check_integrity():
53
+ raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
54
+
55
+ self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories")))
56
+ self.categories.remove("BACKGROUND_Google") # this is not a real class
57
+
58
+ # For some reason, the category names in "101_ObjectCategories" and
59
+ # "Annotations" do not always match. This is a manual map between the
60
+ # two. Defaults to using same name, since most names are fine.
61
+ name_map = {
62
+ "Faces": "Faces_2",
63
+ "Faces_easy": "Faces_3",
64
+ "Motorbikes": "Motorbikes_16",
65
+ "airplanes": "Airplanes_Side_2",
66
+ }
67
+ self.annotation_categories = list(map(lambda x: name_map[x] if x in name_map else x, self.categories))
68
+
69
+ self.index: List[int] = []
70
+ self.y = []
71
+ for (i, c) in enumerate(self.categories):
72
+ n = len(os.listdir(os.path.join(self.root, "101_ObjectCategories", c)))
73
+ self.index.extend(range(1, n + 1))
74
+ self.y.extend(n * [i])
75
+
76
+ def __getitem__(self, index: int) -> Tuple[Any, Any]:
77
+ """
78
+ Args:
79
+ index (int): Index
80
+
81
+ Returns:
82
+ tuple: (image, target) where the type of target is specified by target_type.
83
+ """
84
+ import scipy.io
85
+
86
+ img = Image.open(
87
+ os.path.join(
88
+ self.root,
89
+ "101_ObjectCategories",
90
+ self.categories[self.y[index]],
91
+ f"image_{self.index[index]:04d}.jpg",
92
+ )
93
+ )
94
+
95
+ target: Any = []
96
+ for t in self.target_type:
97
+ if t == "category":
98
+ target.append(self.y[index])
99
+ elif t == "annotation":
100
+ data = scipy.io.loadmat(
101
+ os.path.join(
102
+ self.root,
103
+ "Annotations",
104
+ self.annotation_categories[self.y[index]],
105
+ f"annotation_{self.index[index]:04d}.mat",
106
+ )
107
+ )
108
+ target.append(data["obj_contour"])
109
+ target = tuple(target) if len(target) > 1 else target[0]
110
+
111
+ if self.transform is not None:
112
+ img = self.transform(img)
113
+
114
+ if self.target_transform is not None:
115
+ target = self.target_transform(target)
116
+
117
+ return img, target
118
+
119
+ def _check_integrity(self) -> bool:
120
+ # can be more robust and check hash of files
121
+ return os.path.exists(os.path.join(self.root, "101_ObjectCategories"))
122
+
123
+ def __len__(self) -> int:
124
+ return len(self.index)
125
+
126
+ def download(self) -> None:
127
+ if self._check_integrity():
128
+ print("Files already downloaded and verified")
129
+ return
130
+
131
+ download_and_extract_archive(
132
+ "https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp",
133
+ self.root,
134
+ filename="101_ObjectCategories.tar.gz",
135
+ md5="b224c7392d521a49829488ab0f1120d9",
136
+ )
137
+ download_and_extract_archive(
138
+ "https://drive.google.com/file/d/175kQy3UsZ0wUEHZjqkUDdNVssr7bgh_m",
139
+ self.root,
140
+ filename="Annotations.tar",
141
+ md5="6f83eeb1f24d99cab4eb377263132c91",
142
+ )
143
+
144
+ def extra_repr(self) -> str:
145
+ return "Target type: {target_type}".format(**self.__dict__)
146
+
147
+
148
+ class Caltech256(VisionDataset):
149
+ """`Caltech 256 <https://data.caltech.edu/records/20087>`_ Dataset.
150
+
151
+ Args:
152
+ root (string): Root directory of dataset where directory
153
+ ``caltech256`` exists or will be saved to if download is set to True.
154
+ transform (callable, optional): A function/transform that takes in a PIL image
155
+ and returns a transformed version. E.g, ``transforms.RandomCrop``
156
+ target_transform (callable, optional): A function/transform that takes in the
157
+ target and transforms it.
158
+ download (bool, optional): If true, downloads the dataset from the internet and
159
+ puts it in root directory. If dataset is already downloaded, it is not
160
+ downloaded again.
161
+ """
162
+
163
+ def __init__(
164
+ self,
165
+ root: str,
166
+ transform: Optional[Callable] = None,
167
+ target_transform: Optional[Callable] = None,
168
+ download: bool = False,
169
+ ) -> None:
170
+ super().__init__(os.path.join(root, "caltech256"), transform=transform, target_transform=target_transform)
171
+ os.makedirs(self.root, exist_ok=True)
172
+
173
+ if download:
174
+ self.download()
175
+
176
+ if not self._check_integrity():
177
+ raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")
178
+
179
+ self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories")))
180
+ self.index: List[int] = []
181
+ self.y = []
182
+ for (i, c) in enumerate(self.categories):
183
+ n = len(
184
+ [
185
+ item
186
+ for item in os.listdir(os.path.join(self.root, "256_ObjectCategories", c))
187
+ if item.endswith(".jpg")
188
+ ]
189
+ )
190
+ self.index.extend(range(1, n + 1))
191
+ self.y.extend(n * [i])
192
+
193
+ def __getitem__(self, index: int) -> Tuple[Any, Any]:
194
+ """
195
+ Args:
196
+ index (int): Index
197
+
198
+ Returns:
199
+ tuple: (image, target) where target is index of the target class.
200
+ """
201
+ img = Image.open(
202
+ os.path.join(
203
+ self.root,
204
+ "256_ObjectCategories",
205
+ self.categories[self.y[index]],
206
+ f"{self.y[index] + 1:03d}_{self.index[index]:04d}.jpg",
207
+ )
208
+ )
209
+
210
+ target = self.y[index]
211
+
212
+ if self.transform is not None:
213
+ img = self.transform(img)
214
+
215
+ if self.target_transform is not None:
216
+ target = self.target_transform(target)
217
+
218
+ return img, target
219
+
220
+ def _check_integrity(self) -> bool:
221
+ # can be more robust and check hash of files
222
+ return os.path.exists(os.path.join(self.root, "256_ObjectCategories"))
223
+
224
+ def __len__(self) -> int:
225
+ return len(self.index)
226
+
227
+ def download(self) -> None:
228
+ if self._check_integrity():
229
+ print("Files already downloaded and verified")
230
+ return
231
+
232
+ download_and_extract_archive(
233
+ "https://drive.google.com/file/d/1r6o0pSROcV1_VwT4oSjA2FBUSCWGuxLK",
234
+ self.root,
235
+ filename="256_ObjectCategories.tar",
236
+ md5="67b4f42ca05d46448c6bb8ecd2220f6d",
237
+ )
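And a matching sketch for Caltech256 (placeholder root); dataset.categories maps the integer label back to a class-folder name.

from torchvision import transforms
from torchvision.datasets import Caltech256

dataset = Caltech256(root="./data", transform=transforms.ToTensor(), download=True)
img, label = dataset[0]
print(dataset.categories[label])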