ZTWHHH committed on
Commit
fbe4d79
·
verified ·
1 Parent(s): 442e555

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__init__.py +16 -0
  2. evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc +0 -0
  3. evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc +0 -0
  4. evalkit_cambrian/lib/python3.10/site-packages/imageio/config/extensions.py +2002 -0
  5. evalkit_cambrian/lib/python3.10/site-packages/imageio/config/plugins.py +782 -0
  6. evalkit_cambrian/lib/python3.10/site-packages/imageio/freeze.py +11 -0
  7. evalkit_cambrian/lib/python3.10/site-packages/imageio/plugins/grab.py +105 -0
  8. evalkit_cambrian/lib/python3.10/site-packages/imageio/py.typed +0 -0
  9. evalkit_cambrian/lib/python3.10/site-packages/ninja-1.11.1.3.dist-info/licenses/LICENSE_Apache_20 +191 -0
  10. infer_4_47_1/lib/python3.10/site-packages/ray/__pycache__/cluster_utils.cpython-310.pyc +0 -0
  11. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc +0 -0
  12. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/compat.cpython-310.pyc +0 -0
  13. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc +0 -0
  14. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc +0 -0
  15. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc +0 -0
  16. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log.cpython-310.pyc +0 -0
  17. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log_monitor.cpython-310.pyc +0 -0
  18. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/logging_utils.cpython-310.pyc +0 -0
  19. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/node.cpython-310.pyc +0 -0
  20. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc +0 -0
  21. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc +0 -0
  22. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/ray_experimental_perf.cpython-310.pyc +0 -0
  23. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc +0 -0
  24. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc +0 -0
  25. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/tls_utils.cpython-310.pyc +0 -0
  26. infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/utils.cpython-310.pyc +0 -0
  27. infer_4_47_1/lib/python3.10/site-packages/ray/_private/usage/__init__.py +0 -0
  28. infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__init__.py +0 -0
  29. infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__pycache__/__init__.cpython-310.pyc +0 -0
  30. infer_4_47_1/lib/python3.10/site-packages/ray/data/__init__.py +157 -0
  31. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/__init__.py +0 -0
  32. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/aggregate.py +365 -0
  33. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/arrow_block.py +650 -0
  34. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/batcher.py +325 -0
  35. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/block_builder.py +39 -0
  36. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/compute.py +151 -0
  37. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/delegating_block_builder.py +76 -0
  38. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/equalize.py +142 -0
  39. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/logging.py +208 -0
  40. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py +147 -0
  41. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/null_aggregate.py +276 -0
  42. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/output_buffer.py +109 -0
  43. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/pandas_block.py +627 -0
  44. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/plan.py +602 -0
  45. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/row.py +42 -0
  46. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/split.py +297 -0
  47. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/stats.py +1495 -0
  48. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/torch_iterable_dataset.py +10 -0
  49. infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/util.py +1091 -0
  50. infer_4_47_1/lib/python3.10/site-packages/ray/data/block.py +477 -0
evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Public surface of imageio's ``config`` package.

Re-exports the file-extension registry and the plugin registry so callers
can do ``from imageio.config import known_plugins, known_extensions``.
"""

from .extensions import (
    FileExtension,
    extension_list,
    known_extensions,
    video_extensions,
)
from .plugins import PluginConfig, known_plugins

__all__ = [
    "known_plugins",
    "PluginConfig",
    "extension_list",
    "known_extensions",
    "FileExtension",
    "video_extensions",
]
evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (422 Bytes). View file
 
evalkit_cambrian/lib/python3.10/site-packages/imageio/config/__pycache__/extensions.cpython-310.pyc ADDED
Binary file (32.5 kB). View file
 
evalkit_cambrian/lib/python3.10/site-packages/imageio/config/extensions.py ADDED
@@ -0,0 +1,2002 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
A set of objects representing each file extension recognized by ImageIO. If an
extension is not listed here it is still supported, as long as there exists a
supporting backend.

"""


class FileExtension:
    """File Extension Metadata

    This class holds information about an image file format associated with a
    given extension. This information is used to track plugins that are known to
    be able to handle a particular format. It also contains additional
    information about a format, which is used when creating the supported format
    docs.

    Plugins known to be able to handle this format are ordered by a ``priority``
    list. This list is used to determine the ideal plugin to use when choosing a
    plugin based on file extension.

    Parameters
    ----------
    extension : str
        The name of the extension including the initial dot, e.g. ".png".
    priority : List
        A list of plugin names (entries in config.known_plugins) that can handle
        this format. The position of a plugin expresses a preference, e.g.
        ["plugin1", "plugin2"] indicates that, if available, plugin1 should be
        preferred over plugin2 when handling a request related to this format.
    name : str
        The full name of the format.
    description : str
        A description of the format.
    external_link : str
        A link to further information about the format. Typically, the format's
        specification.
    volume_support : bool
        If True, the format/extension supports volumetric image data.
        Defaults to False.

    Examples
    --------
    >>> FileExtension(
    ...     name="Bitmap",
    ...     extension=".bmp",
    ...     priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"],
    ...     external_link="https://en.wikipedia.org/wiki/BMP_file_format",
    ... )

    """

    def __init__(
        self,
        *,
        extension,
        priority,
        name=None,
        description=None,
        external_link=None,
        volume_support=False
    ):
        self.extension = extension
        self.priority = priority
        self.name = name
        self.description = description
        self.external_link = external_link
        # Keep a copy of the ordering given at construction time so that
        # user modifications to ``priority`` can be undone via ``reset()``.
        self.default_priority = priority.copy()
        self.volume_support = volume_support

    def __repr__(self):
        # Debug-friendly representation; extension + current priority are the
        # two fields that matter when diagnosing plugin selection.
        return (
            f"{type(self).__name__}("
            f"extension={self.extension!r}, priority={self.priority!r})"
        )

    def reset(self):
        """Restore ``priority`` to the ordering given at construction time."""
        self.priority = self.default_priority.copy()
+
73
+
74
+ extension_list = [
75
+ FileExtension(
76
+ name="Hasselblad raw",
77
+ extension=".3fr",
78
+ priority=["RAW-FI"],
79
+ ),
80
+ FileExtension(
81
+ name="Sony alpha",
82
+ extension=".arw",
83
+ priority=["RAW-FI"],
84
+ ),
85
+ FileExtension(
86
+ name="Animated Portable Network Graphics",
87
+ external_link="https://en.wikipedia.org/wiki/APNG",
88
+ extension=".apng",
89
+ priority=["pillow", "pyav"],
90
+ ),
91
+ FileExtension(
92
+ name="Audio Video Interleave",
93
+ extension=".avi",
94
+ priority=["FFMPEG"],
95
+ ),
96
+ FileExtension(
97
+ name="Casio raw format",
98
+ extension=".bay",
99
+ priority=["RAW-FI"],
100
+ ),
101
+ FileExtension(
102
+ extension=".blp",
103
+ priority=["pillow"],
104
+ ),
105
+ FileExtension(
106
+ name="Bitmap",
107
+ extension=".bmp",
108
+ priority=["pillow", "BMP-PIL", "BMP-FI", "ITK", "pyav", "opencv"],
109
+ external_link="https://en.wikipedia.org/wiki/BMP_file_format",
110
+ ),
111
+ FileExtension(
112
+ name="Device-Independent Bitmap",
113
+ extension=".dip",
114
+ priority=["opencv"],
115
+ external_link="https://en.wikipedia.org/wiki/BMP_file_format",
116
+ ),
117
+ FileExtension(
118
+ name="Re-Volt mipmap",
119
+ extension=".bmq",
120
+ priority=["RAW-FI"],
121
+ ),
122
+ FileExtension(
123
+ name="Binary Structured Data Format",
124
+ extension=".bsdf",
125
+ priority=["BSDF"],
126
+ external_link="http://bsdf.io/",
127
+ ),
128
+ FileExtension(
129
+ name="Binary Universal Form for the Representation of meteorological data",
130
+ extension=".bufr",
131
+ priority=["pillow", "BUFR-PIL"],
132
+ ),
133
+ FileExtension(
134
+ name="Silicon Graphics Image",
135
+ extension=".bw",
136
+ priority=["pillow", "SGI-PIL", "SGI-FI"],
137
+ ),
138
+ FileExtension(
139
+ name="Scirra Construct",
140
+ extension=".cap",
141
+ priority=["RAW-FI"],
142
+ ),
143
+ FileExtension(
144
+ name="AMETEK High Speed Camera Format",
145
+ extension=".cine",
146
+ priority=["RAW-FI"],
147
+ external_link="https://phantomhighspeed-knowledge.secure.force.com/servlet/fileField?id=0BE1N000000kD2i#:~:text=Cine%20is%20a%20video%20file,camera%20model%20and%20image%20resolution",
148
+ ),
149
+ FileExtension(extension=".cr2", priority=["RAW-FI"]),
150
+ FileExtension(
151
+ extension=".crw",
152
+ priority=["RAW-FI"],
153
+ ),
154
+ FileExtension(
155
+ extension=".cs1",
156
+ priority=["RAW-FI"],
157
+ ),
158
+ FileExtension(
159
+ name="Computerized Tomography",
160
+ extension=".ct",
161
+ priority=["DICOM"],
162
+ ),
163
+ FileExtension(
164
+ name="Windows Cursor Icons",
165
+ extension=".cur",
166
+ priority=["pillow", "CUR-PIL"],
167
+ ),
168
+ FileExtension(
169
+ name="Dr. Halo",
170
+ extension=".cut",
171
+ priority=["CUT-FI"],
172
+ ),
173
+ FileExtension(
174
+ extension=".dc2",
175
+ priority=["RAW-FI"],
176
+ ),
177
+ FileExtension(
178
+ name="DICOM file format",
179
+ extension=".dcm",
180
+ priority=["DICOM", "ITK"],
181
+ ),
182
+ FileExtension(
183
+ extension=".dcr",
184
+ priority=["RAW-FI"],
185
+ ),
186
+ FileExtension(
187
+ name="Intel DCX",
188
+ extension=".dcx",
189
+ priority=["pillow", "DCX-PIL"],
190
+ ),
191
+ FileExtension(
192
+ name="DirectX Texture Container",
193
+ extension=".dds",
194
+ priority=["pillow", "DDS-FI", "DDS-PIL"],
195
+ ),
196
+ FileExtension(
197
+ name="Windows Bitmap",
198
+ extension=".dib",
199
+ priority=["pillow", "DIB-PIL"],
200
+ ),
201
+ FileExtension(
202
+ name="DICOM file format",
203
+ extension=".dicom",
204
+ priority=["ITK"],
205
+ ),
206
+ FileExtension(
207
+ extension=".dng",
208
+ priority=["RAW-FI"],
209
+ ),
210
+ FileExtension(
211
+ extension=".drf",
212
+ priority=["RAW-FI"],
213
+ ),
214
+ FileExtension(
215
+ extension=".dsc",
216
+ priority=["RAW-FI"],
217
+ ),
218
+ FileExtension(
219
+ name="Enhanced Compression Wavelet",
220
+ extension=".ecw",
221
+ priority=["GDAL"],
222
+ ),
223
+ FileExtension(
224
+ name="Windows Metafile",
225
+ extension=".emf",
226
+ priority=["pillow", "WMF-PIL"],
227
+ ),
228
+ FileExtension(
229
+ name="Encapsulated Postscript",
230
+ extension=".eps",
231
+ priority=["pillow", "EPS-PIL"],
232
+ ),
233
+ FileExtension(
234
+ extension=".erf",
235
+ priority=["RAW-FI"],
236
+ ),
237
+ FileExtension(
238
+ name="OpenEXR",
239
+ extension=".exr",
240
+ external_link="https://openexr.readthedocs.io/en/latest/",
241
+ priority=["EXR-FI", "pyav", "opencv"],
242
+ ),
243
+ FileExtension(
244
+ extension=".fff",
245
+ priority=["RAW-FI"],
246
+ ),
247
+ FileExtension(
248
+ name="Flexible Image Transport System File",
249
+ extension=".fit",
250
+ priority=["pillow", "FITS-PIL", "FITS"],
251
+ ),
252
+ FileExtension(
253
+ name="Flexible Image Transport System File",
254
+ extension=".fits",
255
+ priority=["pillow", "FITS-PIL", "FITS", "pyav"],
256
+ ),
257
+ FileExtension(
258
+ name="Autodesk FLC Animation",
259
+ extension=".flc",
260
+ priority=["pillow", "FLI-PIL"],
261
+ ),
262
+ FileExtension(
263
+ name="Autodesk FLI Animation",
264
+ extension=".fli",
265
+ priority=["pillow", "FLI-PIL"],
266
+ ),
267
+ FileExtension(
268
+ name="Kodak FlashPix",
269
+ extension=".fpx",
270
+ priority=["pillow", "FPX-PIL"],
271
+ ),
272
+ FileExtension(
273
+ name="Independence War 2: Edge Of Chaos Texture Format",
274
+ extension=".ftc",
275
+ priority=["pillow", "FTEX-PIL"],
276
+ ),
277
+ FileExtension(
278
+ name="Flexible Image Transport System File",
279
+ extension=".fts",
280
+ priority=["FITS"],
281
+ ),
282
+ FileExtension(
283
+ name="Independence War 2: Edge Of Chaos Texture Format",
284
+ extension=".ftu",
285
+ priority=["pillow", "FTEX-PIL"],
286
+ ),
287
+ FileExtension(
288
+ name="Flexible Image Transport System File",
289
+ extension=".fz",
290
+ priority=["FITS"],
291
+ ),
292
+ FileExtension(
293
+ name="Raw fax format CCITT G.3",
294
+ extension=".g3",
295
+ priority=["G3-FI"],
296
+ ),
297
+ FileExtension(
298
+ name="GIMP brush file",
299
+ extension=".gbr",
300
+ priority=["pillow", "GBR-PIL"],
301
+ ),
302
+ FileExtension(
303
+ name="Grassroots DICOM",
304
+ extension=".gdcm",
305
+ priority=["ITK"],
306
+ ),
307
+ FileExtension(
308
+ name="Graphics Interchange Format",
309
+ extension=".gif",
310
+ priority=["pillow", "GIF-PIL", "pyav"],
311
+ ),
312
+ FileExtension(
313
+ name="UMDS GIPL",
314
+ extension=".gipl",
315
+ priority=["ITK"],
316
+ ),
317
+ FileExtension(
318
+ name="gridded meteorological data",
319
+ extension=".grib",
320
+ priority=["pillow", "GRIB-PIL"],
321
+ ),
322
+ FileExtension(
323
+ name="Hierarchical Data Format 5",
324
+ extension=".h5",
325
+ priority=["pillow", "HDF5-PIL"],
326
+ ),
327
+ FileExtension(
328
+ name="Hierarchical Data Format 5",
329
+ extension=".hdf",
330
+ priority=["pillow", "HDF5-PIL"],
331
+ ),
332
+ FileExtension(
333
+ name="Hierarchical Data Format 5",
334
+ extension=".hdf5",
335
+ priority=["ITK"],
336
+ ),
337
+ FileExtension(
338
+ name="JPEG Extended Range",
339
+ extension=".hdp",
340
+ priority=["JPEG-XR-FI"],
341
+ ),
342
+ FileExtension(
343
+ name="High Dynamic Range Image",
344
+ extension=".hdr",
345
+ priority=["HDR-FI", "ITK", "opencv"],
346
+ ),
347
+ FileExtension(
348
+ extension=".ia",
349
+ priority=["RAW-FI"],
350
+ ),
351
+ FileExtension(
352
+ extension=".icb",
353
+ priority=["pillow"],
354
+ ),
355
+ FileExtension(
356
+ name="Mac OS Icon File",
357
+ extension=".icns",
358
+ priority=["pillow", "ICNS-PIL"],
359
+ ),
360
+ FileExtension(
361
+ name="Windows Icon File",
362
+ extension=".ico",
363
+ priority=["pillow", "ICO-FI", "ICO-PIL", "pyav"],
364
+ ),
365
+ FileExtension(
366
+ name="ILBM Interleaved Bitmap",
367
+ extension=".iff",
368
+ priority=["IFF-FI"],
369
+ ),
370
+ FileExtension(
371
+ name="IPTC/NAA",
372
+ extension=".iim",
373
+ priority=["pillow", "IPTC-PIL"],
374
+ ),
375
+ FileExtension(
376
+ extension=".iiq",
377
+ priority=["RAW-FI"],
378
+ ),
379
+ FileExtension(
380
+ name="IFUNC Image Memory",
381
+ extension=".im",
382
+ priority=["pillow", "IM-PIL"],
383
+ ),
384
+ FileExtension(
385
+ extension=".img",
386
+ priority=["ITK", "GDAL"],
387
+ ),
388
+ FileExtension(
389
+ extension=".img.gz",
390
+ priority=["ITK"],
391
+ ),
392
+ FileExtension(
393
+ name="IM Tools",
394
+ extension=".IMT",
395
+ priority=["pillow", "IMT-PIL"],
396
+ ),
397
+ FileExtension(
398
+ name="Image Processing Lab",
399
+ extension=".ipl",
400
+ priority=["ITK"],
401
+ ),
402
+ FileExtension(
403
+ name="JPEG 2000",
404
+ extension=".j2c",
405
+ priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"],
406
+ ),
407
+ FileExtension(
408
+ name="JPEG 2000",
409
+ extension=".j2k",
410
+ priority=["pillow", "J2K-FI", "JPEG2000-PIL", "pyav"],
411
+ ),
412
+ FileExtension(
413
+ name="JPEG",
414
+ extension=".jfif",
415
+ priority=["pillow", "JPEG-PIL"],
416
+ ),
417
+ FileExtension(
418
+ name="JPEG",
419
+ extension=".jif",
420
+ priority=["JPEG-FI"],
421
+ ),
422
+ FileExtension(
423
+ name="JPEG Network Graphics",
424
+ extension=".jng",
425
+ priority=["JNG-FI"],
426
+ ),
427
+ FileExtension(
428
+ name="JPEG 2000",
429
+ extension=".jp2",
430
+ priority=["pillow", "JP2-FI", "JPEG2000-PIL", "pyav", "opencv"],
431
+ ),
432
+ FileExtension(
433
+ name="JPEG 2000",
434
+ extension=".jpc",
435
+ priority=["pillow", "JPEG2000-PIL"],
436
+ ),
437
+ FileExtension(
438
+ name="JPEG",
439
+ extension=".jpe",
440
+ priority=["pillow", "JPEG-FI", "JPEG-PIL", "opencv"],
441
+ ),
442
+ FileExtension(
443
+ name="Joint Photographic Experts Group",
444
+ extension=".jpeg",
445
+ priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"],
446
+ ),
447
+ FileExtension(
448
+ name="JPEG 2000",
449
+ extension=".jpf",
450
+ priority=["pillow", "JPEG2000-PIL"],
451
+ ),
452
+ FileExtension(
453
+ name="Joint Photographic Experts Group",
454
+ extension=".jpg",
455
+ priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL", "pyav", "opencv"],
456
+ ),
457
+ FileExtension(
458
+ name="JPEG 2000",
459
+ extension=".jpx",
460
+ priority=["pillow", "JPEG2000-PIL"],
461
+ ),
462
+ FileExtension(
463
+ name="JPEG Extended Range",
464
+ extension=".jxr",
465
+ priority=["JPEG-XR-FI"],
466
+ ),
467
+ FileExtension(
468
+ extension=".k25",
469
+ priority=["RAW-FI"],
470
+ ),
471
+ FileExtension(
472
+ extension=".kc2",
473
+ priority=["RAW-FI"],
474
+ ),
475
+ FileExtension(
476
+ extension=".kdc",
477
+ priority=["RAW-FI"],
478
+ ),
479
+ FileExtension(
480
+ name="C64 Koala Graphics",
481
+ extension=".koa",
482
+ priority=["KOALA-FI"],
483
+ ),
484
+ FileExtension(
485
+ name="ILBM Interleaved Bitmap",
486
+ extension=".lbm",
487
+ priority=["IFF-FI"],
488
+ ),
489
+ FileExtension(
490
+ name="Lytro F01",
491
+ extension=".lfp",
492
+ priority=["LYTRO-LFP"],
493
+ ),
494
+ FileExtension(
495
+ name="Lytro Illum",
496
+ extension=".lfr",
497
+ priority=["LYTRO-LFR"],
498
+ ),
499
+ FileExtension(
500
+ name="ZEISS LSM",
501
+ extension=".lsm",
502
+ priority=["tifffile", "ITK", "TIFF"],
503
+ ),
504
+ FileExtension(
505
+ name="McIdas area file",
506
+ extension=".MCIDAS",
507
+ priority=["pillow", "MCIDAS-PIL"],
508
+ external_link="https://www.ssec.wisc.edu/mcidas/doc/prog_man/2003print/progman2003-formats.html",
509
+ ),
510
+ FileExtension(
511
+ extension=".mdc",
512
+ priority=["RAW-FI"],
513
+ ),
514
+ FileExtension(
515
+ extension=".mef",
516
+ priority=["RAW-FI"],
517
+ ),
518
+ FileExtension(
519
+ name="FreeSurfer File Format",
520
+ extension=".mgh",
521
+ priority=["ITK"],
522
+ ),
523
+ FileExtension(
524
+ name="ITK MetaImage",
525
+ extension=".mha",
526
+ priority=["ITK"],
527
+ ),
528
+ FileExtension(
529
+ name="ITK MetaImage Header",
530
+ extension=".mhd",
531
+ priority=["ITK"],
532
+ ),
533
+ FileExtension(
534
+ name="Microsoft Image Composer",
535
+ extension=".mic",
536
+ priority=["pillow", "MIC-PIL"],
537
+ ),
538
+ FileExtension(
539
+ name="Matroska Multimedia Container",
540
+ extension=".mkv",
541
+ priority=["FFMPEG", "pyav"],
542
+ ),
543
+ FileExtension(
544
+ name="Medical Imaging NetCDF",
545
+ extension=".mnc",
546
+ priority=["ITK"],
547
+ ),
548
+ FileExtension(
549
+ name="Medical Imaging NetCDF 2",
550
+ extension=".mnc2",
551
+ priority=["ITK"],
552
+ ),
553
+ FileExtension(
554
+ name="Leaf Raw Image Format",
555
+ extension=".mos",
556
+ priority=["RAW-FI"],
557
+ ),
558
+ FileExtension(
559
+ name="QuickTime File Format",
560
+ extension=".mov",
561
+ priority=["FFMPEG", "pyav"],
562
+ ),
563
+ FileExtension(
564
+ name="MPEG-4 Part 14",
565
+ extension=".mp4",
566
+ priority=["FFMPEG", "pyav"],
567
+ ),
568
+ FileExtension(
569
+ name="MPEG-1 Moving Picture Experts Group",
570
+ extension=".mpeg",
571
+ priority=["FFMPEG", "pyav"],
572
+ ),
573
+ FileExtension(
574
+ name="Moving Picture Experts Group",
575
+ extension=".mpg",
576
+ priority=["pillow", "FFMPEG", "pyav"],
577
+ ),
578
+ FileExtension(
579
+ name="JPEG Multi-Picture Format",
580
+ extension=".mpo",
581
+ priority=["pillow", "MPO-PIL"],
582
+ ),
583
+ FileExtension(
584
+ name="Magnetic resonance imaging",
585
+ extension=".mri",
586
+ priority=["DICOM"],
587
+ ),
588
+ FileExtension(
589
+ extension=".mrw",
590
+ priority=["RAW-FI"],
591
+ ),
592
+ FileExtension(
593
+ name="Windows Paint",
594
+ extension=".msp",
595
+ priority=["pillow", "MSP-PIL"],
596
+ ),
597
+ FileExtension(
598
+ extension=".nef",
599
+ priority=["RAW-FI", "rawpy"],
600
+ ),
601
+ FileExtension(
602
+ extension=".nhdr",
603
+ priority=["ITK"],
604
+ ),
605
+ FileExtension(
606
+ extension=".nia",
607
+ priority=["ITK"],
608
+ ),
609
+ FileExtension(
610
+ extension=".nii",
611
+ priority=["ITK"],
612
+ ),
613
+ FileExtension(
614
+ name="nii.gz",
615
+ extension=".nii.gz",
616
+ priority=["ITK"],
617
+ ),
618
+ FileExtension(
619
+ name="Numpy Array",
620
+ extension=".npz",
621
+ priority=["NPZ"],
622
+ volume_support=True,
623
+ ),
624
+ FileExtension(
625
+ extension=".nrrd",
626
+ priority=["ITK"],
627
+ ),
628
+ FileExtension(
629
+ extension=".nrw",
630
+ priority=["RAW-FI"],
631
+ ),
632
+ FileExtension(
633
+ extension=".orf",
634
+ priority=["RAW-FI"],
635
+ ),
636
+ FileExtension(
637
+ extension=".palm",
638
+ priority=["pillow"],
639
+ ),
640
+ FileExtension(
641
+ name="Portable Bitmap",
642
+ extension=".pbm",
643
+ priority=["PGM-FI", "PGMRAW-FI", "pyav", "opencv"],
644
+ ),
645
+ FileExtension(
646
+ name="Kodak PhotoCD",
647
+ extension=".pcd",
648
+ priority=["pillow", "PCD-FI", "PCD-PIL"],
649
+ ),
650
+ FileExtension(
651
+ name="Macintosh PICT",
652
+ extension=".pct",
653
+ priority=["PICT-FI"],
654
+ ),
655
+ FileExtension(
656
+ name="Zsoft Paintbrush",
657
+ extension=".PCX",
658
+ priority=["pillow", "PCX-FI", "PCX-PIL"],
659
+ ),
660
+ FileExtension(
661
+ extension=".pdf",
662
+ priority=["pillow"],
663
+ ),
664
+ FileExtension(
665
+ extension=".pef",
666
+ priority=["RAW-FI"],
667
+ ),
668
+ FileExtension(
669
+ extension=".pfm",
670
+ priority=["PFM-FI", "pyav", "opencv"],
671
+ ),
672
+ FileExtension(
673
+ name="Portable Greymap",
674
+ extension=".pgm",
675
+ priority=["pillow", "PGM-FI", "PGMRAW-FI", "pyav", "opencv"],
676
+ ),
677
+ FileExtension(
678
+ name="Macintosh PICT",
679
+ extension=".pic",
680
+ priority=["PICT-FI", "ITK", "opencv"],
681
+ ),
682
+ FileExtension(
683
+ name="Macintosh PICT",
684
+ extension=".pict",
685
+ priority=["PICT-FI"],
686
+ ),
687
+ FileExtension(
688
+ name="Portable Network Graphics",
689
+ extension=".png",
690
+ priority=["pillow", "PNG-PIL", "PNG-FI", "ITK", "pyav", "opencv"],
691
+ ),
692
+ FileExtension(
693
+ name="Portable Image Format",
694
+ extension=".pnm",
695
+ priority=["pillow", "opencv"],
696
+ ),
697
+ FileExtension(
698
+ name="Pbmplus image",
699
+ extension=".ppm",
700
+ priority=["pillow", "PPM-PIL", "pyav"],
701
+ ),
702
+ FileExtension(
703
+ name="Pbmplus image",
704
+ extension=".pbm",
705
+ priority=["pillow", "PPM-PIL", "PPM-FI"],
706
+ ),
707
+ FileExtension(
708
+ name="Portable image format",
709
+ extension=".pxm",
710
+ priority=["opencv"],
711
+ ),
712
+ FileExtension(
713
+ name="Portable Pixelmap (ASCII)",
714
+ extension=".ppm",
715
+ priority=["PPM-FI", "opencv"],
716
+ ),
717
+ FileExtension(
718
+ name="Portable Pixelmap (Raw)",
719
+ extension=".ppm",
720
+ priority=["PPMRAW-FI"],
721
+ ),
722
+ FileExtension(
723
+ name="Ghostscript",
724
+ extension=".ps",
725
+ priority=["pillow", "EPS-PIL"],
726
+ ),
727
+ FileExtension(
728
+ name="Adope Photoshop 2.5 and 3.0",
729
+ extension=".psd",
730
+ priority=["pillow", "PSD-PIL", "PSD-FI"],
731
+ ),
732
+ FileExtension(
733
+ extension=".ptx",
734
+ priority=["RAW-FI"],
735
+ ),
736
+ FileExtension(
737
+ extension=".pxn",
738
+ priority=["RAW-FI"],
739
+ ),
740
+ FileExtension(
741
+ name="PIXAR raster image",
742
+ extension=".pxr",
743
+ priority=["pillow", "PIXAR-PIL"],
744
+ ),
745
+ FileExtension(
746
+ extension=".qtk",
747
+ priority=["RAW-FI"],
748
+ ),
749
+ FileExtension(
750
+ extension=".raf",
751
+ priority=["RAW-FI"],
752
+ ),
753
+ FileExtension(
754
+ name="Sun Raster File",
755
+ extension=".ras",
756
+ priority=["pillow", "SUN-PIL", "RAS-FI", "pyav", "opencv"],
757
+ ),
758
+ FileExtension(
759
+ name="Sun Raster File",
760
+ extension=".sr",
761
+ priority=["opencv"],
762
+ ),
763
+ FileExtension(
764
+ extension=".raw",
765
+ priority=["RAW-FI", "LYTRO-ILLUM-RAW", "LYTRO-F01-RAW", "rawpy"],
766
+ ),
767
+ FileExtension(
768
+ extension=".rdc",
769
+ priority=["RAW-FI"],
770
+ ),
771
+ FileExtension(
772
+ name="Silicon Graphics Image",
773
+ extension=".rgb",
774
+ priority=["pillow", "SGI-PIL"],
775
+ ),
776
+ FileExtension(
777
+ name="Silicon Graphics Image",
778
+ extension=".rgba",
779
+ priority=["pillow", "SGI-PIL"],
780
+ ),
781
+ FileExtension(
782
+ extension=".rw2",
783
+ priority=["RAW-FI"],
784
+ ),
785
+ FileExtension(
786
+ extension=".rwl",
787
+ priority=["RAW-FI"],
788
+ ),
789
+ FileExtension(
790
+ extension=".rwz",
791
+ priority=["RAW-FI"],
792
+ ),
793
+ FileExtension(
794
+ name="Silicon Graphics Image",
795
+ extension=".sgi",
796
+ priority=["pillow", "SGI-PIL", "pyav"],
797
+ ),
798
+ FileExtension(
799
+ name="SPE File Format",
800
+ extension=".spe",
801
+ priority=["SPE"],
802
+ ),
803
+ FileExtension(
804
+ extension=".SPIDER",
805
+ priority=["pillow", "SPIDER-PIL"],
806
+ ),
807
+ FileExtension(
808
+ extension=".sr2",
809
+ priority=["RAW-FI"],
810
+ ),
811
+ FileExtension(
812
+ extension=".srf",
813
+ priority=["RAW-FI"],
814
+ ),
815
+ FileExtension(
816
+ extension=".srw",
817
+ priority=["RAW-FI"],
818
+ ),
819
+ FileExtension(
820
+ extension=".sti",
821
+ priority=["RAW-FI"],
822
+ ),
823
+ FileExtension(
824
+ extension=".stk",
825
+ priority=["tifffile", "TIFF"],
826
+ ),
827
+ FileExtension(
828
+ name="ShockWave Flash",
829
+ extension=".swf",
830
+ priority=["SWF", "pyav"],
831
+ ),
832
+ FileExtension(
833
+ name="Truevision TGA",
834
+ extension=".targa",
835
+ priority=["pillow", "TARGA-FI"],
836
+ ),
837
+ FileExtension(
838
+ name="Truevision TGA",
839
+ extension=".tga",
840
+ priority=["pillow", "TGA-PIL", "TARGA-FI", "pyav"],
841
+ ),
842
+ FileExtension(
843
+ name="Tagged Image File",
844
+ extension=".tif",
845
+ priority=[
846
+ "tifffile",
847
+ "TIFF",
848
+ "pillow",
849
+ "TIFF-PIL",
850
+ "TIFF-FI",
851
+ "FEI",
852
+ "ITK",
853
+ "GDAL",
854
+ "pyav",
855
+ "opencv",
856
+ ],
857
+ volume_support=True,
858
+ ),
859
+ FileExtension(
860
+ name="Tagged Image File Format",
861
+ extension=".tiff",
862
+ priority=[
863
+ "tifffile",
864
+ "TIFF",
865
+ "pillow",
866
+ "TIFF-PIL",
867
+ "TIFF-FI",
868
+ "FEI",
869
+ "ITK",
870
+ "GDAL",
871
+ "pyav",
872
+ "opencv",
873
+ ],
874
+ volume_support=True,
875
+ ),
876
+ FileExtension(
877
+ extension=".vda",
878
+ priority=["pillow"],
879
+ ),
880
+ FileExtension(
881
+ extension=".vst",
882
+ priority=["pillow"],
883
+ ),
884
+ FileExtension(
885
+ extension=".vtk",
886
+ priority=["ITK"],
887
+ ),
888
+ FileExtension(
889
+ name="Wireless Bitmap",
890
+ extension=".wap",
891
+ priority=["WBMP-FI"],
892
+ ),
893
+ FileExtension(
894
+ name="Wireless Bitmap",
895
+ extension=".wbm",
896
+ priority=["WBMP-FI"],
897
+ ),
898
+ FileExtension(
899
+ name="Wireless Bitmap",
900
+ extension=".wbmp",
901
+ priority=["WBMP-FI"],
902
+ ),
903
+ FileExtension(
904
+ name="JPEG Extended Range",
905
+ extension=".wdp",
906
+ priority=["JPEG-XR-FI"],
907
+ ),
908
+ FileExtension(
909
+ name="Matroska",
910
+ extension=".webm",
911
+ priority=["FFMPEG", "pyav"],
912
+ ),
913
+ FileExtension(
914
+ name="Google WebP",
915
+ extension=".webp",
916
+ priority=["pillow", "WEBP-FI", "pyav", "opencv"],
917
+ ),
918
+ FileExtension(
919
+ name="Windows Meta File",
920
+ extension=".wmf",
921
+ priority=["pillow", "WMF-PIL"],
922
+ ),
923
+ FileExtension(
924
+ name="Windows Media Video",
925
+ extension=".wmv",
926
+ priority=["FFMPEG"],
927
+ ),
928
+ FileExtension(
929
+ name="X11 Bitmap",
930
+ extension=".xbm",
931
+ priority=["pillow", "XBM-PIL", "XBM-FI", "pyav"],
932
+ ),
933
+ FileExtension(
934
+ name="X11 Pixel Map",
935
+ extension=".xpm",
936
+ priority=["pillow", "XPM-PIL", "XPM-FI"],
937
+ ),
938
+ FileExtension(
939
+ name="Thumbnail Image",
940
+ extension=".XVTHUMB",
941
+ priority=["pillow", "XVTHUMB-PIL"],
942
+ ),
943
+ FileExtension(
944
+ extension=".dpx",
945
+ priority=["pyav"],
946
+ ),
947
+ FileExtension(
948
+ extension=".im1",
949
+ priority=["pyav"],
950
+ ),
951
+ FileExtension(
952
+ extension=".im24",
953
+ priority=["pyav"],
954
+ ),
955
+ FileExtension(
956
+ extension=".im8",
957
+ priority=["pyav"],
958
+ ),
959
+ FileExtension(
960
+ extension=".jls",
961
+ priority=["pyav"],
962
+ ),
963
+ FileExtension(
964
+ extension=".ljpg",
965
+ priority=["pyav"],
966
+ ),
967
+ FileExtension(
968
+ extension=".pam",
969
+ priority=["pyav"],
970
+ ),
971
+ FileExtension(
972
+ extension=".pcx",
973
+ priority=["pyav"],
974
+ ),
975
+ FileExtension(
976
+ extension=".pgmyuv",
977
+ priority=["pyav"],
978
+ ),
979
+ FileExtension(
980
+ extension=".pix",
981
+ priority=["pyav"],
982
+ ),
983
+ FileExtension(
984
+ extension=".ppm",
985
+ priority=["pyav"],
986
+ ),
987
+ FileExtension(
988
+ extension=".rs",
989
+ priority=["pyav"],
990
+ ),
991
+ FileExtension(
992
+ extension=".sun",
993
+ priority=["pyav"],
994
+ ),
995
+ FileExtension(
996
+ extension=".sunras",
997
+ priority=["pyav"],
998
+ ),
999
+ FileExtension(
1000
+ extension=".xface",
1001
+ priority=["pyav"],
1002
+ ),
1003
+ FileExtension(
1004
+ extension=".xwd",
1005
+ priority=["pyav"],
1006
+ ),
1007
+ FileExtension(
1008
+ extension=".y",
1009
+ priority=["pyav"],
1010
+ ),
1011
+ FileExtension(
1012
+ name="3GP (3GPP file format)",
1013
+ extension=".3g2",
1014
+ priority=["pyav"],
1015
+ ),
1016
+ FileExtension(
1017
+ name="3GP (3GPP file format)",
1018
+ extension=".3gp",
1019
+ priority=["pyav"],
1020
+ ),
1021
+ FileExtension(
1022
+ name="3GP (3GPP file format)",
1023
+ extension=".f4v",
1024
+ priority=["pyav"],
1025
+ ),
1026
+ FileExtension(
1027
+ name="3GP (3GPP file format)",
1028
+ extension=".ism",
1029
+ priority=["pyav"],
1030
+ ),
1031
+ FileExtension(
1032
+ name="3GP (3GPP file format)",
1033
+ extension=".isma",
1034
+ priority=["pyav"],
1035
+ ),
1036
+ FileExtension(
1037
+ name="3GP (3GPP file format)",
1038
+ extension=".ismv",
1039
+ priority=["pyav"],
1040
+ ),
1041
+ FileExtension(
1042
+ name="3GP (3GPP file format)",
1043
+ extension=".m4a",
1044
+ priority=["pyav"],
1045
+ ),
1046
+ FileExtension(
1047
+ name="3GP (3GPP file format)",
1048
+ extension=".m4b",
1049
+ priority=["pyav"],
1050
+ ),
1051
+ FileExtension(
1052
+ name="3GP (3GPP file format)",
1053
+ extension=".mj2",
1054
+ priority=["pyav"],
1055
+ ),
1056
+ FileExtension(
1057
+ name="3GP (3GPP file format)",
1058
+ extension=".psp",
1059
+ priority=["pyav"],
1060
+ ),
1061
+ FileExtension(
1062
+ name="3GP2 (3GPP2 file format)",
1063
+ extension=".3g2",
1064
+ priority=["pyav"],
1065
+ ),
1066
+ FileExtension(
1067
+ name="3GP2 (3GPP2 file format)",
1068
+ extension=".3gp",
1069
+ priority=["pyav"],
1070
+ ),
1071
+ FileExtension(
1072
+ name="3GP2 (3GPP2 file format)",
1073
+ extension=".f4v",
1074
+ priority=["pyav"],
1075
+ ),
1076
+ FileExtension(
1077
+ name="3GP2 (3GPP2 file format)",
1078
+ extension=".ism",
1079
+ priority=["pyav"],
1080
+ ),
1081
+ FileExtension(
1082
+ name="3GP2 (3GPP2 file format)",
1083
+ extension=".isma",
1084
+ priority=["pyav"],
1085
+ ),
1086
+ FileExtension(
1087
+ name="3GP2 (3GPP2 file format)",
1088
+ extension=".ismv",
1089
+ priority=["pyav"],
1090
+ ),
1091
+ FileExtension(
1092
+ name="3GP2 (3GPP2 file format)",
1093
+ extension=".m4a",
1094
+ priority=["pyav"],
1095
+ ),
1096
+ FileExtension(
1097
+ name="3GP2 (3GPP2 file format)",
1098
+ extension=".m4b",
1099
+ priority=["pyav"],
1100
+ ),
1101
+ FileExtension(
1102
+ name="3GP2 (3GPP2 file format)",
1103
+ extension=".mj2",
1104
+ priority=["pyav"],
1105
+ ),
1106
+ FileExtension(
1107
+ name="3GP2 (3GPP2 file format)",
1108
+ extension=".psp",
1109
+ priority=["pyav"],
1110
+ ),
1111
+ FileExtension(
1112
+ name="3GPP AMR",
1113
+ extension=".amr",
1114
+ priority=["pyav"],
1115
+ ),
1116
+ FileExtension(
1117
+ name="a64 - video for Commodore 64",
1118
+ extension=".A64",
1119
+ priority=["pyav"],
1120
+ ),
1121
+ FileExtension(
1122
+ name="a64 - video for Commodore 64",
1123
+ extension=".a64",
1124
+ priority=["pyav"],
1125
+ ),
1126
+ FileExtension(
1127
+ name="Adobe Filmstrip",
1128
+ extension=".flm",
1129
+ priority=["pyav"],
1130
+ ),
1131
+ FileExtension(
1132
+ name="AMV",
1133
+ extension=".amv",
1134
+ priority=["pyav"],
1135
+ ),
1136
+ FileExtension(
1137
+ name="ASF (Advanced / Active Streaming Format)",
1138
+ extension=".asf",
1139
+ priority=["pyav"],
1140
+ ),
1141
+ FileExtension(
1142
+ name="ASF (Advanced / Active Streaming Format)",
1143
+ extension=".asf",
1144
+ priority=["pyav"],
1145
+ ),
1146
+ FileExtension(
1147
+ name="ASF (Advanced / Active Streaming Format)",
1148
+ extension=".wmv",
1149
+ priority=["pyav"],
1150
+ ),
1151
+ FileExtension(
1152
+ name="ASF (Advanced / Active Streaming Format)",
1153
+ extension=".wmv",
1154
+ priority=["pyav"],
1155
+ ),
1156
+ FileExtension(
1157
+ name="AV1 Annex B",
1158
+ extension=".obu",
1159
+ priority=["pyav"],
1160
+ ),
1161
+ FileExtension(
1162
+ name="AV1 low overhead OBU",
1163
+ extension=".obu",
1164
+ priority=["pyav"],
1165
+ ),
1166
+ FileExtension(
1167
+ name="AVI (Audio Video Interleaved)",
1168
+ extension=".avi",
1169
+ priority=["pyav"],
1170
+ ),
1171
+ FileExtension(
1172
+ name="AVR (Audio Visual Research)",
1173
+ extension=".avr",
1174
+ priority=["pyav"],
1175
+ ),
1176
+ FileExtension(
1177
+ name="Beam Software SIFF",
1178
+ extension=".vb",
1179
+ priority=["pyav"],
1180
+ ),
1181
+ FileExtension(
1182
+ name="CD Graphics",
1183
+ extension=".cdg",
1184
+ priority=["pyav"],
1185
+ ),
1186
+ FileExtension(
1187
+ name="Commodore CDXL video",
1188
+ extension=".cdxl",
1189
+ priority=["pyav"],
1190
+ ),
1191
+ FileExtension(
1192
+ name="Commodore CDXL video",
1193
+ extension=".xl",
1194
+ priority=["pyav"],
1195
+ ),
1196
+ FileExtension(
1197
+ name="DASH Muxer",
1198
+ extension=".mpd",
1199
+ priority=["pyav"],
1200
+ ),
1201
+ FileExtension(
1202
+ name="Digital Pictures SGA",
1203
+ extension=".sga",
1204
+ priority=["pyav"],
1205
+ ),
1206
+ FileExtension(
1207
+ name="Discworld II BMV",
1208
+ extension=".bmv",
1209
+ priority=["pyav"],
1210
+ ),
1211
+ FileExtension(
1212
+ name="DV (Digital Video)",
1213
+ extension=".dif",
1214
+ priority=["pyav"],
1215
+ ),
1216
+ FileExtension(
1217
+ name="DV (Digital Video)",
1218
+ extension=".dv",
1219
+ priority=["pyav"],
1220
+ ),
1221
+ FileExtension(
1222
+ name="F4V Adobe Flash Video",
1223
+ extension=".f4v",
1224
+ priority=["pyav"],
1225
+ ),
1226
+ FileExtension(
1227
+ name="FLV (Flash Video)",
1228
+ extension=".flv",
1229
+ priority=["pyav"],
1230
+ ),
1231
+ FileExtension(
1232
+ name="GXF (General eXchange Format)",
1233
+ extension=".gxf",
1234
+ priority=["pyav"],
1235
+ ),
1236
+ FileExtension(
1237
+ name="iCE Draw File",
1238
+ extension=".idf",
1239
+ priority=["pyav"],
1240
+ ),
1241
+ FileExtension(
1242
+ name="IFV CCTV DVR",
1243
+ extension=".ifv",
1244
+ priority=["pyav"],
1245
+ ),
1246
+ FileExtension(
1247
+ name="iPod H.264 MP4 (MPEG-4 Part 14)",
1248
+ extension=".m4a",
1249
+ priority=["pyav"],
1250
+ ),
1251
+ FileExtension(
1252
+ name="iPod H.264 MP4 (MPEG-4 Part 14)",
1253
+ extension=".m4b",
1254
+ priority=["pyav"],
1255
+ ),
1256
+ FileExtension(
1257
+ name="iPod H.264 MP4 (MPEG-4 Part 14)",
1258
+ extension=".m4v",
1259
+ priority=["pyav"],
1260
+ ),
1261
+ FileExtension(
1262
+ name="IVR (Internet Video Recording)",
1263
+ extension=".ivr",
1264
+ priority=["pyav"],
1265
+ ),
1266
+ FileExtension(
1267
+ name="Konami PS2 SVAG",
1268
+ extension=".svag",
1269
+ priority=["pyav"],
1270
+ ),
1271
+ FileExtension(
1272
+ name="KUX (YouKu)",
1273
+ extension=".kux",
1274
+ priority=["pyav"],
1275
+ ),
1276
+ FileExtension(
1277
+ name="live RTMP FLV (Flash Video)",
1278
+ extension=".flv",
1279
+ priority=["pyav"],
1280
+ ),
1281
+ FileExtension(
1282
+ name="Loki SDL MJPEG",
1283
+ extension=".mjpg",
1284
+ priority=["pyav"],
1285
+ ),
1286
+ FileExtension(
1287
+ name="LVF",
1288
+ extension=".lvf",
1289
+ priority=["pyav"],
1290
+ ),
1291
+ FileExtension(
1292
+ name="Matroska / WebM",
1293
+ extension=".mk3d",
1294
+ priority=["pyav"],
1295
+ ),
1296
+ FileExtension(
1297
+ name="Matroska / WebM",
1298
+ extension=".mka",
1299
+ priority=["pyav"],
1300
+ ),
1301
+ FileExtension(
1302
+ name="Matroska / WebM",
1303
+ extension=".mks",
1304
+ priority=["pyav"],
1305
+ ),
1306
+ FileExtension(
1307
+ name="Microsoft XMV",
1308
+ extension=".xmv",
1309
+ priority=["pyav"],
1310
+ ),
1311
+ FileExtension(
1312
+ name="MIME multipart JPEG",
1313
+ extension=".mjpg",
1314
+ priority=["pyav"],
1315
+ ),
1316
+ FileExtension(
1317
+ name="MobiClip MODS",
1318
+ extension=".mods",
1319
+ priority=["pyav"],
1320
+ ),
1321
+ FileExtension(
1322
+ name="MobiClip MOFLEX",
1323
+ extension=".moflex",
1324
+ priority=["pyav"],
1325
+ ),
1326
+ FileExtension(
1327
+ name="Motion Pixels MVI",
1328
+ extension=".mvi",
1329
+ priority=["pyav"],
1330
+ ),
1331
+ FileExtension(
1332
+ name="MP4 (MPEG-4 Part 14)",
1333
+ extension=".3g2",
1334
+ priority=["pyav"],
1335
+ ),
1336
+ FileExtension(
1337
+ name="MP4 (MPEG-4 Part 14)",
1338
+ extension=".3gp",
1339
+ priority=["pyav"],
1340
+ ),
1341
+ FileExtension(
1342
+ name="MP4 (MPEG-4 Part 14)",
1343
+ extension=".f4v",
1344
+ priority=["pyav"],
1345
+ ),
1346
+ FileExtension(
1347
+ name="MP4 (MPEG-4 Part 14)",
1348
+ extension=".ism",
1349
+ priority=["pyav"],
1350
+ ),
1351
+ FileExtension(
1352
+ name="MP4 (MPEG-4 Part 14)",
1353
+ extension=".isma",
1354
+ priority=["pyav"],
1355
+ ),
1356
+ FileExtension(
1357
+ name="MP4 (MPEG-4 Part 14)",
1358
+ extension=".ismv",
1359
+ priority=["pyav"],
1360
+ ),
1361
+ FileExtension(
1362
+ name="MP4 (MPEG-4 Part 14)",
1363
+ extension=".m4a",
1364
+ priority=["pyav"],
1365
+ ),
1366
+ FileExtension(
1367
+ name="MP4 (MPEG-4 Part 14)",
1368
+ extension=".m4b",
1369
+ priority=["pyav"],
1370
+ ),
1371
+ FileExtension(
1372
+ name="MP4 (MPEG-4 Part 14)",
1373
+ extension=".mj2",
1374
+ priority=["pyav"],
1375
+ ),
1376
+ FileExtension(
1377
+ name="MP4 (MPEG-4 Part 14)",
1378
+ extension=".psp",
1379
+ priority=["pyav"],
1380
+ ),
1381
+ FileExtension(
1382
+ name="MPEG-2 PS (DVD VOB)",
1383
+ extension=".dvd",
1384
+ priority=["pyav"],
1385
+ ),
1386
+ FileExtension(
1387
+ name="MPEG-2 PS (SVCD)",
1388
+ extension=".vob",
1389
+ priority=["pyav"],
1390
+ ),
1391
+ FileExtension(
1392
+ name="MPEG-2 PS (VOB)",
1393
+ extension=".vob",
1394
+ priority=["pyav"],
1395
+ ),
1396
+ FileExtension(
1397
+ name="MPEG-TS (MPEG-2 Transport Stream)",
1398
+ extension=".m2t",
1399
+ priority=["pyav"],
1400
+ ),
1401
+ FileExtension(
1402
+ name="MPEG-TS (MPEG-2 Transport Stream)",
1403
+ extension=".m2ts",
1404
+ priority=["pyav"],
1405
+ ),
1406
+ FileExtension(
1407
+ name="MPEG-TS (MPEG-2 Transport Stream)",
1408
+ extension=".mts",
1409
+ priority=["pyav"],
1410
+ ),
1411
+ FileExtension(
1412
+ name="MPEG-TS (MPEG-2 Transport Stream)",
1413
+ extension=".ts",
1414
+ priority=["pyav"],
1415
+ ),
1416
+ FileExtension(
1417
+ name="Musepack",
1418
+ extension=".mpc",
1419
+ priority=["pyav"],
1420
+ ),
1421
+ FileExtension(
1422
+ name="MXF (Material eXchange Format) Operational Pattern Atom",
1423
+ extension=".mxf",
1424
+ priority=["pyav"],
1425
+ ),
1426
+ FileExtension(
1427
+ name="MXF (Material eXchange Format)",
1428
+ extension=".mxf",
1429
+ priority=["pyav"],
1430
+ ),
1431
+ FileExtension(
1432
+ name="MxPEG clip",
1433
+ extension=".mxg",
1434
+ priority=["pyav"],
1435
+ ),
1436
+ FileExtension(
1437
+ name="NC camera feed",
1438
+ extension=".v",
1439
+ priority=["pyav"],
1440
+ ),
1441
+ FileExtension(
1442
+ name="NUT",
1443
+ extension=".nut",
1444
+ priority=["pyav"],
1445
+ ),
1446
+ FileExtension(
1447
+ name="Ogg Video",
1448
+ extension=".ogv",
1449
+ priority=["pyav"],
1450
+ ),
1451
+ FileExtension(
1452
+ name="Ogg",
1453
+ extension=".ogg",
1454
+ priority=["pyav"],
1455
+ ),
1456
+ FileExtension(
1457
+ name="On2 IVF",
1458
+ extension=".ivf",
1459
+ priority=["pyav"],
1460
+ ),
1461
+ FileExtension(
1462
+ name="PSP MP4 (MPEG-4 Part 14)",
1463
+ extension=".psp",
1464
+ priority=["pyav"],
1465
+ ),
1466
+ FileExtension(
1467
+ name="Psygnosis YOP",
1468
+ extension=".yop",
1469
+ priority=["pyav"],
1470
+ ),
1471
+ FileExtension(
1472
+ name="QuickTime / MOV",
1473
+ extension=".3g2",
1474
+ priority=["pyav"],
1475
+ ),
1476
+ FileExtension(
1477
+ name="QuickTime / MOV",
1478
+ extension=".3gp",
1479
+ priority=["pyav"],
1480
+ ),
1481
+ FileExtension(
1482
+ name="QuickTime / MOV",
1483
+ extension=".f4v",
1484
+ priority=["pyav"],
1485
+ ),
1486
+ FileExtension(
1487
+ name="QuickTime / MOV",
1488
+ extension=".ism",
1489
+ priority=["pyav"],
1490
+ ),
1491
+ FileExtension(
1492
+ name="QuickTime / MOV",
1493
+ extension=".isma",
1494
+ priority=["pyav"],
1495
+ ),
1496
+ FileExtension(
1497
+ name="QuickTime / MOV",
1498
+ extension=".ismv",
1499
+ priority=["pyav"],
1500
+ ),
1501
+ FileExtension(
1502
+ name="QuickTime / MOV",
1503
+ extension=".m4a",
1504
+ priority=["pyav"],
1505
+ ),
1506
+ FileExtension(
1507
+ name="QuickTime / MOV",
1508
+ extension=".m4b",
1509
+ priority=["pyav"],
1510
+ ),
1511
+ FileExtension(
1512
+ name="QuickTime / MOV",
1513
+ extension=".mj2",
1514
+ priority=["pyav"],
1515
+ ),
1516
+ FileExtension(
1517
+ name="QuickTime / MOV",
1518
+ extension=".psp",
1519
+ priority=["pyav"],
1520
+ ),
1521
+ FileExtension(
1522
+ name="raw AVS2-P2/IEEE1857.4 video",
1523
+ extension=".avs",
1524
+ priority=["pyav"],
1525
+ ),
1526
+ FileExtension(
1527
+ name="raw AVS2-P2/IEEE1857.4 video",
1528
+ extension=".avs2",
1529
+ priority=["pyav"],
1530
+ ),
1531
+ FileExtension(
1532
+ name="raw AVS3-P2/IEEE1857.10",
1533
+ extension=".avs3",
1534
+ priority=["pyav"],
1535
+ ),
1536
+ FileExtension(
1537
+ name="raw Chinese AVS (Audio Video Standard) video",
1538
+ extension=".cavs",
1539
+ priority=["pyav"],
1540
+ ),
1541
+ FileExtension(
1542
+ name="raw Dirac",
1543
+ extension=".drc",
1544
+ priority=["pyav"],
1545
+ ),
1546
+ FileExtension(
1547
+ name="raw Dirac",
1548
+ extension=".vc2",
1549
+ priority=["pyav"],
1550
+ ),
1551
+ FileExtension(
1552
+ name="raw DNxHD (SMPTE VC-3)",
1553
+ extension=".dnxhd",
1554
+ priority=["pyav"],
1555
+ ),
1556
+ FileExtension(
1557
+ name="raw DNxHD (SMPTE VC-3)",
1558
+ extension=".dnxhr",
1559
+ priority=["pyav"],
1560
+ ),
1561
+ FileExtension(
1562
+ name="raw GSM",
1563
+ extension=".gsm",
1564
+ priority=["pyav"],
1565
+ ),
1566
+ FileExtension(
1567
+ name="raw H.261",
1568
+ extension=".h261",
1569
+ priority=["pyav"],
1570
+ ),
1571
+ FileExtension(
1572
+ name="raw H.263",
1573
+ extension=".h263",
1574
+ priority=["pyav"],
1575
+ ),
1576
+ FileExtension(
1577
+ name="raw H.264 video",
1578
+ extension=".264",
1579
+ priority=["pyav"],
1580
+ ),
1581
+ FileExtension(
1582
+ name="raw H.264 video",
1583
+ extension=".avc",
1584
+ priority=["pyav"],
1585
+ ),
1586
+ FileExtension(
1587
+ name="raw H.264 video",
1588
+ extension=".h264",
1589
+ priority=["pyav", "FFMPEG"],
1590
+ ),
1591
+ FileExtension(
1592
+ name="raw H.264 video",
1593
+ extension=".h26l",
1594
+ priority=["pyav"],
1595
+ ),
1596
+ FileExtension(
1597
+ name="raw HEVC video",
1598
+ extension=".265",
1599
+ priority=["pyav"],
1600
+ ),
1601
+ FileExtension(
1602
+ name="raw HEVC video",
1603
+ extension=".h265",
1604
+ priority=["pyav"],
1605
+ ),
1606
+ FileExtension(
1607
+ name="raw HEVC video",
1608
+ extension=".hevc",
1609
+ priority=["pyav"],
1610
+ ),
1611
+ FileExtension(
1612
+ name="raw id RoQ",
1613
+ extension=".roq",
1614
+ priority=["pyav"],
1615
+ ),
1616
+ FileExtension(
1617
+ name="raw Ingenient MJPEG",
1618
+ extension=".cgi",
1619
+ priority=["pyav"],
1620
+ ),
1621
+ FileExtension(
1622
+ name="raw IPU Video",
1623
+ extension=".ipu",
1624
+ priority=["pyav"],
1625
+ ),
1626
+ FileExtension(
1627
+ name="raw MJPEG 2000 video",
1628
+ extension=".j2k",
1629
+ priority=["pyav"],
1630
+ ),
1631
+ FileExtension(
1632
+ name="raw MJPEG video",
1633
+ extension=".mjpeg",
1634
+ priority=["pyav"],
1635
+ ),
1636
+ FileExtension(
1637
+ name="raw MJPEG video",
1638
+ extension=".mjpg",
1639
+ priority=["pyav"],
1640
+ ),
1641
+ FileExtension(
1642
+ name="raw MJPEG video",
1643
+ extension=".mpo",
1644
+ priority=["pyav"],
1645
+ ),
1646
+ FileExtension(
1647
+ name="raw MPEG-1 video",
1648
+ extension=".m1v",
1649
+ priority=["pyav"],
1650
+ ),
1651
+ FileExtension(
1652
+ name="raw MPEG-1 video",
1653
+ extension=".mpeg",
1654
+ priority=["pyav"],
1655
+ ),
1656
+ FileExtension(
1657
+ name="raw MPEG-1 video",
1658
+ extension=".mpg",
1659
+ priority=["pyav"],
1660
+ ),
1661
+ FileExtension(
1662
+ name="raw MPEG-2 video",
1663
+ extension=".m2v",
1664
+ priority=["pyav"],
1665
+ ),
1666
+ FileExtension(
1667
+ name="raw MPEG-4 video",
1668
+ extension=".m4v",
1669
+ priority=["pyav"],
1670
+ ),
1671
+ FileExtension(
1672
+ name="raw VC-1 video",
1673
+ extension=".vc1",
1674
+ priority=["pyav"],
1675
+ ),
1676
+ FileExtension(
1677
+ name="raw video",
1678
+ extension=".cif",
1679
+ priority=["pyav"],
1680
+ ),
1681
+ FileExtension(
1682
+ name="raw video",
1683
+ extension=".qcif",
1684
+ priority=["pyav"],
1685
+ ),
1686
+ FileExtension(
1687
+ name="raw video",
1688
+ extension=".rgb",
1689
+ priority=["pyav"],
1690
+ ),
1691
+ FileExtension(
1692
+ name="raw video",
1693
+ extension=".yuv",
1694
+ priority=["pyav"],
1695
+ ),
1696
+ FileExtension(
1697
+ name="RealMedia",
1698
+ extension=".rm",
1699
+ priority=["pyav"],
1700
+ ),
1701
+ FileExtension(
1702
+ name="SDR2",
1703
+ extension=".sdr2",
1704
+ priority=["pyav"],
1705
+ ),
1706
+ FileExtension(
1707
+ name="Sega FILM / CPK",
1708
+ extension=".cpk",
1709
+ priority=["pyav"],
1710
+ ),
1711
+ FileExtension(
1712
+ name="SER (Simple uncompressed video format for astronomical capturing)",
1713
+ extension=".ser",
1714
+ priority=["pyav"],
1715
+ ),
1716
+ FileExtension(
1717
+ name="Simbiosis Interactive IMX",
1718
+ extension=".imx",
1719
+ priority=["pyav"],
1720
+ ),
1721
+ FileExtension(
1722
+ name="Square SVS",
1723
+ extension=".svs",
1724
+ priority=["tifffile", "pyav"],
1725
+ ),
1726
+ FileExtension(
1727
+ name="TiVo TY Stream",
1728
+ extension=".ty",
1729
+ priority=["pyav"],
1730
+ ),
1731
+ FileExtension(
1732
+ name="TiVo TY Stream",
1733
+ extension=".ty+",
1734
+ priority=["pyav"],
1735
+ ),
1736
+ FileExtension(
1737
+ name="Uncompressed 4:2:2 10-bit",
1738
+ extension=".v210",
1739
+ priority=["pyav"],
1740
+ ),
1741
+ FileExtension(
1742
+ name="Uncompressed 4:2:2 10-bit",
1743
+ extension=".yuv10",
1744
+ priority=["pyav"],
1745
+ ),
1746
+ FileExtension(
1747
+ name="VC-1 test bitstream",
1748
+ extension=".rcv",
1749
+ priority=["pyav"],
1750
+ ),
1751
+ FileExtension(
1752
+ name="Video CCTV DAT",
1753
+ extension=".dat",
1754
+ priority=["pyav"],
1755
+ ),
1756
+ FileExtension(
1757
+ name="Video DAV",
1758
+ extension=".dav",
1759
+ priority=["pyav"],
1760
+ ),
1761
+ FileExtension(
1762
+ name="Vivo",
1763
+ extension=".viv",
1764
+ priority=["pyav"],
1765
+ ),
1766
+ FileExtension(
1767
+ name="WebM Chunk Muxer",
1768
+ extension=".chk",
1769
+ priority=["pyav"],
1770
+ ),
1771
+ FileExtension(
1772
+ name="WebM",
1773
+ extension=".mk3d",
1774
+ priority=["pyav"],
1775
+ ),
1776
+ FileExtension(
1777
+ name="WebM",
1778
+ extension=".mka",
1779
+ priority=["pyav"],
1780
+ ),
1781
+ FileExtension(
1782
+ name="WebM",
1783
+ extension=".mks",
1784
+ priority=["pyav"],
1785
+ ),
1786
+ FileExtension(
1787
+ name="Windows Television (WTV)",
1788
+ extension=".wtv",
1789
+ priority=["pyav"],
1790
+ ),
1791
+ FileExtension(
1792
+ name="Xilam DERF",
1793
+ extension=".adp",
1794
+ priority=["pyav"],
1795
+ ),
1796
+ FileExtension(
1797
+ name="YUV4MPEG pipe",
1798
+ extension=".y4m",
1799
+ priority=["pyav"],
1800
+ ),
1801
+ FileExtension(
1802
+ extension=".qpi",
1803
+ priority=["tifffile"],
1804
+ ),
1805
+ FileExtension(
1806
+ name="PCO Camera",
1807
+ extension=".pcoraw",
1808
+ priority=["tifffile"],
1809
+ ),
1810
+ FileExtension(
1811
+ name="PCO Camera",
1812
+ extension=".rec",
1813
+ priority=["tifffile"],
1814
+ ),
1815
+ FileExtension(
1816
+ name="Perkin Elmer Vectra",
1817
+ extension=".qptiff",
1818
+ priority=["tifffile"],
1819
+ ),
1820
+ FileExtension(
1821
+ name="Pyramid Encoded TIFF",
1822
+ extension=".ptiff",
1823
+ priority=["tifffile"],
1824
+ ),
1825
+ FileExtension(
1826
+ name="Pyramid Encoded TIFF",
1827
+ extension=".ptif",
1828
+ priority=["tifffile"],
1829
+ ),
1830
+ FileExtension(
1831
+ name="Opticks Gel",
1832
+ extension=".gel",
1833
+ priority=["tifffile"],
1834
+ ),
1835
+ FileExtension(
1836
+ name="Zoomify Image Format",
1837
+ extension=".zif",
1838
+ priority=["tifffile"],
1839
+ ),
1840
+ FileExtension(
1841
+ name="Hamamatsu Slide Scanner",
1842
+ extension=".ndpi",
1843
+ priority=["tifffile"],
1844
+ ),
1845
+ FileExtension(
1846
+ name="Roche Digital Pathology",
1847
+ extension=".bif",
1848
+ priority=["tifffile"],
1849
+ ),
1850
+ FileExtension(
1851
+ extension=".tf8",
1852
+ priority=["tifffile"],
1853
+ ),
1854
+ FileExtension(
1855
+ extension=".btf",
1856
+ priority=["tifffile"],
1857
+ ),
1858
+ FileExtension(
1859
+ name="High Efficiency Image File Format",
1860
+ extension=".heic",
1861
+ priority=["pillow"],
1862
+ ),
1863
+ FileExtension(
1864
+ name="AV1 Image File Format",
1865
+ extension=".avif",
1866
+ priority=["pillow"],
1867
+ ),
1868
+ ]
1869
# Keep the registry ordered alphabetically by extension string.
extension_list.sort(key=lambda entry: entry.extension)


# Group entries that share the same extension string, preserving their relative
# order. Maps extension string (e.g. ".tif") -> list of FileExtension entries.
known_extensions = {}
for entry in extension_list:
    known_extensions.setdefault(entry.extension, []).append(entry)

# Rebuild the flat list from the grouped view so that both containers stay
# consistent with each other.
extension_list = [
    entry for grouped in known_extensions.values() for entry in grouped
]
1879
+
1880
# Extension strings that are considered video formats. Used to build the
# ``video_extensions`` list below from the ``known_extensions`` registry.
_video_extension_strings = [
    ".264",
    ".265",
    ".3g2",
    ".3gp",
    ".a64",
    ".A64",
    ".adp",
    ".amr",
    ".amv",
    ".asf",
    ".avc",
    ".avi",
    ".avr",
    ".avs",
    ".avs2",
    ".avs3",
    ".bmv",
    ".cavs",
    ".cdg",
    ".cdxl",
    ".cgi",
    ".chk",
    ".cif",
    ".cpk",
    ".dat",
    ".dav",
    ".dif",
    ".dnxhd",
    ".dnxhr",
    ".drc",
    ".dv",
    ".dvd",
    ".f4v",
    ".flm",
    ".flv",
    ".gsm",
    ".gxf",
    ".h261",
    ".h263",
    ".h264",
    ".h265",
    ".h26l",
    ".hevc",
    ".idf",
    ".ifv",
    ".imx",
    ".ipu",
    ".ism",
    ".isma",
    ".ismv",
    ".ivf",
    ".ivr",
    ".j2k",
    ".kux",
    ".lvf",
    ".m1v",
    ".m2t",
    ".m2ts",
    ".m2v",
    ".m4a",
    ".m4b",
    ".m4v",
    ".mj2",
    ".mjpeg",
    ".mjpg",
    ".mk3d",
    ".mka",
    ".mks",
    ".mkv",
    ".mods",
    ".moflex",
    ".mov",
    ".mp4",
    ".mpc",
    ".mpd",
    ".mpeg",
    ".mpg",
    ".mpo",
    ".mts",
    ".mvi",
    ".mxf",
    ".mxg",
    ".nut",
    ".obu",
    ".ogg",
    ".ogv",
    ".psp",
    ".qcif",
    ".rcv",
    ".rgb",
    ".rm",
    ".roq",
    ".sdr2",
    ".ser",
    ".sga",
    ".svag",
    ".svs",
    ".ts",
    ".ty",
    ".ty+",
    ".v",
    ".v210",
    ".vb",
    ".vc1",
    ".vc2",
    ".viv",
    ".vob",
    ".webm",
    ".wmv",
    ".wtv",
    ".xl",
    ".xmv",
    ".y4m",
    ".yop",
    ".yuv",
    ".yuv10",
]
# For each video extension string pick the first (highest-priority) registered
# FileExtension entry, then order the result alphabetically by extension.
video_extensions = [
    known_extensions[ext_string][0] for ext_string in _video_extension_strings
]
video_extensions.sort(key=lambda entry: entry.extension)
evalkit_cambrian/lib/python3.10/site-packages/imageio/config/plugins.py ADDED
@@ -0,0 +1,782 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+
3
+ from ..core.legacy_plugin_wrapper import LegacyPlugin
4
+
5
+
6
class PluginConfig:
    """Plugin Configuration Metadata

    Holds everything needed to lazily import a plugin: where it lives, what it
    is called, and (for legacy v2 plugins) the arguments used to construct it.

    Parameters
    ----------
    name : str
        The name of the plugin.
    class_name : str
        The name of the plugin class inside the plugin module.
    module_name : str
        The name of the module/package from which to import the plugin.
    is_legacy : bool
        If True, this plugin is a v2 plugin and will be wrapped in a
        LegacyPlugin. Default: False.
    package_name : str
        If the given module name points to a relative module, then the package
        name determines the package it is relative to.
    install_name : str
        The name of the optional dependency that can be used to install this
        plugin if it is missing. Falls back to ``name`` when not given.
    legacy_args : Dict
        A dictionary of kwargs to pass to the v2 plugin (Format) upon
        construction.

    Examples
    --------
    >>> PluginConfig(
            name="TIFF",
            class_name="TiffFormat",
            module_name="imageio.plugins.tifffile",
            is_legacy=True,
            install_name="tifffile",
            legacy_args={
                "description": "TIFF format",
                "extensions": ".tif .tiff .stk .lsm",
                "modes": "iIvV",
            },
        )
    >>> PluginConfig(
            name="pillow",
            class_name="PillowPlugin",
            module_name="imageio.plugins.pillow"
        )

    """

    def __init__(
        self,
        name,
        class_name,
        module_name,
        *,
        is_legacy=False,
        package_name=None,
        install_name=None,
        legacy_args=None,
    ):
        self.name = name
        self.class_name = class_name
        self.module_name = module_name
        self.package_name = package_name

        self.is_legacy = is_legacy
        # An empty/missing install_name falls back to the plugin's own name.
        self.install_name = install_name or name
        # Base kwargs for legacy (v2) Format construction; caller-provided
        # legacy_args override the defaults.
        combined_args = {"name": name, "description": "A legacy plugin"}
        combined_args.update(legacy_args or {})
        self.legacy_args = combined_args

    @property
    def format(self):
        """For backwards compatibility with FormatManager

        Delete when migrating to v3
        """
        if not self.is_legacy:
            raise RuntimeError("Can only get format for legacy plugins.")

        plugin_module = importlib.import_module(self.module_name, self.package_name)
        format_class = getattr(plugin_module, self.class_name)
        return format_class(**self.legacy_args)

    @property
    def plugin_class(self):
        """Get the plugin class (import if needed)

        Returns
        -------
        plugin_class : Any
            The class that can be used to instantiate plugins.

        """

        plugin_module = importlib.import_module(self.module_name, self.package_name)
        loaded_class = getattr(plugin_module, self.class_name)

        if not self.is_legacy:
            return loaded_class

        # v2 plugins are constructed once here; each request then gets its own
        # LegacyPlugin wrapper around that shared Format instance.
        legacy_format = loaded_class(**self.legacy_args)

        def partial_legacy_plugin(request):
            return LegacyPlugin(request, legacy_format)

        return partial_legacy_plugin
112
+
113
+
114
# Registry of plugins keyed by plugin name. Since Python 3.7 dict insertion
# order is preserved, so the order of the entries below is the order in which
# plugins are tried during the full fallback search.
known_plugins = {
    "pillow": PluginConfig(
        name="pillow", class_name="PillowPlugin", module_name="imageio.plugins.pillow"
    ),
    "pyav": PluginConfig(
        name="pyav", class_name="PyAVPlugin", module_name="imageio.plugins.pyav"
    ),
    "opencv": PluginConfig(
        name="opencv", class_name="OpenCVPlugin", module_name="imageio.plugins.opencv"
    ),
    "tifffile": PluginConfig(
        name="tifffile",
        class_name="TifffilePlugin",
        module_name="imageio.plugins.tifffile_v3",
    ),
    "SPE": PluginConfig(
        name="spe", class_name="SpePlugin", module_name="imageio.plugins.spe"
    ),
    "rawpy": PluginConfig(
        name="rawpy", class_name="RawPyPlugin", module_name="imageio.plugins.rawpy"
    ),
}

# Legacy plugins
# ==============
#
# Which are partly registered by format, partly by plugin, and partly by a mix
# of both. We keep the naming here for backwards compatibility.
# In v3 this should become a single entry per plugin named after the plugin
# We can choose extension-specific priority in ``config.extensions``.

known_plugins["TIFF"] = PluginConfig(
    name="TIFF",
    class_name="TiffFormat",
    module_name="imageio.plugins.tifffile",
    is_legacy=True,
    install_name="tifffile",
    legacy_args={
        "description": "TIFF format",
        "extensions": ".tif .tiff .stk .lsm",
        "modes": "iIvV",
    },
)
162
+
163
+ # PILLOW plugin formats (legacy)
164
# PILLOW plugin formats (legacy, v2 API).
# Each entry: (Pillow plugin id, description, space-separated extensions,
# imageio wrapper class). An empty extension string means Pillow recognises
# the format by content only.
PILLOW_FORMATS = [
    ("BMP", "Windows Bitmap", ".bmp", "PillowFormat"),
    ("BUFR", "BUFR", ".bufr", "PillowFormat"),
    ("CUR", "Windows Cursor", ".cur", "PillowFormat"),
    ("DCX", "Intel DCX", ".dcx", "PillowFormat"),
    ("DDS", "DirectDraw Surface", ".dds", "PillowFormat"),
    ("DIB", "Windows Bitmap", "", "PillowFormat"),
    ("EPS", "Encapsulated Postscript", ".ps .eps", "PillowFormat"),
    ("FITS", "FITS", ".fit .fits", "PillowFormat"),
    ("FLI", "Autodesk FLI/FLC Animation", ".fli .flc", "PillowFormat"),
    ("FPX", "FlashPix", ".fpx", "PillowFormat"),
    ("FTEX", "Texture File Format (IW2:EOC)", ".ftc .ftu", "PillowFormat"),
    ("GBR", "GIMP brush file", ".gbr", "PillowFormat"),
    ("GIF", "Compuserve GIF", ".gif", "GIFFormat"),
    ("GRIB", "GRIB", ".grib", "PillowFormat"),
    ("HDF5", "HDF5", ".h5 .hdf", "PillowFormat"),
    ("ICNS", "Mac OS icns resource", ".icns", "PillowFormat"),
    ("ICO", "Windows Icon", ".ico", "PillowFormat"),
    ("IM", "IFUNC Image Memory", ".im", "PillowFormat"),
    ("IMT", "IM Tools", "", "PillowFormat"),
    ("IPTC", "IPTC/NAA", ".iim", "PillowFormat"),
    ("JPEG", "JPEG (ISO 10918)", ".jfif .jpe .jpg .jpeg", "JPEGFormat"),
    (
        "JPEG2000",
        "JPEG 2000 (ISO 15444)",
        ".jp2 .j2k .jpc .jpf .jpx .j2c",
        "JPEG2000Format",
    ),
    ("MCIDAS", "McIdas area file", "", "PillowFormat"),
    ("MIC", "Microsoft Image Composer", ".mic", "PillowFormat"),
    # skipped in legacy pillow
    # ("MPEG", "MPEG", ".mpg .mpeg", "PillowFormat"),
    ("MPO", "MPO (CIPA DC-007)", ".mpo", "PillowFormat"),
    ("MSP", "Windows Paint", ".msp", "PillowFormat"),
    ("PCD", "Kodak PhotoCD", ".pcd", "PillowFormat"),
    ("PCX", "Paintbrush", ".pcx", "PillowFormat"),
    ("PIXAR", "PIXAR raster image", ".pxr", "PillowFormat"),
    ("PNG", "Portable network graphics", ".png", "PNGFormat"),
    ("PPM", "Pbmplus image", ".pbm .pgm .ppm", "PillowFormat"),
    ("PSD", "Adobe Photoshop", ".psd", "PillowFormat"),
    ("SGI", "SGI Image File Format", ".bw .rgb .rgba .sgi", "PillowFormat"),
    ("SPIDER", "Spider 2D image", "", "PillowFormat"),
    ("SUN", "Sun Raster File", ".ras", "PillowFormat"),
    ("TGA", "Targa", ".tga", "PillowFormat"),
    ("TIFF", "Adobe TIFF", ".tif .tiff", "TIFFFormat"),
    ("WMF", "Windows Metafile", ".wmf .emf", "PillowFormat"),
    ("XBM", "X11 Bitmap", ".xbm", "PillowFormat"),
    ("XPM", "X11 Pixel Map", ".xpm", "PillowFormat"),
    ("XVTHUMB", "XV thumbnail image", "", "PillowFormat"),
]
for plugin_id, summary, extensions, pillow_class in PILLOW_FORMATS:
    # Only the GIF wrapper supports multi-image ("I") mode.
    supported_modes = "iI" if pillow_class == "GIFFormat" else "i"
    config = PluginConfig(
        name=plugin_id.upper() + "-PIL",
        class_name=pillow_class,
        module_name="imageio.plugins.pillow_legacy",
        is_legacy=True,
        install_name="pillow",
        legacy_args={
            "description": summary + " via Pillow",
            "extensions": extensions,
            "modes": supported_modes,
            "plugin_id": plugin_id,
        },
    )
    known_plugins[config.name] = config
229
+
230
+ known_plugins["FFMPEG"] = PluginConfig(
231
+ name="FFMPEG",
232
+ class_name="FfmpegFormat",
233
+ module_name="imageio.plugins.ffmpeg",
234
+ is_legacy=True,
235
+ install_name="ffmpeg",
236
+ legacy_args={
237
+ "description": "Many video formats and cameras (via ffmpeg)",
238
+ "extensions": ".mov .avi .mpg .mpeg .mp4 .mkv .webm .wmv .h264",
239
+ "modes": "I",
240
+ },
241
+ )
242
+
243
+ known_plugins["BSDF"] = PluginConfig(
244
+ name="BSDF",
245
+ class_name="BsdfFormat",
246
+ module_name="imageio.plugins.bsdf",
247
+ is_legacy=True,
248
+ install_name="bsdf",
249
+ legacy_args={
250
+ "description": "Format based on the Binary Structured Data Format",
251
+ "extensions": ".bsdf",
252
+ "modes": "iIvV",
253
+ },
254
+ )
255
+
256
+ known_plugins["DICOM"] = PluginConfig(
257
+ name="DICOM",
258
+ class_name="DicomFormat",
259
+ module_name="imageio.plugins.dicom",
260
+ is_legacy=True,
261
+ install_name="dicom",
262
+ legacy_args={
263
+ "description": "Digital Imaging and Communications in Medicine",
264
+ "extensions": ".dcm .ct .mri",
265
+ "modes": "iIvV",
266
+ },
267
+ )
268
+
269
+ known_plugins["FEI"] = PluginConfig(
270
+ name="FEI",
271
+ class_name="FEISEMFormat",
272
+ module_name="imageio.plugins.feisem",
273
+ is_legacy=True,
274
+ install_name="feisem",
275
+ legacy_args={
276
+ "description": "FEI-SEM TIFF format",
277
+ "extensions": [".tif", ".tiff"],
278
+ "modes": "iv",
279
+ },
280
+ )
281
+
282
+ known_plugins["FITS"] = PluginConfig(
283
+ name="FITS",
284
+ class_name="FitsFormat",
285
+ module_name="imageio.plugins.fits",
286
+ is_legacy=True,
287
+ install_name="fits",
288
+ legacy_args={
289
+ "description": "Flexible Image Transport System (FITS) format",
290
+ "extensions": ".fits .fit .fts .fz",
291
+ "modes": "iIvV",
292
+ },
293
+ )
294
+
295
+ known_plugins["GDAL"] = PluginConfig(
296
+ name="GDAL",
297
+ class_name="GdalFormat",
298
+ module_name="imageio.plugins.gdal",
299
+ is_legacy=True,
300
+ install_name="gdal",
301
+ legacy_args={
302
+ "description": "Geospatial Data Abstraction Library",
303
+ "extensions": ".tiff .tif .img .ecw .jpg .jpeg",
304
+ "modes": "iIvV",
305
+ },
306
+ )
307
+
308
+ known_plugins["ITK"] = PluginConfig(
309
+ name="ITK",
310
+ class_name="ItkFormat",
311
+ module_name="imageio.plugins.simpleitk",
312
+ is_legacy=True,
313
+ install_name="simpleitk",
314
+ legacy_args={
315
+ "description": "Insight Segmentation and Registration Toolkit (ITK) format",
316
+ "extensions": " ".join(
317
+ (
318
+ ".gipl",
319
+ ".ipl",
320
+ ".mha",
321
+ ".mhd",
322
+ ".nhdr",
323
+ ".nia",
324
+ ".hdr",
325
+ ".nrrd",
326
+ ".nii",
327
+ ".nii.gz",
328
+ ".img",
329
+ ".img.gz",
330
+ ".vtk",
331
+ ".hdf5",
332
+ ".lsm",
333
+ ".mnc",
334
+ ".mnc2",
335
+ ".mgh",
336
+ ".mnc",
337
+ ".pic",
338
+ ".bmp",
339
+ ".jpeg",
340
+ ".jpg",
341
+ ".png",
342
+ ".tiff",
343
+ ".tif",
344
+ ".dicom",
345
+ ".dcm",
346
+ ".gdcm",
347
+ )
348
+ ),
349
+ "modes": "iIvV",
350
+ },
351
+ )
352
+
353
+ known_plugins["NPZ"] = PluginConfig(
354
+ name="NPZ",
355
+ class_name="NpzFormat",
356
+ module_name="imageio.plugins.npz",
357
+ is_legacy=True,
358
+ install_name="numpy",
359
+ legacy_args={
360
+ "description": "Numpy's compressed array format",
361
+ "extensions": ".npz",
362
+ "modes": "iIvV",
363
+ },
364
+ )
365
+
366
+ known_plugins["SWF"] = PluginConfig(
367
+ name="SWF",
368
+ class_name="SWFFormat",
369
+ module_name="imageio.plugins.swf",
370
+ is_legacy=True,
371
+ install_name="swf",
372
+ legacy_args={
373
+ "description": "Shockwave flash",
374
+ "extensions": ".swf",
375
+ "modes": "I",
376
+ },
377
+ )
378
+
379
+ known_plugins["SCREENGRAB"] = PluginConfig(
380
+ name="SCREENGRAB",
381
+ class_name="ScreenGrabFormat",
382
+ module_name="imageio.plugins.grab",
383
+ is_legacy=True,
384
+ install_name="pillow",
385
+ legacy_args={
386
+ "description": "Grab screenshots (Windows and OS X only)",
387
+ "extensions": [],
388
+ "modes": "i",
389
+ },
390
+ )
391
+
392
+ known_plugins["CLIPBOARDGRAB"] = PluginConfig(
393
+ name="CLIPBOARDGRAB",
394
+ class_name="ClipboardGrabFormat",
395
+ module_name="imageio.plugins.grab",
396
+ is_legacy=True,
397
+ install_name="pillow",
398
+ legacy_args={
399
+ "description": "Grab from clipboard (Windows only)",
400
+ "extensions": [],
401
+ "modes": "i",
402
+ },
403
+ )
404
+
405
+ # LYTRO plugin (legacy)
406
# LYTRO plugin (legacy). Each entry: (name, description, extension, modes,
# wrapper class). The registry key is the upper-cased name.
lytro_formats = [
    ("lytro-lfr", "Lytro Illum lfr image file", ".lfr", "i", "LytroLfrFormat"),
    (
        "lytro-illum-raw",
        "Lytro Illum raw image file",
        ".raw",
        "i",
        "LytroIllumRawFormat",
    ),
    ("lytro-lfp", "Lytro F01 lfp image file", ".lfp", "i", "LytroLfpFormat"),
    ("lytro-f01-raw", "Lytro F01 raw image file", ".raw", "i", "LytroF01RawFormat"),
]
for fmt_name, fmt_descr, fmt_ext, fmt_mode, fmt_class in lytro_formats:
    known_plugins[fmt_name.upper()] = PluginConfig(
        name=fmt_name.upper(),
        class_name=fmt_class,
        module_name="imageio.plugins.lytro",
        is_legacy=True,
        install_name="lytro",
        legacy_args={
            "description": fmt_descr,
            "extensions": fmt_ext,
            "modes": fmt_mode,
        },
    )
432
+
433
+ # FreeImage plugin (legacy)
434
# FreeImage plugin (legacy). Each entry:
# (name, FreeImage format id ("fif"), description, extensions, modes,
# wrapper class, module). The registry key is the upper-cased name + "-FI".
FREEIMAGE_FORMATS = [
    ("BMP", 0, "Windows or OS/2 Bitmap", ".bmp", "i", "FreeimageBmpFormat", "imageio.plugins.freeimage"),
    ("CUT", 21, "Dr. Halo", ".cut", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("DDS", 24, "DirectX Surface", ".dds", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("EXR", 29, "ILM OpenEXR", ".exr", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("G3", 27, "Raw fax format CCITT G.3", ".g3", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("GIF", 25, "Static and animated gif (FreeImage)", ".gif", "iI", "GifFormat", "imageio.plugins.freeimagemulti"),
    ("HDR", 26, "High Dynamic Range Image", ".hdr", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("ICO", 1, "Windows Icon", ".ico", "iI", "IcoFormat", "imageio.plugins.freeimagemulti"),
    ("IFF", 5, "IFF Interleaved Bitmap", ".iff .lbm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("J2K", 30, "JPEG-2000 codestream", ".j2k .j2c", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("JNG", 3, "JPEG Network Graphics", ".jng", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("JP2", 31, "JPEG-2000 File Format", ".jp2", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("JPEG", 2, "JPEG - JFIF Compliant", ".jpg .jif .jpeg .jpe", "i", "FreeimageJpegFormat", "imageio.plugins.freeimage"),
    ("JPEG-XR", 36, "JPEG XR image format", ".jxr .wdp .hdp", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("KOALA", 4, "C64 Koala Graphics", ".koa", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    # not registered in legacy pillow
    # ("MNG", 6, "Multiple-image Network Graphics", ".mng", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PBM", 7, "Portable Bitmap (ASCII)", ".pbm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PBMRAW", 8, "Portable Bitmap (RAW)", ".pbm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PCD", 9, "Kodak PhotoCD", ".pcd", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PCX", 10, "Zsoft Paintbrush", ".pcx", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PFM", 32, "Portable floatmap", ".pfm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PGM", 11, "Portable Greymap (ASCII)", ".pgm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PGMRAW", 12, "Portable Greymap (RAW)", ".pgm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PICT", 33, "Macintosh PICT", ".pct .pict .pic", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("PNG", 13, "Portable Network Graphics", ".png", "i", "FreeimagePngFormat", "imageio.plugins.freeimage"),
    ("PPM", 14, "Portable Pixelmap (ASCII)", ".ppm", "i", "FreeimagePnmFormat", "imageio.plugins.freeimage"),
    ("PPMRAW", 15, "Portable Pixelmap (RAW)", ".ppm", "i", "FreeimagePnmFormat", "imageio.plugins.freeimage"),
    ("PSD", 20, "Adobe Photoshop", ".psd", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("RAS", 16, "Sun Raster Image", ".ras", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    (
        "RAW",
        34,
        "RAW camera image",
        ".3fr .arw .bay .bmq .cap .cine .cr2 .crw .cs1 .dc2 "
        ".dcr .drf .dsc .dng .erf .fff .ia .iiq .k25 .kc2 .kdc .mdc .mef .mos .mrw .nef .nrw .orf "
        ".pef .ptx .pxn .qtk .raf .raw .rdc .rw2 .rwl .rwz .sr2 .srf .srw .sti",
        "i",
        "FreeimageFormat",
        "imageio.plugins.freeimage",
    ),
    ("SGI", 28, "SGI Image Format", ".sgi .rgb .rgba .bw", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("TARGA", 17, "Truevision Targa", ".tga .targa", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("TIFF", 18, "Tagged Image File Format", ".tif .tiff", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("WBMP", 19, "Wireless Bitmap", ".wap .wbmp .wbm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("WebP", 35, "Google WebP image format", ".webp", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("XBM", 22, "X11 Bitmap Format", ".xbm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
    ("XPM", 23, "X11 Pixmap Format", ".xpm", "i", "FreeimageFormat", "imageio.plugins.freeimage"),
]
for fmt_name, fif_id, fmt_descr, fmt_ext, fmt_mode, fmt_class, fmt_module in (
    FREEIMAGE_FORMATS
):
    config = PluginConfig(
        name=fmt_name.upper() + "-FI",
        class_name=fmt_class,
        module_name=fmt_module,
        is_legacy=True,
        install_name="freeimage",
        legacy_args={
            "description": fmt_descr,
            "extensions": fmt_ext,
            "modes": fmt_mode,
            "fif": fif_id,
        },
    )
    known_plugins[config.name] = config

# exists for backwards compatibility with FormatManager
# delete in V3
_original_order = [key for key, cfg in known_plugins.items() if cfg.is_legacy]
evalkit_cambrian/lib/python3.10/site-packages/imageio/freeze.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Helper functions for freezing imageio.
3
+ """
4
+
5
+
6
def get_includes():
    """Return module names that freezing tools must bundle with imageio."""
    required_modules = ["email", "urllib.request", "numpy", "zipfile", "io"]
    return required_modules
8
+
9
+
10
def get_excludes():
    """Return module names safe to exclude when freezing (currently none)."""
    return list()
evalkit_cambrian/lib/python3.10/site-packages/imageio/plugins/grab.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ PIL-based formats to take screenshots and grab from the clipboard.
3
+ """
4
+
5
+ import threading
6
+
7
+ import numpy as np
8
+
9
+ from ..core import Format
10
+
11
+
12
class BaseGrabFormat(Format):
    """Shared machinery for the screen/clipboard grab formats.

    Handles the lazy, thread-safe import of ``PIL.ImageGrab`` and provides a
    minimal read-only ``Reader`` that delegates data access to the format.
    """

    # Whether importing Pillow has been *attempted* (not necessarily succeeded).
    _pillow_imported = False
    # Cached ``PIL.ImageGrab`` module, or None while unavailable.
    _ImageGrab = None

    def __init__(self, *args, **kwargs):
        super(BaseGrabFormat, self).__init__(*args, **kwargs)
        # Guards the one-time Pillow import against concurrent readers.
        self._lock = threading.RLock()

    def _init_pillow(self):
        """Import ``PIL.ImageGrab`` once; return it, or None if unavailable."""
        with self._lock:
            if not self._pillow_imported:
                self._pillow_imported = True  # more like: tried to import
                import PIL

                # Old-style PIL lacks __version__; imageio needs real Pillow.
                if not hasattr(PIL, "__version__"):  # pragma: no cover
                    raise ImportError("Imageio Pillow requires Pillow, not PIL!")
                try:
                    from PIL import ImageGrab
                except ImportError:
                    # ImageGrab is platform-dependent; signal "unsupported".
                    return None
                self._ImageGrab = ImageGrab
        return self._ImageGrab

    def _can_write(self, request):
        # Grab formats are strictly read-only.
        return False

    class Reader(Format.Reader):
        def _open(self):
            pass

        def _close(self):
            pass

        def _get_data(self, index):
            # The format object itself knows how to perform the grab.
            return self.format._get_data(index)
50
+
51
class ScreenGrabFormat(BaseGrabFormat):
    """The ScreenGrabFormat provided a means to grab screenshots using
    the uri of "<screen>".

    This functionality is provided via Pillow. Note that "<screen>" is
    only supported on Windows and OS X.

    Parameters for reading
    ----------------------
    No parameters.
    """

    def _can_read(self, request):
        # Only the special "<screen>" uri is handled, and only when
        # Pillow's ImageGrab module can actually be imported.
        if request.filename != "<screen>":
            return False
        return bool(self._init_pillow())

    def _get_data(self, index):
        image_grab = self._init_pillow()
        assert image_grab

        screenshot = image_grab.grab()
        assert screenshot is not None
        # Convert the PIL image to a numpy array; no metadata to report.
        return np.asarray(screenshot), {}
76
+
77
+
78
class ClipboardGrabFormat(BaseGrabFormat):
    """The ClipboardGrabFormat provided a means to grab image data from
    the clipboard, using the uri "<clipboard>"

    This functionality is provided via Pillow. Note that "<clipboard>" is
    only supported on Windows.

    Parameters for reading
    ----------------------
    No parameters.
    """

    def _can_read(self, request):
        # Only the special "<clipboard>" uri is handled, and only when
        # Pillow's ImageGrab module can actually be imported.
        if request.filename != "<clipboard>":
            return False
        return bool(self._init_pillow())

    def _get_data(self, index):
        image_grab = self._init_pillow()
        assert image_grab

        clipboard_image = image_grab.grabclipboard()
        if clipboard_image is None:
            # The clipboard may hold text or nothing at all.
            raise RuntimeError(
                "There seems to be no image data on the clipboard now."
            )
        return np.asarray(clipboard_image), {}
evalkit_cambrian/lib/python3.10/site-packages/imageio/py.typed ADDED
File without changes
evalkit_cambrian/lib/python3.10/site-packages/ninja-1.11.1.3.dist-info/licenses/LICENSE_Apache_20 ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction, and
10
+ distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by the copyright
13
+ owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all other entities
16
+ that control, are controlled by, or are under common control with that entity.
17
+ For the purposes of this definition, "control" means (i) the power, direct or
18
+ indirect, to cause the direction or management of such entity, whether by
19
+ contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
20
+ outstanding shares, or (iii) beneficial ownership of such entity.
21
+
22
+ "You" (or "Your") shall mean an individual or Legal Entity exercising
23
+ permissions granted by this License.
24
+
25
+ "Source" form shall mean the preferred form for making modifications, including
26
+ but not limited to software source code, documentation source, and configuration
27
+ files.
28
+
29
+ "Object" form shall mean any form resulting from mechanical transformation or
30
+ translation of a Source form, including but not limited to compiled object code,
31
+ generated documentation, and conversions to other media types.
32
+
33
+ "Work" shall mean the work of authorship, whether in Source or Object form, made
34
+ available under the License, as indicated by a copyright notice that is included
35
+ in or attached to the work (an example is provided in the Appendix below).
36
+
37
+ "Derivative Works" shall mean any work, whether in Source or Object form, that
38
+ is based on (or derived from) the Work and for which the editorial revisions,
39
+ annotations, elaborations, or other modifications represent, as a whole, an
40
+ original work of authorship. For the purposes of this License, Derivative Works
41
+ shall not include works that remain separable from, or merely link (or bind by
42
+ name) to the interfaces of, the Work and Derivative Works thereof.
43
+
44
+ "Contribution" shall mean any work of authorship, including the original version
45
+ of the Work and any modifications or additions to that Work or Derivative Works
46
+ thereof, that is intentionally submitted to Licensor for inclusion in the Work
47
+ by the copyright owner or by an individual or Legal Entity authorized to submit
48
+ on behalf of the copyright owner. For the purposes of this definition,
49
+ "submitted" means any form of electronic, verbal, or written communication sent
50
+ to the Licensor or its representatives, including but not limited to
51
+ communication on electronic mailing lists, source code control systems, and
52
+ issue tracking systems that are managed by, or on behalf of, the Licensor for
53
+ the purpose of discussing and improving the Work, but excluding communication
54
+ that is conspicuously marked or otherwise designated in writing by the copyright
55
+ owner as "Not a Contribution."
56
+
57
+ "Contributor" shall mean Licensor and any individual or Legal Entity on behalf
58
+ of whom a Contribution has been received by Licensor and subsequently
59
+ incorporated within the Work.
60
+
61
+ 2. Grant of Copyright License.
62
+
63
+ Subject to the terms and conditions of this License, each Contributor hereby
64
+ grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
65
+ irrevocable copyright license to reproduce, prepare Derivative Works of,
66
+ publicly display, publicly perform, sublicense, and distribute the Work and such
67
+ Derivative Works in Source or Object form.
68
+
69
+ 3. Grant of Patent License.
70
+
71
+ Subject to the terms and conditions of this License, each Contributor hereby
72
+ grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
73
+ irrevocable (except as stated in this section) patent license to make, have
74
+ made, use, offer to sell, sell, import, and otherwise transfer the Work, where
75
+ such license applies only to those patent claims licensable by such Contributor
76
+ that are necessarily infringed by their Contribution(s) alone or by combination
77
+ of their Contribution(s) with the Work to which such Contribution(s) was
78
+ submitted. If You institute patent litigation against any entity (including a
79
+ cross-claim or counterclaim in a lawsuit) alleging that the Work or a
80
+ Contribution incorporated within the Work constitutes direct or contributory
81
+ patent infringement, then any patent licenses granted to You under this License
82
+ for that Work shall terminate as of the date such litigation is filed.
83
+
84
+ 4. Redistribution.
85
+
86
+ You may reproduce and distribute copies of the Work or Derivative Works thereof
87
+ in any medium, with or without modifications, and in Source or Object form,
88
+ provided that You meet the following conditions:
89
+
90
+ You must give any other recipients of the Work or Derivative Works a copy of
91
+ this License; and
92
+ You must cause any modified files to carry prominent notices stating that You
93
+ changed the files; and
94
+ You must retain, in the Source form of any Derivative Works that You distribute,
95
+ all copyright, patent, trademark, and attribution notices from the Source form
96
+ of the Work, excluding those notices that do not pertain to any part of the
97
+ Derivative Works; and
98
+ If the Work includes a "NOTICE" text file as part of its distribution, then any
99
+ Derivative Works that You distribute must include a readable copy of the
100
+ attribution notices contained within such NOTICE file, excluding those notices
101
+ that do not pertain to any part of the Derivative Works, in at least one of the
102
+ following places: within a NOTICE text file distributed as part of the
103
+ Derivative Works; within the Source form or documentation, if provided along
104
+ with the Derivative Works; or, within a display generated by the Derivative
105
+ Works, if and wherever such third-party notices normally appear. The contents of
106
+ the NOTICE file are for informational purposes only and do not modify the
107
+ License. You may add Your own attribution notices within Derivative Works that
108
+ You distribute, alongside or as an addendum to the NOTICE text from the Work,
109
+ provided that such additional attribution notices cannot be construed as
110
+ modifying the License.
111
+ You may add Your own copyright statement to Your modifications and may provide
112
+ additional or different license terms and conditions for use, reproduction, or
113
+ distribution of Your modifications, or for any such Derivative Works as a whole,
114
+ provided Your use, reproduction, and distribution of the Work otherwise complies
115
+ with the conditions stated in this License.
116
+
117
+ 5. Submission of Contributions.
118
+
119
+ Unless You explicitly state otherwise, any Contribution intentionally submitted
120
+ for inclusion in the Work by You to the Licensor shall be under the terms and
121
+ conditions of this License, without any additional terms or conditions.
122
+ Notwithstanding the above, nothing herein shall supersede or modify the terms of
123
+ any separate license agreement you may have executed with Licensor regarding
124
+ such Contributions.
125
+
126
+ 6. Trademarks.
127
+
128
+ This License does not grant permission to use the trade names, trademarks,
129
+ service marks, or product names of the Licensor, except as required for
130
+ reasonable and customary use in describing the origin of the Work and
131
+ reproducing the content of the NOTICE file.
132
+
133
+ 7. Disclaimer of Warranty.
134
+
135
+ Unless required by applicable law or agreed to in writing, Licensor provides the
136
+ Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
137
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
138
+ including, without limitation, any warranties or conditions of TITLE,
139
+ NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
140
+ solely responsible for determining the appropriateness of using or
141
+ redistributing the Work and assume any risks associated with Your exercise of
142
+ permissions under this License.
143
+
144
+ 8. Limitation of Liability.
145
+
146
+ In no event and under no legal theory, whether in tort (including negligence),
147
+ contract, or otherwise, unless required by applicable law (such as deliberate
148
+ and grossly negligent acts) or agreed to in writing, shall any Contributor be
149
+ liable to You for damages, including any direct, indirect, special, incidental,
150
+ or consequential damages of any character arising as a result of this License or
151
+ out of the use or inability to use the Work (including but not limited to
152
+ damages for loss of goodwill, work stoppage, computer failure or malfunction, or
153
+ any and all other commercial damages or losses), even if such Contributor has
154
+ been advised of the possibility of such damages.
155
+
156
+ 9. Accepting Warranty or Additional Liability.
157
+
158
+ While redistributing the Work or Derivative Works thereof, You may choose to
159
+ offer, and charge a fee for, acceptance of support, warranty, indemnity, or
160
+ other liability obligations and/or rights consistent with this License. However,
161
+ in accepting such obligations, You may act only on Your own behalf and on Your
162
+ sole responsibility, not on behalf of any other Contributor, and only if You
163
+ agree to indemnify, defend, and hold each Contributor harmless for any liability
164
+ incurred by, or claims asserted against, such Contributor by reason of your
165
+ accepting any such warranty or additional liability.
166
+
167
+ END OF TERMS AND CONDITIONS
168
+
169
+ APPENDIX: How to apply the Apache License to your work
170
+
171
+ To apply the Apache License to your work, attach the following boilerplate
172
+ notice, with the fields enclosed by brackets "[]" replaced with your own
173
+ identifying information. (Don't include the brackets!) The text should be
174
+ enclosed in the appropriate comment syntax for the file format. We also
175
+ recommend that a file or class name and description of purpose be included on
176
+ the same "printed page" as the copyright notice for easier identification within
177
+ third-party archives.
178
+
179
+ Copyright [yyyy] [name of copyright owner]
180
+
181
+ Licensed under the Apache License, Version 2.0 (the "License");
182
+ you may not use this file except in compliance with the License.
183
+ You may obtain a copy of the License at
184
+
185
+ http://www.apache.org/licenses/LICENSE-2.0
186
+
187
+ Unless required by applicable law or agreed to in writing, software
188
+ distributed under the License is distributed on an "AS IS" BASIS,
189
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
190
+ See the License for the specific language governing permissions and
191
+ limitations under the License.
infer_4_47_1/lib/python3.10/site-packages/ray/__pycache__/cluster_utils.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/compat.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/conftest_utils.cpython-310.pyc ADDED
Binary file (634 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/dict.cpython-310.pyc ADDED
Binary file (6.72 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/gcs_pubsub.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log.cpython-310.pyc ADDED
Binary file (3.1 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/log_monitor.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/logging_utils.cpython-310.pyc ADDED
Binary file (956 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/node.cpython-310.pyc ADDED
Binary file (45.3 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/process_watcher.cpython-310.pyc ADDED
Binary file (5.06 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/protobuf_compat.cpython-310.pyc ADDED
Binary file (1.64 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/ray_experimental_perf.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/serialization.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/state_api_test_utils.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/tls_utils.cpython-310.pyc ADDED
Binary file (3.29 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/__pycache__/utils.cpython-310.pyc ADDED
Binary file (56.4 kB). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/_private/usage/__init__.py ADDED
File without changes
infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__init__.py ADDED
File without changes
infer_4_47_1/lib/python3.10/site-packages/ray/_private/workers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (178 Bytes). View file
 
infer_4_47_1/lib/python3.10/site-packages/ray/data/__init__.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Short term workaround for https://github.com/ray-project/ray/issues/32435
2
+ # Dataset has a hard dependency on pandas, so it doesn't need to be delayed.
3
+ import pandas # noqa
4
+ from packaging.version import parse as parse_version
5
+
6
+ from ray._private.utils import _get_pyarrow_version
7
+ from ray.data._internal.compute import ActorPoolStrategy
8
+ from ray.data._internal.datasource.tfrecords_datasource import TFXReadOptions
9
+ from ray.data._internal.execution.interfaces import (
10
+ ExecutionOptions,
11
+ ExecutionResources,
12
+ NodeIdStr,
13
+ )
14
+ from ray.data._internal.logging import configure_logging
15
+ from ray.data.context import DataContext, DatasetContext
16
+ from ray.data.dataset import Dataset, Schema
17
+ from ray.data.datasource import (
18
+ BlockBasedFileDatasink,
19
+ Datasink,
20
+ Datasource,
21
+ ReadTask,
22
+ RowBasedFileDatasink,
23
+ )
24
+ from ray.data.iterator import DataIterator, DatasetIterator
25
+ from ray.data.preprocessor import Preprocessor
26
+ from ray.data.read_api import ( # noqa: F401
27
+ from_arrow,
28
+ from_arrow_refs,
29
+ from_blocks,
30
+ from_dask,
31
+ from_huggingface,
32
+ from_items,
33
+ from_mars,
34
+ from_modin,
35
+ from_numpy,
36
+ from_numpy_refs,
37
+ from_pandas,
38
+ from_pandas_refs,
39
+ from_spark,
40
+ from_tf,
41
+ from_torch,
42
+ range,
43
+ range_tensor,
44
+ read_avro,
45
+ read_bigquery,
46
+ read_binary_files,
47
+ read_csv,
48
+ read_databricks_tables,
49
+ read_datasource,
50
+ read_delta_sharing_tables,
51
+ read_hudi,
52
+ read_iceberg,
53
+ read_images,
54
+ read_json,
55
+ read_lance,
56
+ read_mongo,
57
+ read_numpy,
58
+ read_parquet,
59
+ read_parquet_bulk,
60
+ read_sql,
61
+ read_text,
62
+ read_tfrecords,
63
+ read_webdataset,
64
+ )
65
+
66
# Module-level cached global functions for callable classes. It needs to be defined here
# since it has to be process-global across cloudpickled funcs.
_map_actor_context = None

# Set up Ray Data's logging handlers as a module-import side effect.
configure_logging()

try:
    import pyarrow as pa

    # https://github.com/apache/arrow/pull/38608 deprecated `PyExtensionType`, and
    # disabled its deserialization by default. To ensure that users can load data
    # written with earlier version of Ray Data, we enable auto-loading of serialized
    # tensor extensions.
    pyarrow_version = _get_pyarrow_version()
    if not isinstance(pyarrow_version, str):
        # PyArrow is mocked in documentation builds. In this case, we don't need to do
        # anything.
        pass
    else:
        from ray._private.ray_constants import env_bool

        # Opt-in escape hatch: auto-loading PyExtensionType is a security-relevant
        # behavior, so it is off unless the user sets this env var.
        RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE = env_bool(
            "RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE", False
        )

        if (
            parse_version(pyarrow_version) >= parse_version("14.0.1")
            and RAY_DATA_AUTOLOAD_PYEXTENSIONTYPE
        ):
            pa.PyExtensionType.set_auto_load(True)
            # Import these arrow extension types to ensure that they are registered.
            from ray.air.util.tensor_extensions.arrow import (  # noqa
                ArrowTensorType,
                ArrowVariableShapedTensorType,
            )
except ModuleNotFoundError:
    # pyarrow not installed; the compat shim is simply skipped.
    pass
103
+
104
+
105
+ __all__ = [
106
+ "ActorPoolStrategy",
107
+ "BlockBasedFileDatasink",
108
+ "Dataset",
109
+ "DataContext",
110
+ "DatasetContext", # Backwards compatibility alias.
111
+ "DataIterator",
112
+ "DatasetIterator", # Backwards compatibility alias.
113
+ "Datasink",
114
+ "Datasource",
115
+ "ExecutionOptions",
116
+ "ExecutionResources",
117
+ "NodeIdStr",
118
+ "ReadTask",
119
+ "RowBasedFileDatasink",
120
+ "Schema",
121
+ "from_dask",
122
+ "from_items",
123
+ "from_arrow",
124
+ "from_arrow_refs",
125
+ "from_mars",
126
+ "from_modin",
127
+ "from_numpy",
128
+ "from_numpy_refs",
129
+ "from_pandas",
130
+ "from_pandas_refs",
131
+ "from_spark",
132
+ "from_tf",
133
+ "from_torch",
134
+ "from_huggingface",
135
+ "range",
136
+ "range_tensor",
137
+ "read_avro",
138
+ "read_text",
139
+ "read_binary_files",
140
+ "read_csv",
141
+ "read_datasource",
142
+ "read_delta_sharing_tables",
143
+ "read_hudi",
144
+ "read_iceberg",
145
+ "read_images",
146
+ "read_json",
147
+ "read_lance",
148
+ "read_numpy",
149
+ "read_mongo",
150
+ "read_parquet",
151
+ "read_parquet_bulk",
152
+ "read_sql",
153
+ "read_tfrecords",
154
+ "read_webdataset",
155
+ "Preprocessor",
156
+ "TFXReadOptions",
157
+ ]
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/__init__.py ADDED
File without changes
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/aggregate.py ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union
3
+
4
+ from ray.data._internal.null_aggregate import (
5
+ _null_wrap_accumulate_block,
6
+ _null_wrap_accumulate_row,
7
+ _null_wrap_finalize,
8
+ _null_wrap_init,
9
+ _null_wrap_merge,
10
+ )
11
+ from ray.data._internal.planner.exchange.sort_task_spec import SortKey
12
+ from ray.data.aggregate import AggregateFn
13
+ from ray.data.block import AggType, Block, BlockAccessor
14
+
15
+ if TYPE_CHECKING:
16
+ import pyarrow as pa
17
+
18
+
19
class _AggregateOnKeyBase(AggregateFn):
    """Base class for aggregations that target a single key column."""

    def _validate(self, schema: Optional[Union[type, "pa.lib.Schema"]]) -> None:
        """Raise if the configured key column is not present in ``schema``."""
        SortKey(self._key_fn).validate_schema(schema)

    def _set_key_fn(self, on: str):
        """Remember the target column so ``_validate`` can check it later."""
        self._key_fn = on
25
+
26
+
27
class Count(AggregateFn):
    """Row-count aggregation (``count()``)."""

    def __init__(self):
        def _add_block_rows(acc, block):
            # Accumulate by adding each incoming block's row count.
            return acc + BlockAccessor.for_block(block).num_rows()

        super().__init__(
            init=lambda _k: 0,
            accumulate_block=_add_block_rows,
            merge=lambda left, right: left + right,
            name="count()",
        )
39
+
40
+
41
class Sum(_AggregateOnKeyBase):
    """Sum aggregation over the column named by ``on``."""

    def __init__(
        self,
        on: Optional[str] = None,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        # Output column name: explicit alias wins, else "sum(<col>)".
        self._rs_name = alias_name if alias_name else f"sum({str(on)})"

        merge_nonnull = _null_wrap_merge(ignore_nulls, lambda x, y: x + y)

        def _block_sum(block: Block) -> AggType:
            # Vectorized per-block sum via the block accessor.
            return BlockAccessor.for_block(block).sum(on, ignore_nulls)

        super().__init__(
            init=_null_wrap_init(lambda _k: 0),
            merge=merge_nonnull,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls, _block_sum, merge_nonnull
            ),
            finalize=_null_wrap_finalize(lambda acc: acc),
            name=self._rs_name,
        )
69
+
70
+
71
class Min(_AggregateOnKeyBase):
    """Min aggregation over the column named by ``on``."""

    def __init__(
        self,
        on: Optional[str] = None,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        # Output column name: explicit alias wins, else "min(<col>)".
        self._rs_name = alias_name if alias_name else f"min({str(on)})"

        merge_nonnull = _null_wrap_merge(ignore_nulls, min)

        def _block_min(block: Block) -> AggType:
            # Vectorized per-block min via the block accessor.
            return BlockAccessor.for_block(block).min(on, ignore_nulls)

        super().__init__(
            # +inf is the identity element for min.
            init=_null_wrap_init(lambda _k: float("inf")),
            merge=merge_nonnull,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls, _block_min, merge_nonnull
            ),
            finalize=_null_wrap_finalize(lambda acc: acc),
            name=self._rs_name,
        )
99
+
100
+
101
class Max(_AggregateOnKeyBase):
    """Max aggregation over the column named by ``on``."""

    def __init__(
        self,
        on: Optional[str] = None,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        # Output column name: explicit alias wins, else "max(<col>)".
        self._rs_name = alias_name if alias_name else f"max({str(on)})"

        merge_nonnull = _null_wrap_merge(ignore_nulls, max)

        def _block_max(block: Block) -> AggType:
            # Vectorized per-block max via the block accessor.
            return BlockAccessor.for_block(block).max(on, ignore_nulls)

        super().__init__(
            # -inf is the identity element for max.
            init=_null_wrap_init(lambda _k: float("-inf")),
            merge=merge_nonnull,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls, _block_max, merge_nonnull
            ),
            finalize=_null_wrap_finalize(lambda acc: acc),
            name=self._rs_name,
        )
129
+
130
+
131
class Mean(_AggregateOnKeyBase):
    """Defines mean aggregation.

    The accumulator is a ``[sum, count]`` pair; ``finalize`` divides the two.
    """

    def __init__(
        self,
        on: Optional[str] = None,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        if alias_name:
            self._rs_name = alias_name
        else:
            self._rs_name = f"mean({str(on)})"

        # Merging adds the partial sums and counts element-wise.
        null_merge = _null_wrap_merge(
            ignore_nulls, lambda a1, a2: [a1[0] + a2[0], a1[1] + a2[1]]
        )

        def vectorized_mean(block: Block) -> AggType:
            # Produce a per-block [sum, count] accumulation in one pass.
            block_acc = BlockAccessor.for_block(block)
            count = block_acc.count(on)
            if count == 0 or count is None:
                # Empty or all null.
                return None
            sum_ = block_acc.sum(on, ignore_nulls)
            if sum_ is None:
                # ignore_nulls=False and at least one null.
                return None
            return [sum_, count]

        super().__init__(
            init=_null_wrap_init(lambda k: [0, 0]),
            merge=null_merge,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls,
                vectorized_mean,
                null_merge,
            ),
            # mean = total sum / total count.
            finalize=_null_wrap_finalize(lambda a: a[0] / a[1]),
            name=(self._rs_name),
        )
173
+
174
+
175
class Std(_AggregateOnKeyBase):
    """Defines standard deviation aggregation.

    Uses Welford's online method for an accumulator-style computation of the
    standard deviation. This method was chosen due to its numerical
    stability, and it being computable in a single pass.
    This may give different (but more accurate) results than NumPy, Pandas,
    and sklearn, which use a less numerically stable two-pass algorithm.
    See
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
    """

    def __init__(
        self,
        on: Optional[str] = None,
        ddof: int = 1,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        if alias_name:
            self._rs_name = alias_name
        else:
            self._rs_name = f"std({str(on)})"

        def merge(a: List[float], b: List[float]):
            # Merges two accumulations into one. Each accumulation is a
            # [M2, mean, count] triple, where M2 is the running sum of
            # squared differences from the mean.
            # See
            # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
            M2_a, mean_a, count_a = a
            M2_b, mean_b, count_b = b
            delta = mean_b - mean_a
            count = count_a + count_b
            # NOTE: We use this mean calculation since it's more numerically
            # stable than mean_a + delta * count_b / count, which actually
            # deviates from Pandas in the ~15th decimal place and causes our
            # exact comparison tests to fail.
            mean = (mean_a * count_a + mean_b * count_b) / count
            # Update the sum of squared differences.
            M2 = M2_a + M2_b + (delta**2) * count_a * count_b / count
            return [M2, mean, count]

        null_merge = _null_wrap_merge(ignore_nulls, merge)

        def vectorized_std(block: Block) -> AggType:
            # Produce a per-block [M2, mean, count] accumulation in one pass.
            block_acc = BlockAccessor.for_block(block)
            count = block_acc.count(on)
            if count == 0 or count is None:
                # Empty or all null.
                return None
            sum_ = block_acc.sum(on, ignore_nulls)
            if sum_ is None:
                # ignore_nulls=False and at least one null.
                return None
            mean = sum_ / count
            M2 = block_acc.sum_of_squared_diffs_from_mean(on, ignore_nulls, mean)
            return [M2, mean, count]

        def finalize(a: List[float]):
            # Compute the final standard deviation from the accumulated
            # sum of squared differences from current mean and the count.
            M2, mean, count = a
            if count < 2:
                # Fewer than two samples: define the std as 0.0.
                return 0.0
            return math.sqrt(M2 / (count - ddof))

        super().__init__(
            init=_null_wrap_init(lambda k: [0, 0, 0]),
            merge=null_merge,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls,
                vectorized_std,
                null_merge,
            ),
            finalize=_null_wrap_finalize(finalize),
            name=(self._rs_name),
        )
252
+
253
+
254
class AbsMax(_AggregateOnKeyBase):
    """Absolute-max aggregation over the column named by ``on``."""

    def __init__(
        self,
        on: Optional[str] = None,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        # Normalize `on` into a per-row value extractor.
        extract = _to_on_fn(on)
        # Output column name: explicit alias wins, else "abs_max(<col>)".
        self._rs_name = alias_name if alias_name else f"abs_max({str(on)})"

        super().__init__(
            init=_null_wrap_init(lambda _k: 0),
            merge=_null_wrap_merge(ignore_nulls, max),
            # Row-wise accumulation: keep the largest absolute value seen.
            accumulate_row=_null_wrap_accumulate_row(
                ignore_nulls, extract, lambda acc, value: max(acc, abs(value))
            ),
            finalize=_null_wrap_finalize(lambda acc: acc),
            name=self._rs_name,
        )
279
+
280
+
281
+ def _to_on_fn(on: Optional[str]):
282
+ if on is None:
283
+ return lambda r: r
284
+ elif isinstance(on, str):
285
+ return lambda r: r[on]
286
+ else:
287
+ return on
288
+
289
+
290
class Quantile(_AggregateOnKeyBase):
    """Defines Quantile aggregation.

    The accumulator collects all column values into a list; ``finalize`` then
    computes the ``q``-quantile by linear interpolation between the two
    nearest order statistics.

    Args:
        on: Column to aggregate on.
        q: Quantile to compute, in ``[0, 1]`` (0.5 is the median).
        ignore_nulls: Whether null values are skipped (``True``) or make the
            whole result null.
        alias_name: Optional name for the output column; defaults to
            ``quantile(<on>)``.
    """

    def __init__(
        self,
        on: Optional[str] = None,
        q: float = 0.5,
        ignore_nulls: bool = True,
        alias_name: Optional[str] = None,
    ):
        self._set_key_fn(on)
        self._q = q
        if alias_name:
            self._rs_name = alias_name
        else:
            self._rs_name = f"quantile({str(on)})"

        def merge(a: List[int], b: List[int]):
            # Merge two partial accumulations. Either side may be a list of
            # collected values or a bare scalar, so normalize accordingly.
            # Fix: use the builtin `list` in isinstance checks —
            # `isinstance(x, typing.List)` is deprecated (and removed from the
            # supported surface in newer typing specs); behavior is identical.
            if isinstance(a, list) and isinstance(b, list):
                a.extend(b)
                return a
            if isinstance(a, list) and not isinstance(b, list):
                if b is not None and b != "":
                    a.append(b)
                return a
            if isinstance(b, list) and not isinstance(a, list):
                if a is not None and a != "":
                    b.append(a)
                return b

            # Both sides are scalars: start a fresh list.
            ls = []
            if a is not None and a != "":
                ls.append(a)
            if b is not None and b != "":
                ls.append(b)
            return ls

        null_merge = _null_wrap_merge(ignore_nulls, merge)

        def block_row_ls(block: Block) -> AggType:
            # Collect the raw values of the target column from one block.
            block_acc = BlockAccessor.for_block(block)
            ls = []
            for row in block_acc.iter_rows(public_row_format=False):
                ls.append(row.get(on))
            return ls

        # NOTE: `math` is imported at module level; the previous redundant
        # function-local `import math` here was removed.
        def percentile(input_values, key: Optional[Callable[[Any], Any]] = None):
            # Linear-interpolation quantile over the collected values,
            # rounded to 5 decimal places when interpolation occurs.
            if not input_values:
                return None

            if key is None:
                key = lambda x: x  # noqa: E731

            input_values = sorted(input_values)
            k = (len(input_values) - 1) * self._q
            f = math.floor(k)
            c = math.ceil(k)
            if f == c:
                # k is an exact index: no interpolation needed.
                return key(input_values[int(k)])
            d0 = key(input_values[int(f)]) * (c - k)
            d1 = key(input_values[int(c)]) * (k - f)
            return round(d0 + d1, 5)

        super().__init__(
            # NOTE(review): the initial accumulator seeds the value list with a
            # literal 0, which `merge` above preserves — confirm this matches
            # the intended upstream semantics.
            init=_null_wrap_init(lambda k: [0]),
            merge=null_merge,
            accumulate_block=_null_wrap_accumulate_block(
                ignore_nulls,
                block_row_ls,
                null_merge,
            ),
            finalize=_null_wrap_finalize(percentile),
            name=(self._rs_name),
        )
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/arrow_block.py ADDED
@@ -0,0 +1,650 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import heapq
3
+ import logging
4
+ import random
5
+ from typing import (
6
+ TYPE_CHECKING,
7
+ Any,
8
+ Callable,
9
+ Dict,
10
+ Iterator,
11
+ List,
12
+ Optional,
13
+ Sequence,
14
+ Tuple,
15
+ TypeVar,
16
+ Union,
17
+ )
18
+
19
+ import numpy as np
20
+
21
+ from ray._private.utils import _get_pyarrow_version
22
+ from ray.air.constants import TENSOR_COLUMN_NAME
23
+ from ray.air.util.tensor_extensions.arrow import (
24
+ convert_to_pyarrow_array,
25
+ pyarrow_table_from_pydict,
26
+ )
27
+ from ray.data._internal.arrow_ops import transform_polars, transform_pyarrow
28
+ from ray.data._internal.numpy_support import convert_to_numpy
29
+ from ray.data._internal.row import TableRow
30
+ from ray.data._internal.table_block import TableBlockAccessor, TableBlockBuilder
31
+ from ray.data._internal.util import NULL_SENTINEL, find_partitions
32
+ from ray.data.block import (
33
+ Block,
34
+ BlockAccessor,
35
+ BlockExecStats,
36
+ BlockMetadata,
37
+ BlockType,
38
+ KeyType,
39
+ U,
40
+ )
41
+ from ray.data.context import DataContext
42
+
43
+ try:
44
+ import pyarrow
45
+ except ImportError:
46
+ pyarrow = None
47
+
48
+
49
+ if TYPE_CHECKING:
50
+ import pandas
51
+
52
+ from ray.data._internal.planner.exchange.sort_task_spec import SortKey
53
+ from ray.data.aggregate import AggregateFn
54
+
55
+
56
+ T = TypeVar("T")
57
+ logger = logging.getLogger(__name__)
58
+
59
+
60
+ # We offload some transformations to polars for performance.
61
+ def get_sort_transform(context: DataContext) -> Callable:
62
+ if context.use_polars:
63
+ return transform_polars.sort
64
+ else:
65
+ return transform_pyarrow.sort
66
+
67
+
68
+ def get_concat_and_sort_transform(context: DataContext) -> Callable:
69
+ if context.use_polars:
70
+ return transform_polars.concat_and_sort
71
+ else:
72
+ return transform_pyarrow.concat_and_sort
73
+
74
+
75
+ class ArrowRow(TableRow):
76
+ """
77
+ Row of a tabular Dataset backed by a Arrow Table block.
78
+ """
79
+
80
+ def __getitem__(self, key: Union[str, List[str]]) -> Any:
81
+ from ray.data.extensions import get_arrow_extension_tensor_types
82
+
83
+ tensor_arrow_extension_types = get_arrow_extension_tensor_types()
84
+
85
+ def get_item(keys: List[str]) -> Any:
86
+ schema = self._row.schema
87
+ if isinstance(schema.field(keys[0]).type, tensor_arrow_extension_types):
88
+ # Build a tensor row.
89
+ return tuple(
90
+ [
91
+ ArrowBlockAccessor._build_tensor_row(self._row, col_name=key)
92
+ for key in keys
93
+ ]
94
+ )
95
+
96
+ table = self._row.select(keys)
97
+ if len(table) == 0:
98
+ return None
99
+
100
+ items = [col[0] for col in table.columns]
101
+ try:
102
+ # Try to interpret this as a pyarrow.Scalar value.
103
+ return tuple([item.as_py() for item in items])
104
+
105
+ except AttributeError:
106
+ # Assume that this row is an element of an extension array, and
107
+ # that it is bypassing pyarrow's scalar model for Arrow < 8.0.0.
108
+ return items
109
+
110
+ is_single_item = isinstance(key, str)
111
+ keys = [key] if is_single_item else key
112
+
113
+ items = get_item(keys)
114
+
115
+ if items is None:
116
+ return None
117
+ elif is_single_item:
118
+ return items[0]
119
+ else:
120
+ return items
121
+
122
+ def __iter__(self) -> Iterator:
123
+ for k in self._row.column_names:
124
+ yield k
125
+
126
+ def __len__(self):
127
+ return self._row.num_columns
128
+
129
+
130
+ class ArrowBlockBuilder(TableBlockBuilder):
131
+ def __init__(self):
132
+ if pyarrow is None:
133
+ raise ImportError("Run `pip install pyarrow` for Arrow support")
134
+ super().__init__((pyarrow.Table, bytes))
135
+
136
+ @staticmethod
137
+ def _table_from_pydict(columns: Dict[str, List[Any]]) -> Block:
138
+ pa_cols: Dict[str, pyarrow.Array] = dict()
139
+
140
+ for col_name, col_vals in columns.items():
141
+ np_col_vals = convert_to_numpy(col_vals)
142
+
143
+ pa_cols[col_name] = convert_to_pyarrow_array(np_col_vals, col_name)
144
+
145
+ return pyarrow_table_from_pydict(pa_cols)
146
+
147
+ @staticmethod
148
+ def _concat_tables(tables: List[Block]) -> Block:
149
+ return transform_pyarrow.concat(tables)
150
+
151
+ @staticmethod
152
+ def _concat_would_copy() -> bool:
153
+ return False
154
+
155
+ @staticmethod
156
+ def _empty_table() -> "pyarrow.Table":
157
+ return pyarrow_table_from_pydict({})
158
+
159
+ def block_type(self) -> BlockType:
160
+ return BlockType.ARROW
161
+
162
+
163
+ class ArrowBlockAccessor(TableBlockAccessor):
164
+ ROW_TYPE = ArrowRow
165
+
166
+ def __init__(self, table: "pyarrow.Table"):
167
+ if pyarrow is None:
168
+ raise ImportError("Run `pip install pyarrow` for Arrow support")
169
+ super().__init__(table)
170
+
171
+ def column_names(self) -> List[str]:
172
+ return self._table.column_names
173
+
174
+ def append_column(self, name: str, data: Any) -> Block:
175
+ assert name not in self._table.column_names
176
+
177
+ if any(isinstance(item, np.ndarray) for item in data):
178
+ raise NotImplementedError(
179
+ f"`{self.__class__.__name__}.append_column()` doesn't support "
180
+ "array-like data."
181
+ )
182
+
183
+ return self._table.append_column(name, [data])
184
+
185
+ @classmethod
186
+ def from_bytes(cls, data: bytes) -> "ArrowBlockAccessor":
187
+ reader = pyarrow.ipc.open_stream(data)
188
+ return cls(reader.read_all())
189
+
190
+ @staticmethod
191
+ def _build_tensor_row(
192
+ row: ArrowRow, col_name: str = TENSOR_COLUMN_NAME
193
+ ) -> np.ndarray:
194
+ from packaging.version import parse as parse_version
195
+
196
+ element = row[col_name][0]
197
+ # TODO(Clark): Reduce this to np.asarray(element) once we only support Arrow
198
+ # 9.0.0+.
199
+ pyarrow_version = _get_pyarrow_version()
200
+ if pyarrow_version is not None:
201
+ pyarrow_version = parse_version(pyarrow_version)
202
+ if pyarrow_version is None or pyarrow_version >= parse_version("8.0.0"):
203
+ assert isinstance(element, pyarrow.ExtensionScalar)
204
+ if pyarrow_version is None or pyarrow_version >= parse_version("9.0.0"):
205
+ # For Arrow 9.0.0+, accessing an element in a chunked tensor array
206
+ # produces an ArrowTensorScalar, which we convert to an ndarray using
207
+ # .as_py().
208
+ element = element.as_py()
209
+ else:
210
+ # For Arrow 8.*, accessing an element in a chunked tensor array produces
211
+ # an ExtensionScalar, which we convert to an ndarray using our custom
212
+ # method.
213
+ element = element.type._extension_scalar_to_ndarray(element)
214
+ # For Arrow < 8.0.0, accessing an element in a chunked tensor array produces an
215
+ # ndarray, which we return directly.
216
+ assert isinstance(element, np.ndarray), type(element)
217
+ return element
218
+
219
+ def slice(self, start: int, end: int, copy: bool = False) -> "pyarrow.Table":
220
+ view = self._table.slice(start, end - start)
221
+ if copy:
222
+ view = transform_pyarrow.combine_chunks(view)
223
+ return view
224
+
225
+ def random_shuffle(self, random_seed: Optional[int]) -> "pyarrow.Table":
226
+ # TODO(swang): Creating this np.array index can add a lot of memory
227
+ # pressure when there are a large number of small rows. Investigate
228
+ # random shuffling in place to reduce memory pressure.
229
+ # See https://github.com/ray-project/ray/issues/42146.
230
+ random = np.random.RandomState(random_seed)
231
+ return self.take(random.permutation(self.num_rows()))
232
+
233
+ def schema(self) -> "pyarrow.lib.Schema":
234
+ return self._table.schema
235
+
236
+ def to_pandas(self) -> "pandas.DataFrame":
237
+ from ray.air.util.data_batch_conversion import _cast_tensor_columns_to_ndarrays
238
+
239
+ df = self._table.to_pandas()
240
+ ctx = DataContext.get_current()
241
+ if ctx.enable_tensor_extension_casting:
242
+ df = _cast_tensor_columns_to_ndarrays(df)
243
+ return df
244
+
245
+ def to_numpy(
246
+ self, columns: Optional[Union[str, List[str]]] = None
247
+ ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
248
+ if columns is None:
249
+ columns = self._table.column_names
250
+ should_be_single_ndarray = False
251
+ elif isinstance(columns, list):
252
+ should_be_single_ndarray = False
253
+ else:
254
+ columns = [columns]
255
+ should_be_single_ndarray = True
256
+
257
+ column_names_set = set(self._table.column_names)
258
+ for column in columns:
259
+ if column not in column_names_set:
260
+ raise ValueError(
261
+ f"Cannot find column {column}, available columns: "
262
+ f"{column_names_set}"
263
+ )
264
+
265
+ column_values_ndarrays = []
266
+
267
+ for col_name in columns:
268
+ col = self._table[col_name]
269
+
270
+ # Combine columnar values arrays to make these contiguous
271
+ # (making them compatible with numpy format)
272
+ combined_array = transform_pyarrow.combine_chunked_array(col)
273
+
274
+ column_values_ndarrays.append(
275
+ transform_pyarrow.to_numpy(combined_array, zero_copy_only=False)
276
+ )
277
+
278
+ if should_be_single_ndarray:
279
+ assert len(columns) == 1
280
+ return column_values_ndarrays[0]
281
+ else:
282
+ return dict(zip(columns, column_values_ndarrays))
283
+
284
+ def to_arrow(self) -> "pyarrow.Table":
285
+ return self._table
286
+
287
+ def num_rows(self) -> int:
288
+ # Arrow may represent an empty table via an N > 0 row, 0-column table, e.g. when
289
+ # slicing an empty table, so we return 0 if num_columns == 0.
290
+ return self._table.num_rows if self._table.num_columns > 0 else 0
291
+
292
+ def size_bytes(self) -> int:
293
+ return self._table.nbytes
294
+
295
+ def _zip(self, acc: BlockAccessor) -> "Block":
296
+ r = self.to_arrow()
297
+ s = acc.to_arrow()
298
+ for col_name in s.column_names:
299
+ col = s.column(col_name)
300
+ # Ensure the column names are unique after zip.
301
+ if col_name in r.column_names:
302
+ i = 1
303
+ new_name = col_name
304
+ while new_name in r.column_names:
305
+ new_name = "{}_{}".format(col_name, i)
306
+ i += 1
307
+ col_name = new_name
308
+ r = r.append_column(col_name, col)
309
+ return r
310
+
311
+ @staticmethod
312
+ def builder() -> ArrowBlockBuilder:
313
+ return ArrowBlockBuilder()
314
+
315
+ @staticmethod
316
+ def _empty_table() -> "pyarrow.Table":
317
+ return ArrowBlockBuilder._empty_table()
318
+
319
+ def take(
320
+ self,
321
+ indices: Union[List[int], "pyarrow.Array", "pyarrow.ChunkedArray"],
322
+ ) -> "pyarrow.Table":
323
+ """Select rows from the underlying table.
324
+
325
+ This method is an alternative to pyarrow.Table.take(), which breaks for
326
+ extension arrays.
327
+ """
328
+ return transform_pyarrow.take_table(self._table, indices)
329
+
330
+ def select(self, columns: List[str]) -> "pyarrow.Table":
331
+ if not all(isinstance(col, str) for col in columns):
332
+ raise ValueError(
333
+ "Columns must be a list of column name strings when aggregating on "
334
+ f"Arrow blocks, but got: {columns}."
335
+ )
336
+ return self._table.select(columns)
337
+
338
+ def _sample(self, n_samples: int, sort_key: "SortKey") -> "pyarrow.Table":
339
+ indices = random.sample(range(self._table.num_rows), n_samples)
340
+ table = self._table.select(sort_key.get_columns())
341
+ return transform_pyarrow.take_table(table, indices)
342
+
343
+ def count(self, on: str) -> Optional[U]:
344
+ """Count the number of non-null values in the provided column."""
345
+ import pyarrow.compute as pac
346
+
347
+ if not isinstance(on, str):
348
+ raise ValueError(
349
+ "on must be a string when aggregating on Arrow blocks, but got:"
350
+ f"{type(on)}."
351
+ )
352
+
353
+ if self.num_rows() == 0:
354
+ return None
355
+
356
+ col = self._table[on]
357
+ return pac.count(col).as_py()
358
+
359
+ def _apply_arrow_compute(
360
+ self, compute_fn: Callable, on: str, ignore_nulls: bool
361
+ ) -> Optional[U]:
362
+ """Helper providing null handling around applying an aggregation to a column."""
363
+ import pyarrow as pa
364
+
365
+ if not isinstance(on, str):
366
+ raise ValueError(
367
+ "on must be a string when aggregating on Arrow blocks, but got:"
368
+ f"{type(on)}."
369
+ )
370
+
371
+ if self.num_rows() == 0:
372
+ return None
373
+
374
+ col = self._table[on]
375
+ if pa.types.is_null(col.type):
376
+ return None
377
+ else:
378
+ return compute_fn(col, skip_nulls=ignore_nulls).as_py()
379
+
380
+ def sum(self, on: str, ignore_nulls: bool) -> Optional[U]:
381
+ import pyarrow.compute as pac
382
+
383
+ return self._apply_arrow_compute(pac.sum, on, ignore_nulls)
384
+
385
+ def min(self, on: str, ignore_nulls: bool) -> Optional[U]:
386
+ import pyarrow.compute as pac
387
+
388
+ return self._apply_arrow_compute(pac.min, on, ignore_nulls)
389
+
390
+ def max(self, on: str, ignore_nulls: bool) -> Optional[U]:
391
+ import pyarrow.compute as pac
392
+
393
+ return self._apply_arrow_compute(pac.max, on, ignore_nulls)
394
+
395
+ def mean(self, on: str, ignore_nulls: bool) -> Optional[U]:
396
+ import pyarrow.compute as pac
397
+
398
+ return self._apply_arrow_compute(pac.mean, on, ignore_nulls)
399
+
400
+ def sum_of_squared_diffs_from_mean(
401
+ self,
402
+ on: str,
403
+ ignore_nulls: bool,
404
+ mean: Optional[U] = None,
405
+ ) -> Optional[U]:
406
+ import pyarrow.compute as pac
407
+
408
+ if mean is None:
409
+ # If precomputed mean not given, we compute it ourselves.
410
+ mean = self.mean(on, ignore_nulls)
411
+ if mean is None:
412
+ return None
413
+ return self._apply_arrow_compute(
414
+ lambda col, skip_nulls: pac.sum(
415
+ pac.power(pac.subtract(col, mean), 2),
416
+ skip_nulls=skip_nulls,
417
+ ),
418
+ on,
419
+ ignore_nulls,
420
+ )
421
+
422
+ def sort_and_partition(
423
+ self, boundaries: List[T], sort_key: "SortKey"
424
+ ) -> List["Block"]:
425
+ if self._table.num_rows == 0:
426
+ # If the pyarrow table is empty we may not have schema
427
+ # so calling sort_indices() will raise an error.
428
+ return [self._empty_table() for _ in range(len(boundaries) + 1)]
429
+
430
+ context = DataContext.get_current()
431
+ sort = get_sort_transform(context)
432
+
433
+ table = sort(self._table, sort_key)
434
+ if len(boundaries) == 0:
435
+ return [table]
436
+ return find_partitions(table, boundaries, sort_key)
437
+
438
    def combine(self, sort_key: "SortKey", aggs: Tuple["AggregateFn"]) -> Block:
        """Combine rows with the same key into an accumulator.

        This assumes the block is already sorted by key in ascending order.

        Args:
            sort_key: A column name or list of column names.
                If this is ``None``, place all rows in a single group.

            aggs: The aggregations to do.

        Returns:
            A sorted block of [k, v_1, ..., v_n] columns where k is the groupby
            key and v_i is the partially combined accumulator for the ith given
            aggregation.
            If key is None then the k column is omitted.
        """
        keys: List[str] = sort_key.get_columns()

        def iter_groups() -> Iterator[Tuple[Sequence[KeyType], Block]]:
            """Creates an iterator over zero-copy group views."""
            if not keys:
                # Global aggregation consists of a single "group", so we short-circuit.
                yield tuple(), self.to_block()
                return

            # Scan the pre-sorted rows, emitting a zero-copy slice for each
            # run of rows that shares the same key values. ``next_row`` acts
            # as a one-row lookahead; ``None`` means "fetch the next row".
            start = end = 0
            iter = self.iter_rows(public_row_format=False)
            next_row = None
            while True:
                try:
                    if next_row is None:
                        next_row = next(iter)
                    next_keys = next_row[keys]
                    # Advance ``end`` past every row with the same key values.
                    while next_row[keys] == next_keys:
                        end += 1
                        try:
                            next_row = next(iter)
                        except StopIteration:
                            # Exhausted: emit the final group below, then the
                            # outer try's next(iter) terminates the loop.
                            next_row = None
                            break
                    yield next_keys, self.slice(start, end)
                    start = end
                except StopIteration:
                    break

        builder = ArrowBlockBuilder()
        for group_keys, group_view in iter_groups():
            # Aggregate.
            init_vals = group_keys
            if len(group_keys) == 1:
                # A single-column key is passed to ``init`` unwrapped.
                init_vals = group_keys[0]

            accumulators = [agg.init(init_vals) for agg in aggs]
            for i in range(len(aggs)):
                accumulators[i] = aggs[i].accumulate_block(accumulators[i], group_view)

            # Build the row.
            row = {}
            if keys:
                for k, gk in zip(keys, group_keys):
                    row[k] = gk

            count = collections.defaultdict(int)
            for agg, accumulator in zip(aggs, accumulators):
                name = agg.name
                # Check for conflicts with existing aggregation name.
                if count[name] > 0:
                    name = self._munge_conflict(name, count[name])
                count[name] += 1
                row[name] = accumulator

            builder.add(row)

        return builder.build()
513
+
514
+ @staticmethod
515
+ def _munge_conflict(name, count):
516
+ return f"{name}_{count+1}"
517
+
518
+ @staticmethod
519
+ def merge_sorted_blocks(
520
+ blocks: List[Block], sort_key: "SortKey"
521
+ ) -> Tuple[Block, BlockMetadata]:
522
+ stats = BlockExecStats.builder()
523
+ blocks = [b for b in blocks if b.num_rows > 0]
524
+ if len(blocks) == 0:
525
+ ret = ArrowBlockAccessor._empty_table()
526
+ else:
527
+ # Handle blocks of different types.
528
+ blocks = TableBlockAccessor.normalize_block_types(blocks, "arrow")
529
+ concat_and_sort = get_concat_and_sort_transform(DataContext.get_current())
530
+ ret = concat_and_sort(blocks, sort_key)
531
+ return ret, ArrowBlockAccessor(ret).get_metadata(exec_stats=stats.build())
532
+
533
    @staticmethod
    def aggregate_combined_blocks(
        blocks: List[Block],
        sort_key: "SortKey",
        aggs: Tuple["AggregateFn"],
        finalize: bool,
    ) -> Tuple[Block, BlockMetadata]:
        """Aggregate sorted, partially combined blocks with the same key range.

        This assumes blocks are already sorted by key in ascending order,
        so we can do merge sort to get all the rows with the same key.

        Args:
            blocks: A list of partially combined and sorted blocks.
            sort_key: The column name of key or None for global aggregation.
            aggs: The aggregations to do.
            finalize: Whether to finalize the aggregation. This is used as an
                optimization for cases where we repeatedly combine partially
                aggregated groups.

        Returns:
            A block of [k, v_1, ..., v_n] columns and its metadata where k is
            the groupby key and v_i is the corresponding aggregation result for
            the ith given aggregation.
            If key is None then the k column is omitted.
        """

        stats = BlockExecStats.builder()
        keys = sort_key.get_columns()

        def key_fn(r):
            # Key tuple for a row; a constant tuple groups all rows together
            # when doing a global (keyless) aggregation.
            if keys:
                return tuple(r[keys])
            else:
                return (0,)

        # Replace Nones with NULL_SENTINEL to ensure safe sorting.
        def key_fn_with_null_sentinel(r):
            values = key_fn(r)
            return [NULL_SENTINEL if v is None else v for v in values]

        # Handle blocks of different types.
        blocks = TableBlockAccessor.normalize_block_types(blocks, "arrow")

        # Merge-sort the per-block row iterators into one key-ordered stream.
        iter = heapq.merge(
            *[
                ArrowBlockAccessor(block).iter_rows(public_row_format=False)
                for block in blocks
            ],
            key=key_fn_with_null_sentinel,
        )
        next_row = None
        builder = ArrowBlockBuilder()
        while True:
            try:
                if next_row is None:
                    next_row = next(iter)
                next_keys = key_fn(next_row)
                next_key_columns = keys

                def gen():
                    # Yields the run of rows sharing ``next_keys``, advancing
                    # the shared merged iterator via nonlocal lookahead state.
                    nonlocal iter
                    nonlocal next_row
                    while key_fn(next_row) == next_keys:
                        yield next_row
                        try:
                            next_row = next(iter)
                        except StopIteration:
                            next_row = None
                            break

                # Merge.
                first = True
                accumulators = [None] * len(aggs)
                resolved_agg_names = [None] * len(aggs)
                for r in gen():
                    if first:
                        # First row of the group: resolve (possibly munged)
                        # aggregation column names and seed the accumulators.
                        count = collections.defaultdict(int)
                        for i in range(len(aggs)):
                            name = aggs[i].name
                            # Check for conflicts with existing aggregation
                            # name.
                            if count[name] > 0:
                                name = ArrowBlockAccessor._munge_conflict(
                                    name, count[name]
                                )
                            count[name] += 1
                            resolved_agg_names[i] = name
                            accumulators[i] = r[name]
                        first = False
                    else:
                        for i in range(len(aggs)):
                            accumulators[i] = aggs[i].merge(
                                accumulators[i], r[resolved_agg_names[i]]
                            )
                # Build the row.
                row = {}
                if keys:
                    for col_name, next_key in zip(next_key_columns, next_keys):
                        row[col_name] = next_key

                for agg, agg_name, accumulator in zip(
                    aggs, resolved_agg_names, accumulators
                ):
                    if finalize:
                        row[agg_name] = agg.finalize(accumulator)
                    else:
                        row[agg_name] = accumulator

                builder.add(row)
            except StopIteration:
                break

        ret = builder.build()
        return ret, ArrowBlockAccessor(ret).get_metadata(exec_stats=stats.build())
648
+
649
    def block_type(self) -> BlockType:
        """Return the block type tag; this accessor always wraps Arrow data."""
        return BlockType.ARROW
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/batcher.py ADDED
@@ -0,0 +1,325 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from ray.data._internal.arrow_block import ArrowBlockAccessor
4
+ from ray.data._internal.arrow_ops import transform_pyarrow
5
+ from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder
6
+ from ray.data.block import Block, BlockAccessor
7
+
8
+ # pyarrow.Table.slice is slow when the table has many chunks
9
+ # so we combine chunks into a single one to make slice faster
10
+ # with the cost of an extra copy.
11
+ # See https://github.com/ray-project/ray/issues/31108 for more details.
12
+ # TODO(jjyao): remove this once
13
+ # https://github.com/apache/arrow/issues/35126 is resolved.
14
+ MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS = 10
15
+
16
+ # Delay compaction until the shuffle buffer has reached this ratio over the min
17
+ # shuffle buffer size. Setting this to 1 minimizes memory usage, at the cost of
18
+ # frequent compactions. Setting this to higher values increases memory usage but
19
+ # reduces compaction frequency.
20
+ SHUFFLE_BUFFER_COMPACTION_RATIO = 1.5
21
+
22
+
23
class BatcherInterface:
    """Abstract interface for chunking a stream of blocks into batches.

    Implementations (see ``Batcher`` and ``ShufflingBatcher`` below) buffer
    added blocks and yield batches via ``next_batch()``.
    """

    def add(self, block: Block):
        """Add a block to the block buffer.

        Args:
            block: Block to add to the block buffer.
        """
        raise NotImplementedError()

    def done_adding(self) -> None:
        """Indicate to the batcher that no more blocks will be added to the buffer."""
        raise NotImplementedError()

    def has_batch(self) -> bool:
        """Whether this Batcher has any full batches."""
        raise NotImplementedError()

    def has_any(self) -> bool:
        """Whether this Batcher has any data."""
        raise NotImplementedError()

    def next_batch(self) -> Block:
        """Get the next batch from the block buffer.

        Returns:
            A batch represented as a Block.
        """
        raise NotImplementedError()
51
+
52
+
53
class Batcher(BatcherInterface):
    """Chunks blocks into batches."""

    # Implementation Note: When there are multiple batches per block, this batcher will
    # slice off and return each batch and add the remaining block back to the buffer
    # instead of optimally slicing and returning all batches from the block at once.
    # This will result in extra (and nested) block slicing. However, since slices are
    # zero-copy views, we sacrifice what should be a small performance hit for better
    # readability.

    def __init__(self, batch_size: Optional[int], ensure_copy: bool = False):
        """
        Construct a batcher that yields batches of batch_sizes rows.

        Args:
            batch_size: The size of batches to yield.
            ensure_copy: Whether batches are always copied from the underlying base
                blocks (not zero-copy views).
        """
        self._batch_size = batch_size
        # Pending blocks, in arrival order.
        self._buffer = []
        # Total number of rows across all blocks in ``_buffer``.
        self._buffer_size = 0
        self._done_adding = False
        self._ensure_copy = ensure_copy

    def add(self, block: Block):
        """Add a block to the block buffer.

        Note empty block is not added to buffer.

        Args:
            block: Block to add to the block buffer.
        """
        if BlockAccessor.for_block(block).num_rows() > 0:
            self._buffer.append(block)
            self._buffer_size += BlockAccessor.for_block(block).num_rows()

    def done_adding(self) -> None:
        """Indicate to the batcher that no more blocks will be added to the batcher."""
        self._done_adding = True

    def has_batch(self) -> bool:
        """Whether this Batcher has any full batches."""
        return self.has_any() and (
            self._batch_size is None or self._buffer_size >= self._batch_size
        )

    def has_any(self) -> bool:
        """Whether this Batcher has any data."""
        return self._buffer_size > 0

    def next_batch(self) -> Block:
        """Get the next batch from the block buffer.

        Returns:
            A batch represented as a Block.
        """
        # Precondition: a full batch exists, or we are draining the remainder.
        assert self.has_batch() or (self._done_adding and self.has_any())
        needs_copy = self._ensure_copy
        # If no batch size, short-circuit.
        if self._batch_size is None:
            assert len(self._buffer) == 1
            block = self._buffer[0]
            if needs_copy:
                # Copy block if needing to ensure fresh batch copy.
                block = BlockAccessor.for_block(block)
                block = block.slice(0, block.num_rows(), copy=True)
            self._buffer = []
            self._buffer_size = 0
            return block
        output = DelegatingBlockBuilder()
        leftover = []
        needed = self._batch_size
        for block in self._buffer:
            accessor = BlockAccessor.for_block(block)
            if needed <= 0:
                # We already have a full batch, so add this block to
                # the leftovers.
                leftover.append(block)
            elif accessor.num_rows() <= needed:
                # The entire block fits in the remaining batch budget.
                output.add_block(accessor.to_block())
                needed -= accessor.num_rows()
            else:
                if (
                    isinstance(accessor, ArrowBlockAccessor)
                    and block.num_columns > 0
                    and block.column(0).num_chunks
                    >= MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS
                ):
                    # Combine fragmented Arrow chunks to make the upcoming
                    # slice calls fast (see module-level comment).
                    accessor = BlockAccessor.for_block(
                        transform_pyarrow.combine_chunks(block)
                    )
                # We only need part of the block to fill out a batch.
                output.add_block(accessor.slice(0, needed, copy=False))
                # Add the rest of the block to the leftovers.
                leftover.append(accessor.slice(needed, accessor.num_rows(), copy=False))
                needed = 0

        # Move the leftovers into the block buffer so they're the first
        # blocks consumed on the next batch extraction.
        self._buffer = leftover
        # NOTE(review): when draining a final partial batch (done_adding with
        # fewer rows than batch_size), this can drive _buffer_size negative;
        # has_any() still behaves correctly since it tests ``> 0``.
        self._buffer_size -= self._batch_size
        needs_copy = needs_copy and not output.will_build_yield_copy()
        batch = output.build()
        if needs_copy:
            # Need to ensure that the batch is a fresh copy.
            batch = BlockAccessor.for_block(batch)
            batch = batch.slice(0, batch.num_rows(), copy=True)
        return batch
162
+
163
+
164
class ShufflingBatcher(BatcherInterface):
    """Chunks blocks into shuffled batches, using a local in-memory shuffle buffer."""

    # Implementation Note:
    #
    # This shuffling batcher lazily builds a shuffle buffer from added blocks, and once
    # a batch is requested via .next_batch(), it concatenates the blocks into a concrete
    # shuffle buffer and randomly shuffles the entire buffer.
    #
    # Adding of more blocks can be intermixed with retrieving batches, but it should be
    # noted that we can end up performing two expensive operations on each retrieval:
    # 1. Build added blocks into a concrete shuffle buffer.
    # 2. Shuffling the entire buffer.
    # To amortize the overhead of this process, we only shuffle the blocks after a
    # delay designated by SHUFFLE_BUFFER_COMPACTION_RATIO.
    #
    # Similarly, adding blocks is very cheap. Each added block will be appended to a
    # list, with concatenation of the underlying data delayed until the next batch
    # compaction.

    def __init__(
        self,
        batch_size: Optional[int],
        shuffle_buffer_min_size: int,
        shuffle_seed: Optional[int] = None,
    ):
        """Constructs a random-shuffling block batcher.

        Args:
            batch_size: Record batch size.
            shuffle_buffer_min_size: Minimum number of rows that must be in the local
                in-memory shuffle buffer in order to yield a batch. When there are no
                more rows to be added to the buffer, the number of rows in the buffer
                *will* decrease below this value while yielding the remaining batches,
                and the final batch may have less than ``batch_size`` rows. Increasing
                this will improve the randomness of the shuffle but may increase the
                latency to the first batch.
            shuffle_seed: The seed to use for the local random shuffle.
        """
        if batch_size is None:
            raise ValueError("Must specify a batch_size if using a local shuffle.")
        self._batch_size = batch_size
        self._shuffle_seed = shuffle_seed
        if shuffle_buffer_min_size < batch_size:
            # Round it up internally to `batch_size` since our algorithm requires it.
            # This is harmless since it only offers extra randomization.
            shuffle_buffer_min_size = batch_size
        self._buffer_min_size = shuffle_buffer_min_size
        # Accumulates added (not-yet-compacted) blocks.
        self._builder = DelegatingBlockBuilder()
        # The compacted, shuffled buffer; ``None`` until first compaction.
        self._shuffle_buffer: Block = None
        # Read cursor into ``_shuffle_buffer``; rows before it were yielded.
        self._batch_head = 0
        self._done_adding = False

    def add(self, block: Block):
        """Add a block to the shuffle buffer.

        Note empty block is not added to buffer.

        Args:
            block: Block to add to the shuffle buffer.
        """
        if BlockAccessor.for_block(block).num_rows() > 0:
            self._builder.add_block(block)

    def done_adding(self) -> None:
        """Indicate to the batcher that no more blocks will be added to the batcher.

        No more blocks should be added to the batcher after calling this.
        """
        self._done_adding = True

    def has_any(self) -> bool:
        """Whether this batcher has any data."""
        return self._buffer_size() > 0

    def has_batch(self) -> bool:
        """Whether this batcher has any batches."""
        buffer_size = self._buffer_size()

        if not self._done_adding:
            # Delay pulling of batches until the buffer is large enough in order to
            # amortize compaction overhead.
            return self._materialized_buffer_size() >= self._buffer_min_size or (
                buffer_size - self._batch_size
                >= self._buffer_min_size * SHUFFLE_BUFFER_COMPACTION_RATIO
            )
        else:
            return buffer_size >= self._batch_size

    def _buffer_size(self) -> int:
        """Return shuffle buffer size."""
        # Pending (un-compacted) rows plus unyielded materialized rows.
        buffer_size = self._builder.num_rows()
        buffer_size += self._materialized_buffer_size()
        return buffer_size

    def _materialized_buffer_size(self) -> int:
        """Return materialized (compacted portion of) shuffle buffer size."""
        if self._shuffle_buffer is None:
            return 0
        # The size of the concrete (materialized) shuffle buffer, adjusting
        # for the batch head position, which also serves as a counter of the number
        # of already-yielded rows from the current concrete shuffle buffer.
        return max(
            0,
            BlockAccessor.for_block(self._shuffle_buffer).num_rows() - self._batch_head,
        )

    def next_batch(self) -> Block:
        """Get the next shuffled batch from the shuffle buffer.

        Returns:
            A batch represented as a Block.
        """
        assert self.has_batch() or (self._done_adding and self.has_any())
        # Add rows in the builder to the shuffle buffer. Note that we delay compaction
        # as much as possible to amortize the concatenation overhead. Compaction is
        # only necessary when the materialized buffer size falls below the min size.
        if self._builder.num_rows() > 0 and (
            self._done_adding
            or self._materialized_buffer_size() <= self._buffer_min_size
        ):
            if self._shuffle_buffer is not None:
                if self._batch_head > 0:
                    # Compact the materialized shuffle buffer.
                    block = BlockAccessor.for_block(self._shuffle_buffer)
                    self._shuffle_buffer = block.slice(
                        self._batch_head, block.num_rows()
                    )
                # Add the unyielded rows from the existing shuffle buffer.
                self._builder.add_block(self._shuffle_buffer)
            # Build the new shuffle buffer.
            self._shuffle_buffer = self._builder.build()
            self._shuffle_buffer = BlockAccessor.for_block(
                self._shuffle_buffer
            ).random_shuffle(self._shuffle_seed)
            # Advance the seed so consecutive compactions shuffle differently
            # while remaining deterministic overall.
            if self._shuffle_seed is not None:
                self._shuffle_seed += 1
            if (
                isinstance(
                    BlockAccessor.for_block(self._shuffle_buffer), ArrowBlockAccessor
                )
                and self._shuffle_buffer.num_columns > 0
                and self._shuffle_buffer.column(0).num_chunks
                >= MIN_NUM_CHUNKS_TO_TRIGGER_COMBINE_CHUNKS
            ):
                # Combine fragmented Arrow chunks so the per-batch slice calls
                # below stay fast (see module-level comment).
                self._shuffle_buffer = transform_pyarrow.combine_chunks(
                    self._shuffle_buffer
                )
            # Reset the builder.
            self._builder = DelegatingBlockBuilder()
            self._batch_head = 0

        assert self._shuffle_buffer is not None
        buffer_size = BlockAccessor.for_block(self._shuffle_buffer).num_rows()
        # Truncate the batch to the buffer size, if necessary.
        batch_size = min(self._batch_size, buffer_size)
        slice_start = self._batch_head
        self._batch_head += batch_size
        # Yield the shuffled batch.
        return BlockAccessor.for_block(self._shuffle_buffer).slice(
            slice_start, self._batch_head
        )
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/block_builder.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Generic
2
+
3
+ from ray.data.block import Block, BlockAccessor, BlockType, T
4
+
5
+
6
class BlockBuilder(Generic[T]):
    """A builder class for blocks.

    Abstract interface: concrete subclasses implement per-format builders
    (e.g. the Arrow builder used elsewhere in this package).
    """

    @staticmethod
    def for_block(block: Block) -> "BlockBuilder":
        """Return a builder matching the format of the given block."""
        return BlockAccessor.for_block(block).builder()

    def add(self, item: T) -> None:
        """Append a single row to the block being built."""
        raise NotImplementedError

    def add_block(self, block: Block) -> None:
        """Append an entire block to the block being built."""
        raise NotImplementedError

    def will_build_yield_copy(self) -> bool:
        """Whether building this block will yield a new block copy."""
        raise NotImplementedError

    def build(self) -> Block:
        """Build the block."""
        raise NotImplementedError

    def num_rows(self) -> int:
        """Return the number of rows added in the block."""
        raise NotImplementedError

    def get_estimated_memory_usage(self) -> int:
        """Return the estimated memory usage so far in bytes."""
        raise NotImplementedError

    def block_type(self) -> BlockType:
        """Return the block type."""
        raise NotImplementedError
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/compute.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ from typing import Any, Callable, Iterable, Optional, TypeVar, Union
3
+
4
+ from ray.data._internal.execution.interfaces import TaskContext
5
+ from ray.data.block import Block, UserDefinedFunction
6
+ from ray.util.annotations import DeveloperAPI
7
+
8
+ logger = logging.getLogger(__name__)
9
+
10
+ T = TypeVar("T")
11
+ U = TypeVar("U")
12
+
13
+
14
# Block transform function applied by task and actor pools.
# Accepted shapes: (blocks, ctx), (blocks, ctx, udf), or a fully variadic
# callable; all must yield an iterable of output blocks.
BlockTransform = Union[
    # TODO(Clark): Once Ray only supports Python 3.8+, use protocol to constrain block
    # transform type.
    # Callable[[Block, ...], Iterable[Block]]
    # Callable[[Block, UserDefinedFunction, ...], Iterable[Block]],
    Callable[[Iterable[Block], TaskContext], Iterable[Block]],
    Callable[[Iterable[Block], TaskContext, UserDefinedFunction], Iterable[Block]],
    Callable[..., Iterable[Block]],
]
24
+
25
+
26
@DeveloperAPI
class ComputeStrategy:
    """Marker base class for Dataset compute strategies (task or actor pools)."""

    pass
29
+
30
+
31
@DeveloperAPI
class TaskPoolStrategy(ComputeStrategy):
    def __init__(
        self,
        size: Optional[int] = None,
    ):
        """Construct TaskPoolStrategy for a Dataset transform.

        Args:
            size: Specify the maximum size of the task pool.
        """
        if size is not None and size < 1:
            raise ValueError("`size` must be >= 1", size)
        self.size = size

    def __eq__(self, other: Any) -> bool:
        """Equal to another TaskPoolStrategy with the same size, or to the
        legacy string spelling ``"tasks"`` when unsized."""
        if isinstance(other, TaskPoolStrategy):
            return self.size == other.size
        return other == "tasks" and self.size is None
51
+
52
+
53
class ActorPoolStrategy(ComputeStrategy):
    """Specify the compute strategy for a Dataset transform.

    ActorPoolStrategy specifies that an autoscaling pool of actors should be used
    for a given Dataset transform. This is useful for stateful setup of callable
    classes.

    For a fixed-sized pool of size ``n``, specify ``compute=ActorPoolStrategy(size=n)``.
    To autoscale from ``m`` to ``n`` actors, specify
    ``ActorPoolStrategy(min_size=m, max_size=n)``.

    To increase opportunities for pipelining task dependency prefetching with
    computation and avoiding actor startup delays, set max_tasks_in_flight_per_actor
    to 2 or greater; to try to decrease the delay due to queueing of tasks on the worker
    actors, set max_tasks_in_flight_per_actor to 1.
    """

    def __init__(
        self,
        *,
        size: Optional[int] = None,
        min_size: Optional[int] = None,
        max_size: Optional[int] = None,
        max_tasks_in_flight_per_actor: Optional[int] = None,
    ):
        """Construct ActorPoolStrategy for a Dataset transform.

        Args:
            size: Specify a fixed size actor pool of this size. It is an error to
                specify both `size` and `min_size` or `max_size`.
            min_size: The minimize size of the actor pool.
            max_size: The maximum size of the actor pool.
            max_tasks_in_flight_per_actor: The maximum number of tasks to concurrently
                send to a single actor worker. Increasing this will increase
                opportunities for pipelining task dependency prefetching with
                computation and avoiding actor startup delays, but will also increase
                queueing delay.

        Raises:
            ValueError: On an out-of-range or conflicting combination of
                arguments.
        """
        if size is not None:
            if size < 1:
                raise ValueError("size must be >= 1", size)
            if min_size is not None or max_size is not None:
                raise ValueError(
                    "min_size and max_size cannot be set at the same time as `size`"
                )
            # A fixed-size pool is expressed as min == max == size.
            min_size = max_size = size
        if min_size is not None and min_size < 1:
            raise ValueError("min_size must be >= 1", min_size)
        if max_size is not None:
            if min_size is None:
                min_size = 1  # Legacy default.
            if min_size > max_size:
                raise ValueError("min_size must be <= max_size", min_size, max_size)
        if (
            max_tasks_in_flight_per_actor is not None
            and max_tasks_in_flight_per_actor < 1
        ):
            raise ValueError(
                "max_tasks_in_flight_per_actor must be >= 1, got: ",
                max_tasks_in_flight_per_actor,
            )
        # Unbounded pools default to [1, inf).
        self.min_size = min_size or 1
        self.max_size = max_size or float("inf")
        self.max_tasks_in_flight_per_actor = max_tasks_in_flight_per_actor
        self.num_workers = 0
        self.ready_to_total_workers_ratio = 0.8

    def __eq__(self, other: Any) -> bool:
        """Equal iff the other is an ActorPoolStrategy with matching sizing
        and in-flight limits."""
        if not isinstance(other, ActorPoolStrategy):
            return False
        return (
            self.min_size == other.min_size
            and self.max_size == other.max_size
            and self.max_tasks_in_flight_per_actor
            == other.max_tasks_in_flight_per_actor
        )
128
+
129
+
130
def get_compute(compute_spec: Union[str, ComputeStrategy]) -> ComputeStrategy:
    """Validate and normalize a compute spec into a ``ComputeStrategy``.

    Since Ray 2.5, plain string specs (``"tasks"``/``"actors"``) are rejected
    up front; only ``TaskPoolStrategy``/``ActorPoolStrategy`` instances pass
    the first check.

    Args:
        compute_spec: The user-provided compute strategy.

    Returns:
        The normalized ``ComputeStrategy`` instance.

    Raises:
        ValueError: If ``compute_spec`` is not a supported strategy.
    """
    if not isinstance(compute_spec, (TaskPoolStrategy, ActorPoolStrategy)):
        raise ValueError(
            "In Ray 2.5, the compute spec must be either "
            f"TaskPoolStrategy or ActorPoolStrategy, was: {compute_spec}."
        )
    elif not compute_spec or compute_spec == "tasks":
        # NOTE: a default TaskPoolStrategy() compares equal to "tasks"
        # (see TaskPoolStrategy.__eq__), so this branch normalizes it to a
        # fresh TaskPoolStrategy instance.
        return TaskPoolStrategy()
    elif compute_spec == "actors":
        return ActorPoolStrategy()
    elif isinstance(compute_spec, ComputeStrategy):
        return compute_spec
    else:
        raise ValueError("compute must be one of [`tasks`, `actors`, ComputeStrategy]")
144
+
145
+
146
def is_task_compute(compute_spec: Union[str, ComputeStrategy]) -> bool:
    """Whether the given spec selects task-based (vs. actor-based) compute.

    Accepts the falsy default, the legacy string ``"tasks"``, or a
    ``TaskPoolStrategy`` instance.
    """
    if isinstance(compute_spec, TaskPoolStrategy):
        return True
    return not compute_spec or compute_spec == "tasks"
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/delegating_block_builder.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ from typing import Any, Mapping, Optional
3
+
4
+ from ray.data._internal.arrow_block import ArrowBlockBuilder
5
+ from ray.data._internal.block_builder import BlockBuilder
6
+ from ray.data.block import Block, BlockAccessor, BlockType, DataBatch
7
+
8
+
9
class DelegatingBlockBuilder(BlockBuilder):
    """Block builder that defers choosing a concrete builder until data arrives.

    The concrete builder is picked from the first non-empty item/block added;
    subsequent blocks must match that inferred block type.
    """

    def __init__(self):
        # Concrete per-format builder, created lazily on first add.
        self._builder = None
        # Holds a typed-but-empty block so build() can preserve its schema if
        # nothing else is ever added.
        self._empty_block = None

    @property
    def _inferred_block_type(self) -> Optional[BlockType]:
        """The block type inferred from the first item added to the builder."""
        if self._builder is not None:
            return self._builder.block_type()
        return None

    def add(self, item: Mapping[str, Any]) -> None:
        # Row-wise adds always go through an Arrow builder.
        assert isinstance(item, collections.abc.Mapping), item

        if self._builder is None:
            self._builder = ArrowBlockBuilder()

        self._builder.add(item)

    def add_batch(self, batch: DataBatch):
        """Add a user-facing data batch to the builder.

        This data batch will be converted to an internal block and then added to the
        underlying builder.
        """
        block = BlockAccessor.batch_to_block(batch, self._inferred_block_type)
        return self.add_block(block)

    def add_block(self, block: Block) -> None:
        accessor = BlockAccessor.for_block(block)
        if accessor.num_rows() == 0:
            # Don't infer types of empty lists. Store the block and use it if no
            # other data is added. https://github.com/ray-project/ray/issues/20290
            self._empty_block = block
            return
        if self._builder is None:
            # First non-empty block decides the concrete builder format.
            self._builder = accessor.builder()
        else:
            # Mixed block types are a caller bug; fail loudly.
            block_type = accessor.block_type()
            assert block_type == self._inferred_block_type, (
                block_type,
                self._inferred_block_type,
            )

        self._builder.add_block(accessor.to_block())

    def will_build_yield_copy(self) -> bool:
        if self._builder is None:
            return True
        return self._builder.will_build_yield_copy()

    def build(self) -> Block:
        if self._builder is None:
            if self._empty_block is not None:
                # Only empty blocks were added: rebuild via the empty block's
                # own builder to keep its schema.
                self._builder = BlockAccessor.for_block(self._empty_block).builder()
                self._builder.add_block(self._empty_block)
            else:
                self._builder = ArrowBlockBuilder()
        return self._builder.build()

    def num_rows(self) -> int:
        return self._builder.num_rows() if self._builder is not None else 0

    def get_estimated_memory_usage(self) -> int:
        if self._builder is None:
            return 0
        return self._builder.get_estimated_memory_usage()
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/equalize.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Tuple
2
+
3
+ from ray.data._internal.execution.interfaces import RefBundle
4
+ from ray.data._internal.split import _calculate_blocks_rows, _split_at_indices
5
+ from ray.data.block import Block, BlockMetadata, BlockPartition
6
+ from ray.types import ObjectRef
7
+
8
+
9
def _equalize(
    per_split_bundles: List[RefBundle],
    owned_by_consumer: bool,
) -> List[RefBundle]:
    """Equalize split ref bundles into equal number of rows.

    Args:
        per_split_bundles: ref bundles to equalize.
        owned_by_consumer: whether the produced bundles own their blocks.
    Returns:
        the equalized ref bundles.
    """
    if len(per_split_bundles) == 0:
        return per_split_bundles
    per_split_blocks_with_metadata = [bundle.blocks for bundle in per_split_bundles]
    per_split_num_rows: List[List[int]] = [
        _calculate_blocks_rows(split) for split in per_split_blocks_with_metadata
    ]
    # Each output split gets exactly total_rows // num_splits rows; any
    # remainder rows are dropped by the shave/split phases below.
    total_rows = sum([sum(blocks_rows) for blocks_rows in per_split_num_rows])
    target_split_size = total_rows // len(per_split_blocks_with_metadata)

    # phase 1: shave the current splits by dropping blocks (into leftovers)
    # and calculate num rows needed to the meet target.
    shaved_splits, per_split_needed_rows, leftovers = _shave_all_splits(
        per_split_blocks_with_metadata, per_split_num_rows, target_split_size
    )

    # validate invariants
    for shaved_split, split_needed_row in zip(shaved_splits, per_split_needed_rows):
        num_shaved_rows = sum([meta.num_rows for _, meta in shaved_split])
        assert num_shaved_rows <= target_split_size
        assert num_shaved_rows + split_needed_row == target_split_size

    # phase 2: based on the num rows needed for each shaved split, split the leftovers
    # in the shape that exactly matches the rows needed.
    leftover_bundle = RefBundle(leftovers, owns_blocks=owned_by_consumer)
    leftover_splits = _split_leftovers(leftover_bundle, per_split_needed_rows)

    # phase 3: merge the shaved_splits and leftoever splits and return.
    for i, leftover_split in enumerate(leftover_splits):
        shaved_splits[i].extend(leftover_split)

        # validate invariants.
        num_shaved_rows = sum([meta.num_rows for _, meta in shaved_splits[i]])
        assert num_shaved_rows == target_split_size

    # Compose the result back to RefBundle
    equalized_ref_bundles: List[RefBundle] = []
    for split in shaved_splits:
        equalized_ref_bundles.append(RefBundle(split, owns_blocks=owned_by_consumer))
    return equalized_ref_bundles
59
+
60
+
61
+ def _shave_one_split(
62
+ split: BlockPartition, num_rows_per_block: List[int], target_size: int
63
+ ) -> Tuple[BlockPartition, int, BlockPartition]:
64
+ """Shave a block list to the target size.
65
+
66
+ Args:
67
+ split: the block list to shave.
68
+ num_rows_per_block: num rows for each block in the list.
69
+ target_size: the upper bound target size of the shaved list.
70
+ Returns:
71
+ A tuple of:
72
+ - shaved block list.
73
+ - num of rows needed for the block list to meet the target size.
74
+ - leftover blocks.
75
+
76
+ """
77
+ # iterates through the blocks from the input list and
78
+ shaved = []
79
+ leftovers = []
80
+ shaved_rows = 0
81
+ for block_with_meta, block_rows in zip(split, num_rows_per_block):
82
+ if block_rows + shaved_rows <= target_size:
83
+ shaved.append(block_with_meta)
84
+ shaved_rows += block_rows
85
+ else:
86
+ leftovers.append(block_with_meta)
87
+ num_rows_needed = target_size - shaved_rows
88
+ return shaved, num_rows_needed, leftovers
89
+
90
+
91
def _shave_all_splits(
    input_splits: List[BlockPartition],
    per_split_num_rows: List[List[int]],
    target_size: int,
) -> Tuple[List[BlockPartition], List[int], BlockPartition]:
    """Shave every block list down to at most ``target_size`` rows.

    Args:
        input_splits: all block lists to shave.
        per_split_num_rows: num rows (per block) for each block list.
        target_size: the upper bound target size of the shaved lists.

    Returns:
        A tuple of:
            - all shaved block lists.
            - per-split num of rows still needed to meet the target size.
            - leftover blocks pooled across all splits.
    """
    shaved_splits: List[BlockPartition] = []
    per_split_needed_rows: List[int] = []
    pooled_leftovers: List = []

    for one_split, one_split_rows in zip(input_splits, per_split_num_rows):
        kept, rows_needed, spilled = _shave_one_split(
            one_split, one_split_rows, target_size
        )
        shaved_splits.append(kept)
        per_split_needed_rows.append(rows_needed)
        # Leftovers from every split are pooled for later redistribution.
        pooled_leftovers.extend(spilled)

    return shaved_splits, per_split_needed_rows, pooled_leftovers
121
+
122
+
123
def _split_leftovers(
    leftovers: RefBundle, per_split_needed_rows: List[int]
) -> List[BlockPartition]:
    """Split leftover blocks into partitions with exactly the needed row counts.

    Args:
        leftovers: the pooled leftover blocks.
        per_split_needed_rows: rows each output partition must receive.

    Returns:
        One (block_ref, metadata) list per needed split.
    """
    num_splits = len(per_split_needed_rows)
    # Convert per-split row needs into cumulative split indices.
    split_indices: List[int] = []
    cumulative = 0
    for rows_needed in per_split_needed_rows:
        cumulative += rows_needed
        split_indices.append(cumulative)
    blocks_per_split, metas_per_split = _split_at_indices(
        leftovers.blocks,
        split_indices,
        leftovers.owns_blocks,
    )
    # Pair each split's block refs back up with their metadata; drop the
    # trailing remainder partition beyond ``num_splits``.
    paired = [
        list(zip(block_refs, metas))
        for block_refs, metas in zip(blocks_per_split, metas_per_split)
    ]
    return paired[:num_splits]
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/logging.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import logging.config
3
+ import os
4
+ from typing import Optional
5
+
6
+ import yaml
7
+
8
+ import ray
9
+
10
# Default logging.config.dictConfig schema for Ray Data. The file handlers
# capture everything the loggers emit (DEBUG and up) into the per-session log
# file, while the console handler only surfaces INFO+ records that aren't
# marked hidden (see HiddenRecordFilter).
DEFAULT_CONFIG = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "ray": {
            "format": "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"  # noqa: E501
        },
        "ray_json": {"class": "ray._private.ray_logging.formatters.JSONFormatter"},
    },
    "filters": {
        "console_filter": {"()": "ray.data._internal.logging.HiddenRecordFilter"},
        "core_context_filter": {
            "()": "ray._private.ray_logging.filters.CoreContextFilter"
        },
    },
    "handlers": {
        "file": {
            "class": "ray.data._internal.logging.SessionFileHandler",
            "formatter": "ray",
            "filename": "ray-data.log",
        },
        "file_json": {
            "class": "ray.data._internal.logging.SessionFileHandler",
            "formatter": "ray_json",
            "filename": "ray-data.log",
            "filters": ["core_context_filter"],
        },
        "console": {
            "class": "ray._private.log.PlainRayHandler",
            "formatter": "ray",
            "level": "INFO",
            "filters": ["console_filter"],
        },
    },
    "loggers": {
        "ray.data": {
            "level": "DEBUG",
            "handlers": ["file", "console"],
            "propagate": False,
        },
        "ray.air.util.tensor_extensions": {
            "level": "DEBUG",
            "handlers": ["file", "console"],
            "propagate": False,
        },
    },
}

# Dictionary of substitutions to be performed when using JSON mode. Handlers with names
# corresponding to keys will be replaced by those corresponding to values.
RAY_DATA_LOG_HANDLER_JSON_SUBSTITUTIONS = {"file": "file_json"}

# Env. variable to specify the encoding of the file logs when using the default config.
RAY_DATA_LOG_ENCODING_ENV_VAR_NAME = "RAY_DATA_LOG_ENCODING"

# Env. variable to specify the logging config path; the defaults above are
# used if this is not set.
RAY_DATA_LOGGING_CONFIG_ENV_VAR_NAME = "RAY_DATA_LOGGING_CONFIG"

# To facilitate debugging, Ray Data writes debug logs to a file. However, if Ray Data
# logs every scheduler loop, logging might impact performance. So, we add a "TRACE"
# level where logs aren't written by default.
#
# Use the following code to log a message at the "TRACE" level:
# ```
# logger.log(logging.getLevelName("TRACE"), "Your message here.")
# ```
logging.addLevelName(logging.DEBUG - 1, "TRACE")
77
+
78
+
79
class HiddenRecordFilter:
    """Drops log records that carry a truthy "hide" attribute.

    This lets callers override the default logging behavior per-record. For
    example, if errors are printed by default and a specific error shouldn't
    be, emitting it with the "hide" attribute keeps it off the console.

    .. testcode::

        import logging
        logger = logging.getLogger("ray.data.spam")

        # This warning won't be printed to the console.
        logger.warning("ham", extra={"hide": True})
    """

    def filter(self, record):
        # Let the record through unless it was explicitly marked hidden.
        should_hide = getattr(record, "hide", False)
        return not should_hide
97
+
98
+
99
class SessionFileHandler(logging.Handler):
    """A handler that writes to a log file in the Ray session directory.

    Since the Ray session directory only exists once Ray has been initialized,
    the underlying ``FileHandler`` is created lazily on the first emitted
    record instead of at construction time.

    Args:
        filename: The name of the log file. The file is created in the 'logs'
            directory of the Ray session directory.
    """

    def __init__(self, filename: str):
        super().__init__()
        self._filename = filename
        self._handler = None
        self._formatter = None
        self._path = None

    def emit(self, record):
        # Lazily materialize the file handler on first use; if the session
        # directory still isn't available, the record is dropped.
        if self._handler is None:
            self._try_create_handler()
        if self._handler is not None:
            self._handler.emit(record)

    def setFormatter(self, fmt: logging.Formatter) -> None:
        # Remember the formatter so it can be applied when the lazy handler
        # is eventually created.
        if self._handler is not None:
            self._handler.setFormatter(fmt)
        self._formatter = fmt

    def _try_create_handler(self):
        assert self._handler is None

        log_directory = get_log_directory()
        if log_directory is None:
            # Ray isn't initialized yet; try again on the next emit.
            return

        os.makedirs(log_directory, exist_ok=True)

        self._path = os.path.join(log_directory, self._filename)
        self._handler = logging.FileHandler(self._path)
        if self._formatter is not None:
            self._handler.setFormatter(self._formatter)
141
+
142
+
143
def configure_logging() -> None:
    """Configure the Python logger named 'ray.data'.

    The logging config is loaded from the YAML file named by the
    "RAY_DATA_LOGGING_CONFIG" environment variable. When that variable is
    unset, the module-level default config is used instead.

    When using the default config, setting "RAY_DATA_LOG_ENCODING" to "JSON"
    switches the file handlers to JSON-formatted output.
    """

    def _read_yaml_config(path: str):
        with open(path) as f:
            return yaml.safe_load(f)

    # Env vars are read at call time, not import time.
    config_path = os.environ.get(RAY_DATA_LOGGING_CONFIG_ENV_VAR_NAME)
    log_encoding = os.environ.get(RAY_DATA_LOG_ENCODING_ENV_VAR_NAME)

    if config_path is None:
        config = DEFAULT_CONFIG
        if log_encoding is not None and log_encoding.upper() == "JSON":
            # Swap each plain-text file handler for its JSON counterpart.
            for logger_config in config["loggers"].values():
                for (
                    plain_name,
                    json_name,
                ) in RAY_DATA_LOG_HANDLER_JSON_SUBSTITUTIONS.items():
                    logger_config["handlers"].remove(plain_name)
                    logger_config["handlers"].append(json_name)
    else:
        config = _read_yaml_config(config_path)

    logging.config.dictConfig(config)

    # After configuring the logger, warn if RAY_DATA_LOGGING_CONFIG is used
    # together with RAY_DATA_LOG_ENCODING, because the combination is not
    # supported (the encoding switch only applies to the default config).
    if config_path is not None and log_encoding is not None:
        logging.getLogger(__name__).warning(
            "Using `RAY_DATA_LOG_ENCODING` is not supported with "
            "`RAY_DATA_LOGGING_CONFIG`"
        )
186
+
187
+
188
def reset_logging() -> None:
    """Reset the logger named 'ray.data' to its initial state.

    Used for testing.
    """
    data_logger = logging.getLogger("ray.data")
    del data_logger.handlers[:]
    data_logger.setLevel(logging.NOTSET)
196
+
197
+
198
def get_log_directory() -> Optional[str]:
    """Return the directory where Ray Data writes log files.

    If Ray isn't initialized (no global node), this function returns ``None``.
    """
    node = ray._private.worker._global_node
    if node is None:
        return None
    # Logs live in a "ray-data" subdirectory of the session's logs dir.
    return os.path.join(node.get_session_dir_path(), "logs", "ray-data")
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/memory_tracing.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utility for debugging object store memory eager deletion in Datasets.
2
+
3
+ NOTE: the performance overhead of tracing object allocation is fairly substantial.
4
+ This is meant to use in unit test for debugging. Please do not enable in production,
5
+ without performance optimization.
6
+
7
+ Enable with RAY_DATA_TRACE_ALLOCATIONS=1.
8
+
9
+ Basic usage is to call `trace_allocation` each time a new object is created, and call
10
+ `trace_deallocation` when an object should be disposed of. When the workload is
11
+ complete, call `leak_report` to view possibly leaked objects.
12
+
13
+ Note that so called "leaked" objects will be reclaimed eventually by reference counting
14
+ in Ray. This is just to debug the eager deletion protocol which is more efficient.
15
+ """
16
+
17
+ from io import StringIO
18
+ from typing import Dict, List
19
+
20
+ import ray
21
+ from ray.data.context import DataContext
22
+
23
+
24
def trace_allocation(ref: ray.ObjectRef, loc: str) -> None:
    """Record that an object has been created.

    No-op unless the current DataContext has allocation tracing enabled.

    Args:
        ref: The object created.
        loc: A human-readable string identifying the call site.
    """
    if not DataContext.get_current().trace_allocations:
        return
    # TODO: it would be nice to determine loc automatically based on the stack.
    tracer = _get_mem_actor()
    ray.get(tracer.trace_alloc.remote([ref], loc))
36
+
37
+
38
def trace_deallocation(ref: ray.ObjectRef, loc: str, free: bool = True) -> None:
    """Record that an object has been deleted (and delete if free=True).

    Args:
        ref: The object we no longer need.
        loc: A human-readable string identifying the call site.
        free: Whether to eagerly destroy the object instead of waiting for Ray
            reference counting to kick in.
    """
    if free:
        # Eagerly release the object store memory for this ref.
        ray._private.internal_api.free(ref, local_only=False)
    if DataContext.get_current().trace_allocations:
        mem_actor = _get_mem_actor()
        ray.get(mem_actor.trace_dealloc.remote([ref], loc, free))
53
+
54
+
55
def leak_report() -> str:
    """Return a human-readable report of traced allocations/deallocations."""
    tracer = _get_mem_actor()
    return ray.get(tracer.leak_report.remote())
58
+
59
+
60
@ray.remote(num_cpus=0)
class _MemActor:
    """Singleton actor that records object allocation/deallocation events.

    Tracks which object refs are considered live, which have been freed, and
    which were deliberately left to Ray's reference counting, along with the
    current and peak traced byte totals.
    """

    def __init__(self):
        # Live refs: ref -> {"size_bytes": int, "loc": str}.
        self.allocated: Dict[ray.ObjectRef, dict] = {}
        # Freed refs: ref -> the allocation entry plus "dealloc_loc".
        self.deallocated: Dict[ray.ObjectRef, dict] = {}
        # Refs whose dealloc was traced with free=False: ref -> call site.
        self.skip_dealloc: Dict[ray.ObjectRef, str] = {}
        self.peak_mem = 0
        self.cur_mem = 0

    def trace_alloc(self, ref: List[ray.ObjectRef], loc: str):
        ref = ref[0]  # Avoid Ray materializing the ref.
        if ref not in self.allocated:
            meta = ray.experimental.get_object_locations([ref])
            # NOTE(review): get_object_locations returns a dict keyed by ref,
            # so this top-level .get("object_size") looks like it always
            # returns 0 and falls through to the pickle path below — confirm.
            size_bytes = meta.get("object_size", 0)
            if not size_bytes:
                size_bytes = -1
                from ray import cloudpickle as pickle

                try:
                    # Fall back to measuring the pickled payload size.
                    obj = ray.get(ref, timeout=5.0)
                    size_bytes = len(pickle.dumps(obj))
                except Exception:
                    print("[mem_tracing] ERROR getting size")
                    size_bytes = -1
            print(f"[mem_tracing] Allocated {size_bytes} bytes at {loc}: {ref}")
            entry = {
                "size_bytes": size_bytes,
                "loc": loc,
            }
            self.allocated[ref] = entry
            self.cur_mem += size_bytes
            self.peak_mem = max(self.cur_mem, self.peak_mem)

    def trace_dealloc(self, ref: List[ray.ObjectRef], loc: str, freed: bool):
        ref = ref[0]  # Avoid Ray materializing the ref.
        size_bytes = self.allocated.get(ref, {}).get("size_bytes", 0)
        if freed:
            print(f"[mem_tracing] Freed {size_bytes} bytes at {loc}: {ref}")
            if ref in self.allocated:
                # Move the entry from the live set to the freed set.
                self.cur_mem -= size_bytes
                self.deallocated[ref] = self.allocated.pop(ref)
                self.deallocated[ref]["dealloc_loc"] = loc
            elif ref in self.deallocated:
                # This object reference is already deallocated.
                pass
            else:
                print(f"[mem_tracing] WARNING: allocation of {ref} was not traced!")
        else:
            print(f"[mem_tracing] Skipped freeing {size_bytes} bytes at {loc}: {ref}")
            self.skip_dealloc[ref] = loc

    def leak_report(self) -> str:
        # Build the report in memory; refs still in `allocated` are the
        # possible leaks (eager deletion never ran for them).
        output = StringIO()
        output.write("[mem_tracing] ===== Leaked objects =====\n")
        for ref in self.allocated:
            size_bytes = self.allocated[ref].get("size_bytes")
            loc = self.allocated[ref].get("loc")
            if ref in self.skip_dealloc:
                dealloc_loc = self.skip_dealloc[ref]
                output.write(
                    f"[mem_tracing] Leaked object, created at {loc}, size "
                    f"{size_bytes}, skipped dealloc at {dealloc_loc}: {ref}\n"
                )
            else:
                output.write(
                    f"[mem_tracing] Leaked object, created at {loc}, "
                    f"size {size_bytes}: {ref}\n"
                )
        output.write("[mem_tracing] ===== End leaked objects =====\n")
        output.write("[mem_tracing] ===== Freed objects =====\n")
        for ref in self.deallocated:
            size_bytes = self.deallocated[ref].get("size_bytes")
            loc = self.deallocated[ref].get("loc")
            dealloc_loc = self.deallocated[ref].get("dealloc_loc")
            output.write(
                f"[mem_tracing] Freed object from {loc} at {dealloc_loc}, "
                f"size {size_bytes}: {ref}\n"
            )
        output.write("[mem_tracing] ===== End freed objects =====\n")
        output.write(f"[mem_tracing] Peak size bytes {self.peak_mem}\n")
        output.write(f"[mem_tracing] Current size bytes {self.cur_mem}\n")
        return output.getvalue()
142
+
143
+
144
def _get_mem_actor():
    """Get or create the singleton, detached memory-tracing actor."""
    # get_if_exists + a fixed name makes this a process-wide singleton;
    # lifetime="detached" keeps it alive across driver exits.
    return _MemActor.options(
        name="mem_tracing_actor", get_if_exists=True, lifetime="detached"
    ).remote()
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/null_aggregate.py ADDED
@@ -0,0 +1,276 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from types import ModuleType
2
+ from typing import Any, Callable, Tuple, Union
3
+
4
+ import numpy as np
5
+
6
+ from ray.data.block import AggType, Block, KeyType, T, U
7
+
8
+ WrappedAggType = Tuple[AggType, int]
9
+
10
+
11
+ # This module contains aggregation helpers for handling nulls.
12
+ # The null handling policy is:
13
+ # 1. Mix of values and nulls - ignore_nulls=True: Ignore the nulls, return
14
+ # aggregation of non-null values.
15
+ # 2. Mix of values and nulls - ignore_nulls=False: Return None.
16
+ # 3. All nulls: Return None.
17
+ # 4. Empty dataset: Return None.
18
+ #
19
+ # This is accomplished by checking rows for null values and by propagating nulls
20
+ # if found AND if we're not ignoring them. If not ignoring nulls, in order to delineate
21
+ # between found null rows and an empty block accumulation when merging (the latter of
22
+ # which we want to propagate; the former of which we do not), we attach a boolean flag
23
+ # indicating whether or not an accumulation contains valid data to intermediate block
24
+ # accumulations via _wrap_acc() and _unwrap_acc(). This allows us to properly merge
25
+ # intermediate block accumulations under a streaming constraint.
26
+
27
+
28
def _wrap_acc(a: AggType, has_data: bool) -> WrappedAggType:
    """
    Append a numeric has-data flag to an accumulation.

    The trailing flag marks whether the accumulation represents real data (1)
    or is empty (0). Scalar accumulations are first promoted to
    single-element lists; an input of length n yields an output of length
    n + 1.

    Args:
        a: The accumulation value.
        has_data: Whether the accumulation contains real data.

    Returns:
        The wrapped accumulation list.
    """
    values = a if isinstance(a, list) else [a]
    flag = 1 if has_data else 0
    return values + [flag]
46
+
47
+
48
def _unwrap_acc(a: WrappedAggType) -> Tuple[AggType, bool]:
    """
    Strip the has-data flag (added by _wrap_acc) off an accumulation.

    Args:
        a: The wrapped accumulation value that we wish to unwrap.

    Returns:
        A tuple of (unwrapped accumulation, whether it holds real data).
        Single-element accumulations are collapsed back to a scalar.
    """
    *values, flag = a
    has_data = flag == 1
    if len(values) == 1:
        return values[0], has_data
    return values, has_data
65
+
66
+
67
def _null_wrap_init(
    init: Callable[[KeyType], AggType]
) -> Callable[[KeyType], WrappedAggType]:
    """
    Wrap an accumulation initializer with null handling.

    The wrapped initializer tags the fresh accumulation with a has_data flag
    set to False, marking it as empty until real values are accumulated.

    Args:
        init: The core init function to wrap.

    Returns:
        A new accumulation initializer function that can handle nulls.
    """

    def _init(k: KeyType) -> AggType:
        # A brand-new accumulation holds no real data yet.
        return _wrap_acc(init(k), has_data=False)

    return _init
90
+
91
+
92
def _null_wrap_merge(
    ignore_nulls: bool,
    merge: Callable[[AggType, AggType], AggType],
) -> Callable[[WrappedAggType, WrappedAggType], WrappedAggType]:
    """
    Wrap merge function with null handling.

    The returned merge function expects a1 and a2 to be either None or of the form:
    a = [acc_data_1, ..., acc_data_n, has_data].

    This merges two accumulations subject to the following null rules:
    1. If a1 is empty and a2 is empty, return empty accumulation.
    2. If a1 (a2) is empty and a2 (a1) is None, return None.
    3. If a1 (a2) is empty and a2 (a1) is non-None, return a2 (a1).
    4. If a1 (a2) is None, return a2 (a1) if ignoring nulls, None otherwise.
    5. If a1 and a2 are both non-null, return merge(a1, a2).

    Args:
        ignore_nulls: Whether nulls should be ignored or cause a None result.
        merge: The core merge function to wrap.

    Returns:
        A new merge function that handles nulls.
    """

    def _merge(a1: WrappedAggType, a2: WrappedAggType) -> WrappedAggType:
        if a1 is None:
            # If we're ignoring nulls, propagate a2; otherwise, propagate None.
            return a2 if ignore_nulls else None
        unwrapped_a1, a1_has_data = _unwrap_acc(a1)
        if not a1_has_data:
            # If a1 is empty, propagate a2.
            # No matter whether a2 is a real value, empty, or None,
            # propagating each of these is correct if a1 is empty.
            return a2
        if a2 is None:
            # If we're ignoring nulls, propagate a1; otherwise, propagate None.
            return a1 if ignore_nulls else None
        unwrapped_a2, a2_has_data = _unwrap_acc(a2)
        if not a2_has_data:
            # If a2 is empty, propagate a1.
            return a1
        # Both sides hold real data: delegate to the core merge and re-flag.
        a = merge(unwrapped_a1, unwrapped_a2)
        return _wrap_acc(a, has_data=True)

    return _merge
138
+
139
+
140
def _null_wrap_accumulate_row(
    ignore_nulls: bool,
    on_fn: Callable[[T], T],
    accum: Callable[[AggType, T], AggType],
) -> Callable[[WrappedAggType, T], WrappedAggType]:
    """
    Wrap accumulator function with null handling.

    The returned accumulate function expects a to be either None or of the form:
    a = [acc_data_1, ..., acc_data_n, has_data].

    This performs an accumulation subject to the following null rules:
    1. If r is null and ignore_nulls=False, return None.
    2. If r is null and ignore_nulls=True, return a.
    3. If r is non-null and a is None, return None.
    4. If r is non-null and a is non-None, return accum(a[:-1], r).

    Args:
        ignore_nulls: Whether nulls should be ignored or cause a None result.
        on_fn: Function selecting a subset of the row to apply the aggregation.
        accum: The core accumulator function to wrap.

    Returns:
        A new accumulator function that handles nulls.
    """

    def _accum(a: WrappedAggType, r: T) -> WrappedAggType:
        r = on_fn(r)
        if _is_null(r):
            if ignore_nulls:
                # Ignoring nulls, return the current accumulation, ignoring r.
                return a
            else:
                # Not ignoring nulls, so propagate the null.
                return None
        else:
            if a is None:
                # Accumulation is None so (1) a previous row must have been null, and
                # (2) we must be propagating nulls, so continue to propagate this
                # null.
                return None
            else:
                # Row is non-null and accumulation is non-null, so we now apply the
                # core accumulation.
                a, _ = _unwrap_acc(a)
                a = accum(a, r)
                return _wrap_acc(a, has_data=True)

    return _accum
188
+
189
+
190
def _null_wrap_accumulate_block(
    ignore_nulls: bool,
    accum_block: Callable[[AggType, Block], AggType],
    null_merge: Callable[[WrappedAggType, WrappedAggType], WrappedAggType],
) -> Callable[[WrappedAggType, Block], WrappedAggType]:
    """
    Wrap a vectorized aggregate function with null handling.

    This performs a block accumulation subject to the following null rules:
    1. If any row is null and ignore_nulls=False, return None.
    2. If at least one row is not null and ignore_nulls=True, return the block
       accumulation.
    3. If all rows are null and ignore_nulls=True, return the base accumulation.
    4. If all rows are non-null, return the block accumulation.

    Args:
        ignore_nulls: Whether nulls should be ignored or cause a None result.
        accum_block: The core vectorized aggregate function to wrap.
        null_merge: A null-handling merge, as returned from _null_wrap_merge().

    Returns:
        A new vectorized aggregate function that handles nulls.
    """

    def _accum_block_null(a: WrappedAggType, block: Block) -> WrappedAggType:
        block_acc = accum_block(block)
        if block_acc is not None:
            merge_rhs = _wrap_acc(block_acc, has_data=True)
        elif ignore_nulls:
            # The whole block aggregated to null; when ignoring nulls, treat
            # the block as if it were empty by reusing the base accumulation.
            merge_rhs = a
        else:
            # Propagate the null to poison the overall aggregation.
            merge_rhs = None
        return null_merge(a, merge_rhs)

    return _accum_block_null
225
+
226
+
227
def _null_wrap_finalize(
    finalize: Callable[[AggType], AggType]
) -> Callable[[WrappedAggType], U]:
    """
    Wrap a finalizer with null handling.

    The wrapped finalizer maps both None and empty accumulations to None;
    anything else is unwrapped and passed to the core finalizer.

    Args:
        finalize: The core finalizing function to wrap.

    Returns:
        A new finalizing function that handles nulls.
    """

    def _finalize(a: AggType) -> U:
        if a is None:
            return None
        unwrapped, has_data = _unwrap_acc(a)
        return finalize(unwrapped) if has_data else None

    return _finalize
251
+
252
+
253
+ LazyModule = Union[None, bool, ModuleType]
254
+ _pandas: LazyModule = None
255
+
256
+
257
+ def _lazy_import_pandas() -> LazyModule:
258
+ global _pandas
259
+ if _pandas is None:
260
+ try:
261
+ import pandas as _pandas
262
+ except ModuleNotFoundError:
263
+ # If module is not found, set _pandas to False so we won't
264
+ # keep trying to import it on every _lazy_import_pandas() call.
265
+ _pandas = False
266
+ return _pandas
267
+
268
+
269
def _is_null(r: Any):
    """Return whether ``r`` is a null value.

    Uses ``pandas.isnull`` when pandas is importable; otherwise falls back to
    ``np.isnan`` for numeric values and a plain ``None`` check for
    non-numeric ones.
    """
    pd = _lazy_import_pandas()
    if pd:
        return pd.isnull(r)
    try:
        return np.isnan(r)
    except TypeError:
        # np.isnan raises TypeError for non-numeric inputs.
        return r is None
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/output_buffer.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any
2
+
3
+ from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder
4
+ from ray.data.block import Block, BlockAccessor, DataBatch
5
+ from ray.data.context import MAX_SAFE_BLOCK_SIZE_FACTOR
6
+
7
+
8
class BlockOutputBuffer:
    """Generates output blocks of a given size given a stream of inputs.

    This class is used to turn a stream of items / blocks of arbitrary size
    into a stream of blocks of ``target_max_block_size``. The caller should
    check ``has_next()`` after each ``add()`` call, and call ``next()`` to get
    the next block when ``has_next()`` returns True.

    When all items have been added, the caller must call ``finalize()`` and
    then check ``has_next()`` one last time.

    Examples:
        >>> from ray.data._internal.output_buffer import BlockOutputBuffer
        >>> generator = ... # doctest: +SKIP
        >>> # Yield a stream of output blocks.
        >>> output = BlockOutputBuffer(500 * 1024 * 1024) # doctest: +SKIP
        >>> for item in generator(): # doctest: +SKIP
        ...     output.add(item) # doctest: +SKIP
        ...     if output.has_next(): # doctest: +SKIP
        ...         yield output.next() # doctest: +SKIP
        >>> output.finalize() # doctest: +SKIP
        >>> if output.has_next(): # doctest: +SKIP
        ...     yield output.next() # doctest: +SKIP
    """

    def __init__(self, target_max_block_size: int):
        # Soft cap (in bytes) on the size of each produced block.
        self._target_max_block_size = target_max_block_size
        self._buffer = DelegatingBlockBuilder()
        self._returned_at_least_one_block = False
        self._finalized = False

    def add(self, item: Any) -> None:
        """Add a single item to this output buffer."""
        assert not self._finalized
        self._buffer.add(item)

    def add_batch(self, batch: DataBatch) -> None:
        """Add a data batch to this output buffer."""
        assert not self._finalized
        self._buffer.add_batch(batch)

    def add_block(self, block: Block) -> None:
        """Add a data block to this output buffer."""
        assert not self._finalized
        self._buffer.add_block(block)

    def finalize(self) -> None:
        """Must be called once all items have been added."""
        assert not self._finalized
        self._finalized = True

    def has_next(self) -> bool:
        """Returns true when a complete output block is produced."""
        if self._finalized:
            # After finalization, flush whatever remains; at least one block
            # is always emitted, even if the buffer is empty.
            return not self._returned_at_least_one_block or self._buffer.num_rows() > 0
        else:
            return (
                self._buffer.get_estimated_memory_usage() > self._target_max_block_size
            )

    def next(self) -> Block:
        """Returns the next complete output block."""
        assert self.has_next()

        block_to_yield = self._buffer.build()
        block_remainder = None
        block = BlockAccessor.for_block(block_to_yield)
        if (
            block.size_bytes()
            >= MAX_SAFE_BLOCK_SIZE_FACTOR * self._target_max_block_size
        ):
            # Slice a block to respect the target max block size. We only do
            # this if we are more than 50% above the target block size, because
            # this ensures that the last block produced will be at least half
            # the block size.
            num_bytes_per_row = block.size_bytes() // block.num_rows()
            target_num_rows = max(1, self._target_max_block_size // num_bytes_per_row)

            if target_num_rows < block.num_rows():
                # NOTE: We're maintaining following protocol of slicing underlying block
                # into appropriately sized ones:
                #
                # - (Finalized) Target blocks sliced from the original one
                # and are *copied* to avoid referencing original blocks
                # - Temporary remainder of the block should *NOT* be copied
                # such as to avoid repeatedly copying the remainder bytes
                # of the block, resulting in O(M * N) total bytes being
                # copied, where N is the total number of bytes in the original
                # block and M is the number of blocks that will be produced by
                # this iterator
                block_to_yield = block.slice(0, target_num_rows, copy=True)
                block_remainder = block.slice(
                    target_num_rows, block.num_rows(), copy=False
                )

        # Start a fresh builder, carrying over any uncopied remainder rows.
        self._buffer = DelegatingBlockBuilder()
        if block_remainder is not None:
            self._buffer.add_block(block_remainder)

        self._returned_at_least_one_block = True
        return block_to_yield
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/pandas_block.py ADDED
@@ -0,0 +1,627 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import heapq
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ Callable,
7
+ Dict,
8
+ Iterator,
9
+ List,
10
+ Optional,
11
+ Sequence,
12
+ Tuple,
13
+ TypeVar,
14
+ Union,
15
+ )
16
+
17
+ import numpy as np
18
+
19
+ from ray.air.constants import TENSOR_COLUMN_NAME
20
+ from ray.air.util.tensor_extensions.utils import _is_ndarray_tensor
21
+ from ray.data._internal.numpy_support import convert_to_numpy, validate_numpy_batch
22
+ from ray.data._internal.row import TableRow
23
+ from ray.data._internal.table_block import TableBlockAccessor, TableBlockBuilder
24
+ from ray.data._internal.util import find_partitions
25
+ from ray.data.block import (
26
+ Block,
27
+ BlockAccessor,
28
+ BlockExecStats,
29
+ BlockMetadata,
30
+ BlockType,
31
+ KeyType,
32
+ U,
33
+ )
34
+ from ray.data.context import DataContext
35
+
36
+ if TYPE_CHECKING:
37
+ import pandas
38
+ import pyarrow
39
+
40
+ from ray.data._internal.planner.exchange.sort_task_spec import SortKey
41
+ from ray.data.aggregate import AggregateFn
42
+
43
+ T = TypeVar("T")
44
+
45
# Cached pandas module; populated on first call to lazy_import_pandas().
_pandas = None


def lazy_import_pandas():
    """Import pandas on first use and cache the module object."""
    global _pandas
    if _pandas is None:
        import pandas

        _pandas = pandas
    return _pandas
55
+
56
+
57
class PandasRow(TableRow):
    """
    Row of a tabular Dataset backed by a Pandas DataFrame block.
    """

    def __getitem__(self, key: Union[str, List[str]]) -> Any:
        from ray.data.extensions import TensorArrayElement

        pd = lazy_import_pandas()

        def get_item(keys: List[str]) -> Any:
            # Select the requested column(s); `self._row` is assumed to be a
            # single-row DataFrame (see TableRow) — TODO confirm.
            col = self._row[keys]
            if len(col) == 0:
                return None

            items = col.iloc[0]
            if isinstance(items.iloc[0], TensorArrayElement):
                # Getting an item in a Pandas tensor column may return
                # a TensorArrayElement, which we have to convert to an ndarray.
                return pd.Series(item.to_numpy() for item in items)

            try:
                # Try to interpret this as a numpy-type value.
                # See https://stackoverflow.com/questions/9452775/converting-numpy-dtypes-to-native-python-types. # noqa: E501
                return pd.Series(item.as_py() for item in items)

            except (AttributeError, ValueError):
                # Fallback to the original form.
                return items

        is_single_item = isinstance(key, str)
        keys = [key] if is_single_item else key

        items = get_item(keys)

        if items is None:
            return None
        elif is_single_item:
            # Unwrap the single-column result back to a scalar.
            return items.iloc[0]
        else:
            return items

    def __iter__(self) -> Iterator:
        # Iterating a row yields its column names (dict-like semantics).
        for k in self._row.columns:
            yield k

    def __len__(self):
        # Length of a row is its number of columns.
        return self._row.shape[1]
+ return self._row.shape[1]
105
+
106
+
107
class PandasBlockBuilder(TableBlockBuilder):
    """Builds Pandas-DataFrame-backed blocks from rows and column dicts."""

    def __init__(self):
        super().__init__(lazy_import_pandas().DataFrame)

    @staticmethod
    def _table_from_pydict(columns: Dict[str, List[Any]]) -> "pandas.DataFrame":
        pandas = lazy_import_pandas()

        frame_data: Dict[str, Any] = {}
        for name, values in columns.items():
            as_numpy = convert_to_numpy(values)

            if name == TENSOR_COLUMN_NAME or _is_ndarray_tensor(as_numpy):
                from ray.data.extensions.tensor_extension import TensorArray

                # Multi-dimensional columns are wrapped in the tensor
                # extension type so Pandas can carry them.
                frame_data[name] = TensorArray(as_numpy)
            else:
                frame_data[name] = as_numpy

        return pandas.DataFrame(frame_data)

    @staticmethod
    def _concat_tables(tables: List["pandas.DataFrame"]) -> "pandas.DataFrame":
        pandas = lazy_import_pandas()
        from ray.air.util.data_batch_conversion import (
            _cast_ndarray_columns_to_tensor_extension,
        )

        if len(tables) > 1:
            df = pandas.concat(tables, ignore_index=True)
            df.reset_index(drop=True, inplace=True)
        else:
            df = tables[0]
        if DataContext.get_current().enable_tensor_extension_casting:
            df = _cast_ndarray_columns_to_tensor_extension(df)
        return df

    @staticmethod
    def _concat_would_copy() -> bool:
        # pandas.concat always copies the input frames.
        return True

    @staticmethod
    def _empty_table() -> "pandas.DataFrame":
        return lazy_import_pandas().DataFrame()

    def block_type(self) -> BlockType:
        return BlockType.PANDAS
158
+
159
+
160
# Kept structurally compatible with pyarrow.lib.Schema (exposes `names`/`types`).
# TODO (kfstorm): We need a format-independent way to represent schema.
PandasBlockSchema = collections.namedtuple("PandasBlockSchema", ("names", "types"))
163
+
164
+
165
class PandasBlockAccessor(TableBlockAccessor):
    """Accessor exposing block operations over a Pandas DataFrame block."""

    # Row wrapper type returned when iterating rows of this block.
    ROW_TYPE = PandasRow

    def __init__(self, table: "pandas.DataFrame"):
        super().__init__(table)

    def column_names(self) -> List[str]:
        """Return the block's column names as a plain list."""
        return self._table.columns.tolist()

    def append_column(self, name: str, data: Any) -> Block:
        """Return a copy of the block with `data` appended as column `name`.

        Raises:
            NotImplementedError: If any item in `data` is an ndarray
                (tensor-valued columns are not supported here).
        """
        assert name not in self._table.columns

        if any(isinstance(item, np.ndarray) for item in data):
            raise NotImplementedError(
                f"`{self.__class__.__name__}.append_column()` doesn't support "
                "array-like data."
            )

        # Copy so the original (possibly shared) block is never mutated.
        table = self._table.copy()
        table[name] = data
        return table

    @staticmethod
    def _build_tensor_row(row: PandasRow) -> np.ndarray:
        """Extract the single tensor value from a tensor-column row."""
        from ray.data.extensions import TensorArrayElement

        tensor = row[TENSOR_COLUMN_NAME].iloc[0]
        if isinstance(tensor, TensorArrayElement):
            # Getting an item in a Pandas tensor column may return a TensorArrayElement,
            # which we have to convert to an ndarray.
            tensor = tensor.to_numpy()
        return tensor

    def slice(self, start: int, end: int, copy: bool = False) -> "pandas.DataFrame":
        """Return rows [start, end) with a reset index; deep-copied if `copy`."""
        view = self._table[start:end]
        view.reset_index(drop=True, inplace=True)
        if copy:
            view = view.copy(deep=True)
        return view

    def take(self, indices: List[int]) -> "pandas.DataFrame":
        """Return the rows at the given positional indices, reindexed from 0."""
        table = self._table.take(indices)
        table.reset_index(drop=True, inplace=True)
        return table

    def select(self, columns: List[str]) -> "pandas.DataFrame":
        """Project the block onto the given column names."""
        if not all(isinstance(col, str) for col in columns):
            raise ValueError(
                "Columns must be a list of column name strings when aggregating on "
                f"Pandas blocks, but got: {columns}."
            )
        return self._table[columns]

    def random_shuffle(self, random_seed: Optional[int]) -> "pandas.DataFrame":
        """Return a randomly permuted copy of the block's rows."""
        # frac=1 samples every row, i.e. a full shuffle.
        table = self._table.sample(frac=1, random_state=random_seed)
        table.reset_index(drop=True, inplace=True)
        return table

    def schema(self) -> PandasBlockSchema:
        """Return the block schema as a (names, types) namedtuple."""
        dtypes = self._table.dtypes
        schema = PandasBlockSchema(
            names=dtypes.index.tolist(), types=dtypes.values.tolist()
        )
        # Column names with non-str types of a pandas DataFrame is not
        # supported by Ray Dataset.
        if any(not isinstance(name, str) for name in schema.names):
            raise ValueError(
                "A Pandas DataFrame with column names of non-str types"
                " is not supported by Ray Dataset. Column names of this"
                f" DataFrame: {schema.names!r}."
            )
        return schema

    def to_pandas(self) -> "pandas.DataFrame":
        """Return the block as a DataFrame, casting tensor extension columns
        back to ndarray columns when the context enables it."""
        from ray.air.util.data_batch_conversion import _cast_tensor_columns_to_ndarrays

        ctx = DataContext.get_current()
        table = self._table
        if ctx.enable_tensor_extension_casting:
            table = _cast_tensor_columns_to_ndarrays(table)
        return table

    def to_numpy(
        self, columns: Optional[Union[str, List[str]]] = None
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """Convert the block to numpy.

        Returns a single ndarray when `columns` is a single column name,
        otherwise a dict mapping column name -> ndarray.
        """
        if columns is None:
            columns = self._table.columns.tolist()
            should_be_single_ndarray = False
        elif isinstance(columns, list):
            should_be_single_ndarray = False
        else:
            columns = [columns]
            should_be_single_ndarray = True

        # Validate all requested columns exist before converting any of them.
        column_names_set = set(self._table.columns)
        for column in columns:
            if column not in column_names_set:
                raise ValueError(
                    f"Cannot find column {column}, available columns: "
                    f"{self._table.columns.tolist()}"
                )

        arrays = []
        for column in columns:
            arrays.append(self._table[column].to_numpy())

        if should_be_single_ndarray:
            arrays = arrays[0]
        else:
            arrays = dict(zip(columns, arrays))
        return arrays

    def to_arrow(self) -> "pyarrow.Table":
        """Convert the block to a pyarrow Table."""
        import pyarrow

        # Set `preserve_index=False` so that Arrow doesn't add a '__index_level_0__'
        # column to the resulting table.
        return pyarrow.Table.from_pandas(self._table, preserve_index=False)

    @staticmethod
    def numpy_to_block(
        batch: Union[Dict[str, np.ndarray], Dict[str, list]],
    ) -> "pandas.DataFrame":
        """Build a Pandas block from a dict of column name -> array/list."""
        validate_numpy_batch(batch)

        block = PandasBlockBuilder._table_from_pydict(batch)
        return block

    def num_rows(self) -> int:
        return self._table.shape[0]

    def size_bytes(self) -> int:
        """Estimated in-memory size, including the index and object payloads."""
        return int(self._table.memory_usage(index=True, deep=True).sum())

    def _zip(self, acc: BlockAccessor) -> "pandas.DataFrame":
        """Column-wise zip of this block with `acc`'s block, renaming
        duplicate column names with a `_<i>` suffix."""
        r = self.to_pandas().copy(deep=False)
        s = acc.to_pandas()
        for col_name in s.columns:
            col = s[col_name]
            column_names = list(r.columns)
            # Ensure the column names are unique after zip.
            if col_name in column_names:
                i = 1
                new_name = col_name
                while new_name in column_names:
                    new_name = "{}_{}".format(col_name, i)
                    i += 1
                col_name = new_name
            r[col_name] = col
        return r

    @staticmethod
    def builder() -> PandasBlockBuilder:
        return PandasBlockBuilder()

    @staticmethod
    def _empty_table() -> "pandas.DataFrame":
        return PandasBlockBuilder._empty_table()

    def _sample(self, n_samples: int, sort_key: "SortKey") -> "pandas.DataFrame":
        """Sample `n_samples` rows of the sort-key columns (used for boundary
        estimation in sort)."""
        return self._table[sort_key.get_columns()].sample(n_samples, ignore_index=True)

    def _apply_agg(
        self, agg_fn: Callable[["pandas.Series", bool], U], on: str
    ) -> Optional[U]:
        """Helper providing null handling around applying an aggregation to a column."""
        pd = lazy_import_pandas()
        if on is not None and not isinstance(on, str):
            raise ValueError(
                "on must be a string or None when aggregating on Pandas blocks, but "
                f"got: {type(on)}."
            )

        if self.num_rows() == 0:
            return None

        col = self._table[on]
        try:
            val = agg_fn(col)
        except TypeError as e:
            # Converting an all-null column in an Arrow Table to a Pandas DataFrame
            # column will result in an all-None column of object type, which will raise
            # a type error when attempting to do most binary operations. We explicitly
            # check for this type failure here so we can properly propagate a null.
            if np.issubdtype(col.dtype, np.object_) and col.isnull().all():
                return None
            raise e from None
        if pd.isnull(val):
            return None
        return val

    def count(self, on: str) -> Optional[U]:
        """Count of non-null values in column `on`, or None for an empty block."""
        return self._apply_agg(lambda col: col.count(), on)

    def sum(self, on: str, ignore_nulls: bool) -> Optional[U]:
        """Sum of column `on`; returns None for empty or all-null columns."""
        pd = lazy_import_pandas()
        if on is not None and not isinstance(on, str):
            raise ValueError(
                "on must be a string or None when aggregating on Pandas blocks, but "
                f"got: {type(on)}."
            )

        if self.num_rows() == 0:
            return None

        col = self._table[on]
        if col.isnull().all():
            # Short-circuit on an all-null column, returning None. This is required for
            # sum() since it will otherwise return 0 when summing on an all-null column,
            # which is not what we want.
            return None
        val = col.sum(skipna=ignore_nulls)
        if pd.isnull(val):
            return None
        return val

    def min(self, on: str, ignore_nulls: bool) -> Optional[U]:
        return self._apply_agg(lambda col: col.min(skipna=ignore_nulls), on)

    def max(self, on: str, ignore_nulls: bool) -> Optional[U]:
        return self._apply_agg(lambda col: col.max(skipna=ignore_nulls), on)

    def mean(self, on: str, ignore_nulls: bool) -> Optional[U]:
        return self._apply_agg(lambda col: col.mean(skipna=ignore_nulls), on)

    def sum_of_squared_diffs_from_mean(
        self,
        on: str,
        ignore_nulls: bool,
        mean: Optional[U] = None,
    ) -> Optional[U]:
        """Sum of squared deviations from `mean` (computed here if not given);
        a building block for variance/std aggregation."""
        if mean is None:
            mean = self.mean(on, ignore_nulls)
        return self._apply_agg(
            lambda col: ((col - mean) ** 2).sum(skipna=ignore_nulls),
            on,
        )

    def sort_and_partition(
        self, boundaries: List[T], sort_key: "SortKey"
    ) -> List[Block]:
        """Sort the block by `sort_key` and split it at the given boundaries."""
        if self._table.shape[0] == 0:
            # If the pyarrow table is empty we may not have schema
            # so calling sort_indices() will raise an error.
            return [self._empty_table() for _ in range(len(boundaries) + 1)]

        columns, ascending = sort_key.to_pandas_sort_args()
        table = self._table.sort_values(by=columns, ascending=ascending)
        if len(boundaries) == 0:
            return [table]

        return find_partitions(table, boundaries, sort_key)

    def combine(
        self, sort_key: "SortKey", aggs: Tuple["AggregateFn"]
    ) -> "pandas.DataFrame":
        """Combine rows with the same key into an accumulator.

        This assumes the block is already sorted by key in ascending order.

        Args:
            sort_key: A SortKey object which holds column names/keys.
                If this is ``None``, place all rows in a single group.

            aggs: The aggregations to do.

        Returns:
            A sorted block of [k, v_1, ..., v_n] columns where k is the groupby
            key and v_i is the partially combined accumulator for the ith given
            aggregation.
            If key is None then the k column is omitted.
        """
        keys: List[str] = sort_key.get_columns()
        pd = lazy_import_pandas()

        def iter_groups() -> Iterator[Tuple[Sequence[KeyType], Block]]:
            """Creates an iterator over zero-copy group views."""
            if not keys:
                # Global aggregation consists of a single "group", so we short-circuit.
                yield tuple(), self.to_block()
                return

            start = end = 0
            iter = self.iter_rows(public_row_format=False)
            next_row = None
            # Scan forward, emitting a [start, end) slice each time the key
            # columns change; relies on the block being key-sorted.
            while True:
                try:
                    if next_row is None:
                        next_row = next(iter)
                    next_keys = next_row[keys]
                    while np.all(next_row[keys] == next_keys):
                        end += 1
                        try:
                            next_row = next(iter)
                        except StopIteration:
                            next_row = None
                            break
                    if isinstance(next_keys, pd.Series):
                        next_keys = next_keys.values
                    yield next_keys, self.slice(start, end, copy=False)
                    start = end
                except StopIteration:
                    break

        builder = PandasBlockBuilder()
        for group_keys, group_view in iter_groups():
            # Aggregate.
            init_vals = group_keys
            if len(group_keys) == 1:
                init_vals = group_keys[0]
            accumulators = [agg.init(init_vals) for agg in aggs]
            for i in range(len(aggs)):
                accumulators[i] = aggs[i].accumulate_block(accumulators[i], group_view)

            # Build the row.
            row = {}
            if keys:
                for k, gk in zip(keys, group_keys):
                    row[k] = gk

            count = collections.defaultdict(int)
            for agg, accumulator in zip(aggs, accumulators):
                name = agg.name
                # Check for conflicts with existing aggregation name.
                if count[name] > 0:
                    name = self._munge_conflict(name, count[name])
                count[name] += 1
                row[name] = accumulator

            builder.add(row)

        return builder.build()

    @staticmethod
    def merge_sorted_blocks(
        blocks: List[Block], sort_key: "SortKey"
    ) -> Tuple["pandas.DataFrame", BlockMetadata]:
        """Concatenate pre-sorted blocks and re-sort into one sorted block."""
        pd = lazy_import_pandas()
        stats = BlockExecStats.builder()
        # Drop empty blocks; they contribute nothing and may lack a schema.
        blocks = [b for b in blocks if b.shape[0] > 0]
        if len(blocks) == 0:
            ret = PandasBlockAccessor._empty_table()
        else:
            # Handle blocks of different types.
            blocks = TableBlockAccessor.normalize_block_types(blocks, "pandas")
            ret = pd.concat(blocks, ignore_index=True)
            columns, ascending = sort_key.to_pandas_sort_args()
            ret = ret.sort_values(by=columns, ascending=ascending)
        return ret, PandasBlockAccessor(ret).get_metadata(exec_stats=stats.build())

    @staticmethod
    def aggregate_combined_blocks(
        blocks: List["pandas.DataFrame"],
        sort_key: "SortKey",
        aggs: Tuple["AggregateFn"],
        finalize: bool,
    ) -> Tuple["pandas.DataFrame", BlockMetadata]:
        """Aggregate sorted, partially combined blocks with the same key range.

        This assumes blocks are already sorted by key in ascending order,
        so we can do merge sort to get all the rows with the same key.

        Args:
            blocks: A list of partially combined and sorted blocks.
            sort_key: The column name of key or None for global aggregation.
            aggs: The aggregations to do.
            finalize: Whether to finalize the aggregation. This is used as an
                optimization for cases where we repeatedly combine partially
                aggregated groups.

        Returns:
            A block of [k, v_1, ..., v_n] columns and its metadata where k is
            the groupby key and v_i is the corresponding aggregation result for
            the ith given aggregation.
            If key is None then the k column is omitted.
        """

        stats = BlockExecStats.builder()
        keys = sort_key.get_columns()

        def key_fn(r):
            # Global aggregation: collapse every row onto the same merge key.
            if keys:
                return tuple(r[keys])
            else:
                return (0,)

        # Handle blocks of different types.
        blocks = TableBlockAccessor.normalize_block_types(blocks, "pandas")

        # Merge-sort the per-block row iterators; blocks are pre-sorted by key.
        iter = heapq.merge(
            *[
                PandasBlockAccessor(block).iter_rows(public_row_format=False)
                for block in blocks
            ],
            key=key_fn,
        )
        next_row = None
        builder = PandasBlockBuilder()
        while True:
            try:
                if next_row is None:
                    next_row = next(iter)
                next_keys = key_fn(next_row)
                next_key_columns = keys

                def gen():
                    # Yields the merged run of rows sharing `next_keys`.
                    nonlocal iter
                    nonlocal next_row
                    while key_fn(next_row) == next_keys:
                        yield next_row
                        try:
                            next_row = next(iter)
                        except StopIteration:
                            next_row = None
                            break

                # Merge.
                first = True
                accumulators = [None] * len(aggs)
                resolved_agg_names = [None] * len(aggs)
                for r in gen():
                    if first:
                        count = collections.defaultdict(int)
                        for i in range(len(aggs)):
                            name = aggs[i].name
                            # Check for conflicts with existing aggregation
                            # name.
                            if count[name] > 0:
                                name = PandasBlockAccessor._munge_conflict(
                                    name, count[name]
                                )
                            count[name] += 1
                            resolved_agg_names[i] = name
                            accumulators[i] = r[name]
                        first = False
                    else:
                        for i in range(len(aggs)):
                            accumulators[i] = aggs[i].merge(
                                accumulators[i], r[resolved_agg_names[i]]
                            )
                # Build the row.
                row = {}
                if keys:
                    for col_name, next_key in zip(next_key_columns, next_keys):
                        row[col_name] = next_key

                for agg, agg_name, accumulator in zip(
                    aggs, resolved_agg_names, accumulators
                ):
                    if finalize:
                        row[agg_name] = agg.finalize(accumulator)
                    else:
                        row[agg_name] = accumulator

                builder.add(row)
            except StopIteration:
                break

        ret = builder.build()
        return ret, PandasBlockAccessor(ret).get_metadata(exec_stats=stats.build())

    def block_type(self) -> BlockType:
        return BlockType.PANDAS
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/plan.py ADDED
@@ -0,0 +1,602 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import itertools
3
+ import logging
4
+ from typing import TYPE_CHECKING, Iterator, List, Optional, Tuple, Type, Union
5
+
6
+ import pyarrow
7
+
8
+ import ray
9
+ from ray._private.internal_api import get_memory_info_reply, get_state_from_address
10
+ from ray.data._internal.execution.interfaces import RefBundle
11
+ from ray.data._internal.logical.interfaces.logical_operator import LogicalOperator
12
+ from ray.data._internal.logical.interfaces.logical_plan import LogicalPlan
13
+ from ray.data._internal.logical.operators.from_operators import AbstractFrom
14
+ from ray.data._internal.logical.operators.input_data_operator import InputData
15
+ from ray.data._internal.logical.operators.read_operator import Read
16
+ from ray.data._internal.stats import DatasetStats
17
+ from ray.data._internal.util import create_dataset_tag, unify_block_metadata_schema
18
+ from ray.data.block import BlockMetadata
19
+ from ray.data.context import DataContext
20
+ from ray.data.exceptions import omit_traceback_stdout
21
+ from ray.util.debug import log_once
22
+
23
+ if TYPE_CHECKING:
24
+
25
+ from ray.data._internal.execution.interfaces import Executor
26
+ from ray.data.dataset import Dataset
27
+
28
+
29
+ # Scheduling strategy can be inherited from prev operator if not specified.
30
+ INHERITABLE_REMOTE_ARGS = ["scheduling_strategy"]
31
+
32
+
33
+ logger = logging.getLogger(__name__)
34
+
35
+
36
+ class ExecutionPlan:
37
+ """A lazy execution plan for a Dataset.
38
+
39
+ This lazy execution plan builds up a chain of ``List[RefBundle]`` -->
40
+ ``List[RefBundle]`` operators. Prior to execution, we apply a set of logical
41
+ plan optimizations, such as operator fusion, in order to reduce Ray task
42
+ overhead and data copies.
43
+
44
+ Internally, the execution plan holds a snapshot of a computed list of
45
+ blocks and their associated metadata under ``self._snapshot_bundle``,
46
+ where this snapshot is the cached output of executing the operator chain."""
47
+
48
    def __init__(
        self,
        stats: DatasetStats,
        *,
        data_context: Optional[DataContext] = None,
    ):
        """Create a plan with no transformation operators.

        Args:
            stats: Stats for the base blocks.
            data_context: :class:`~ray.data.context.DataContext`
                object to use for execution.
        """
        self._in_stats = stats
        # A computed snapshot of some prefix of operators and their corresponding
        # output blocks and stats.
        self._snapshot_operator: Optional[LogicalOperator] = None
        self._snapshot_stats = None
        self._snapshot_bundle = None
        # Snapshot of only metadata corresponding to the final operator's
        # output bundles, used as the source of truth for the Dataset's schema
        # and count. This is calculated and cached when the plan is executed as an
        # iterator (`execute_to_iterator()`), and avoids caching
        # all of the output blocks in memory like in `self.snapshot_bundle`.
        # TODO(scottjlee): To keep the caching logic consistent, update `execute()`
        # to also store the metadata in `_snapshot_metadata` instead of
        # `_snapshot_bundle`. For example, we could store the blocks in
        # `self._snapshot_blocks` and the metadata in `self._snapshot_metadata`.
        self._snapshot_metadata: Optional[BlockMetadata] = None

        # Cached schema.
        self._schema = None
        # Set when a Dataset is constructed with this plan
        self._dataset_uuid = None

        # Optional human-readable name, surfaced in repr output.
        self._dataset_name = None

        # Flipped to True the first time this plan is executed.
        self._has_started_execution = False

        if data_context is None:
            # Snapshot the current context, so that the config of Datasets is always
            # determined by the config at the time it was created.
            self._context = copy.deepcopy(DataContext.get_current())
        else:
            self._context = data_context
+
94
+ def __repr__(self) -> str:
95
+ return (
96
+ f"ExecutionPlan("
97
+ f"dataset_uuid={self._dataset_uuid}, "
98
+ f"snapshot_operator={self._snapshot_operator}"
99
+ f")"
100
+ )
101
+
102
+ def get_plan_as_string(self, dataset_cls: Type["Dataset"]) -> str:
103
+ """Create a cosmetic string representation of this execution plan.
104
+
105
+ Returns:
106
+ The string representation of this execution plan.
107
+ """
108
+ # NOTE: this is used for Dataset.__repr__ to give a user-facing string
109
+ # representation. Ideally ExecutionPlan.__repr__ should be replaced with this
110
+ # method as well.
111
+
112
+ from ray.data.dataset import MaterializedDataset
113
+
114
+ # Do not force execution for schema, as this method is expected to be very
115
+ # cheap.
116
+ plan_str = ""
117
+ plan_max_depth = 0
118
+ if not self.has_computed_output():
119
+
120
+ def generate_logical_plan_string(
121
+ op: LogicalOperator,
122
+ curr_str: str = "",
123
+ depth: int = 0,
124
+ ):
125
+ """Traverse (DFS) the LogicalPlan DAG and
126
+ return a string representation of the operators."""
127
+ if isinstance(op, (Read, InputData, AbstractFrom)):
128
+ return curr_str, depth
129
+
130
+ curr_max_depth = depth
131
+ op_name = op.name
132
+ if depth == 0:
133
+ curr_str += f"{op_name}\n"
134
+ else:
135
+ trailing_space = " " * ((depth - 1) * 3)
136
+ curr_str += f"{trailing_space}+- {op_name}\n"
137
+
138
+ for input in op.input_dependencies:
139
+ curr_str, input_max_depth = generate_logical_plan_string(
140
+ input, curr_str, depth + 1
141
+ )
142
+ curr_max_depth = max(curr_max_depth, input_max_depth)
143
+ return curr_str, curr_max_depth
144
+
145
+ # generate_logical_plan_string(self._logical_plan.dag)
146
+ plan_str, plan_max_depth = generate_logical_plan_string(
147
+ self._logical_plan.dag
148
+ )
149
+
150
+ if self._snapshot_bundle is not None:
151
+ # This plan has executed some but not all operators.
152
+ schema = unify_block_metadata_schema(self._snapshot_bundle.metadata)
153
+ count = self._snapshot_bundle.num_rows()
154
+ elif self._snapshot_metadata is not None:
155
+ schema = self._snapshot_metadata.schema
156
+ count = self._snapshot_metadata.num_rows
157
+ else:
158
+ # This plan hasn't executed any operators.
159
+ sources = self._logical_plan.sources()
160
+ # TODO(@bveeramani): Handle schemas for n-ary operators like `Union`.
161
+ if len(sources) > 1:
162
+ # Multiple sources, cannot determine schema.
163
+ schema = None
164
+ count = None
165
+ else:
166
+ assert len(sources) == 1
167
+ plan = ExecutionPlan(DatasetStats(metadata={}, parent=None))
168
+ plan.link_logical_plan(LogicalPlan(sources[0], plan._context))
169
+ schema = plan.schema()
170
+ count = plan.meta_count()
171
+ else:
172
+ # Get schema of output blocks.
173
+ schema = self.schema(fetch_if_missing=False)
174
+ count = self._snapshot_bundle.num_rows()
175
+
176
+ if schema is None:
177
+ schema_str = "Unknown schema"
178
+ elif isinstance(schema, type):
179
+ schema_str = str(schema)
180
+ else:
181
+ schema_str = []
182
+ for n, t in zip(schema.names, schema.types):
183
+ if hasattr(t, "__name__"):
184
+ t = t.__name__
185
+ schema_str.append(f"{n}: {t}")
186
+ schema_str = ", ".join(schema_str)
187
+ schema_str = "{" + schema_str + "}"
188
+
189
+ if count is None:
190
+ count = "?"
191
+
192
+ num_blocks = None
193
+ if dataset_cls == MaterializedDataset:
194
+ num_blocks = self.initial_num_blocks()
195
+ assert num_blocks is not None
196
+
197
+ name_str = (
198
+ "name={}, ".format(self._dataset_name)
199
+ if self._dataset_name is not None
200
+ else ""
201
+ )
202
+ num_blocks_str = f"num_blocks={num_blocks}, " if num_blocks else ""
203
+
204
+ dataset_str = "{}({}{}num_rows={}, schema={})".format(
205
+ dataset_cls.__name__,
206
+ name_str,
207
+ num_blocks_str,
208
+ count,
209
+ schema_str,
210
+ )
211
+
212
+ # If the resulting string representation fits in one line, use it directly.
213
+ SCHEMA_LINE_CHAR_LIMIT = 80
214
+ MIN_FIELD_LENGTH = 10
215
+ INDENT_STR = " " * 3
216
+ trailing_space = INDENT_STR * plan_max_depth
217
+
218
+ if len(dataset_str) > SCHEMA_LINE_CHAR_LIMIT:
219
+ # If the resulting string representation exceeds the line char limit,
220
+ # first try breaking up each `Dataset` parameter into its own line
221
+ # and check if each line fits within the line limit. We check the
222
+ # `schema` param's length, since this is likely the longest string.
223
+ schema_str_on_new_line = f"{trailing_space}{INDENT_STR}schema={schema_str}"
224
+ if len(schema_str_on_new_line) > SCHEMA_LINE_CHAR_LIMIT:
225
+ # If the schema cannot fit on a single line, break up each field
226
+ # into its own line.
227
+ schema_str = []
228
+ for n, t in zip(schema.names, schema.types):
229
+ if hasattr(t, "__name__"):
230
+ t = t.__name__
231
+ col_str = f"{trailing_space}{INDENT_STR * 2}{n}: {t}"
232
+ # If the field line exceeds the char limit, abbreviate
233
+ # the field name to fit while maintaining the full type
234
+ if len(col_str) > SCHEMA_LINE_CHAR_LIMIT:
235
+ shortened_suffix = f"...: {str(t)}"
236
+ # Show at least 10 characters of the field name, even if
237
+ # we have already hit the line limit with the type.
238
+ chars_left_for_col_name = max(
239
+ SCHEMA_LINE_CHAR_LIMIT - len(shortened_suffix),
240
+ MIN_FIELD_LENGTH,
241
+ )
242
+ col_str = (
243
+ f"{col_str[:chars_left_for_col_name]}{shortened_suffix}"
244
+ )
245
+ schema_str.append(col_str)
246
+ schema_str = ",\n".join(schema_str)
247
+ schema_str = (
248
+ "{\n" + schema_str + f"\n{trailing_space}{INDENT_STR}" + "}"
249
+ )
250
+ name_str = (
251
+ f"\n{trailing_space}{INDENT_STR}name={self._dataset_name},"
252
+ if self._dataset_name is not None
253
+ else ""
254
+ )
255
+ num_blocks_str = (
256
+ f"\n{trailing_space}{INDENT_STR}num_blocks={num_blocks},"
257
+ if num_blocks
258
+ else ""
259
+ )
260
+ dataset_str = (
261
+ f"{dataset_cls.__name__}("
262
+ f"{name_str}"
263
+ f"{num_blocks_str}"
264
+ f"\n{trailing_space}{INDENT_STR}num_rows={count},"
265
+ f"\n{trailing_space}{INDENT_STR}schema={schema_str}"
266
+ f"\n{trailing_space})"
267
+ )
268
+
269
+ if plan_max_depth == 0:
270
+ plan_str += dataset_str
271
+ else:
272
+ plan_str += f"{INDENT_STR * (plan_max_depth - 1)}+- {dataset_str}"
273
+ return plan_str
274
+
275
    def link_logical_plan(self, logical_plan: "LogicalPlan"):
        """Link the logical plan into this execution plan.

        This is used for triggering execution for optimizer code path in this legacy
        execution plan.
        """
        self._logical_plan = logical_plan
        # Stamp this plan's (snapshotted) DataContext onto the logical plan so
        # both sides observe the same configuration.
        self._logical_plan._context = self._context
283
+
284
+ def copy(self) -> "ExecutionPlan":
285
+ """Create a shallow copy of this execution plan.
286
+
287
+ This copy can be executed without mutating the original, but clearing the copy
288
+ will also clear the original.
289
+
290
+ Returns:
291
+ A shallow copy of this execution plan.
292
+ """
293
+ plan_copy = ExecutionPlan(
294
+ self._in_stats,
295
+ data_context=self._context,
296
+ )
297
+ if self._snapshot_bundle is not None:
298
+ # Copy over the existing snapshot.
299
+ plan_copy._snapshot_bundle = self._snapshot_bundle
300
+ plan_copy._snapshot_operator = self._snapshot_operator
301
+ plan_copy._snapshot_stats = self._snapshot_stats
302
+ plan_copy._dataset_name = self._dataset_name
303
+ return plan_copy
304
+
305
+ def deep_copy(self) -> "ExecutionPlan":
306
+ """Create a deep copy of this execution plan.
307
+
308
+ This copy can be executed AND cleared without mutating the original.
309
+
310
+ Returns:
311
+ A deep copy of this execution plan.
312
+ """
313
+ plan_copy = ExecutionPlan(copy.copy(self._in_stats))
314
+ if self._snapshot_bundle:
315
+ # Copy over the existing snapshot.
316
+ plan_copy._snapshot_bundle = copy.copy(self._snapshot_bundle)
317
+ plan_copy._snapshot_operator = copy.copy(self._snapshot_operator)
318
+ plan_copy._snapshot_stats = copy.copy(self._snapshot_stats)
319
+ plan_copy._dataset_name = self._dataset_name
320
+ return plan_copy
321
+
322
    def initial_num_blocks(self) -> Optional[int]:
        """Get the estimated number of blocks from the logical plan
        after applying execution plan optimizations, but prior to
        fully executing the dataset."""
        return self._logical_plan.dag.estimated_num_outputs()
327
+
328
    def schema(
        self, fetch_if_missing: bool = False
    ) -> Union[type, "pyarrow.lib.Schema"]:
        """Get the schema after applying all execution plan optimizations,
        but prior to fully executing the dataset
        (unless `fetch_if_missing` is set to True).

        Args:
            fetch_if_missing: Whether to execute the plan to fetch the schema.

        Returns:
            The schema of the output dataset, or ``None`` if it could not be
            determined without execution and fetching was not requested.
        """
        # Fast path: a previously resolved (non-None) schema is cached.
        if self._schema is not None:
            return self._schema

        schema = None
        if self.has_computed_output():
            # Plan already fully executed: unify the snapshot's block schemas.
            schema = unify_block_metadata_schema(self._snapshot_bundle.metadata)
        elif self._logical_plan.dag.aggregate_output_metadata().schema is not None:
            # The logical plan can answer from metadata without execution.
            schema = self._logical_plan.dag.aggregate_output_metadata().schema
        elif fetch_if_missing:
            # Execute and scan output bundles for a block that carries a
            # schema and is not known-empty.
            iter_ref_bundles, _, _ = self.execute_to_iterator()
            for ref_bundle in iter_ref_bundles:
                for metadata in ref_bundle.metadata:
                    if metadata.schema is not None and (
                        metadata.num_rows is None or metadata.num_rows > 0
                    ):
                        schema = metadata.schema
                        break
                # NOTE(review): the break above only exits the inner loop, so
                # iteration continues over the remaining bundles (potentially
                # overwriting `schema`) — confirm whether a full scan is
                # intended here.
        elif self.is_read_only():
            # For consistency with the previous implementation, we fetch the schema if
            # the plan is read-only even if `fetch_if_missing` is False.
            iter_ref_bundles, _, _ = self.execute_to_iterator()
            try:
                # Only the first bundle is inspected in this path.
                ref_bundle = next(iter(iter_ref_bundles))
                for metadata in ref_bundle.metadata:
                    if metadata.schema is not None:
                        schema = metadata.schema
                        break
            except StopIteration:  # Empty dataset.
                schema = None

        # Cache the result. A None result is effectively not cached: the
        # fast path above only short-circuits on a non-None schema.
        self._schema = schema
        return self._schema
373
+
374
    def cache_schema(self, schema: Union[type, "pyarrow.lib.Schema"]) -> None:
        """Cache ``schema`` so subsequent ``schema()`` calls return it directly."""
        self._schema = schema
376
+
377
+ def input_files(self) -> Optional[List[str]]:
378
+ """Get the input files of the dataset, if available."""
379
+ return self._logical_plan.dag.aggregate_output_metadata().input_files
380
+
381
+ def meta_count(self) -> Optional[int]:
382
+ """Get the number of rows after applying all plan optimizations, if possible.
383
+
384
+ This method will never trigger any computation.
385
+
386
+ Returns:
387
+ The number of records of the result Dataset, or None.
388
+ """
389
+ if self.has_computed_output():
390
+ num_rows = sum(m.num_rows for m in self._snapshot_bundle.metadata)
391
+ elif self._logical_plan.dag.aggregate_output_metadata().num_rows is not None:
392
+ num_rows = self._logical_plan.dag.aggregate_output_metadata().num_rows
393
+ else:
394
+ num_rows = None
395
+ return num_rows
396
+
397
    @omit_traceback_stdout
    def execute_to_iterator(
        self,
    ) -> Tuple[Iterator[RefBundle], DatasetStats, Optional["Executor"]]:
        """Execute this plan, returning an iterator.

        This will use streaming execution to generate outputs. If the plan
        already has a computed snapshot, the cached bundle is returned
        without creating a new executor.

        Returns:
            Tuple of iterator over output RefBundles, DatasetStats, and the
            executor (``None`` when the cached snapshot was used).
        """
        # Any call to this method counts as (at least partial) execution.
        self._has_started_execution = True

        # Always use the saved context for execution.
        ctx = self._context

        if self.has_computed_output():
            # Snapshot hit: no streaming execution needed.
            bundle = self.execute()
            return iter([bundle]), self._snapshot_stats, None

        from ray.data._internal.execution.legacy_compat import (
            execute_to_legacy_bundle_iterator,
        )
        from ray.data._internal.execution.streaming_executor import StreamingExecutor

        metrics_tag = create_dataset_tag(self._dataset_name, self._dataset_uuid)
        # Deep-copy the execution options so the executor cannot mutate the
        # shared context's options.
        executor = StreamingExecutor(copy.deepcopy(ctx.execution_options), metrics_tag)
        bundle_iter = execute_to_legacy_bundle_iterator(executor, self)
        # Since the generator doesn't run any code until we try to fetch the first
        # value, force execution of one bundle before we call get_stats().
        gen = iter(bundle_iter)
        try:
            bundle_iter = itertools.chain([next(gen)], gen)
        except StopIteration:
            # Empty output: fall through with the exhausted iterator.
            pass
        self._snapshot_stats = executor.get_stats()
        return bundle_iter, self._snapshot_stats, executor
434
+
435
    @omit_traceback_stdout
    def execute(
        self,
        preserve_order: bool = False,
    ) -> RefBundle:
        """Execute this plan.

        Args:
            preserve_order: Whether to preserve order in execution.

        Returns:
            The blocks of the output dataset as a single RefBundle. The
            result is cached as this plan's snapshot; repeated calls return
            the cached snapshot without re-executing.
        """
        self._has_started_execution = True

        # Always use the saved context for execution.
        context = self._context

        if not ray.available_resources().get("CPU"):
            # Warn (once per process) that execution may hang without CPUs.
            if log_once("cpu_warning"):
                logger.warning(
                    "Warning: The Ray cluster currently does not have "
                    "any available CPUs. The Dataset job will hang unless more CPUs "
                    "are freed up. A common reason is that cluster resources are "
                    "used by Actors or Tune trials; see the following link "
                    "for more details: "
                    "https://docs.ray.io/en/latest/data/data-internals.html#ray-data-and-tune"  # noqa: E501
                )
        if not self.has_computed_output():
            from ray.data._internal.execution.legacy_compat import (
                _get_initial_stats_from_plan,
                execute_to_legacy_block_list,
            )

            if self._logical_plan.dag.output_data() is not None:
                # If the data is already materialized (e.g., `from_pandas`), we can
                # skip execution and directly return the output data. This avoids
                # recording unnecessary metrics for an empty plan execution.
                stats = _get_initial_stats_from_plan(self)

                # TODO(@bveeramani): Make `ExecutionPlan.execute()` return
                # `List[RefBundle]` instead of `RefBundle`. Among other reasons, it'd
                # allow us to remove the unwrapping logic below.
                output_bundles = self._logical_plan.dag.output_data()
                # The merged bundle owns its blocks only if every source did.
                owns_blocks = all(bundle.owns_blocks for bundle in output_bundles)
                bundle = RefBundle(
                    [
                        (block, metadata)
                        for bundle in output_bundles
                        for block, metadata in bundle.blocks
                    ],
                    owns_blocks=owns_blocks,
                )
            else:
                from ray.data._internal.execution.streaming_executor import (
                    StreamingExecutor,
                )

                metrics_tag = create_dataset_tag(self._dataset_name, self._dataset_uuid)
                # Deep-copy the execution options so the executor cannot
                # mutate the shared context's options.
                executor = StreamingExecutor(
                    copy.deepcopy(context.execution_options),
                    metrics_tag,
                )
                blocks = execute_to_legacy_block_list(
                    executor,
                    self,
                    dataset_uuid=self._dataset_uuid,
                    preserve_order=preserve_order,
                )
                bundle = RefBundle(
                    tuple(blocks.iter_blocks_with_metadata()),
                    owns_blocks=blocks._owned_by_consumer,
                )
                stats = executor.get_stats()
                stats_summary_string = stats.to_summary().to_string(
                    include_parent=False
                )
                if context.enable_auto_log_stats:
                    logger.info(stats_summary_string)

                # Retrieve memory-related stats from ray.
                try:
                    reply = get_memory_info_reply(
                        get_state_from_address(ray.get_runtime_context().gcs_address)
                    )
                    if reply.store_stats.spill_time_total_s > 0:
                        stats.global_bytes_spilled = int(
                            reply.store_stats.spilled_bytes_total
                        )
                    if reply.store_stats.restore_time_total_s > 0:
                        stats.global_bytes_restored = int(
                            reply.store_stats.restored_bytes_total
                        )
                except Exception as e:
                    # Best-effort: spill/restore metrics are optional.
                    logger.debug(
                        "Skipping recording memory spilled and restored statistics due to "
                        f"exception: {e}"
                    )

            stats.dataset_bytes_spilled = 0

            def collect_stats(cur_stats):
                # Accumulate `obj_store_mem_spilled` over this stats object
                # and all of its ancestors (recursively via `parents`).
                stats.dataset_bytes_spilled += cur_stats.extra_metrics.get(
                    "obj_store_mem_spilled", 0
                )
                for parent in cur_stats.parents:
                    collect_stats(parent)

            collect_stats(stats)

            # Set the snapshot to the output of the final operator.
            self._snapshot_bundle = bundle
            self._snapshot_operator = self._logical_plan.dag
            self._snapshot_stats = stats
            self._snapshot_stats.dataset_uuid = self._dataset_uuid

        return self._snapshot_bundle
552
+
553
    @property
    def has_started_execution(self) -> bool:
        """Return ``True`` if this plan has been partially or fully executed.

        The flag is set by the execution entry points of this plan.
        """
        return self._has_started_execution
557
+
558
+ def clear_snapshot(self) -> None:
559
+ """Clear the snapshot kept in the plan to the beginning state."""
560
+ self._snapshot_bundle = None
561
+ self._snapshot_operator = None
562
+ self._snapshot_stats = None
563
+
564
+ def stats(self) -> DatasetStats:
565
+ """Return stats for this plan.
566
+
567
+ If the plan isn't executed, an empty stats object will be returned.
568
+ """
569
+ if not self._snapshot_stats:
570
+ return DatasetStats(metadata={}, parent=None)
571
+ return self._snapshot_stats
572
+
573
+ def has_lazy_input(self) -> bool:
574
+ """Return whether this plan has lazy input blocks."""
575
+ return all(isinstance(op, Read) for op in self._logical_plan.sources())
576
+
577
+ def is_read_only(self, root_op: Optional[LogicalOperator] = None) -> bool:
578
+ """Return whether the LogicalPlan corresponding to `root_op`
579
+ contains only a Read op. By default, the last operator of
580
+ the LogicalPlan is used."""
581
+ if root_op is None:
582
+ root_op = self._logical_plan.dag
583
+ return isinstance(root_op, Read) and len(root_op.input_dependencies) == 0
584
+
585
+ def has_computed_output(self) -> bool:
586
+ """Whether this plan has a computed snapshot for the final operator, i.e. for
587
+ the output of this plan.
588
+ """
589
+ return (
590
+ self._snapshot_bundle is not None
591
+ and self._snapshot_operator == self._logical_plan.dag
592
+ )
593
+
594
+ def require_preserve_order(self) -> bool:
595
+ """Whether this plan requires to preserve order."""
596
+ from ray.data._internal.logical.operators.all_to_all_operator import Sort
597
+ from ray.data._internal.logical.operators.n_ary_operator import Zip
598
+
599
+ for op in self._logical_plan.dag.post_order_iter():
600
+ if isinstance(op, (Zip, Sort)):
601
+ return True
602
+ return False
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/row.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Mapping
2
+ from typing import Any
3
+
4
+
5
class TableRow(Mapping):
    """A dict-like view over one row of a tabular ``Dataset``.

    Implements the dictionary mapping interface with less copying than
    converting Arrow Tables or Pandas DataFrames into per-row dicts. This
    class must be subclassed; subclasses implement ``__getitem__``,
    ``__iter__``, and ``__len__``.

    Concrete subclasses include ``ray.data._internal.arrow_block.ArrowRow``
    and ``ray.data._internal.pandas_block.PandasRow``.
    """

    def __init__(self, row: Any):
        """Construct a ``TableRow`` (internal API).

        Args:
            row: The tabular row that backs this row mapping.
        """
        self._row = row

    def as_pydict(self) -> dict:
        """Materialize this row as a plain Python dict (copies the data)."""
        return {key: value for key, value in self.items()}

    def __str__(self):
        return str(self.as_pydict())

    def __repr__(self):
        return str(self)

    def _repr_pretty_(self, p, cycle):
        # IPython rich display: render with the standard dict pretty-printer.
        from IPython.lib.pretty import _dict_pprinter_factory

        pretty_print = _dict_pprinter_factory("{", "}")
        return pretty_print(self, p, cycle)
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/split.py ADDED
@@ -0,0 +1,297 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import logging
3
+ from typing import Iterable, List, Tuple, Union
4
+
5
+ import ray
6
+ from ray.data._internal.memory_tracing import trace_deallocation
7
+ from ray.data._internal.remote_fn import cached_remote_fn
8
+ from ray.data.block import (
9
+ Block,
10
+ BlockAccessor,
11
+ BlockExecStats,
12
+ BlockMetadata,
13
+ BlockPartition,
14
+ )
15
+ from ray.types import ObjectRef
16
+
17
+ logger = logging.getLogger(__name__)
18
+
19
+
20
def _calculate_blocks_rows(
    blocks_with_metadata: BlockPartition,
) -> List[int]:
    """Calculate the number of rows for a list of blocks with metadata.

    Side effect: when a block's metadata is missing ``num_rows``, the count is
    fetched remotely and cached back into that metadata object.

    Args:
        blocks_with_metadata: (block ref, metadata) pairs to count.

    Returns:
        The per-block row counts, in input order.
    """
    get_num_rows = cached_remote_fn(_get_num_rows)
    block_rows = []
    for block, metadata in blocks_with_metadata:
        if metadata.num_rows is None:
            # Need to fetch number of rows (blocks until the remote task
            # completes).
            num_rows = ray.get(get_num_rows.remote(block))
            metadata.num_rows = num_rows
        else:
            num_rows = metadata.num_rows
        block_rows.append(num_rows)
    return block_rows
35
+
36
+
37
+ def _generate_valid_indices(
38
+ num_rows_per_block: List[int],
39
+ split_indices: List[int],
40
+ ) -> List[int]:
41
+ """Generate valid split indices by apply min(index, total_num_rows)
42
+ to every index."""
43
+ total_rows = sum(num_rows_per_block)
44
+ return [min(index, total_rows) for index in split_indices]
45
+
46
+
47
+ def _generate_per_block_split_indices(
48
+ num_rows_per_block: List[int],
49
+ split_indices: List[int],
50
+ ) -> List[List[int]]:
51
+ """Given num rows per block and valid split indices, generate per block split indices.
52
+
53
+ Args:
54
+ num_rows_per_block: num of rows per block.
55
+ split_indices: The (global) indices at which to split the blocks.
56
+ Returns:
57
+ Per block split indices indicates each input block's split point(s).
58
+ """
59
+ # for each split index, we iterate though the currnet input block
60
+ # to see if the index falls into this block. if the index
61
+ # falls into this block, we push it back to the current block's
62
+ # split indices. Otherwise, we move on to the next block.
63
+ per_block_split_indices = []
64
+ current_input_block_id = 0
65
+ current_block_split_indices = []
66
+ current_block_global_offset = 0
67
+ current_index_id = 0
68
+
69
+ while current_index_id < len(split_indices):
70
+ split_index = split_indices[current_index_id]
71
+ current_block_row = num_rows_per_block[current_input_block_id]
72
+ if split_index - current_block_global_offset <= current_block_row:
73
+ current_block_split_indices.append(
74
+ split_index - current_block_global_offset
75
+ )
76
+ current_index_id += 1
77
+ continue
78
+ per_block_split_indices.append(current_block_split_indices)
79
+ current_block_split_indices = []
80
+ current_block_global_offset += num_rows_per_block[current_input_block_id]
81
+ current_input_block_id += 1
82
+
83
+ # we might finished all the indices but there are still blocks left, also
84
+ # current_block_split_indices might not be added yet.
85
+ while len(per_block_split_indices) < len(num_rows_per_block):
86
+ per_block_split_indices.append(current_block_split_indices)
87
+ current_block_split_indices = []
88
+ return per_block_split_indices
89
+
90
+
91
def _split_single_block(
    block_id: int,
    block: Block,
    meta: BlockMetadata,
    split_indices: List[int],
) -> Tuple[Union[Tuple[int, List[BlockMetadata]], Block], ...]:
    """Split the provided block at the given indices.

    Args:
        block_id: the id of this block in the block list.
        block: block to be split.
        meta: metadata of the block; ``meta.num_rows`` must be valid.
        split_indices: the local indices where the block should be split.
    Returns:
        returns block_id, split blocks metadata, and a list of blocks
        in the following form. We return blocks in this way
        so that the owner of blocks could be the caller(driver)
        instead of worker itself.
        Tuple(block_id, split_blocks_meta), block0, block1 ...
    """
    split_meta = []
    split_blocks = []
    block_accessor = BlockAccessor.for_block(block)
    prev_index = 0
    # Append a sentinel index at the end so the loop below also emits the
    # final slice. Build a new list instead of mutating the caller's
    # `split_indices` argument (the original appended in place, which leaks
    # the sentinel back to the caller when invoked locally).
    bounds = split_indices + [meta.num_rows]
    for index in bounds:
        logger.debug(f"slicing block {prev_index}:{index}")
        stats = BlockExecStats.builder()
        split_block = block_accessor.slice(prev_index, index)
        accessor = BlockAccessor.for_block(split_block)
        # Each slice inherits the parent's schema and input files; row count
        # and byte size are recomputed from the slice itself.
        _meta = BlockMetadata(
            num_rows=accessor.num_rows(),
            size_bytes=accessor.size_bytes(),
            schema=meta.schema,
            input_files=meta.input_files,
            exec_stats=stats.build(),
        )
        split_meta.append(_meta)
        split_blocks.append(split_block)
        prev_index = index
    # First element carries the id and all slice metadata; the raw blocks
    # follow so Ray can return them as separate objects owned by the caller.
    return ((block_id, split_meta), *split_blocks)
136
+
137
+
138
+ def _drop_empty_block_split(block_split_indices: List[int], num_rows: int) -> List[int]:
139
+ """drop split indices that creates empty block split. This could happen when there
140
+ are duplicated indices, or index equal to 0 (start of the block) or num_block_rows
141
+ (end of the block).
142
+ """
143
+ prev_index = -1
144
+ optimized_indices = []
145
+ for index in block_split_indices:
146
+ if index == 0 or index == num_rows:
147
+ continue
148
+ if index == prev_index:
149
+ continue
150
+ optimized_indices.append(index)
151
+ prev_index = index
152
+ return optimized_indices
153
+
154
+
155
def _split_all_blocks(
    blocks_with_metadata: List[Tuple[ObjectRef[Block], BlockMetadata]],
    per_block_split_indices: List[List[int]],
    owned_by_consumer: bool,
) -> Iterable[Tuple[ObjectRef[Block], BlockMetadata]]:
    """Split all the input blocks based on the split indices.

    Args:
        blocks_with_metadata: (block ref, metadata) pairs to split.
        per_block_split_indices: local split indices for each input block, as
            produced by ``_generate_per_block_split_indices``.
        owned_by_consumer: whether the input blocks are owned by the consumer;
            when True, split source blocks are eagerly freed.

    Returns:
        A flat iterable of (block ref, metadata) pairs covering every split
        piece, in input-block order.
    """
    split_single_block = cached_remote_fn(_split_single_block)

    all_blocks_split_results: List[BlockPartition] = [None] * len(blocks_with_metadata)

    per_block_split_metadata_futures = []
    per_block_split_block_refs = []

    # Track the blocks that were actually split, so they can be freed below.
    blocks_splitted = []
    for block_id, block_split_indices in enumerate(per_block_split_indices):
        (block_ref, meta) = blocks_with_metadata[block_id]
        block_row = meta.num_rows
        block_split_indices = _drop_empty_block_split(block_split_indices, block_row)
        if len(block_split_indices) == 0:
            # optimization: if no split is needed, we just need to add it to the
            # result
            all_blocks_split_results[block_id] = [(block_ref, meta)]
        else:
            # otherwise call split remote function.
            # num_returns: 1 metadata list + (len(indices) + 1) block pieces.
            object_refs = split_single_block.options(
                scheduling_strategy="SPREAD", num_returns=2 + len(block_split_indices)
            ).remote(
                block_id,
                block_ref,
                meta,
                block_split_indices,
            )
            per_block_split_metadata_futures.append(object_refs[0])
            per_block_split_block_refs.append(object_refs[1:])

            blocks_splitted.append(block_ref)

    if per_block_split_metadata_futures:
        # Only fetch the metadata; the block pieces stay in the object store.
        per_block_split_metadata = ray.get(per_block_split_metadata_futures)
        for (block_id, meta), block_refs in zip(
            per_block_split_metadata, per_block_split_block_refs
        ):
            assert len(meta) == len(block_refs)
            all_blocks_split_results[block_id] = zip(block_refs, meta)

    # We make a copy for the blocks that have been split, so the input blocks
    # can be cleared if they are owned by consumer (consumer-owned blocks will
    # only be consumed by the owner).
    if owned_by_consumer:
        for b in blocks_splitted:
            trace_deallocation(b, "split._split_all_blocks")
    else:
        # Record the (non-)deallocation for memory tracing without freeing.
        for b in blocks_splitted:
            trace_deallocation(b, "split._split_all_blocks", free=False)

    return itertools.chain.from_iterable(all_blocks_split_results)
213
+
214
+
215
def _generate_global_split_results(
    all_blocks_split_results: Iterable[Tuple[ObjectRef[Block], BlockMetadata]],
    global_split_sizes: List[int],
) -> Tuple[List[List[ObjectRef[Block]]], List[List[BlockMetadata]]]:
    """Reassemble per-block split pieces into the final global splits.

    Args:
        all_blocks_split_results: Iterator of (block ref, metadata) pieces in
            row order.
        global_split_sizes: Exact number of rows each output split must hold.

    Returns:
        Parallel lists of block refs and metadata, one inner list per split.
    """
    result_blocks = []
    result_metas = []

    for target_size in global_split_sizes:
        split_blocks = []
        split_meta = []
        accumulated = 0
        # Consume pieces until this split holds exactly `target_size` rows;
        # the upstream block splitting guarantees an exact fit.
        while accumulated < target_size:
            (block_ref, meta) = next(all_blocks_split_results)
            split_blocks.append(block_ref)
            split_meta.append(meta)
            accumulated += meta.num_rows
        assert accumulated == target_size
        result_blocks.append(split_blocks)
        result_metas.append(split_meta)

    return result_blocks, result_metas
245
+
246
+
247
def _split_at_indices(
    blocks_with_metadata: List[Tuple[ObjectRef[Block], BlockMetadata]],
    indices: List[int],
    owned_by_consumer: bool = True,
    block_rows: List[int] = None,
) -> Tuple[List[List[ObjectRef[Block]]], List[List[BlockMetadata]]]:
    """Split blocks at the provided indices.

    Args:
        blocks_with_metadata: Block futures to split, including the associated metadata.
        indices: The (global) indices at which to split the blocks.
        owned_by_consumer: Whether the provided blocks are owned by the consumer.
        block_rows: The number of rows for each block, in case it has already been
            computed.

    Returns:
        The block split futures and their metadata. If an index split is empty,
        the corresponding block split will be empty.
    """

    # We implement the split in 3 phases.
    # phase 1: calculate the per block split indices.
    blocks_with_metadata = list(blocks_with_metadata)
    if len(blocks_with_metadata) == 0:
        # n split indices always yield n + 1 (here all empty) splits.
        return ([[]] * (len(indices) + 1), [[]] * (len(indices) + 1))
    if block_rows is None:
        block_rows = _calculate_blocks_rows(blocks_with_metadata)
    # Clamp out-of-range indices, then localize them per input block.
    valid_indices = _generate_valid_indices(block_rows, indices)
    per_block_split_indices: List[List[int]] = _generate_per_block_split_indices(
        block_rows, valid_indices
    )

    # phase 2: split each block based on the indices from previous step.
    all_blocks_split_results: Iterable[
        Tuple[ObjectRef[Block], BlockMetadata]
    ] = _split_all_blocks(
        blocks_with_metadata, per_block_split_indices, owned_by_consumer
    )

    # phase 3: generate the final split.

    # first calculate the size for each split: consecutive differences over
    # [0, *valid_indices, total_rows].
    helper = [0] + valid_indices + [sum(block_rows)]
    split_sizes = [helper[i] - helper[i - 1] for i in range(1, len(helper))]

    return _generate_global_split_results(all_blocks_split_results, split_sizes)
293
+
294
+
295
def _get_num_rows(block: Block) -> int:
    """Return the number of rows contained in the provided block."""
    accessor = BlockAccessor.for_block(block)
    return accessor.num_rows()
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/stats.py ADDED
@@ -0,0 +1,1495 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import logging
3
+ import threading
4
+ import time
5
+ from contextlib import contextmanager
6
+ from dataclasses import dataclass
7
+ from typing import Any, Dict, List, Optional, Set, Tuple, Union
8
+ from uuid import uuid4
9
+
10
+ import numpy as np
11
+
12
+ import ray
13
+ from ray.actor import ActorHandle
14
+ from ray.data._internal.block_list import BlockList
15
+ from ray.data._internal.execution.interfaces.op_runtime_metrics import (
16
+ MetricsGroup,
17
+ OpRuntimeMetrics,
18
+ )
19
+ from ray.data._internal.util import capfirst
20
+ from ray.data.block import BlockMetadata
21
+ from ray.data.context import DataContext
22
+ from ray.util.annotations import DeveloperAPI
23
+ from ray.util.metrics import Gauge
24
+ from ray.util.scheduling_strategies import NodeAffinitySchedulingStrategy
25
+
26
+ logger = logging.getLogger(__name__)
27
+
28
+ STATS_ACTOR_NAME = "datasets_stats_actor"
29
+ STATS_ACTOR_NAMESPACE = "_dataset_stats_actor"
30
+
31
+
32
+ StatsDict = Dict[str, List[BlockMetadata]]
33
+
34
+
35
def fmt(seconds: float) -> str:
    """Format a duration in seconds as a human-readable string.

    Uses seconds above 1s, milliseconds above 1ms, and microseconds below.
    """
    if seconds > 1:
        return f"{round(seconds, 2)}s"
    if seconds > 0.001:
        return f"{round(seconds * 1000, 2)}ms"
    return f"{round(seconds * 1000 * 1000, 2)}us"
42
+
43
+
44
def leveled_indent(lvl: int = 0, spaces_per_indent: int = 3) -> str:
    """Return a string of ``lvl`` indents of ``spaces_per_indent`` spaces each.

    For example, ``leveled_indent(2, 3)`` returns six spaces.
    """
    return " " * (spaces_per_indent * lvl)
51
+
52
+
53
class Timer:
    """Tracks accumulated time (in seconds) plus min/max/avg statistics."""

    def __init__(self):
        self._value: float = 0
        self._min: float = float("inf")
        self._max: float = 0
        self._total_count: float = 0

    @contextmanager
    def timer(self) -> None:
        """Context manager that records the wall-clock time of its body."""
        started = time.perf_counter()
        try:
            yield
        finally:
            # Record even when the body raises.
            self.add(time.perf_counter() - started)

    def add(self, value: float) -> None:
        """Record one time sample and update the running statistics."""
        self._value += value
        self._min = min(self._min, value)
        self._max = max(self._max, value)
        self._total_count += 1

    def get(self) -> float:
        """Total accumulated time."""
        return self._value

    def min(self) -> float:
        """Smallest recorded sample (``inf`` when no samples)."""
        return self._min

    def max(self) -> float:
        """Largest recorded sample (0 when no samples)."""
        return self._max

    def avg(self) -> float:
        """Mean of the samples (``inf`` when no samples, avoiding /0)."""
        return self._value / self._total_count if self._total_count else float("inf")
89
+
90
+
91
class _DatasetStatsBuilder:
    """Helper class for building dataset stats.

    When this class is created, we record the start time. When build() is
    called with the final blocks of the new dataset, the time delta is
    saved as part of the stats.
    """

    def __init__(
        self,
        operator_name: str,
        parent: "DatasetStats",
        override_start_time: Optional[float],
    ):
        # Name of the operator whose execution is being timed.
        self.operator_name = operator_name
        # Stats of the upstream dataset this one derives from.
        self.parent = parent
        # Allow callers to backdate the start time; fall back to "now".
        self.start_time = override_start_time or time.perf_counter()

    def build_multioperator(self, metadata: StatsDict) -> "DatasetStats":
        """Build stats for an operator with multiple sub-stages.

        When there is more than one stage, the first entry is keyed by the
        full operator name plus the capitalized stage key; later entries use
        only the last ``->`` component of the operator name as prefix.
        """
        op_metadata = {}
        for i, (k, v) in enumerate(metadata.items()):
            capped_k = capfirst(k)
            if len(metadata) > 1:
                if i == 0:
                    op_metadata[self.operator_name + capped_k] = v
                else:
                    op_metadata[self.operator_name.split("->")[-1] + capped_k] = v
            else:
                # Single stage: key by the operator name alone.
                op_metadata[self.operator_name] = v
        stats = DatasetStats(
            metadata=op_metadata,
            parent=self.parent,
            base_name=self.operator_name,
        )
        stats.time_total_s = time.perf_counter() - self.start_time
        return stats

    def build(self, final_blocks: BlockList) -> "DatasetStats":
        """Build stats keyed by the single operator name, timing since init."""
        stats = DatasetStats(
            metadata={self.operator_name: final_blocks.get_metadata()},
            parent=self.parent,
        )
        stats.time_total_s = time.perf_counter() - self.start_time
        return stats
134
+
135
+
136
+ @ray.remote(num_cpus=0)
137
+ class _StatsActor:
138
+ """Actor holding stats for blocks created by LazyBlockList.
139
+
140
+ This actor is shared across all datasets created in the same cluster.
141
+ In order to cap memory usage, we set a max number of stats to keep
142
+ in the actor. When this limit is exceeded, the stats will be garbage
143
+ collected in FIFO order.
144
+
145
+ TODO(ekl) we should consider refactoring LazyBlockList so stats can be
146
+ extracted without using an out-of-band actor."""
147
+
148
+ def __init__(self, max_stats=1000):
149
+ # Mapping from uuid -> (task_id -> list of blocks statistics).
150
+ self.metadata = collections.defaultdict(dict)
151
+ self.last_time = {}
152
+ self.start_time = {}
153
+ self.max_stats = max_stats
154
+ self.fifo_queue = []
155
+
156
+ # Assign dataset uuids with a global counter.
157
+ self.next_dataset_id = 0
158
+ # Dataset metadata to be queried directly by DashboardHead api.
159
+ self.datasets: Dict[str, Any] = {}
160
+
161
+ # Ray Data dashboard metrics
162
+ # Everything is a gauge because we need to reset all of
163
+ # a dataset's metrics to 0 after each finishes execution.
164
+ op_tags_keys = ("dataset", "operator")
165
+
166
+ # TODO(scottjlee): move these overvie metrics as fields in a
167
+ # separate dataclass, similar to OpRuntimeMetrics.
168
+ self.spilled_bytes = Gauge(
169
+ "data_spilled_bytes",
170
+ description="""Bytes spilled by dataset operators.
171
+ DataContext.enable_get_object_locations_for_metrics
172
+ must be set to True to report this metric""",
173
+ tag_keys=op_tags_keys,
174
+ )
175
+ self.allocated_bytes = Gauge(
176
+ "data_allocated_bytes",
177
+ description="Bytes allocated by dataset operators",
178
+ tag_keys=op_tags_keys,
179
+ )
180
+ self.freed_bytes = Gauge(
181
+ "data_freed_bytes",
182
+ description="Bytes freed by dataset operators",
183
+ tag_keys=op_tags_keys,
184
+ )
185
+ self.current_bytes = Gauge(
186
+ "data_current_bytes",
187
+ description="Bytes currently in memory store used by dataset operators",
188
+ tag_keys=op_tags_keys,
189
+ )
190
+ self.cpu_usage_cores = Gauge(
191
+ "data_cpu_usage_cores",
192
+ description="CPUs allocated to dataset operators",
193
+ tag_keys=op_tags_keys,
194
+ )
195
+ self.gpu_usage_cores = Gauge(
196
+ "data_gpu_usage_cores",
197
+ description="GPUs allocated to dataset operators",
198
+ tag_keys=op_tags_keys,
199
+ )
200
+ self.output_bytes = Gauge(
201
+ "data_output_bytes",
202
+ description="Bytes outputted by dataset operators",
203
+ tag_keys=op_tags_keys,
204
+ )
205
+ self.output_rows = Gauge(
206
+ "data_output_rows",
207
+ description="Rows outputted by dataset operators",
208
+ tag_keys=op_tags_keys,
209
+ )
210
+
211
+ # === Metrics from OpRuntimeMetrics ===
212
+ # Inputs-related metrics
213
+ self.execution_metrics_inputs = (
214
+ self._create_prometheus_metrics_for_execution_metrics(
215
+ metrics_group=MetricsGroup.INPUTS,
216
+ tag_keys=op_tags_keys,
217
+ )
218
+ )
219
+
220
+ # Outputs-related metrics
221
+ self.execution_metrics_outputs = (
222
+ self._create_prometheus_metrics_for_execution_metrics(
223
+ metrics_group=MetricsGroup.OUTPUTS,
224
+ tag_keys=op_tags_keys,
225
+ )
226
+ )
227
+
228
+ # Task-related metrics
229
+ self.execution_metrics_tasks = (
230
+ self._create_prometheus_metrics_for_execution_metrics(
231
+ metrics_group=MetricsGroup.TASKS,
232
+ tag_keys=op_tags_keys,
233
+ )
234
+ )
235
+
236
+ # Object store memory-related metrics
237
+ self.execution_metrics_obj_store_memory = (
238
+ self._create_prometheus_metrics_for_execution_metrics(
239
+ metrics_group=MetricsGroup.OBJECT_STORE_MEMORY,
240
+ tag_keys=op_tags_keys,
241
+ )
242
+ )
243
+
244
+ # Miscellaneous metrics
245
+ self.execution_metrics_misc = (
246
+ self._create_prometheus_metrics_for_execution_metrics(
247
+ metrics_group=MetricsGroup.MISC,
248
+ tag_keys=op_tags_keys,
249
+ )
250
+ )
251
+
252
+ iter_tag_keys = ("dataset",)
253
+ self.iter_total_blocked_s = Gauge(
254
+ "data_iter_total_blocked_seconds",
255
+ description="Seconds user thread is blocked by iter_batches()",
256
+ tag_keys=iter_tag_keys,
257
+ )
258
+ self.iter_user_s = Gauge(
259
+ "data_iter_user_seconds",
260
+ description="Seconds spent in user code",
261
+ tag_keys=iter_tag_keys,
262
+ )
263
+ self.iter_initialize_s = Gauge(
264
+ "data_iter_initialize_seconds",
265
+ description="Seconds spent in iterator initialization code",
266
+ tag_keys=iter_tag_keys,
267
+ )
268
+
269
+ def _create_prometheus_metrics_for_execution_metrics(
270
+ self, metrics_group: MetricsGroup, tag_keys: Tuple[str, ...]
271
+ ) -> Dict[str, Gauge]:
272
+ metrics = {}
273
+ for metric in OpRuntimeMetrics.get_metrics():
274
+ if not metric.metrics_group == metrics_group:
275
+ continue
276
+ metric_name = f"data_{metric.name}"
277
+ metric_description = metric.description
278
+ metrics[metric.name] = Gauge(
279
+ metric_name,
280
+ description=metric_description,
281
+ tag_keys=tag_keys,
282
+ )
283
+ return metrics
284
+
285
+ def record_start(self, stats_uuid):
286
+ self.start_time[stats_uuid] = time.perf_counter()
287
+ self.fifo_queue.append(stats_uuid)
288
+ # Purge the oldest stats if the limit is exceeded.
289
+ if len(self.fifo_queue) > self.max_stats:
290
+ uuid = self.fifo_queue.pop(0)
291
+ if uuid in self.start_time:
292
+ del self.start_time[uuid]
293
+ if uuid in self.last_time:
294
+ del self.last_time[uuid]
295
+ if uuid in self.metadata:
296
+ del self.metadata[uuid]
297
+
298
+ def record_task(
299
+ self, stats_uuid: str, task_idx: int, blocks_metadata: List[BlockMetadata]
300
+ ):
301
+ # Null out the schema to keep the stats size small.
302
+ # TODO(chengsu): ideally schema should be null out on caller side.
303
+ for metadata in blocks_metadata:
304
+ metadata.schema = None
305
+ if stats_uuid in self.start_time:
306
+ self.metadata[stats_uuid][task_idx] = blocks_metadata
307
+ self.last_time[stats_uuid] = time.perf_counter()
308
+
309
+ def get(self, stats_uuid):
310
+ if stats_uuid not in self.metadata:
311
+ return {}, 0.0
312
+ return (
313
+ self.metadata[stats_uuid],
314
+ self.last_time[stats_uuid] - self.start_time[stats_uuid],
315
+ )
316
+
317
+ def _get_stats_dict_size(self):
318
+ return len(self.start_time), len(self.last_time), len(self.metadata)
319
+
320
+ def get_dataset_id(self):
321
+ dataset_id = str(self.next_dataset_id)
322
+ self.next_dataset_id += 1
323
+ return dataset_id
324
+
325
+ def update_metrics(self, execution_metrics, iteration_metrics):
326
+ for metrics in execution_metrics:
327
+ self.update_execution_metrics(*metrics)
328
+ for metrics in iteration_metrics:
329
+ self.update_iteration_metrics(*metrics)
330
+
331
+ def update_execution_metrics(
332
+ self,
333
+ dataset_tag: str,
334
+ op_metrics: List[Dict[str, Union[int, float]]],
335
+ operator_tags: List[str],
336
+ state: Dict[str, Any],
337
+ ):
338
+ for stats, operator_tag in zip(op_metrics, operator_tags):
339
+ tags = self._create_tags(dataset_tag, operator_tag)
340
+
341
+ self.spilled_bytes.set(stats.get("obj_store_mem_spilled", 0), tags)
342
+ self.freed_bytes.set(stats.get("obj_store_mem_freed", 0), tags)
343
+ self.current_bytes.set(stats.get("obj_store_mem_used", 0), tags)
344
+ self.output_bytes.set(stats.get("bytes_task_outputs_generated", 0), tags)
345
+ self.output_rows.set(stats.get("rows_task_outputs_generated", 0), tags)
346
+ self.cpu_usage_cores.set(stats.get("cpu_usage", 0), tags)
347
+ self.gpu_usage_cores.set(stats.get("gpu_usage", 0), tags)
348
+
349
+ for field_name, prom_metric in self.execution_metrics_inputs.items():
350
+ prom_metric.set(stats.get(field_name, 0), tags)
351
+
352
+ for field_name, prom_metric in self.execution_metrics_outputs.items():
353
+ prom_metric.set(stats.get(field_name, 0), tags)
354
+
355
+ for field_name, prom_metric in self.execution_metrics_tasks.items():
356
+ prom_metric.set(stats.get(field_name, 0), tags)
357
+
358
+ for (
359
+ field_name,
360
+ prom_metric,
361
+ ) in self.execution_metrics_obj_store_memory.items():
362
+ prom_metric.set(stats.get(field_name, 0), tags)
363
+
364
+ for field_name, prom_metric in self.execution_metrics_misc.items():
365
+ prom_metric.set(stats.get(field_name, 0), tags)
366
+
367
+ # This update is called from a dataset's executor,
368
+ # so all tags should contain the same dataset
369
+ self.update_dataset(dataset_tag, state)
370
+
371
+ def update_iteration_metrics(
372
+ self,
373
+ stats: "DatasetStats",
374
+ dataset_tag,
375
+ ):
376
+ tags = self._create_tags(dataset_tag)
377
+ self.iter_total_blocked_s.set(stats.iter_total_blocked_s.get(), tags)
378
+ self.iter_user_s.set(stats.iter_user_s.get(), tags)
379
+ self.iter_initialize_s.set(stats.iter_initialize_s.get(), tags)
380
+
381
+ def register_dataset(self, job_id: str, dataset_tag: str, operator_tags: List[str]):
382
+ self.datasets[dataset_tag] = {
383
+ "job_id": job_id,
384
+ "state": "RUNNING",
385
+ "progress": 0,
386
+ "total": 0,
387
+ "start_time": time.time(),
388
+ "end_time": None,
389
+ "operators": {
390
+ operator: {
391
+ "state": "RUNNING",
392
+ "progress": 0,
393
+ "total": 0,
394
+ }
395
+ for operator in operator_tags
396
+ },
397
+ }
398
+
399
+ def update_dataset(self, dataset_tag, state):
400
+ self.datasets[dataset_tag].update(state)
401
+
402
+ def get_datasets(self, job_id: Optional[str] = None):
403
+ if not job_id:
404
+ return self.datasets
405
+ return {k: v for k, v in self.datasets.items() if v["job_id"] == job_id}
406
+
407
+ def _create_tags(self, dataset_tag: str, operator_tag: Optional[str] = None):
408
+ tags = {"dataset": dataset_tag}
409
+ if operator_tag is not None:
410
+ tags["operator"] = operator_tag
411
+ return tags
412
+
413
+
414
# Creating/getting an actor from multiple threads is not safe.
# https://github.com/ray-project/ray/issues/41324
# Guards the _StatsActor.options(...).remote() call in
# _get_or_create_stats_actor below.
_stats_actor_lock: threading.RLock = threading.RLock()
+
418
+
419
def _get_or_create_stats_actor():
    """Return a handle to the detached, named _StatsActor singleton.

    Creates the actor on first use. Outside of Ray Client mode the actor is
    pinned (soft=False) to the driver's node so it fate-shares with the
    driver.
    """
    scheduling_strategy = DataContext.get_current().scheduling_strategy
    if not ray.util.client.ray.is_connected():
        # Pin the stats actor to the local node
        # so it fate-shares with the driver.
        local_node_id = ray.get_runtime_context().get_node_id()
        scheduling_strategy = NodeAffinitySchedulingStrategy(
            local_node_id,
            soft=False,
        )
    # Serialized: creating/getting a named actor concurrently is unsafe.
    with _stats_actor_lock:
        configured_actor_cls = _StatsActor.options(
            name=STATS_ACTOR_NAME,
            namespace=STATS_ACTOR_NAMESPACE,
            get_if_exists=True,
            lifetime="detached",
            scheduling_strategy=scheduling_strategy,
        )
        return configured_actor_cls.remote()
+
438
+
439
class _StatsManager:
    """A Class containing util functions that manage remote calls to _StatsActor.

    This class collects stats from execution and iteration codepaths and keeps
    track of the latest snapshot.

    An instance of this class runs a single background thread that periodically
    forwards the latest execution/iteration stats to the _StatsActor.

    This thread will terminate itself after being inactive (meaning that there are
    no active executors or iterators) for STATS_ACTOR_UPDATE_THREAD_INACTIVITY_LIMIT
    iterations. After terminating, a new thread will start if more calls are made
    to this class.
    """

    # Interval for making remote calls to the _StatsActor.
    STATS_ACTOR_UPDATE_INTERVAL_SECONDS = 5

    # After this many iterations of inactivity,
    # _StatsManager._update_thread will close itself.
    UPDATE_THREAD_INACTIVITY_LIMIT = 5

    def __init__(self):
        # Lazily get stats actor handle to avoid circular import.
        self._stats_actor_handle: Optional[ActorHandle] = None
        self._stats_actor_cluster_id = None

        # Last execution stats snapshots for all executing datasets
        self._last_execution_stats = {}
        # Last iteration stats snapshots for all running iterators.
        # FIX: annotation corrected to match what update_iteration_metrics
        # actually stores: (stats, dataset_tag) tuples.
        self._last_iteration_stats: Dict[
            str, Tuple["DatasetStats", str]
        ] = {}
        # Lock for updating stats snapshots
        self._stats_lock: threading.Lock = threading.Lock()

        # Background thread to make remote calls to _StatsActor
        self._update_thread: Optional[threading.Thread] = None
        self._update_thread_lock: threading.Lock = threading.Lock()

    def _stats_actor(self, create_if_not_exists=True) -> Optional[ActorHandle]:
        """Return the _StatsActor handle, refreshing it across cluster restarts.

        Args:
            create_if_not_exists: When True, create the actor if missing;
                otherwise return None if it cannot be found.

        Raises:
            RuntimeError: If the global Ray node is not initialized.
        """
        if ray._private.worker._global_node is None:
            raise RuntimeError("Global node is not initialized.")
        current_cluster_id = ray._private.worker._global_node.cluster_id
        if (
            self._stats_actor_handle is None
            or self._stats_actor_cluster_id != current_cluster_id
        ):
            # Cached handle missing, or stale because the cluster changed.
            if create_if_not_exists:
                self._stats_actor_handle = _get_or_create_stats_actor()
            else:
                try:
                    self._stats_actor_handle = ray.get_actor(
                        name=STATS_ACTOR_NAME, namespace=STATS_ACTOR_NAMESPACE
                    )
                except ValueError:
                    # Actor does not exist and we were asked not to create it.
                    return None
            self._stats_actor_cluster_id = current_cluster_id
        return self._stats_actor_handle

    def _start_thread_if_not_running(self):
        """Start the background update thread if it is not already running."""
        with self._update_thread_lock:
            if self._update_thread is None or not self._update_thread.is_alive():

                def _run_update_loop():
                    iter_stats_inactivity = 0
                    while True:
                        if self._last_iteration_stats or self._last_execution_stats:
                            try:
                                # Do not create _StatsActor if it doesn't exist because
                                # this thread can be running even after the cluster is
                                # shutdown. Creating an actor will automatically start
                                # a new cluster.
                                stats_actor = self._stats_actor(
                                    create_if_not_exists=False
                                )
                                if stats_actor is None:
                                    # FIX: sleep before retrying. The original
                                    # bare `continue` skipped the sleep at the
                                    # bottom of the loop and busy-spun on
                                    # ray.get_actor while the actor was gone.
                                    time.sleep(
                                        _StatsManager.STATS_ACTOR_UPDATE_INTERVAL_SECONDS  # noqa: E501
                                    )
                                    continue
                                stats_actor.update_metrics.remote(
                                    execution_metrics=list(
                                        self._last_execution_stats.values()
                                    ),
                                    iteration_metrics=list(
                                        self._last_iteration_stats.values()
                                    ),
                                )
                                iter_stats_inactivity = 0
                            except Exception:
                                logger.debug(
                                    "Error occurred during remote call to _StatsActor.",
                                    exc_info=True,
                                )
                                return
                        else:
                            iter_stats_inactivity += 1
                            if (
                                iter_stats_inactivity
                                >= _StatsManager.UPDATE_THREAD_INACTIVITY_LIMIT
                            ):
                                logger.debug(
                                    "Terminating StatsManager thread due to inactivity."
                                )
                                return
                        # FIX: reference the class (_StatsManager), not the
                        # module-level singleton (StatsManager), consistent
                        # with UPDATE_THREAD_INACTIVITY_LIMIT above.
                        time.sleep(_StatsManager.STATS_ACTOR_UPDATE_INTERVAL_SECONDS)

                self._update_thread = threading.Thread(
                    target=_run_update_loop, daemon=True
                )
                self._update_thread.start()

    # Execution methods

    def update_execution_metrics(
        self,
        dataset_tag: str,
        op_metrics: List[OpRuntimeMetrics],
        operator_tags: List[str],
        state: Dict[str, Any],
        force_update: bool = False,
    ):
        """Record the latest execution metrics snapshot for a dataset.

        Args:
            dataset_tag: Tag identifying the dataset.
            op_metrics: Per-operator runtime metrics (serialized via as_dict).
            operator_tags: Parallel list of operator tags.
            state: Dataset state dict forwarded to the dashboard entry.
            force_update: If True, push synchronously to the stats actor
                instead of caching for the background thread.
        """
        op_metrics_dicts = [metric.as_dict() for metric in op_metrics]
        args = (dataset_tag, op_metrics_dicts, operator_tags, state)
        if force_update:
            self._stats_actor().update_execution_metrics.remote(*args)
        else:
            with self._stats_lock:
                self._last_execution_stats[dataset_tag] = args
            self._start_thread_if_not_running()

    def clear_last_execution_stats(self, dataset_tag: str):
        """Drop a dataset's cached execution stats once it finishes.

        Marks the dataset as finished on the job page's Ray Data Overview.
        """
        with self._stats_lock:
            if dataset_tag in self._last_execution_stats:
                del self._last_execution_stats[dataset_tag]

    # Iteration methods

    def update_iteration_metrics(self, stats: "DatasetStats", dataset_tag: str):
        """Record the latest iteration stats snapshot for a dataset."""
        with self._stats_lock:
            self._last_iteration_stats[dataset_tag] = (stats, dataset_tag)
        self._start_thread_if_not_running()

    def clear_iteration_metrics(self, dataset_tag: str):
        """Drop a dataset's cached iteration stats.

        This gives the update thread a chance to terminate. Note we don't
        reset the actual metric values through the StatsActor since the value
        is essentially a counter value. See
        https://github.com/ray-project/ray/pull/48618 for more context.
        """
        with self._stats_lock:
            if dataset_tag in self._last_iteration_stats:
                del self._last_iteration_stats[dataset_tag]

    # Other methods

    def register_dataset_to_stats_actor(self, dataset_tag, operator_tags):
        """Register a dataset (and its operators) with the stats actor."""
        self._stats_actor().register_dataset.remote(
            ray.get_runtime_context().get_job_id(),
            dataset_tag,
            operator_tags,
        )

    def get_dataset_id_from_stats_actor(self) -> str:
        """Fetch a unique dataset id, falling back to a local uuid on failure."""
        try:
            return ray.get(self._stats_actor().get_dataset_id.remote())
        except Exception:
            # Getting dataset id from _StatsActor may fail, in this case
            # fall back to uuid4
            return uuid4().hex
+
610
+
611
# Module-level singleton through which all stats updates are funneled.
StatsManager = _StatsManager()
+
613
+
614
class DatasetStats:
    """Holds the execution times for a given Dataset.

    This object contains a reference to the parent Dataset's stats as well,
    but not the Dataset object itself, to allow its blocks to be dropped from
    memory."""

    def __init__(
        self,
        *,
        metadata: StatsDict,
        parent: Union[Optional["DatasetStats"], List["DatasetStats"]],
        needs_stats_actor: bool = False,
        stats_uuid: Optional[str] = None,
        base_name: Optional[str] = None,
    ):
        """Create dataset stats.

        Args:
            metadata: Dict of operators used to create this Dataset from the
                previous one. Typically one entry, e.g., {"map": [...]}.
            parent: Reference to parent Dataset's stats, or a list of parents
                if there are multiple.
            needs_stats_actor: Whether this Dataset's stats needs a stats actor for
                stats collection. This is currently only used for Datasets using a
                lazy datasource (i.e. a LazyBlockList).
            stats_uuid: The uuid for the stats, used to fetch the right stats
                from the stats actor.
            base_name: The name of the base operation for a multi-operator operation.
        """

        self.metadata: StatsDict = metadata
        # Normalize a single parent into a one-element list.
        if parent is not None and not isinstance(parent, list):
            parent = [parent]
        self.parents: List["DatasetStats"] = parent or []
        # Depth of this stats node: one more than the deepest parent.
        self.number: int = (
            0 if not self.parents else max(p.number for p in self.parents) + 1
        )
        self.base_name = base_name
        # TODO(ekl) deprecate and remove the notion of dataset UUID once we move
        # fully to streaming execution.
        self.dataset_uuid: str = "unknown_uuid"
        self.time_total_s: float = 0
        self.needs_stats_actor = needs_stats_actor
        self.stats_uuid = stats_uuid

        # Streaming executor stats
        self.streaming_exec_schedule_s: Timer = Timer()

        # Iteration stats, filled out if the user iterates over the dataset.
        self.iter_wait_s: Timer = Timer()
        self.iter_get_s: Timer = Timer()
        self.iter_next_batch_s: Timer = Timer()
        self.iter_format_batch_s: Timer = Timer()
        self.iter_collate_batch_s: Timer = Timer()
        self.iter_finalize_batch_s: Timer = Timer()
        self.iter_total_blocked_s: Timer = Timer()
        self.iter_user_s: Timer = Timer()
        self.iter_initialize_s: Timer = Timer()
        self.iter_total_s: Timer = Timer()
        self.extra_metrics = {}

        # Block fetch stats during iteration.
        # These are stats about locations of blocks when the iterator is trying to
        # consume them. The iteration performance will be affected depending on
        # whether the block is in the local object store of the node where the
        # iterator is running.
        # This serves as an indicator of block prefetching effectiveness.
        self.iter_blocks_local: int = 0
        self.iter_blocks_remote: int = 0
        self.iter_unknown_location: int = 0

        # Memory usage stats
        self.global_bytes_spilled: int = 0
        self.global_bytes_restored: int = 0
        self.dataset_bytes_spilled: int = 0

        # Streaming split coordinator stats (dataset level)
        self.streaming_split_coordinator_s: Timer = Timer()

    @property
    def stats_actor(self):
        # Lazily resolves (and, if needed, creates) the global stats actor.
        return _get_or_create_stats_actor()

    def child_builder(
        self, name: str, override_start_time: Optional[float] = None
    ) -> _DatasetStatsBuilder:
        """Start recording stats for an op of the given name (e.g., map)."""
        return _DatasetStatsBuilder(name, self, override_start_time)

    def to_summary(self) -> "DatasetStatsSummary":
        """Generate a `DatasetStatsSummary` object from the given `DatasetStats`
        object, which can be used to generate a summary string."""
        if self.needs_stats_actor:
            ac = self.stats_actor
            # TODO(chengsu): this is a super hack, clean it up.
            stats_map, self.time_total_s = ray.get(ac.get.remote(self.stats_uuid))
            # Only populate stats when stats from all read tasks are ready at
            # stats actor.
            if len(stats_map.items()) == len(self.metadata["Read"]):
                self.metadata["Read"] = []
                # Sorted by task index so block order is deterministic.
                for _, blocks_metadata in sorted(stats_map.items()):
                    self.metadata["Read"] += blocks_metadata

        operators_stats = []
        # Multiple metadata entries means the op is split into sub-operators.
        is_sub_operator = len(self.metadata) > 1
        for name, meta in self.metadata.items():
            operators_stats.append(
                OperatorStatsSummary.from_block_metadata(
                    name,
                    meta,
                    is_sub_operator=is_sub_operator,
                )
            )

        iter_stats = IterStatsSummary(
            self.iter_wait_s,
            self.iter_get_s,
            self.iter_next_batch_s,
            self.iter_format_batch_s,
            self.iter_collate_batch_s,
            self.iter_finalize_batch_s,
            self.iter_total_blocked_s,
            self.iter_user_s,
            self.iter_initialize_s,
            self.iter_total_s,
            self.streaming_split_coordinator_s,
            self.iter_blocks_local,
            self.iter_blocks_remote,
            self.iter_unknown_location,
        )
        # Summarize parents recursively.
        stats_summary_parents = []
        if self.parents is not None:
            stats_summary_parents = [p.to_summary() for p in self.parents]
        # NOTE(review): truthiness of a Timer decides whether the scheduling
        # time is reported — presumably falsy when nothing was recorded;
        # confirm against Timer's implementation.
        streaming_exec_schedule_s = (
            self.streaming_exec_schedule_s.get()
            if self.streaming_exec_schedule_s
            else 0
        )
        return DatasetStatsSummary(
            operators_stats,
            iter_stats,
            stats_summary_parents,
            self.number,
            self.dataset_uuid,
            self.time_total_s,
            self.base_name,
            self.extra_metrics,
            self.global_bytes_spilled,
            self.global_bytes_restored,
            self.dataset_bytes_spilled,
            streaming_exec_schedule_s,
        )

    def runtime_metrics(self) -> str:
        """Generate a string representing the runtime metrics of a Dataset. This is
        a high level summary of the time spent in Ray Data code broken down by operator.
        It also includes the time spent in the scheduler. Times are shown as the total
        time for each operator and percentages of time are shown as a fraction of the
        total time for the whole dataset."""
        return self.to_summary().runtime_metrics()
+
776
+
777
@DeveloperAPI
@dataclass
class DatasetStatsSummary:
    """Immutable summary of a `DatasetStats` object, used to render the
    human-readable stats string and runtime-metric breakdowns."""

    operators_stats: List["OperatorStatsSummary"]
    iter_stats: "IterStatsSummary"
    parents: List["DatasetStatsSummary"]
    number: int
    dataset_uuid: str
    time_total_s: float
    base_name: str
    extra_metrics: Dict[str, Any]
    global_bytes_spilled: int
    global_bytes_restored: int
    dataset_bytes_spilled: int
    streaming_exec_schedule_s: float

    def to_string(
        self,
        already_printed: Optional[Set[str]] = None,
        include_parent: bool = True,
        add_global_stats: bool = True,
    ) -> str:
        """Return a human-readable summary of this Dataset's stats.

        Args:
            already_printed: Set of operator IDs that have already had its stats printed
                out.
            include_parent: If true, also include parent stats summary; otherwise, only
                log stats of the latest operator.
            add_global_stats: If true, includes global stats to this summary.
        Returns:
            String with summary statistics for executing the Dataset.
        """
        if already_printed is None:
            already_printed = set()

        out = ""
        if self.parents and include_parent:
            # Parents are printed first, depth-first; global stats are added
            # only once, at the root call.
            for p in self.parents:
                parent_sum = p.to_string(already_printed, add_global_stats=False)
                if parent_sum:
                    out += parent_sum
                    out += "\n"
        operators_stats_summary = None
        if len(self.operators_stats) == 1:
            # Single operator: print it inline.
            operators_stats_summary = self.operators_stats[0]
            operator_name = operators_stats_summary.operator_name
            operator_uuid = self.dataset_uuid + operator_name
            out += "Operator {} {}: ".format(self.number, operator_name)
            if operator_uuid in already_printed:
                out += "[execution cached]\n"
            else:
                already_printed.add(operator_uuid)
                out += str(operators_stats_summary)
        elif len(self.operators_stats) > 1:
            # Multi-operator: print a header then each sub-operator.
            rounded_total = round(self.time_total_s, 2)
            if rounded_total <= 0:
                # Handle -0.0 case.
                rounded_total = 0
            out += "Operator {} {}: executed in {}s\n".format(
                self.number, self.base_name, rounded_total
            )
            for n, operators_stats_summary in enumerate(self.operators_stats):
                operator_name = operators_stats_summary.operator_name
                operator_uuid = self.dataset_uuid + operator_name
                out += "\n"
                out += "\tSuboperator {} {}: ".format(n, operator_name)
                if operator_uuid in already_printed:
                    out += "\t[execution cached]\n"
                else:
                    already_printed.add(operator_uuid)
                    out += str(operators_stats_summary)
        verbose_stats_logs = DataContext.get_current().verbose_stats_logs
        if verbose_stats_logs and self.extra_metrics:
            # Indent extra metrics under the sub-operator when applicable.
            indent = (
                "\t"
                if operators_stats_summary and operators_stats_summary.is_sub_operator
                else ""
            )
            out += indent
            out += "* Extra metrics: " + str(self.extra_metrics) + "\n"
        out += str(self.iter_stats)

        if len(self.operators_stats) > 0 and add_global_stats:
            mb_spilled = round(self.global_bytes_spilled / 1e6)
            mb_restored = round(self.global_bytes_restored / 1e6)
            if mb_spilled or mb_restored:
                out += "\nCluster memory:\n"
                out += "* Spilled to disk: {}MB\n".format(mb_spilled)
                out += "* Restored from disk: {}MB\n".format(mb_restored)

            dataset_mb_spilled = round(self.dataset_bytes_spilled / 1e6)
            if dataset_mb_spilled:
                out += "\nDataset memory:\n"
                out += "* Spilled to disk: {}MB\n".format(dataset_mb_spilled)

            # For throughput, we compute both an observed Ray Data dataset throughput
            # and an estimated single node dataset throughput.

            # The observed dataset throughput is computed by dividing the total number
            # of rows produced by the total wall time of the dataset (i.e. from start to
            # finish how long did the dataset take to be processed). With the recursive
            # nature of the DatasetStatsSummary, we use get_total_wall_time to determine
            # the total wall time (this finds the difference between the earliest start
            # and latest end for any block in any operator).

            # The estimated single node dataset throughput is computed by dividing the
            # total number of rows produced the sum of the wall times across all blocks
            # of all operators. This assumes that on a single node the work done would
            # be equivalent, with no concurrency.
            output_num_rows = self.operators_stats[-1].output_num_rows
            total_num_out_rows = output_num_rows["sum"] if output_num_rows else 0
            wall_time = self.get_total_wall_time()
            total_time_all_blocks = self.get_total_time_all_blocks()
            if total_num_out_rows and wall_time and total_time_all_blocks:
                out += "\n"
                out += "Dataset throughput:\n"
                out += (
                    "\t* Ray Data throughput:"
                    f" {total_num_out_rows / wall_time} "
                    "rows/s\n"
                )
                out += (
                    "\t* Estimated single node throughput:"
                    f" {total_num_out_rows / total_time_all_blocks} "
                    "rows/s\n"
                )
            if verbose_stats_logs and add_global_stats:
                out += "\n" + self.runtime_metrics()

        return out

    @staticmethod
    def _collect_dataset_stats_summaries(
        curr: "DatasetStatsSummary",
    ) -> List["DatasetStatsSummary"]:
        # Post-order traversal: ancestors first, `curr` last.
        summs = []
        # TODO: Do operators ever have multiple parents? Do we need to deduplicate?
        for p in curr.parents:
            if p and p.parents:
                summs.extend(DatasetStatsSummary._collect_dataset_stats_summaries(p))
        return summs + [curr]

    @staticmethod
    def _find_start_and_end(summ: "DatasetStatsSummary") -> Tuple[float, float]:
        # Earliest block start / latest block end across this summary's operators.
        earliest_start = min(ops.earliest_start_time for ops in summ.operators_stats)
        latest_end = max(ops.latest_end_time for ops in summ.operators_stats)
        return earliest_start, latest_end

    def runtime_metrics(self) -> str:
        """Render per-operator wall times as fractions of the dataset's total
        wall time, plus scheduling time and the total."""
        total_wall_time = self.get_total_wall_time()

        def fmt_line(name: str, time: float) -> str:
            # NOTE(review): divides by total_wall_time — presumably nonzero
            # whenever this is reached; confirm for empty datasets.
            return f"* {name}: {fmt(time)} ({time / total_wall_time * 100:.3f}%)\n"

        summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self)
        out = "Runtime Metrics:\n"
        for summ in summaries:
            if len(summ.operators_stats) > 0:
                earliest_start, latest_end = DatasetStatsSummary._find_start_and_end(
                    summ
                )
                op_total_time = latest_end - earliest_start
                out += fmt_line(summ.base_name, op_total_time)
        out += fmt_line("Scheduling", self.streaming_exec_schedule_s)
        out += fmt_line("Total", total_wall_time)
        return out

    def __repr__(self, level=0) -> str:
        # `level` controls nesting indentation for recursive child/parent repr.
        indent = leveled_indent(level)
        operators_stats = "\n".join(
            [ss.__repr__(level + 2) for ss in self.operators_stats]
        )
        parent_stats = "\n".join([ps.__repr__(level + 2) for ps in self.parents])
        extra_metrics = "\n".join(
            f"{leveled_indent(level + 2)}{k}: {v},"
            for k, v in self.extra_metrics.items()
        )

        # Handle formatting case for empty outputs.
        operators_stats = (
            f"\n{operators_stats},\n{indent}   " if operators_stats else ""
        )
        parent_stats = f"\n{parent_stats},\n{indent}   " if parent_stats else ""
        extra_metrics = f"\n{extra_metrics}\n{indent}   " if extra_metrics else ""
        return (
            f"{indent}DatasetStatsSummary(\n"
            f"{indent}   dataset_uuid={self.dataset_uuid},\n"
            f"{indent}   base_name={self.base_name},\n"
            f"{indent}   number={self.number},\n"
            f"{indent}   extra_metrics={{{extra_metrics}}},\n"
            f"{indent}   operators_stats=[{operators_stats}],\n"
            f"{indent}   iter_stats={self.iter_stats.__repr__(level+1)},\n"
            f"{indent}   global_bytes_spilled={self.global_bytes_spilled / 1e6}MB,\n"
            f"{indent}   global_bytes_restored={self.global_bytes_restored / 1e6}MB,\n"
            f"{indent}   dataset_bytes_spilled={self.dataset_bytes_spilled / 1e6}MB,\n"
            f"{indent}   parents=[{parent_stats}],\n"
            f"{indent})"
        )

    def get_total_wall_time(self) -> float:
        """Calculate the total wall time for the dataset, this is done by finding
        the earliest start time and latest end time for any block in any operator.
        The wall time is the difference of these two times.
        """
        start_ends = [
            DatasetStatsSummary._find_start_and_end(summ)
            for summ in DatasetStatsSummary._collect_dataset_stats_summaries(self)
            if len(summ.operators_stats) > 0
        ]
        if len(start_ends) == 0:
            return 0
        else:
            earliest_start = min(start_end[0] for start_end in start_ends)
            latest_end = max(start_end[1] for start_end in start_ends)
            return latest_end - earliest_start

    def get_total_time_all_blocks(self) -> float:
        """Calculate the sum of the wall times across all blocks of all operators."""
        summaries = DatasetStatsSummary._collect_dataset_stats_summaries(self)
        return sum(
            (
                sum(
                    ops.wall_time.get("sum", 0) if ops.wall_time else 0
                    for ops in summ.operators_stats
                )
            )
            for summ in summaries
        )

    def get_total_cpu_time(self) -> float:
        # Recursive: this summary's CPU time plus all ancestors'.
        parent_sum = sum(p.get_total_cpu_time() for p in self.parents)
        return parent_sum + sum(
            ss.cpu_time.get("sum", 0) for ss in self.operators_stats
        )

    def get_max_heap_memory(self) -> float:
        # Recursive max over this summary's operators and all ancestors.
        parent_memory = [p.get_max_heap_memory() for p in self.parents]
        parent_max = max(parent_memory) if parent_memory else 0
        if not self.operators_stats:
            return parent_max

        return max(
            parent_max,
            *[ss.memory.get("max", 0) for ss in self.operators_stats],
        )
+
1024
+
1025
@dataclass
class OperatorStatsSummary:
    """Aggregated execution statistics for a single Dataset operator.

    Holds block-level metrics (wall/CPU/UDF time, peak heap memory, output
    rows/bytes, task and node distribution) summarized across all blocks the
    operator produced, plus helpers to build and render the summary.
    """

    operator_name: str
    # Whether the operator associated with this OperatorStatsSummary object
    # is a suboperator
    is_sub_operator: bool
    # This is the total walltime of the entire operator, typically obtained from
    # `DatasetStats.time_total_s`. An important distinction is that this is the
    # overall runtime of the operator, pulled from the stats actor, whereas the
    # computed walltimes in `self.wall_time` are calculated on a operator level.
    time_total_s: float
    earliest_start_time: float
    latest_end_time: float
    # String summarizing high-level statistics from executing the operator
    block_execution_summary_str: str
    # The fields below are dicts with stats aggregated across blocks
    # processed in this operator. For example:
    # {"min": ..., "max": ..., "mean": ..., "sum": ...}
    wall_time: Optional[Dict[str, float]] = None
    cpu_time: Optional[Dict[str, float]] = None
    udf_time: Optional[Dict[str, float]] = None
    # memory: no "sum" stat
    memory: Optional[Dict[str, float]] = None
    output_num_rows: Optional[Dict[str, float]] = None
    output_size_bytes: Optional[Dict[str, float]] = None
    # node_count: "count" stat instead of "sum"
    node_count: Optional[Dict[str, float]] = None
    task_rows: Optional[Dict[str, float]] = None

    @classmethod
    def from_block_metadata(
        cls,
        operator_name: str,
        block_metas: List[BlockMetadata],
        is_sub_operator: bool,
    ) -> "OperatorStatsSummary":
        """Calculate the stats for a operator from a given list of blocks,
        and generates a `OperatorStatsSummary` object with the results.

        Args:
            block_metas: List of `BlockMetadata` to calculate stats of
            operator_name: Name of operator associated with `blocks`
            is_sub_operator: Whether this set of blocks belongs to a sub operator.
        Returns:
            A `OperatorStatsSummary` object initialized with the calculated statistics
        """
        # Blocks may carry no exec stats (e.g. metadata-only); skip those.
        exec_stats = [m.exec_stats for m in block_metas if m.exec_stats is not None]
        rounded_total = 0
        time_total_s = 0
        earliest_start_time, latest_end_time = 0, 0

        if exec_stats:
            # Calculate the total execution time of operator as
            # the difference between the latest end time and
            # the earliest start time of all blocks in the operator.
            earliest_start_time = min(s.start_time_s for s in exec_stats)
            latest_end_time = max(s.end_time_s for s in exec_stats)
            time_total_s = latest_end_time - earliest_start_time

        if is_sub_operator:
            exec_summary_str = "{} blocks produced\n".format(len(exec_stats))
        else:
            if exec_stats:
                rounded_total = round(time_total_s, 2)
                if rounded_total <= 0:
                    # Handle -0.0 case.
                    rounded_total = 0
                exec_summary_str = "{} blocks produced in {}s".format(
                    len(exec_stats), rounded_total
                )
            else:
                exec_summary_str = ""
            exec_summary_str += "\n"

        # Aggregate output row counts per task (task_idx -> total rows).
        task_rows = collections.defaultdict(int)
        for meta in block_metas:
            if meta.num_rows is not None and meta.exec_stats is not None:
                task_rows[meta.exec_stats.task_idx] += meta.num_rows
        task_rows_stats = None
        if len(task_rows) > 0:
            task_rows_stats = {
                "min": min(task_rows.values()),
                "max": max(task_rows.values()),
                "mean": int(np.mean(list(task_rows.values()))),
                "count": len(task_rows),
            }
            # Prefix the summary with the number of distinct tasks used.
            exec_summary_str = "{} tasks executed, {}".format(
                len(task_rows), exec_summary_str
            )

        wall_time_stats, cpu_stats, memory_stats, udf_stats = None, None, None, None
        if exec_stats:
            wall_time_stats = {
                "min": min([e.wall_time_s for e in exec_stats]),
                "max": max([e.wall_time_s for e in exec_stats]),
                "mean": np.mean([e.wall_time_s for e in exec_stats]),
                "sum": sum([e.wall_time_s for e in exec_stats]),
            }
            cpu_stats = {
                "min": min([e.cpu_time_s for e in exec_stats]),
                "max": max([e.cpu_time_s for e in exec_stats]),
                "mean": np.mean([e.cpu_time_s for e in exec_stats]),
                "sum": sum([e.cpu_time_s for e in exec_stats]),
            }

            # Peak RSS is reported per block in bytes; convert to MiB.
            memory_stats_mb = [
                round(e.max_rss_bytes / (1024 * 1024), 2) for e in exec_stats
            ]
            memory_stats = {
                "min": min(memory_stats_mb),
                "max": max(memory_stats_mb),
                "mean": int(np.mean(memory_stats_mb)),
            }

            udf_stats = {
                "min": min([e.udf_time_s for e in exec_stats]),
                "max": max([e.udf_time_s for e in exec_stats]),
                "mean": np.mean([e.udf_time_s for e in exec_stats]),
                "sum": sum([e.udf_time_s for e in exec_stats]),
            }

        output_num_rows_stats = None
        output_num_rows = [m.num_rows for m in block_metas if m.num_rows is not None]
        if output_num_rows:
            output_num_rows_stats = {
                "min": min(output_num_rows),
                "max": max(output_num_rows),
                "mean": int(np.mean(output_num_rows)),
                "sum": sum(output_num_rows),
            }

        output_size_bytes_stats = None
        output_size_bytes = [
            m.size_bytes for m in block_metas if m.size_bytes is not None
        ]
        if output_size_bytes:
            output_size_bytes_stats = {
                "min": min(output_size_bytes),
                "max": max(output_size_bytes),
                "mean": int(np.mean(output_size_bytes)),
                "sum": sum(output_size_bytes),
            }

        node_counts_stats = None
        if exec_stats:
            # Count distinct tasks per node to gauge task distribution.
            node_tasks = collections.defaultdict(set)
            for s in exec_stats:
                node_tasks[s.node_id].add(s.task_idx)

            node_counts = {node: len(tasks) for node, tasks in node_tasks.items()}
            node_counts_stats = {
                "min": min(node_counts.values()),
                "max": max(node_counts.values()),
                "mean": int(np.mean(list(node_counts.values()))),
                "count": len(node_counts),
            }

        return OperatorStatsSummary(
            operator_name=operator_name,
            is_sub_operator=is_sub_operator,
            time_total_s=time_total_s,
            earliest_start_time=earliest_start_time,
            latest_end_time=latest_end_time,
            block_execution_summary_str=exec_summary_str,
            wall_time=wall_time_stats,
            cpu_time=cpu_stats,
            udf_time=udf_stats,
            memory=memory_stats,
            output_num_rows=output_num_rows_stats,
            output_size_bytes=output_size_bytes_stats,
            node_count=node_counts_stats,
            task_rows=task_rows_stats,
        )

    def __str__(self) -> str:
        """For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from
        `OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string
        that summarizes operator execution statistics.

        Returns:
            String with summary statistics for executing the given operator.
        """
        # Sub-operators are rendered one tab deeper than their parent.
        indent = "\t" if self.is_sub_operator else ""
        out = self.block_execution_summary_str

        wall_time_stats = self.wall_time
        if wall_time_stats:
            out += indent
            out += "* Remote wall time: {} min, {} max, {} mean, {} total\n".format(
                fmt(wall_time_stats["min"]),
                fmt(wall_time_stats["max"]),
                fmt(wall_time_stats["mean"]),
                fmt(wall_time_stats["sum"]),
            )

        cpu_stats = self.cpu_time
        if cpu_stats:
            out += indent
            out += "* Remote cpu time: {} min, {} max, {} mean, {} total\n".format(
                fmt(cpu_stats["min"]),
                fmt(cpu_stats["max"]),
                fmt(cpu_stats["mean"]),
                fmt(cpu_stats["sum"]),
            )

        udf_stats = self.udf_time
        if udf_stats:
            out += indent
            out += "* UDF time: {} min, {} max, {} mean, {} total\n".format(
                fmt(udf_stats["min"]),
                fmt(udf_stats["max"]),
                fmt(udf_stats["mean"]),
                fmt(udf_stats["sum"]),
            )

        memory_stats = self.memory
        if memory_stats:
            out += indent
            out += "* Peak heap memory usage (MiB): {} min, {} max, {} mean\n".format(
                memory_stats["min"],
                memory_stats["max"],
                memory_stats["mean"],
            )

        output_num_rows_stats = self.output_num_rows
        if output_num_rows_stats:
            out += indent
            out += (
                "* Output num rows per block: {} min, {} max, {} mean, {} total\n"
            ).format(
                output_num_rows_stats["min"],
                output_num_rows_stats["max"],
                output_num_rows_stats["mean"],
                output_num_rows_stats["sum"],
            )

        output_size_bytes_stats = self.output_size_bytes
        if output_size_bytes_stats:
            out += indent
            out += (
                "* Output size bytes per block: {} min, {} max, {} mean, {} total\n"
            ).format(
                output_size_bytes_stats["min"],
                output_size_bytes_stats["max"],
                output_size_bytes_stats["mean"],
                output_size_bytes_stats["sum"],
            )

        task_rows = self.task_rows
        if task_rows:
            out += indent
            out += (
                "* Output rows per task: {} min, {} max, {} mean, {} tasks used\n"
            ).format(
                task_rows["min"],
                task_rows["max"],
                task_rows["mean"],
                task_rows["count"],
            )

        node_count_stats = self.node_count
        if node_count_stats:
            out += indent
            out += "* Tasks per node: {} min, {} max, {} mean; {} nodes used\n".format(
                node_count_stats["min"],
                node_count_stats["max"],
                node_count_stats["mean"],
                node_count_stats["count"],
            )
        if output_num_rows_stats and self.time_total_s and wall_time_stats:
            # For throughput, we compute both an observed Ray Data operator throughput
            # and an estimated single node operator throughput.

            # The observed Ray Data operator throughput is computed by dividing the
            # total number of rows produced by the wall time of the operator,
            # time_total_s.

            # The estimated single node operator throughput is computed by dividing the
            # total number of rows produced by the the sum of the wall times across all
            # blocks of the operator. This assumes that on a single node the work done
            # would be equivalent, with no concurrency.

            # NOTE(review): wall_time_stats["sum"] could be 0 even when the dict is
            # truthy, which would raise ZeroDivisionError below — confirm upstream
            # guarantees a nonzero sum whenever wall_time is populated.
            total_num_out_rows = output_num_rows_stats["sum"]
            out += indent
            out += "* Operator throughput:\n"
            out += (
                indent + "\t* Ray Data throughput:"
                f" {total_num_out_rows / self.time_total_s} "
                "rows/s\n"
            )
            out += (
                indent + "\t* Estimated single node throughput:"
                f" {total_num_out_rows / wall_time_stats['sum']} "
                "rows/s\n"
            )
        return out

    def __repr__(self, level=0) -> str:
        """For a given (pre-calculated) `OperatorStatsSummary` object (e.g. generated from
        `OperatorStatsSummary.from_block_metadata()`), returns a human-friendly string
        that summarizes operator execution statistics.

        Returns:
            String with summary statistics for executing the given operator.
        """
        indent = leveled_indent(level)
        indent += leveled_indent(1) if self.is_sub_operator else ""

        # Pre-format each stats dict for display; empty dicts render as None.
        wall_time_stats = {k: fmt(v) for k, v in (self.wall_time or {}).items()}
        cpu_stats = {k: fmt(v) for k, v in (self.cpu_time or {}).items()}
        memory_stats = {k: fmt(v) for k, v in (self.memory or {}).items()}
        output_num_rows_stats = {
            k: fmt(v) for k, v in (self.output_num_rows or {}).items()
        }
        output_size_bytes_stats = {
            k: fmt(v) for k, v in (self.output_size_bytes or {}).items()
        }
        # NOTE(review): "node_conut_stats" is a typo of "node_count_stats";
        # it is a local name only, so output is unaffected.
        node_conut_stats = {k: fmt(v) for k, v in (self.node_count or {}).items()}
        # NOTE(review): udf_time and task_rows fields are not included in this
        # repr — confirm whether the omission is intentional.
        out = (
            f"{indent}OperatorStatsSummary(\n"
            f"{indent} operator_name='{self.operator_name}',\n"
            f"{indent} is_suboperator={self.is_sub_operator},\n"
            f"{indent} time_total_s={fmt(self.time_total_s)},\n"
            # block_execution_summary_str already ends with \n
            f"{indent} block_execution_summary_str={self.block_execution_summary_str}"
            f"{indent} wall_time={wall_time_stats or None},\n"
            f"{indent} cpu_time={cpu_stats or None},\n"
            f"{indent} memory={memory_stats or None},\n"
            f"{indent} output_num_rows={output_num_rows_stats or None},\n"
            f"{indent} output_size_bytes={output_size_bytes_stats or None},\n"
            f"{indent} node_count={node_conut_stats or None},\n"
            f"{indent})"
        )
        return out
1358
+
1359
+
1360
@dataclass
class IterStatsSummary:
    """Summary of time spent in the various stages of Dataset iteration.

    All ``Timer`` fields accumulate seconds; counters track block locality.
    Rendered via :meth:`to_string` for user-facing stats output.
    """

    # Time spent in actor based prefetching, in seconds.
    wait_time: Timer
    # Time spent in `ray.get()`, in seconds
    get_time: Timer
    # Time spent in batch building, in seconds
    next_time: Timer
    # Time spent in `_format_batch_()`, in seconds
    format_time: Timer
    # Time spent in collate fn, in seconds
    collate_time: Timer
    # Time spent in finalize_fn, in seconds
    finalize_batch_time: Timer
    # Total time user thread is blocked by iter_batches
    block_time: Timer
    # Time spent in user code, in seconds
    user_time: Timer
    initialize_time: Timer
    # Total time taken by Dataset iterator, in seconds
    total_time: Timer
    # Time spent in streaming split coordinator
    streaming_split_coord_time: Timer
    # Num of blocks that are in local object store
    iter_blocks_local: int
    # Num of blocks that are in remote node and have to fetch locally
    iter_blocks_remote: int
    # Num of blocks with unknown locations
    iter_unknown_location: int

    def __str__(self) -> str:
        return self.to_string()

    def to_string(self) -> str:
        """Render the iteration-time breakdown as a human-readable report.

        Sections are only emitted when the corresponding timers/flags are
        non-zero, so an idle summary renders as an empty string.
        """
        out = ""
        # Only emit the breakdown when at least one timer recorded activity.
        if (
            self.block_time.get()
            or self.total_time.get()
            or self.get_time.get()
            or self.next_time.get()
            or self.format_time.get()
            or self.collate_time.get()
            or self.finalize_batch_time.get()
        ):
            out += "\nDataset iterator time breakdown:\n"
            if self.total_time.get():
                out += "* Total time overall: {}\n".format(fmt(self.total_time.get()))
                if self.initialize_time.get():
                    out += (
                        " * Total time in Ray Data iterator initialization code: "
                        "{}\n".format(fmt(self.initialize_time.get()))
                    )
                if self.block_time.get():
                    out += (
                        " * Total time user thread is blocked by Ray Data iter_batches: "
                        "{}\n".format(fmt(self.block_time.get()))
                    )
                if self.user_time.get():
                    out += " * Total execution time for user thread: {}\n".format(
                        fmt(self.user_time.get())
                    )
            out += (
                "* Batch iteration time breakdown (summed across prefetch threads):\n"
            )
            if self.get_time.get():
                out += " * In ray.get(): {} min, {} max, {} avg, {} total\n".format(
                    fmt(self.get_time.min()),
                    fmt(self.get_time.max()),
                    fmt(self.get_time.avg()),
                    fmt(self.get_time.get()),
                )
            if self.next_time.get():
                batch_creation_str = (
                    " * In batch creation: {} min, {} max, " "{} avg, {} total\n"
                )
                out += batch_creation_str.format(
                    fmt(self.next_time.min()),
                    fmt(self.next_time.max()),
                    fmt(self.next_time.avg()),
                    fmt(self.next_time.get()),
                )
            if self.format_time.get():
                format_str = (
                    " * In batch formatting: {} min, {} max, " "{} avg, {} total\n"
                )
                out += format_str.format(
                    fmt(self.format_time.min()),
                    fmt(self.format_time.max()),
                    fmt(self.format_time.avg()),
                    fmt(self.format_time.get()),
                )
            if self.collate_time.get():
                out += " * In collate_fn: {} min, {} max, {} avg, {} total\n".format(
                    fmt(self.collate_time.min()),
                    fmt(self.collate_time.max()),
                    fmt(self.collate_time.avg()),
                    fmt(self.collate_time.get()),
                )
            if self.finalize_batch_time.get():
                format_str = (
                    " * In host->device transfer: {} min, {} max, {} avg, {} total\n"
                )
                out += format_str.format(
                    fmt(self.finalize_batch_time.min()),
                    fmt(self.finalize_batch_time.max()),
                    fmt(self.finalize_batch_time.avg()),
                    fmt(self.finalize_batch_time.get()),
                )
        # Block locality stats are gated behind an opt-in metrics flag.
        if DataContext.get_current().enable_get_object_locations_for_metrics:
            out += "Block locations:\n"
            out += " * Num blocks local: {}\n".format(self.iter_blocks_local)
            out += " * Num blocks remote: {}\n".format(self.iter_blocks_remote)
            out += " * Num blocks unknown location: {}\n".format(
                self.iter_unknown_location
            )
        if self.streaming_split_coord_time.get() != 0:
            out += "Streaming split coordinator overhead time: "
            out += f"{fmt(self.streaming_split_coord_time.get())}\n"

        return out

    def __repr__(self, level=0) -> str:
        indent = leveled_indent(level)
        return (
            f"IterStatsSummary(\n"
            f"{indent} wait_time={fmt(self.wait_time.get()) or None},\n"
            f"{indent} get_time={fmt(self.get_time.get()) or None},\n"
            f"{indent} iter_blocks_local={self.iter_blocks_local or None},\n"
            f"{indent} iter_blocks_remote={self.iter_blocks_remote or None},\n"
            f"{indent} iter_unknown_location={self.iter_unknown_location or None},\n"
            f"{indent} next_time={fmt(self.next_time.get()) or None},\n"
            f"{indent} format_time={fmt(self.format_time.get()) or None},\n"
            f"{indent} user_time={fmt(self.user_time.get()) or None},\n"
            f"{indent} total_time={fmt(self.total_time.get()) or None},\n"
            f"{indent})"
        )
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/torch_iterable_dataset.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from torch.utils.data import IterableDataset
2
+
3
+
4
class TorchIterableDataset(IterableDataset):
    """Thin ``IterableDataset`` adapter around a zero-argument iterator factory.

    Each iteration pass calls ``generator_func`` again, so the dataset can be
    iterated multiple times as long as the factory yields a fresh iterator.
    """

    def __init__(self, generator_func):
        # Factory invoked once per __iter__ call to produce a new iterator.
        self.generator_func = generator_func

    def __iter__(self):
        yield from self.generator_func()
infer_4_47_1/lib/python3.10/site-packages/ray/data/_internal/util.py ADDED
@@ -0,0 +1,1091 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+ import logging
3
+ import os
4
+ import pathlib
5
+ import random
6
+ import sys
7
+ import threading
8
+ import time
9
+ import urllib.parse
10
+ from collections import deque
11
+ from types import ModuleType
12
+ from typing import (
13
+ TYPE_CHECKING,
14
+ Any,
15
+ Callable,
16
+ Iterable,
17
+ Iterator,
18
+ List,
19
+ Optional,
20
+ Tuple,
21
+ TypeVar,
22
+ Union,
23
+ )
24
+
25
+ import numpy as np
26
+
27
+ import ray
28
+ from ray._private.utils import _get_pyarrow_version
29
+ from ray.data.context import DEFAULT_READ_OP_MIN_NUM_BLOCKS, WARN_PREFIX, DataContext
30
+
31
+ if TYPE_CHECKING:
32
+ import pandas
33
+ import pyarrow
34
+
35
+ from ray.data._internal.compute import ComputeStrategy
36
+ from ray.data._internal.planner.exchange.sort_task_spec import SortKey
37
+ from ray.data.block import Block, BlockMetadata, UserDefinedFunction
38
+ from ray.data.datasource import Datasource, Reader
39
+ from ray.util.placement_group import PlacementGroup
40
+
41
+ logger = logging.getLogger(__name__)
42
+
43
+
44
KiB = 1024  # bytes
MiB = 1024 * KiB
GiB = 1024 * MiB


# NOTE: Make sure that these lower and upper bounds stay in sync with version
# constraints given in python/setup.py.
# Inclusive minimum pyarrow version.
MIN_PYARROW_VERSION = "6.0.1"
# Env var name; set to "1" to skip the pyarrow version check entirely.
RAY_DISABLE_PYARROW_VERSION_CHECK = "RAY_DISABLE_PYARROW_VERSION_CHECK"
# Module-level flag so _check_pyarrow_version only validates once per process.
_VERSION_VALIDATED = False
# Ray-specific URI schemes resolved by _resolve_custom_scheme().
_LOCAL_SCHEME = "local"
_EXAMPLE_SCHEME = "example"


# A lazily-imported module slot: None = not yet attempted,
# False = import failed, ModuleType = imported module.
LazyModule = Union[None, bool, ModuleType]
_pyarrow_dataset: LazyModule = None
61
+
62
+
63
+ class _NullSentinel:
64
+ """Sentinel value that sorts greater than any other value."""
65
+
66
+ def __eq__(self, other):
67
+ return isinstance(other, _NullSentinel)
68
+
69
+ def __lt__(self, other):
70
+ return False
71
+
72
+ def __le__(self, other):
73
+ return isinstance(other, _NullSentinel)
74
+
75
+ def __gt__(self, other):
76
+ return True
77
+
78
+ def __ge__(self, other):
79
+ return True
80
+
81
+
82
+ NULL_SENTINEL = _NullSentinel()
83
+
84
+
85
def _lazy_import_pyarrow_dataset() -> LazyModule:
    """Import ``pyarrow.dataset`` once and cache the outcome.

    Returns the module on success, or ``False`` if pyarrow is unavailable
    (cached so later calls skip the import attempt entirely).
    """
    global _pyarrow_dataset
    if _pyarrow_dataset is None:
        try:
            import pyarrow.dataset as pa_ds

            _pyarrow_dataset = pa_ds
        except ModuleNotFoundError:
            # Cache the failure so we won't keep retrying the import
            # on every call.
            _pyarrow_dataset = False
    return _pyarrow_dataset
95
+
96
+
97
def _check_pyarrow_version():
    """Check that pyarrow's version is within the supported bounds.

    Raises:
        ImportError: If the installed pyarrow is older than
            ``MIN_PYARROW_VERSION``.

    The check runs at most once per process (tracked via the module-level
    ``_VERSION_VALIDATED`` flag) and can be disabled entirely by setting the
    ``RAY_DISABLE_PYARROW_VERSION_CHECK`` environment variable to "1".
    """
    global _VERSION_VALIDATED

    if not _VERSION_VALIDATED:
        if os.environ.get(RAY_DISABLE_PYARROW_VERSION_CHECK, "0") == "1":
            _VERSION_VALIDATED = True
            return

        version = _get_pyarrow_version()
        if version is not None:
            from packaging.version import parse as parse_version

            if parse_version(version) < parse_version(MIN_PYARROW_VERSION):
                raise ImportError(
                    f"Dataset requires pyarrow >= {MIN_PYARROW_VERSION}, but "
                    f"{version} is installed. Reinstall with "
                    f'`pip install -U "pyarrow"`. '
                    "If you want to disable this pyarrow version check, set the "
                    f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1."
                )
        else:
            # Version could not be determined (e.g. pyarrow vendored by another
            # package); warn instead of failing hard.
            logger.warning(
                "You are using the 'pyarrow' module, but the exact version is unknown "
                "(possibly carried as an internal component by another module). Please "
                f"make sure you are using pyarrow >= {MIN_PYARROW_VERSION} to ensure "
                "compatibility with Ray Dataset. "
                "If you want to disable this pyarrow version check, set the "
                f"environment variable {RAY_DISABLE_PYARROW_VERSION_CHECK}=1."
            )
        _VERSION_VALIDATED = True
128
+
129
+
130
def _autodetect_parallelism(
    parallelism: int,
    target_max_block_size: int,
    ctx: DataContext,
    datasource_or_legacy_reader: Optional[Union["Datasource", "Reader"]] = None,
    mem_size: Optional[int] = None,
    placement_group: Optional["PlacementGroup"] = None,
    avail_cpus: Optional[int] = None,
) -> Tuple[int, str, Optional[int]]:
    """Returns parallelism to use and the min safe parallelism to avoid OOMs.

    This detects parallelism using the following heuristics, applied in order:

    1) We start with the default value of 200. This can be overridden by
       setting the `read_op_min_num_blocks` attribute of
       :class:`~ray.data.context.DataContext`.
    2) Min block size. If the parallelism would make blocks smaller than this
       threshold, the parallelism is reduced to avoid the overhead of tiny blocks.
    3) Max block size. If the parallelism would make blocks larger than this
       threshold, the parallelism is increased to avoid OOMs during processing.
    4) Available CPUs. If the parallelism cannot make use of all the available
       CPUs in the cluster, the parallelism is increased until it can.

    Args:
        parallelism: The user-requested parallelism, or -1 for auto-detection.
        target_max_block_size: The target max block size to
            produce. We pass this separately from the
            DatasetContext because it may be set per-op instead of
            per-Dataset.
        ctx: The current Dataset context to use for configs.
        datasource_or_legacy_reader: The datasource or legacy reader, to be used for
            data size estimation.
        mem_size: If passed, then used to compute the parallelism according to
            target_max_block_size.
        placement_group: The placement group that this Dataset
            will execute inside, if any.
        avail_cpus: Override avail cpus detection (for testing only).

    Returns:
        Tuple of detected parallelism (only if -1 was specified), the reason
        for the detected parallelism (only if -1 was specified), and the estimated
        inmemory size of the dataset.
    """
    min_safe_parallelism = 1
    max_reasonable_parallelism = sys.maxsize
    # Fall back to the datasource's own size estimate when none was given.
    if mem_size is None and datasource_or_legacy_reader:
        mem_size = datasource_or_legacy_reader.estimate_inmemory_data_size()
    if mem_size is not None and not np.isnan(mem_size):
        # Lower bound: blocks no larger than target_max_block_size (OOM safety).
        min_safe_parallelism = max(1, int(mem_size / target_max_block_size))
        # Upper bound: blocks no smaller than target_min_block_size.
        max_reasonable_parallelism = max(1, int(mem_size / ctx.target_min_block_size))

    reason = ""
    if parallelism < 0:
        if parallelism != -1:
            raise ValueError("`parallelism` must either be -1 or a positive integer.")

        # Back-compat: honor the deprecated min_parallelism setting if the user
        # set it but left read_op_min_num_blocks at its default.
        if (
            ctx.min_parallelism is not None
            and ctx.min_parallelism != DEFAULT_READ_OP_MIN_NUM_BLOCKS
            and ctx.read_op_min_num_blocks == DEFAULT_READ_OP_MIN_NUM_BLOCKS
        ):
            logger.warning(
                "``DataContext.min_parallelism`` is deprecated in Ray 2.10. "
                "Please specify ``DataContext.read_op_min_num_blocks`` instead."
            )
            ctx.read_op_min_num_blocks = ctx.min_parallelism

        # Start with 2x the number of cores as a baseline, with a min floor.
        if placement_group is None:
            placement_group = ray.util.get_current_placement_group()
        avail_cpus = avail_cpus or _estimate_avail_cpus(placement_group)
        parallelism = max(
            min(ctx.read_op_min_num_blocks, max_reasonable_parallelism),
            min_safe_parallelism,
            avail_cpus * 2,
        )

        # Report which of the heuristics ended up determining the value.
        if parallelism == ctx.read_op_min_num_blocks:
            reason = (
                "DataContext.get_current().read_op_min_num_blocks="
                f"{ctx.read_op_min_num_blocks}"
            )
        elif parallelism == max_reasonable_parallelism:
            reason = (
                "output blocks of size at least "
                "DataContext.get_current().target_min_block_size="
                f"{ctx.target_min_block_size / (1024 * 1024)}MiB"
            )
        elif parallelism == min_safe_parallelism:
            reason = (
                "output blocks of size at most "
                "DataContext.get_current().target_max_block_size="
                f"{ctx.target_max_block_size / (1024 * 1024)}MiB"
            )
        else:
            reason = (
                "parallelism at least twice the available number "
                f"of CPUs ({avail_cpus})"
            )

        logger.debug(
            f"Autodetected parallelism={parallelism} based on "
            f"estimated_available_cpus={avail_cpus} and "
            f"estimated_data_size={mem_size}."
        )

    return parallelism, reason, mem_size
237
+
238
+
239
def _estimate_avail_cpus(cur_pg: Optional["PlacementGroup"]) -> int:
    """Estimate the CPU parallelism available to this Dataset.

    Outside a placement group this is simply the cluster's CPU count. Inside
    one, the estimate is scaled by how large the placement group is relative
    to the whole cluster, so parallel trials don't over-subscribe it.

    Args:
        cur_pg: The current placement group, if any.

    Returns:
        Estimated number of usable CPU slots.
    """
    cluster_cpus = int(ray.cluster_resources().get("CPU", 1))
    cluster_gpus = int(ray.cluster_resources().get("GPU", 0))

    if not cur_pg:
        # Not inside a placement group: the whole cluster is fair game.
        return cluster_cpus

    # Inside a placement group we shouldn't assume the entire cluster's
    # resources are ours. Estimate an upper bound from the fraction of the
    # cluster each bundle occupies (by its dominant resource).
    pg_cpus = 0
    for bundle in cur_pg.bundle_specs:
        cpu_share = bundle.get("CPU", 0) / max(1, cluster_cpus)
        gpu_share = bundle.get("GPU", 0) / max(1, cluster_gpus)
        dominant_share = max(cpu_share, gpu_share)
        # Over-parallelize by up to 2x — over-estimating is preferable to
        # under-estimating here.
        pg_cpus += 2 * int(dominant_share * cluster_cpus)

    return min(cluster_cpus, pg_cpus)
271
+
272
+
273
def _estimate_available_parallelism() -> int:
    """Estimate the CPU parallelism available to this Dataset, accounting for
    the current placement group (if any)."""
    return _estimate_avail_cpus(ray.util.get_current_placement_group())
278
+
279
+
280
def _warn_on_high_parallelism(requested_parallelism, num_read_tasks):
    """Warn when an explicitly requested parallelism vastly exceeds the
    cluster's CPU slots (>=5000 tasks and >4x available CPUs), since excessive
    task creation slows the read phase."""
    available_cpu_slots = ray.available_resources().get("CPU", 1)
    # Only warn for an explicit user request, and only past both thresholds.
    if not requested_parallelism:
        return
    if num_read_tasks <= available_cpu_slots * 4:
        return
    if num_read_tasks < 5000:
        return
    logger.warning(
        f"{WARN_PREFIX} The requested parallelism of {requested_parallelism} "
        "is more than 4x the number of available CPU slots in the cluster of "
        f"{available_cpu_slots}. This can "
        "lead to slowdowns during the data reading phase due to excessive "
        "task creation. Reduce the parallelism to match with the available "
        "CPU slots in the cluster, or set parallelism to -1 for Ray Data "
        "to automatically determine the parallelism. "
        "You can ignore this message if the cluster is expected to autoscale."
    )
297
+
298
+
299
+ def _check_import(obj, *, module: str, package: str) -> None:
300
+ """Check if a required dependency is installed.
301
+
302
+ If `module` can't be imported, this function raises an `ImportError` instructing
303
+ the user to install `package` from PyPI.
304
+
305
+ Args:
306
+ obj: The object that has a dependency.
307
+ module: The name of the module to import.
308
+ package: The name of the package on PyPI.
309
+ """
310
+ try:
311
+ importlib.import_module(module)
312
+ except ImportError:
313
+ raise ImportError(
314
+ f"`{obj.__class__.__name__}` depends on '{package}', but '{package}' "
315
+ f"couldn't be imported. You can install '{package}' by running `pip "
316
+ f"install {package}`."
317
+ )
318
+
319
+
320
def _resolve_custom_scheme(path: str) -> str:
    """Returns the resolved path if the given path follows a Ray-specific custom
    scheme. Othewise, returns the path unchanged.

    The supported custom schemes are: "local", "example".
    """
    parsed = urllib.parse.urlparse(path)
    if parsed.scheme == _LOCAL_SCHEME:
        # "local://host/path" -> plain filesystem path.
        return parsed.netloc + parsed.path
    if parsed.scheme == _EXAMPLE_SCHEME:
        # "example://name" -> bundled example-data directory.
        data_root = pathlib.Path(__file__).parent.parent / "examples" / "data"
        resolved = data_root / (parsed.netloc + parsed.path)
        return str(resolved.resolve())
    return path
334
+
335
+
336
def _is_local_scheme(paths: Union[str, List[str]]) -> bool:
    """Returns True if the given paths are in local scheme.
    Note: The paths must be in same scheme, i.e. it's invalid and
    will raise error if paths are mixed with different schemes.
    """
    # Normalize and validate the input shape first.
    if isinstance(paths, str):
        paths = [paths]
    if isinstance(paths, pathlib.Path):
        paths = [str(paths)]
    elif not isinstance(paths, list) or any(not isinstance(p, str) for p in paths):
        raise ValueError("paths must be a path string or a list of path strings.")
    elif len(paths) == 0:
        raise ValueError("Must provide at least one path.")

    local_count = sum(
        1 for path in paths if urllib.parse.urlparse(path).scheme == _LOCAL_SCHEME
    )
    # Mixing local and non-local schemes is ambiguous — reject it.
    if 0 < local_count < len(paths):
        raise ValueError(
            "The paths must all be local-scheme or not local-scheme, "
            f"but found mixed {paths}"
        )
    return local_count == len(paths)
356
+
357
+
358
+ def _truncated_repr(obj: Any) -> str:
359
+ """Utility to return a truncated object representation for error messages."""
360
+ msg = str(obj)
361
+ if len(msg) > 200:
362
+ msg = msg[:200] + "..."
363
+ return msg
364
+
365
+
366
def _insert_doc_at_pattern(
    obj,
    *,
    message: str,
    pattern: str,
    insert_after: bool = True,
    directive: Optional[str] = None,
    skip_matches: int = 0,
) -> None:
    """Splice ``message`` into ``obj.__doc__`` adjacent to ``pattern``.

    The docstring is split at an occurrence of ``pattern`` (after skipping
    ``skip_matches`` earlier occurrences) and ``message`` is inserted after
    (or before) it, re-indented to match the surrounding lines and optionally
    wrapped in an RST directive. ``obj.__doc__`` is rewritten in place;
    nothing is returned.

    Args:
        obj: Object whose ``__doc__`` is rewritten (e.g. a function or class).
        message: Single-line text to insert; must not contain newlines.
        pattern: Substring to anchor the insertion at. An empty string with
            ``insert_after=True`` appends to the end of the docstring.
        insert_after: If True, insert after the matched pattern; otherwise
            insert before the start of the line containing the match.
        directive: Optional RST directive name (e.g. "note") to wrap the
            message in.
        skip_matches: Number of pattern occurrences to consume before the
            insertion point.

    Raises:
        ValueError: If ``message`` contains a newline, or ``pattern`` is not
            found after the requested number of skips.
    """
    if "\n" in message:
        raise ValueError(
            "message shouldn't contain any newlines, since this function will insert "
            f"its own linebreaks when text wrapping: {message}"
        )

    # NOTE(review): assumes ``obj.__doc__`` is not None — an undocumented
    # object raises AttributeError here; confirm callers always pass
    # documented objects.
    doc = obj.__doc__.strip()
    if not doc:
        doc = ""

    if pattern == "" and insert_after:
        # Empty pattern + insert_after means that we want to append the message to the
        # end of the docstring.
        head = doc
        tail = ""
    else:
        tail = doc
        i = tail.find(pattern)
        skip_matches_left = skip_matches
        # Walk successive matches; the while-else raises if the pattern (or
        # enough occurrences of it) is never found.
        while i != -1:
            if insert_after:
                # Set offset to the first character after the pattern.
                offset = i + len(pattern)
            else:
                # Set offset to the first character in the matched line.
                offset = tail[:i].rfind("\n") + 1
            head = tail[:offset]
            tail = tail[offset:]
            skip_matches_left -= 1
            if skip_matches_left <= 0:
                break
            elif not insert_after:
                # Move past the found pattern, since we're skipping it.
                tail = tail[i - offset + len(pattern) :]
            i = tail.find(pattern)
        else:
            raise ValueError(
                f"Pattern {pattern} not found after {skip_matches} skips in docstring "
                f"{doc}"
            )
    # Get indentation of the to-be-inserted text.
    after_lines = list(filter(bool, tail.splitlines()))
    if len(after_lines) > 0:
        lines = after_lines
    else:
        # Nothing after the insertion point: borrow the indent of the last
        # non-empty line before it.
        lines = list(filter(bool, reversed(head.splitlines())))
    # Should always have at least one non-empty line in the docstring.
    assert len(lines) > 0
    indent = " " * (len(lines[0]) - len(lines[0].lstrip()))
    # Handle directive.
    message = message.strip("\n")
    if directive is not None:
        # Wrap the message in ``.. <directive>::`` with a 4-space hanging indent.
        base = f"{indent}.. {directive}::\n"
        message = message.replace("\n", "\n" + indent + " " * 4)
        message = base + indent + " " * 4 + message
    else:
        message = indent + message.replace("\n", "\n" + indent)
    # Add two blank lines before/after message, if necessary.
    if insert_after ^ (pattern == "\n\n"):
        # Only two blank lines before message if:
        # 1. Inserting message after pattern and pattern is not two blank lines.
        # 2. Inserting message before pattern and pattern is two blank lines.
        message = "\n\n" + message
    if (not insert_after) ^ (pattern == "\n\n"):
        # Only two blank lines after message if:
        # 1. Inserting message before pattern and pattern is not two blank lines.
        # 2. Inserting message after pattern and pattern is two blank lines.
        message = message + "\n\n"

    # Insert message before/after pattern.
    parts = [head, message, tail]
    # Build new docstring.
    obj.__doc__ = "".join(parts)
448
+
449
+
450
def _consumption_api(
    if_more_than_read: bool = False,
    datasource_metadata: Optional[str] = None,
    extra_condition: Optional[str] = None,
    delegate: Optional[str] = None,
    pattern="Examples:",
    insert_after=False,
):
    """Annotate the function with an indication that it's a consumption API, and that it
    will trigger Dataset execution.
    """
    execution_note = (
        " will trigger execution of the lazy transformations performed on "
        "this dataset."
    )
    if delegate:
        message = delegate + execution_note
    elif not if_more_than_read:
        message = "This operation" + execution_note
    else:
        # Assemble the conditional phrasing piece by piece.
        parts = ["If this dataset consists of more than a read, "]
        if datasource_metadata is not None:
            parts.append(
                f"or if the {datasource_metadata} can't be determined from the "
                "metadata provided by the datasource, "
            )
        if extra_condition is not None:
            parts.append(extra_condition + ", ")
        parts.append("then this operation" + execution_note)
        message = "".join(parts)

    def wrap(obj):
        # Inject the note into the decorated object's docstring.
        _insert_doc_at_pattern(
            obj,
            message=message,
            pattern=pattern,
            insert_after=insert_after,
            directive="note",
        )
        return obj

    return wrap
491
+
492
+
493
def ConsumptionAPI(*args, **kwargs):
    """Annotate the function with an indication that it's a consumption API, and that it
    will trigger Dataset execution.
    """
    # Bare usage (``@ConsumptionAPI``): decorate directly with defaults.
    if len(args) == 1 and not kwargs and callable(args[0]):
        return _consumption_api()(args[0])
    # Parameterized usage (``@ConsumptionAPI(...)``): return a decorator.
    return _consumption_api(*args, **kwargs)
500
+
501
+
502
def _all_to_all_api(*args, **kwargs):
    """Annotate the function with an indication that it's a all to all API, and that it
    is an operation that requires all inputs to be materialized in-memory to execute.
    """

    def decorate(obj):
        # Inject the materialization note into the decorated object's docstring.
        _insert_doc_at_pattern(
            obj,
            message=(
                "This operation requires all inputs to be "
                "materialized in object store for it to execute."
            ),
            pattern="Examples:",
            insert_after=False,
            directive="note",
        )
        return obj

    return decorate
521
+
522
+
523
def AllToAllAPI(*args, **kwargs):
    """Annotate the function with an indication that it's a all to all API, and that it
    is an operation that requires all inputs to be materialized in-memory to execute.
    """
    # This should only be used as a decorator for dataset methods.
    assert len(args) == 1 and not kwargs and callable(args[0])
    decorated = args[0]
    return _all_to_all_api()(decorated)
530
+
531
+
532
+ def _split_list(arr: List[Any], num_splits: int) -> List[List[Any]]:
533
+ """Split the list into `num_splits` lists.
534
+
535
+ The splits will be even if the `num_splits` divides the length of list, otherwise
536
+ the remainder (suppose it's R) will be allocated to the first R splits (one for
537
+ each).
538
+ This is the same as numpy.array_split(). The reason we make this a separate
539
+ implementation is to allow the heterogeneity in the elements in the list.
540
+ """
541
+ assert num_splits > 0
542
+ q, r = divmod(len(arr), num_splits)
543
+ splits = [
544
+ arr[i * q + min(i, r) : (i + 1) * q + min(i + 1, r)] for i in range(num_splits)
545
+ ]
546
+ return splits
547
+
548
+
549
def get_compute_strategy(
    fn: "UserDefinedFunction",
    fn_constructor_args: Optional[Iterable[Any]] = None,
    compute: Optional[Union[str, "ComputeStrategy"]] = None,
    concurrency: Optional[Union[int, Tuple[int, int]]] = None,
) -> "ComputeStrategy":
    """Get `ComputeStrategy` based on the function or class, and concurrency
    information.

    Args:
        fn: The function or generator to apply to a record batch, or a class type
            that can be instantiated to create such a callable.
        fn_constructor_args: Positional arguments to pass to ``fn``'s constructor.
        compute: Either "tasks" (default) to use Ray Tasks or an
            :class:`~ray.data.ActorPoolStrategy` to use an autoscaling actor pool.
        concurrency: The number of Ray workers to use concurrently.

    Returns:
        The `ComputeStrategy` for execution.

    Raises:
        ValueError: If ``fn_constructor_args`` is given for a non-class ``fn``,
            if ``compute`` is inconsistent with the kind of ``fn``, or if
            ``concurrency`` is malformed or missing for a callable class.
    """
    # Lazily import these objects to avoid circular imports.
    from ray.data._internal.compute import ActorPoolStrategy, TaskPoolStrategy
    from ray.data.block import CallableClass

    if isinstance(fn, CallableClass):
        is_callable_class = True
    else:
        # TODO(chengsu): disallow object that is not a function. For example,
        # An object instance of class often indicates a bug in user code.
        is_callable_class = False
        # Constructor args only make sense when ``fn`` will be instantiated.
        if fn_constructor_args is not None:
            raise ValueError(
                "``fn_constructor_args`` can only be specified if providing a "
                f"callable class instance for ``fn``, but got: {fn}."
            )

    if compute is not None:
        # Legacy code path to support `compute` argument.
        logger.warning(
            "The argument ``compute`` is deprecated in Ray 2.9. Please specify "
            "argument ``concurrency`` instead. For more information, see "
            "https://docs.ray.io/en/master/data/transforming-data.html#"
            "stateful-transforms."
        )
        # A callable class needs actors; a plain function needs tasks. Reject
        # mismatched combinations before honoring the legacy strategy.
        if is_callable_class and (
            compute == "tasks" or isinstance(compute, TaskPoolStrategy)
        ):
            raise ValueError(
                "``compute`` must specify an actor compute strategy when using a "
                f"callable class, but got: {compute}. For example, use "
                "``compute=ray.data.ActorPoolStrategy(size=n)``."
            )
        elif not is_callable_class and (
            compute == "actors" or isinstance(compute, ActorPoolStrategy)
        ):
            raise ValueError(
                f"``compute`` is specified as the actor compute strategy: {compute}, "
                f"but ``fn`` is not a callable class: {fn}. Pass a callable class or "
                "use the default ``compute`` strategy."
            )
        # Validation passed: return the legacy strategy as-is.
        return compute
    elif concurrency is not None:
        if isinstance(concurrency, tuple):
            # A (min, max) tuple configures an autoscaling actor pool.
            if (
                len(concurrency) == 2
                and isinstance(concurrency[0], int)
                and isinstance(concurrency[1], int)
            ):
                if is_callable_class:
                    return ActorPoolStrategy(
                        min_size=concurrency[0], max_size=concurrency[1]
                    )
                else:
                    raise ValueError(
                        "``concurrency`` is set as a tuple of integers, but ``fn`` "
                        f"is not a callable class: {fn}. Use ``concurrency=n`` to "
                        "control maximum number of workers to use."
                    )
            else:
                raise ValueError(
                    "``concurrency`` is expected to be set as a tuple of "
                    f"integers, but got: {concurrency}."
                )
        elif isinstance(concurrency, int):
            # A single int sizes either an actor pool or a task pool,
            # depending on the kind of ``fn``.
            if is_callable_class:
                return ActorPoolStrategy(size=concurrency)
            else:
                return TaskPoolStrategy(size=concurrency)
        else:
            raise ValueError(
                "``concurrency`` is expected to be set as an integer or a "
                f"tuple of integers, but got: {concurrency}."
            )
    else:
        # Neither ``compute`` nor ``concurrency`` was given.
        if is_callable_class:
            raise ValueError(
                "``concurrency`` must be specified when using a callable class. "
                "For example, use ``concurrency=n`` for a pool of ``n`` workers."
            )
        else:
            return TaskPoolStrategy()
650
+
651
+
652
def capfirst(s: str) -> str:
    """Capitalize the first letter of a string.

    Unlike ``str.capitalize``, the rest of the string is left untouched.

    Args:
        s: String to capitalize. May be empty.

    Returns:
        The string with its first character upper-cased, or the empty string
        if ``s`` is empty.
    """
    # ``s[:1]`` (instead of ``s[0]``) avoids an IndexError on empty input.
    return s[:1].upper() + s[1:]
662
+
663
+
664
def capitalize(s: str) -> str:
    """Capitalize a string, removing '_' and keeping camelcase.

    Each ``_``-separated segment has its first character upper-cased and the
    segments are concatenated. Empty segments (from leading, trailing, or
    doubled underscores) are skipped instead of raising.

    Args:
        s: String to capitalize.

    Returns:
        Capitalized string with no underscores.
    """
    # Slicing (``seg[:1]``) tolerates empty segments, which indexing would not.
    return "".join(seg[:1].upper() + seg[1:] for seg in s.split("_"))
674
+
675
+
676
def pandas_df_to_arrow_block(df: "pandas.DataFrame") -> "Block":
    """Convert a pandas DataFrame into an Arrow block plus its metadata."""
    from ray.data.block import BlockAccessor, BlockExecStats

    arrow_block = BlockAccessor.for_block(df).to_arrow()
    # NOTE: the stats timer starts only after the Arrow conversion, so the
    # recorded wall time covers just the metadata computation (as before).
    stats_builder = BlockExecStats.builder()
    metadata = BlockAccessor.for_block(arrow_block).get_metadata(
        exec_stats=stats_builder.build()
    )
    return arrow_block, metadata
685
+
686
+
687
def ndarray_to_block(ndarray: np.ndarray, ctx: DataContext) -> "Block":
    """Wrap a NumPy ndarray in a single-column ("data") block with metadata."""
    from ray.data.block import BlockAccessor, BlockExecStats

    # Propagate the caller's DataContext into this (possibly remote) process.
    DataContext._set_current(ctx)

    stats_builder = BlockExecStats.builder()
    new_block = BlockAccessor.batch_to_block({"data": ndarray})
    meta = BlockAccessor.for_block(new_block).get_metadata(
        exec_stats=stats_builder.build()
    )
    return new_block, meta
696
+
697
+
698
def get_table_block_metadata(
    table: Union["pyarrow.Table", "pandas.DataFrame"]
) -> "BlockMetadata":
    """Compute ``BlockMetadata`` for an in-memory Arrow table or pandas DataFrame."""
    from ray.data.block import BlockAccessor, BlockExecStats

    exec_stats = BlockExecStats.builder().build()
    return BlockAccessor.for_block(table).get_metadata(exec_stats=exec_stats)
705
+
706
+
707
def unify_block_metadata_schema(
    metadata: List["BlockMetadata"],
) -> Optional[Union[type, "pyarrow.lib.Schema"]]:
    """For the input list of BlockMetadata, return a unified schema of the
    corresponding blocks. If the metadata have no valid schema, returns None.
    """
    # Some blocks could be empty, in which case we cannot get their schema.
    # TODO(ekl) validate schema is the same across different blocks.
    from ray.data._internal.arrow_ops.transform_pyarrow import unify_schemas

    # Collect schemas from blocks that have one and are non-empty (or whose
    # row count is unknown).
    candidate_schemas = [
        m.schema
        for m in metadata
        if m.schema is not None and (m.num_rows is None or m.num_rows > 0)
    ]
    if not candidate_schemas:
        return None

    # Check valid pyarrow installation before attempting schema unification.
    try:
        import pyarrow as pa
    except ImportError:
        pa = None
    # If every schema is a PyArrow schema, unify them. Otherwise the schemas
    # are simple types (e.g. int), so return the first one.
    if pa is not None and all(isinstance(s, pa.Schema) for s in candidate_schemas):
        return unify_schemas(candidate_schemas)
    return candidate_schemas[0]
736
+
737
+
738
def find_partition_index(
    table: Union["pyarrow.Table", "pandas.DataFrame"],
    desired: List[Any],
    sort_key: "SortKey",
) -> int:
    """Return the row index at which the ``desired`` key falls in a sorted block.

    Performs a multi-column binary search over ``table`` (which must already
    be sorted by ``sort_key``), narrowing the candidate range ``[left, right)``
    one key column at a time. For ascending order the left insertion point is
    returned; for descending order the right one.
    """
    columns = sort_key.get_columns()
    # NOTE(review): ``descending`` is compared with ``is True`` below, so it is
    # presumably a single bool covering all key columns — confirm with SortKey.
    descending = sort_key.get_descending()

    left, right = 0, len(table)
    for i in range(len(desired)):
        # Range already collapsed to a single point: no need to look further.
        if left == right:
            return right
        col_name = columns[i]
        # Only the still-candidate slice of the column is materialized.
        col_vals = table[col_name].to_numpy()[left:right]
        desired_val = desired[i]

        # Handle null values - replace them with sentinel values
        # (NULL_SENTINEL is the module-level stand-in used so that nulls
        # compare consistently during the search).
        if desired_val is None:
            desired_val = NULL_SENTINEL

        # Replace None/NaN values in col_vals with sentinel
        null_mask = col_vals == None  # noqa: E711
        if null_mask.any():
            col_vals = col_vals.copy()  # Make a copy to avoid modifying original
            col_vals[null_mask] = NULL_SENTINEL

        prevleft = left
        if descending is True:
            # ``col_vals`` is sorted descending. Passing a reversed ``sorter``
            # lets searchsorted treat it as ascending; the resulting position
            # is then mirrored back (``len - pos``) into descending coordinates.
            # Note the side swap: "right" in ascending space yields the new
            # ``left`` bound in descending space, and vice versa.
            left = prevleft + (
                len(col_vals)
                - np.searchsorted(
                    col_vals,
                    desired_val,
                    side="right",
                    sorter=np.arange(len(col_vals) - 1, -1, -1),
                )
            )
            right = prevleft + (
                len(col_vals)
                - np.searchsorted(
                    col_vals,
                    desired_val,
                    side="left",
                    sorter=np.arange(len(col_vals) - 1, -1, -1),
                )
            )
        else:
            # Ascending: standard bisect-left / bisect-right bounds, shifted
            # back into absolute row coordinates via ``prevleft``.
            left = prevleft + np.searchsorted(col_vals, desired_val, side="left")
            right = prevleft + np.searchsorted(col_vals, desired_val, side="right")
    return right if descending is True else left
788
+
789
+
790
def find_partitions(table, boundaries, sort_key):
    """Slice a sorted block into partitions delimited by ``boundaries``.

    For each boundary value, count the number of items that are less
    than it. Since the block is sorted, these counts partition the items
    such that boundaries[i] <= x < boundaries[i + 1] for each x in
    partition[i]. If `descending` is true, `boundaries` would also be
    in descending order and we only need to count the number of items
    *greater than* the boundary value instead.
    """
    cut_points = [
        find_partition_index(table, boundary, sort_key) for boundary in boundaries
    ]

    partitions = []
    prev = 0
    for cut in cut_points:
        partitions.append(table[prev:cut])
        prev = cut
    # The final partition holds everything at or past the last boundary.
    partitions.append(table[prev:])
    return partitions
809
+
810
+
811
def get_attribute_from_class_name(class_name: str) -> Any:
    """Get Python attribute from the provided class name.

    The caller needs to make sure the provided class name includes
    full module name, and can be imported successfully.
    """
    from importlib import import_module

    parts = class_name.split(".")
    # At least "module.attribute" is required.
    if len(parts) < 2:
        raise ValueError(f"Cannot create object from {class_name}.")

    module_path = ".".join(parts[:-1])
    attr_name = parts[-1]
    return getattr(import_module(module_path), attr_name)
826
+
827
+
828
class Queue:
    """A thread-safe queue implementation for multiple producers and consumers.

    Provide `release()` to exit producer threads cooperatively for resource release.
    """

    def __init__(self, queue_size: int):
        # The queue shared across multiple producer threads.
        self._queue = deque()
        # The boolean variable to indicate whether producer threads should exit.
        self._threads_exit = False
        # The semaphore for producer threads to put item into queue.
        # Its count tracks the remaining free capacity (initially ``queue_size``).
        self._producer_semaphore = threading.Semaphore(queue_size)
        # The semaphore for consumer threads to get item from queue.
        # Its count tracks the number of items currently available.
        self._consumer_semaphore = threading.Semaphore(0)
        # The mutex lock to guard access of `self._queue` and `self._threads_exit`.
        self._mutex = threading.Lock()

    def put(self, item: Any) -> bool:
        """Put an item into the queue.

        Block if necessary until a free slot is available in queue.
        This method is called by producer threads.

        Returns:
            True if the caller thread should exit immediately.
        """
        self._producer_semaphore.acquire()
        with self._mutex:
            if self._threads_exit:
                # ``release()`` was called: drop the item and tell the
                # producer to stop. The consumer semaphore is deliberately
                # not released because nothing was enqueued.
                return True
            else:
                self._queue.append(item)
        self._consumer_semaphore.release()
        return False

    def get(self) -> Any:
        """Remove and return an item from the queue.

        Block if necessary until an item is available in queue.
        This method is called by consumer threads.
        """
        self._consumer_semaphore.acquire()
        with self._mutex:
            next_item = self._queue.popleft()
        # Free one slot for a blocked producer.
        self._producer_semaphore.release()
        return next_item

    def release(self, num_threads: int):
        """Release `num_threads` of producers so they would exit cooperatively."""
        with self._mutex:
            self._threads_exit = True
        for _ in range(num_threads):
            # NOTE: After Python 3.9+, Semaphore.release(n) can be used to
            # release all threads at once.
            self._producer_semaphore.release()

    def qsize(self):
        """Return the size of the queue."""
        with self._mutex:
            return len(self._queue)
889
+
890
+
891
# Element types threaded through ``make_async_gen``: ``T`` for inputs pulled
# from the base iterator, ``U`` for outputs produced by ``fn``.
T = TypeVar("T")
U = TypeVar("U")
893
+
894
+
895
def make_async_gen(
    base_iterator: Iterator[T],
    fn: Callable[[Iterator[T]], Iterator[U]],
    num_workers: int = 1,
) -> Iterator[U]:
    """Returns a new iterator with elements fetched from the base_iterator
    in an async fashion using a threadpool.

    Each thread in the threadpool will fetch data from the base_iterator in a
    thread-safe fashion, and apply the provided `fn` computation concurrently.

    Args:
        base_iterator: The iterator to asynchronously fetch from.
        fn: The function to run on the input iterator.
        num_workers: The number of threads to use in the threadpool. Defaults to 1.

    Returns:
        An iterator with the same elements as outputted from `fn`.
    """

    if num_workers < 1:
        raise ValueError("Size of threadpool must be at least 1.")

    # Use a lock to fetch from the base_iterator in a thread-safe fashion.
    def convert_to_threadsafe_iterator(base_iterator: Iterator[T]) -> Iterator[T]:
        class ThreadSafeIterator:
            # Serializes concurrent ``next()`` calls on the wrapped iterator.
            def __init__(self, it):
                self.lock = threading.Lock()
                self.it = it

            def __next__(self):
                with self.lock:
                    return next(self.it)

            def __iter__(self):
                return self

        return ThreadSafeIterator(base_iterator)

    thread_safe_generator = convert_to_threadsafe_iterator(base_iterator)

    # Marker object a worker enqueues when its ``fn`` iterator is exhausted.
    class Sentinel:
        def __init__(self, thread_index: int):
            self.thread_index = thread_index

    # Size-1 queue: each worker blocks until the consumer takes its item,
    # bounding the memory held by in-flight results.
    output_queue = Queue(1)

    # Because pulling from the base iterator cannot happen concurrently,
    # we must execute the expensive computation in a separate step which
    # can be parallelized via a threadpool.
    def execute_computation(thread_index: int):
        try:
            for item in fn(thread_safe_generator):
                if output_queue.put(item):
                    # Return early when it's instructed to do so.
                    return
            output_queue.put(Sentinel(thread_index))
        except Exception as e:
            # Forward the exception to the consumer via the queue.
            output_queue.put(e)

    # Use separate threads to produce output batches.
    threads = [
        threading.Thread(target=execute_computation, args=(i,), daemon=True)
        for i in range(num_workers)
    ]

    for thread in threads:
        thread.start()

    # Use main thread to consume output batches.
    num_threads_finished = 0
    try:
        while True:
            next_item = output_queue.get()
            if isinstance(next_item, Exception):
                # Re-raise worker exceptions in the consuming thread.
                raise next_item
            if isinstance(next_item, Sentinel):
                num_threads_finished += 1
            else:
                yield next_item
            if num_threads_finished >= num_workers:
                break
    finally:
        # Cooperatively exit all producer threads.
        # This is to avoid these daemon threads hanging there with holding batches in
        # memory, which can cause GRAM OOM easily. This can happen when caller breaks
        # in the middle of iteration.
        num_threads_alive = num_workers - num_threads_finished
        if num_threads_alive > 0:
            output_queue.release(num_threads_alive)
985
+
986
+
987
def call_with_retry(
    f: Callable[[], Any],
    description: str,
    *,
    match: Optional[List[str]] = None,
    max_attempts: int = 10,
    max_backoff_s: int = 32,
) -> Any:
    """Retry a function with exponential backoff.

    Args:
        f: The function to retry.
        match: A list of strings to match in the exception message. If ``None``,
            any error is retried.
        description: An imperative description of the function being retried.
            For example, "open the file".
        max_attempts: The maximum number of attempts to retry.
        max_backoff_s: The maximum number of seconds to backoff.
    """
    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."

    for attempt in range(max_attempts):
        try:
            return f()
        except Exception as e:
            retryable = match is None or any(pattern in str(e) for pattern in match)
            out_of_attempts = attempt + 1 >= max_attempts
            if not retryable or out_of_attempts:
                raise e from None
            # Binary exponential backoff with random jitter.
            backoff = min(2 ** (attempt + 1), max_backoff_s) * random.random()
            logger.debug(
                f"Retrying {attempt+1} attempts to {description} after "
                f"{backoff} seconds."
            )
            time.sleep(backoff)
1024
+
1025
+
1026
def iterate_with_retry(
    iterable_factory: Callable[[], Iterable],
    description: str,
    *,
    match: Optional[List[str]] = None,
    max_attempts: int = 10,
    max_backoff_s: int = 32,
) -> Any:
    """Iterate through an iterable with retries.

    If the iterable raises an exception, this function recreates and re-iterates
    through the iterable, while skipping the items that have already been yielded.

    Args:
        iterable_factory: A no-argument function that creates the iterable.
        match: A list of strings to match in the exception message. If ``None``,
            any error is retried.
        description: An imperative description of the function being retried.
            For example, "open the file".
        max_attempts: The maximum number of attempts to retry.
        max_backoff_s: The maximum number of seconds to backoff.
    """
    assert max_attempts >= 1, f"`max_attempts` must be positive. Got {max_attempts}."

    num_items_yielded = 0
    for attempt in range(max_attempts):
        try:
            iterable = iterable_factory()
            # BUG FIX: the item index must be a distinct variable; previously
            # the inner ``enumerate`` loop reused the attempt counter ``i``,
            # so the retry-budget check below compared against the index of
            # the last item instead of the attempt number.
            for item_index, item in enumerate(iterable):
                if item_index < num_items_yielded:
                    # Skip items that have already been yielded.
                    continue

                num_items_yielded += 1
                yield item
            return
        except Exception as e:
            is_retryable = match is None or any(
                pattern in str(e) for pattern in match
            )
            if is_retryable and attempt + 1 < max_attempts:
                # Retry with binary exponential backoff with random jitter.
                backoff = min((2 ** (attempt + 1)), max_backoff_s) * random.random()
                logger.debug(
                    f"Retrying {attempt+1} attempts to {description} after "
                    f"{backoff} seconds."
                )
                time.sleep(backoff)
            else:
                raise e from None
1075
+
1076
+
1077
def create_dataset_tag(dataset_name: Optional[str], *args):
    """Build a tag string: the dataset name (or "dataset") joined with args by "_"."""
    parts = [dataset_name or "dataset"]
    parts.extend(str(arg) for arg in args)
    return "_".join(parts)
1082
+
1083
+
1084
def convert_bytes_to_human_readable_str(num_bytes: int) -> str:
    """Format a byte count as a rounded KB/MB/GB string (decimal units)."""
    for threshold, unit in ((1e9, "GB"), (1e6, "MB")):
        if num_bytes >= threshold:
            return f"{round(num_bytes / threshold)}{unit}"
    # Anything below 1 MB is expressed in KB.
    return f"{round(num_bytes / 1e3)}KB"
infer_4_47_1/lib/python3.10/site-packages/ray/data/block.py ADDED
@@ -0,0 +1,477 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import logging
3
+ import os
4
+ import time
5
+ from dataclasses import dataclass
6
+ from enum import Enum
7
+ from typing import (
8
+ TYPE_CHECKING,
9
+ Any,
10
+ Callable,
11
+ Dict,
12
+ Iterator,
13
+ List,
14
+ Literal,
15
+ Optional,
16
+ Protocol,
17
+ Tuple,
18
+ TypeVar,
19
+ Union,
20
+ )
21
+
22
+ import numpy as np
23
+
24
+ import ray
25
+ from ray import DynamicObjectRefGenerator
26
+ from ray.air.util.tensor_extensions.arrow import ArrowConversionError
27
+ from ray.data._internal.util import _check_pyarrow_version, _truncated_repr
28
+ from ray.types import ObjectRef
29
+ from ray.util import log_once
30
+ from ray.util.annotations import DeveloperAPI
31
+
32
+ import psutil
33
+
34
+ try:
35
+ import resource
36
+ except ImportError:
37
+ resource = None
38
+
39
+ if TYPE_CHECKING:
40
+ import pandas
41
+ import pyarrow
42
+
43
+ from ray.data._internal.block_builder import BlockBuilder
44
+ from ray.data._internal.planner.exchange.sort_task_spec import SortKey
45
+ from ray.data.aggregate import AggregateFn
46
+
47
+
48
# Input (contravariant) and output (covariant) element types for UDFs.
T = TypeVar("T", contravariant=True)
U = TypeVar("U", covariant=True)

# Key and aggregated-value type variables. Presumably used by
# grouping/aggregation interfaces — names only; verify at use sites.
KeyType = TypeVar("KeyType")
AggType = TypeVar("AggType")


# Represents a batch of records to be stored in the Ray object store.
#
# Block data can be accessed in a uniform way via ``BlockAccessors`` like
# ``ArrowBlockAccessor``.
Block = Union["pyarrow.Table", "pandas.DataFrame"]


logger = logging.getLogger(__name__)
63
+
64
+
65
@DeveloperAPI
class BlockType(Enum):
    """The in-memory representation of a :class:`Block`: an Arrow table or a
    pandas DataFrame."""

    ARROW = "arrow"
    PANDAS = "pandas"
69
+
70
+
71
# User-facing data batch type. This is the data type for data that is supplied to and
# returned from batch UDFs.
DataBatch = Union["pyarrow.Table", "pandas.DataFrame", Dict[str, np.ndarray]]

# User-facing data column type. This is the data type for data that is supplied to and
# returned from column UDFs.
DataBatchColumn = Union[
    "pyarrow.ChunkedArray", "pyarrow.Array", "pandas.Series", np.ndarray
]


# A class type that implements __call__.
# NOTE(review): aliased to plain ``type``, so any class matches this alias —
# callability is not enforced here.
CallableClass = type
84
+
85
+
86
class _CallableClassProtocol(Protocol[T, U]):
    """Structural type for instances of callable-class UDFs.

    An instance maps one input of type ``T`` to either a single output of
    type ``U`` or an iterator of such outputs.
    """

    def __call__(self, __arg: T) -> Union[U, Iterator[U]]:
        ...
89
+
90
+
91
# A user defined function passed to map, map_batches, etc.
UserDefinedFunction = Union[
    Callable[[T], U],
    Callable[[T], Iterator[U]],
    "_CallableClassProtocol",
]

# A list of block references pending computation by a single task. For example,
# this may be the output of a task reading a file.
BlockPartition = List[Tuple[ObjectRef[Block], "BlockMetadata"]]

# The metadata that describes the output of a BlockPartition. This has the
# same type as the metadata that describes each block in the partition.
BlockPartitionMetadata = List["BlockMetadata"]

# TODO(ekl/chengsu): replace this with just
# `DynamicObjectRefGenerator` once block splitting
# is on by default. When block splitting is off, the type is a plain block.
MaybeBlockPartition = Union[Block, DynamicObjectRefGenerator]

# Batch formats accepted by ``_apply_batch_format`` below.
# NOTE(review): the meaning of the ``None`` entry is not visible here —
# confirm against the batch-format consumers.
VALID_BATCH_FORMATS = ["pandas", "pyarrow", "numpy", None]
DEFAULT_BATCH_FORMAT = "numpy"
113
+
114
+
115
def _apply_batch_format(given_batch_format: Optional[str]) -> str:
    """Normalize the "default" sentinel to the default batch format and
    validate the result against ``VALID_BATCH_FORMATS``."""
    batch_format = given_batch_format
    if batch_format == "default":
        batch_format = DEFAULT_BATCH_FORMAT
    if batch_format not in VALID_BATCH_FORMATS:
        raise ValueError(
            f"The given batch format {batch_format} isn't allowed (must be one of"
            f" {VALID_BATCH_FORMATS})."
        )
    return batch_format
124
+
125
+
126
def _apply_batch_size(
    given_batch_size: Optional[Union[int, Literal["default"]]]
) -> Optional[int]:
    """Resolve the "default" sentinel to the configured default batch size;
    pass any other value (including None) through unchanged."""
    if given_batch_size != "default":
        return given_batch_size
    return ray.data.context.DEFAULT_BATCH_SIZE
133
+
134
+
135
@DeveloperAPI
class BlockExecStats:
    """Execution stats for this block.

    Attributes:
        wall_time_s: The wall-clock time it took to compute this block.
        cpu_time_s: The CPU time it took to compute this block.
        node_id: A unique id for the node that computed this block.
    """

    def __init__(self):
        self.start_time_s: Optional[float] = None
        self.end_time_s: Optional[float] = None
        self.wall_time_s: Optional[float] = None
        self.udf_time_s: Optional[float] = 0
        self.cpu_time_s: Optional[float] = None
        self.node_id = ray.runtime_context.get_runtime_context().get_node_id()
        # Max memory usage. May be an overestimate since we do not
        # differentiate from previous tasks on the same worker.
        self.max_rss_bytes: int = 0
        self.task_idx: Optional[int] = None

    @staticmethod
    def builder() -> "_BlockExecStatsBuilder":
        """Start timing: returns a builder whose ``build()`` produces stats."""
        return _BlockExecStatsBuilder()

    def __repr__(self):
        summary = {
            "wall_time_s": self.wall_time_s,
            "cpu_time_s": self.cpu_time_s,
            "udf_time_s": self.udf_time_s,
            "node_id": self.node_id,
        }
        return repr(summary)


class _BlockExecStatsBuilder:
    """Helper class for building block stats.

    When this class is created, we record the start time. When build() is
    called, the time delta is saved as part of the stats.
    """

    def __init__(self):
        # Snapshot wall-clock and process-CPU counters at construction.
        self.start_time = time.perf_counter()
        self.start_cpu = time.process_time()

    def build(self) -> "BlockExecStats":
        """Finalize and return a BlockExecStats for the elapsed interval."""
        self.end_time = time.perf_counter()
        self.end_cpu = time.process_time()

        stats = BlockExecStats()
        stats.start_time_s = self.start_time
        stats.end_time_s = self.end_time
        stats.wall_time_s = self.end_time - self.start_time
        stats.cpu_time_s = self.end_cpu - self.start_cpu
        if resource is None:
            # NOTE(swang): resource package is not supported on Windows. This
            # is only the memory usage at the end of the task, not the peak
            # memory.
            process = psutil.Process(os.getpid())
            stats.max_rss_bytes = int(process.memory_info().rss)
        else:
            # NOTE(review): the * 1e3 assumes ru_maxrss is in kilobytes (the
            # Linux convention); on macOS it is already in bytes — confirm if
            # cross-platform accuracy matters here.
            stats.max_rss_bytes = int(
                resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * 1e3
            )
        return stats


@DeveloperAPI
@dataclass
class BlockMetadata:
    """Metadata about the block."""

    #: The number of rows contained in this block, or None.
    num_rows: Optional[int]
    #: The approximate size in bytes of this block, or None.
    size_bytes: Optional[int]
    #: The pyarrow schema or types of the block elements, or None.
    schema: Optional[Union[type, "pyarrow.lib.Schema"]]
    #: The list of file paths used to generate this block, or
    #: the empty list if indeterminate.
    input_files: Optional[List[str]]
    #: Execution stats for this block.
    exec_stats: Optional[BlockExecStats]

    def __post_init__(self):
        # Normalize "unknown provenance" to an empty list so downstream code
        # can iterate input_files unconditionally.
        if self.input_files is None:
            self.input_files = []
        if self.size_bytes is not None:
            # Require size_bytes to be int, ray.util.metrics objects
            # will not take other types like numpy.int64
            assert isinstance(self.size_bytes, int)


@DeveloperAPI
class BlockAccessor:
    """Provides accessor methods for a specific block.

    Ideally, we wouldn't need a separate accessor classes for blocks. However,
    this is needed if we want to support storing ``pyarrow.Table`` directly
    as a top-level Ray object, without a wrapping class (issue #17186).
    """

    def num_rows(self) -> int:
        """Return the number of rows contained in this block."""
        raise NotImplementedError

    def iter_rows(self, public_row_format: bool) -> Iterator[T]:
        """Iterate over the rows of this block.

        Args:
            public_row_format: Whether to cast rows into the public Dict row
                format (this incurs extra copy conversions).
        """
        raise NotImplementedError

    def slice(self, start: int, end: int, copy: bool) -> Block:
        """Return a slice of this block.

        Args:
            start: The starting index of the slice.
            end: The ending index of the slice.
            copy: Whether to perform a data copy for the slice.

        Returns:
            The sliced block result.
        """
        raise NotImplementedError

    def take(self, indices: List[int]) -> Block:
        """Return a new block containing the provided row indices.

        Args:
            indices: The row indices to return.

        Returns:
            A new block containing the provided row indices.
        """
        raise NotImplementedError

    def select(self, columns: List[Optional[str]]) -> Block:
        """Return a new block containing the provided columns."""
        raise NotImplementedError

    def random_shuffle(self, random_seed: Optional[int]) -> Block:
        """Randomly shuffle this block."""
        raise NotImplementedError

    def to_pandas(self) -> "pandas.DataFrame":
        """Convert this block into a Pandas dataframe."""
        raise NotImplementedError

    def to_numpy(
        self, columns: Optional[Union[str, List[str]]] = None
    ) -> Union[np.ndarray, Dict[str, np.ndarray]]:
        """Convert this block (or columns of block) into a NumPy ndarray.

        Args:
            columns: Name of columns to convert, or None if converting all columns.
        """
        raise NotImplementedError

    def to_arrow(self) -> "pyarrow.Table":
        """Convert this block into an Arrow table."""
        raise NotImplementedError

    def to_block(self) -> Block:
        """Return the base block that this accessor wraps."""
        raise NotImplementedError

    def to_default(self) -> Block:
        """Return the default data format for this accessor."""
        return self.to_block()

    def to_batch_format(self, batch_format: Optional[str]) -> DataBatch:
        """Convert this block into the provided batch format.

        Args:
            batch_format: The batch format to convert this block to.

        Returns:
            This block formatted as the provided batch format.

        Raises:
            ValueError: If ``batch_format`` is not a recognized format.
        """
        if batch_format is None:
            return self.to_block()
        elif batch_format == "default" or batch_format == "native":
            return self.to_default()
        elif batch_format == "pandas":
            return self.to_pandas()
        elif batch_format == "pyarrow":
            return self.to_arrow()
        elif batch_format == "numpy":
            return self.to_numpy()
        else:
            raise ValueError(
                f"The batch format must be one of {VALID_BATCH_FORMATS}, got: "
                f"{batch_format}"
            )

    def size_bytes(self) -> int:
        """Return the approximate size in bytes of this block."""
        raise NotImplementedError

    def schema(self) -> Union[type, "pyarrow.lib.Schema"]:
        """Return the Python type or pyarrow schema of this block."""
        raise NotImplementedError

    def get_metadata(
        self,
        input_files: Optional[List[str]] = None,
        exec_stats: Optional[BlockExecStats] = None,
    ) -> BlockMetadata:
        """Create a metadata object from this block."""
        return BlockMetadata(
            num_rows=self.num_rows(),
            size_bytes=self.size_bytes(),
            schema=self.schema(),
            input_files=input_files,
            exec_stats=exec_stats,
        )

    def zip(self, other: "Block") -> "Block":
        """Zip this block with another block of the same type and size."""
        raise NotImplementedError

    @staticmethod
    def builder() -> "BlockBuilder":
        """Create a builder for this block type."""
        raise NotImplementedError

    @classmethod
    def batch_to_block(
        cls,
        batch: DataBatch,
        block_type: Optional[BlockType] = None,
    ) -> Block:
        """Create a block from user-facing data formats.

        Mapping batches are converted to Arrow first, falling back to Pandas
        if Arrow conversion fails and no explicit block type was requested;
        anything that is already a block passes through unchanged.
        """

        if isinstance(batch, np.ndarray):
            raise ValueError(
                f"Error validating {_truncated_repr(batch)}: "
                "Standalone numpy arrays are not "
                "allowed in Ray 2.5. Return a dict of field -> array, "
                "e.g., `{'data': array}` instead of `array`."
            )

        elif isinstance(batch, collections.abc.Mapping):
            if block_type is None or block_type == BlockType.ARROW:
                try:
                    return cls.batch_to_arrow_block(batch)
                except ArrowConversionError as e:
                    if log_once("_fallback_to_pandas_block_warning"):
                        logger.warning(
                            f"Failed to convert batch to Arrow due to: {e}; "
                            f"falling back to Pandas block"
                        )

                    if block_type is None:
                        return cls.batch_to_pandas_block(batch)
                    else:
                        raise e
            else:
                assert block_type == BlockType.PANDAS
                return cls.batch_to_pandas_block(batch)
        return batch

    @classmethod
    def batch_to_arrow_block(cls, batch: Dict[str, Any]) -> Block:
        """Create an Arrow block from user-facing data formats."""
        from ray.data._internal.arrow_block import ArrowBlockBuilder

        return ArrowBlockBuilder._table_from_pydict(batch)

    @classmethod
    def batch_to_pandas_block(cls, batch: Dict[str, Any]) -> Block:
        """Create a Pandas block from user-facing data formats."""
        from ray.data._internal.pandas_block import PandasBlockAccessor

        return PandasBlockAccessor.numpy_to_block(batch)

    @staticmethod
    def for_block(block: Block) -> "BlockAccessor[T]":
        """Create a block accessor for the given block.

        Raises:
            ValueError: If the block is a bare Python list.
            TypeError: If the value is not a supported block type at all.
        """
        _check_pyarrow_version()
        import pandas
        import pyarrow

        if isinstance(block, pyarrow.Table):
            from ray.data._internal.arrow_block import ArrowBlockAccessor

            return ArrowBlockAccessor(block)
        elif isinstance(block, pandas.DataFrame):
            from ray.data._internal.pandas_block import PandasBlockAccessor

            return PandasBlockAccessor(block)
        elif isinstance(block, bytes):
            # Serialized Arrow table.
            from ray.data._internal.arrow_block import ArrowBlockAccessor

            return ArrowBlockAccessor.from_bytes(block)
        elif isinstance(block, list):
            raise ValueError(
                f"Error validating {_truncated_repr(block)}: "
                "Standalone Python objects are not "
                "allowed in Ray 2.5. To use Python objects in a dataset, "
                "wrap them in a dict of numpy arrays, e.g., "
                "return `{'item': batch}` instead of just `batch`."
            )
        else:
            raise TypeError("Not a block type: {} ({})".format(block, type(block)))

    def sample(self, n_samples: int, sort_key: "SortKey") -> "Block":
        """Return a random sample of items from this block."""
        raise NotImplementedError

    def sort_and_partition(
        self, boundaries: List[T], sort_key: "SortKey"
    ) -> List["Block"]:
        """Return a list of sorted partitions of this block."""
        raise NotImplementedError

    def combine(self, key: "SortKey", aggs: Tuple["AggregateFn"]) -> Block:
        """Combine rows with the same key into an accumulator."""
        raise NotImplementedError

    @staticmethod
    def merge_sorted_blocks(
        blocks: List["Block"], sort_key: "SortKey"
    ) -> Tuple[Block, BlockMetadata]:
        """Return a sorted block by merging a list of sorted blocks."""
        raise NotImplementedError

    @staticmethod
    def aggregate_combined_blocks(
        blocks: List[Block], sort_key: "SortKey", aggs: Tuple["AggregateFn"]
    ) -> Tuple[Block, BlockMetadata]:
        """Aggregate partially combined and sorted blocks."""
        raise NotImplementedError

    def block_type(self) -> BlockType:
        """Return the block type of this block."""
        raise NotImplementedError