ZTWHHH committed on
Commit
484de52
·
verified ·
1 Parent(s): d8c130b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/__init__.cpython-310.pyc +0 -0
  2. infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/config.cpython-310.pyc +0 -0
  3. infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/constants.cpython-310.pyc +0 -0
  4. infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/data_batch_type.cpython-310.pyc +0 -0
  5. infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/result.cpython-310.pyc +0 -0
  6. infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/session.cpython-310.pyc +0 -0
  7. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
  8. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/config.cpython-310.pyc +0 -0
  9. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/filelock.cpython-310.pyc +0 -0
  10. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/json.cpython-310.pyc +0 -0
  11. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/mlflow.cpython-310.pyc +0 -0
  12. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/session.cpython-310.pyc +0 -0
  13. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/tensorflow_utils.cpython-310.pyc +0 -0
  14. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/torch_utils.cpython-310.pyc +0 -0
  15. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/uri_utils.cpython-310.pyc +0 -0
  16. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/usage.cpython-310.pyc +0 -0
  17. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/util.cpython-310.pyc +0 -0
  18. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/config.py +47 -0
  19. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__init__.py +92 -0
  20. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/__init__.cpython-310.pyc +0 -0
  21. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/cpu.cpython-310.pyc +0 -0
  22. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/hpu.cpython-310.pyc +0 -0
  23. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/npu.cpython-310.pyc +0 -0
  24. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/nvidia_gpu.cpython-310.pyc +0 -0
  25. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/torch_device_manager.cpython-310.pyc +0 -0
  26. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/cpu.py +30 -0
  27. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/hpu.py +50 -0
  28. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/npu.py +105 -0
  29. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/nvidia_gpu.py +79 -0
  30. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/torch_device_manager.py +40 -0
  31. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/json.py +31 -0
  32. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/session.py +10 -0
  33. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/tensorflow_utils.py +131 -0
  34. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/torch_utils.py +294 -0
  35. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/uri_utils.py +101 -0
  36. infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/util.py +134 -0
  37. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/__init__.cpython-310.pyc +0 -0
  38. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/check_ingest.cpython-310.pyc +0 -0
  39. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__init__.py +0 -0
  40. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__init__.py +0 -0
  41. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/__init__.cpython-310.pyc +0 -0
  42. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/arrow.cpython-310.pyc +0 -0
  43. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/pandas.cpython-310.pyc +0 -0
  44. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/arrow.py +1216 -0
  45. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/pandas.py +1451 -0
  46. infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/utils.py +137 -0
  47. infer_4_37_2/lib/python3.10/site-packages/ray/core/__init__.py +0 -0
  48. infer_4_37_2/lib/python3.10/site-packages/ray/core/generated/agent_manager_pb2.py +32 -0
  49. infer_4_37_2/lib/python3.10/site-packages/ray/core/generated/autoscaler_pb2_grpc.py +198 -0
  50. infer_4_37_2/lib/python3.10/site-packages/ray/core/generated/common_pb2_grpc.py +4 -0
infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (587 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/config.cpython-310.pyc ADDED
Binary file (27.5 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/constants.cpython-310.pyc ADDED
Binary file (1.24 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/data_batch_type.cpython-310.pyc ADDED
Binary file (401 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/result.cpython-310.pyc ADDED
Binary file (10 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/__pycache__/session.cpython-310.pyc ADDED
Binary file (270 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (175 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.62 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/filelock.cpython-310.pyc ADDED
Binary file (2.03 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/json.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/mlflow.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/session.cpython-310.pyc ADDED
Binary file (444 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/tensorflow_utils.cpython-310.pyc ADDED
Binary file (4.64 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/torch_utils.cpython-310.pyc ADDED
Binary file (9.07 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/uri_utils.cpython-310.pyc ADDED
Binary file (3.9 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/usage.cpython-310.pyc ADDED
Binary file (7.95 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__pycache__/util.cpython-310.pyc ADDED
Binary file (4.42 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/config.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import dataclasses
from typing import Iterable


def ensure_only_allowed_dataclass_keys_updated(
    dataclass: dataclasses.dataclass,
    allowed_keys: Iterable[str],
):
    """
    Validate dataclass by raising an exception if any key not included in
    ``allowed_keys`` differs from the default value.

    A ``ValueError`` will also be raised if any of the ``allowed_keys``
    is not present in ``dataclass.__dict__``.

    Args:
        dataclass: Dict or dataclass to check.
        allowed_keys: dataclass attribute keys that can have a value different than
            the default one.
    """
    # A freshly-constructed instance supplies the default value of every field.
    defaults = dataclass.__class__()
    allowed = set(allowed_keys)

    # TODO: split keys_not_in_dict validation to a separate function.
    missing = [k for k in allowed if k not in defaults.__dict__]
    if missing:
        raise ValueError(
            f"Key(s) {missing} are not present in "
            f"{dataclass.__class__.__name__}. "
            "Remove them from `allowed_keys`. "
            f"Valid keys: {list(defaults.__dict__.keys())}"
        )

    # Any field outside the allow-list must still hold its default value.
    changed = [
        k
        for k in set(defaults.__dict__) - allowed
        if dataclass.__dict__[k] != defaults.__dict__[k]
    ]
    if changed:
        raise ValueError(
            f"Key(s) {changed} are not allowed to be updated in the current context. "
            "Remove them from the dataclass."
        )
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__init__.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import logging
import threading
from typing import Optional

import ray
import ray._private.ray_constants as ray_constants
from ray.air._internal.device_manager.cpu import CPUTorchDeviceManager
from ray.air._internal.device_manager.hpu import HPUTorchDeviceManager
from ray.air._internal.device_manager.npu import NPUTorchDeviceManager
from ray.air._internal.device_manager.nvidia_gpu import CUDATorchDeviceManager
from ray.air._internal.device_manager.torch_device_manager import TorchDeviceManager

logger = logging.getLogger(__name__)


# Fallback used when no supported accelerator resource is assigned.
DEFAULT_TORCH_DEVICE_MANAGER_CLS = CPUTorchDeviceManager


# Maps a Ray accelerator resource name to its torch device manager class.
SUPPORTED_ACCELERATOR_TORCH_DEVICE_MANAGER = {
    ray_constants.GPU: CUDATorchDeviceManager,
    ray_constants.HPU: HPUTorchDeviceManager,
    ray_constants.NPU: NPUTorchDeviceManager,
}


def register_custom_torch_dist_backend(backend: Optional[str] = None) -> None:
    """Register a custom torch.distributed backend for the given name.

    Args:
        backend: Name of the requested distributed backend (e.g. ``"hccl"``).
    """
    if backend == "hccl":
        # The name for the communication backend of Habana and torch-npu is the same.
        HPUTorchDeviceManager.register_custom_torch_dist_backend()

        NPUTorchDeviceManager.register_custom_torch_dist_backend()


# Lazily-created, process-wide singleton guarded by a lock for thread safety.
_torch_device_manager = None
_torch_device_manager_lock = threading.Lock()


def get_torch_device_manager_by_context() -> TorchDeviceManager:
    """Return the device manager matching this worker's accelerator resources.

    The result is cached process-wide. Falls back to the CPU manager when no
    supported accelerator resource is assigned.

    Raises:
        RuntimeError: If more than one supported accelerator type is assigned.
    """
    global _torch_device_manager

    with _torch_device_manager_lock:
        if not _torch_device_manager:
            existing_device_manager_cls = None
            resources = ray.get_runtime_context().get_accelerator_ids()

            # select correct accelerator type from resources
            for resource_type, resource_value in resources.items():
                device_manager_cls = SUPPORTED_ACCELERATOR_TORCH_DEVICE_MANAGER.get(
                    resource_type, None
                )
                if resource_value and device_manager_cls:
                    # An error will raise when multiple accelerators are specified.
                    if existing_device_manager_cls:
                        raise RuntimeError(
                            "Unable to determine the appropriate DeviceManager "
                            f"for the specified resources {resources}."
                        )
                    else:
                        existing_device_manager_cls = device_manager_cls

            device_manager_cls = (
                existing_device_manager_cls or DEFAULT_TORCH_DEVICE_MANAGER_CLS
            )

            _torch_device_manager = device_manager_cls()

    return _torch_device_manager


def get_torch_device_manager_by_device_type(device_type: str) -> TorchDeviceManager:
    """Return a fresh device manager for an explicit device-type string.

    Args:
        device_type: Case-insensitive device type: GPU/"cuda", NPU, HPU or "cpu".

    Raises:
        RuntimeError: If the device type is not recognized.
    """
    if device_type.lower() == ray_constants.GPU.lower() or device_type == "cuda":
        return CUDATorchDeviceManager()
    elif device_type.lower() == ray_constants.NPU.lower():
        return NPUTorchDeviceManager()
    elif device_type.lower() == ray_constants.HPU.lower():
        return HPUTorchDeviceManager()
    elif device_type.lower() == "cpu":
        return CPUTorchDeviceManager()

    raise RuntimeError(f"Device type {device_type} cannot be recognized.")


# BUGFIX: ``__all__`` must contain *strings*, not the objects themselves;
# otherwise ``from ... import *`` raises ``TypeError: Item in __all__ must
# be str``.
__all__ = [
    "TorchDeviceManager",
    "CPUTorchDeviceManager",
    "CUDATorchDeviceManager",
    "HPUTorchDeviceManager",
    "NPUTorchDeviceManager",
    "register_custom_torch_dist_backend",
    "get_torch_device_manager_by_context",
    "get_torch_device_manager_by_device_type",
]
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.28 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/cpu.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/hpu.cpython-310.pyc ADDED
Binary file (2.37 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/npu.cpython-310.pyc ADDED
Binary file (3.9 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/nvidia_gpu.cpython-310.pyc ADDED
Binary file (3.22 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/__pycache__/torch_device_manager.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/cpu.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from contextlib import contextmanager
from typing import List

import torch

from ray.air._internal.device_manager.torch_device_manager import TorchDeviceManager


class CPUTorchDeviceManager(TorchDeviceManager):
    """CPU device manager"""

    # BUGFIX: the return annotation was ``bool()`` (i.e. the value ``False``)
    # instead of the type ``bool``.
    def is_available(self) -> bool:
        """A CPU is always present."""
        return True

    def get_devices(self) -> List[torch.device]:
        """Gets the correct torch device list configured for this process."""
        return [torch.device("cpu")]

    def supports_stream(self) -> bool:
        """CPU devices do not support stream creation."""
        return False

    def get_stream_context(self, stream):
        """Return empty context mananger for CPU."""

        @contextmanager
        def default_context_manager():
            yield

        return default_context_manager()
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/hpu.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from contextlib import contextmanager
from typing import List, Union

import torch

from ray._private.accelerators.hpu import HPU_PACKAGE_AVAILABLE
from ray.air._internal.device_manager.torch_device_manager import TorchDeviceManager

if HPU_PACKAGE_AVAILABLE:
    import habana_frameworks.torch.hpu as torch_hpu


class HPUTorchDeviceManager(TorchDeviceManager):
    """HPU device manager"""

    @staticmethod
    def register_custom_torch_dist_backend():
        """Import the Habana modules whose side effects register the backend."""
        if HPU_PACKAGE_AVAILABLE:
            import habana_frameworks.torch.core  # noqa: F401
            import habana_frameworks.torch.distributed.hccl  # noqa: F401

    # BUGFIX: the return annotation was ``bool()`` (i.e. the value ``False``)
    # instead of the type ``bool``.
    def is_available(self) -> bool:
        """Return True if the Habana torch package reports an available HPU."""
        if not HPU_PACKAGE_AVAILABLE:
            return False

        return torch_hpu.is_available()

    def get_devices(self) -> List[torch.device]:
        """Return the torch HPU device for this worker.

        Raises:
            RuntimeError: If torch HPU support is not available.
        """
        if not self.is_available():
            raise RuntimeError(
                "Using HPUTorchDeviceManager but torch hpu is not available."
            )

        return [torch.device("hpu")]

    def set_device(self, device: Union[torch.device, int, str, None]):
        """Set the current HPU device for this process."""
        torch_hpu.set_device(device)

    def supports_stream(self) -> bool:
        """HPU stream creation is not supported."""
        return False

    def get_stream_context(self, stream):
        """Get HPU stream context manager, empty so far."""

        @contextmanager
        def default_context_manager():
            yield

        return default_context_manager()
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/npu.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from importlib.util import find_spec
from typing import List, Union

import torch

import ray
import ray._private.ray_constants as ray_constants
from ray.air._internal.device_manager.torch_device_manager import TorchDeviceManager


def is_package_present(package_name: str) -> bool:
    """Return True if ``package_name`` is importable in this environment."""
    try:
        return find_spec(package_name) is not None
    except ModuleNotFoundError:
        return False


NPU_TORCH_PACKAGE_AVAILABLE = is_package_present("torch_npu")


if NPU_TORCH_PACKAGE_AVAILABLE:
    import torch_npu  # noqa: F401


class NPUTorchDeviceManager(TorchDeviceManager):
    """Ascend NPU device manager"""

    @staticmethod
    def register_custom_torch_dist_backend():
        # Importing torch_npu has the side effect of registering its backend.
        if NPU_TORCH_PACKAGE_AVAILABLE:
            import torch_npu  # noqa: F401, F811

    def is_available(self) -> bool:
        """Return True if torch_npu is installed and an NPU is usable."""
        if not NPU_TORCH_PACKAGE_AVAILABLE:
            return False

        return torch.npu.is_available()

    def get_devices(self) -> List[torch.device]:
        """Gets the correct torch device list configured for this process.

        Returns a list of torch NPU devices allocated for the current worker.
        If no NPUs are assigned, then it returns a list with a single CPU device.

        Raises:
            RuntimeError: If torch NPU support is unavailable, or if the
                visible-devices environment variable is inconsistent with the
                NPU ids assigned by Ray.
        """
        if NPU_TORCH_PACKAGE_AVAILABLE and torch.npu.is_available():
            # Ray may hand back int or str ids; normalize to strings.
            npu_ids = [
                str(id)
                for id in ray.get_runtime_context().get_accelerator_ids()[
                    ray_constants.NPU
                ]
            ]

            device_ids = []

            if len(npu_ids) > 0:
                npu_visible_str = os.environ.get(
                    ray_constants.NPU_RT_VISIBLE_DEVICES_ENV_VAR, ""
                )
                if npu_visible_str and npu_visible_str != "NoDevFiles":
                    npu_visible_list = npu_visible_str.split(",")
                else:
                    npu_visible_list = []

                for npu_id in npu_ids:
                    try:
                        device_ids.append(npu_visible_list.index(npu_id))
                    # BUGFIX: ``list.index`` raises ValueError (not IndexError)
                    # when the id is missing, so the helpful RuntimeError below
                    # was unreachable.
                    except ValueError:
                        raise RuntimeError(
                            "ASCEND_RT_VISIBLE_DEVICES set incorrectly. "
                            f"Got {npu_visible_str}, expected to include {npu_id}. "
                            "Did you override the `ASCEND_RT_VISIBLE_DEVICES` "
                            "environment variable?"
                        )
            else:
                # If called on the driver or outside of Ray Train, return the
                # 0th device.
                device_ids.append(0)

            devices = [torch.device(f"npu:{device_id}") for device_id in device_ids]
        else:
            raise RuntimeError(
                "Using NPUTorchDeviceManager but torch npu is not available."
            )

        return devices

    def set_device(self, device: Union[torch.device, int]):
        """Set the current NPU device for this process."""
        torch.npu.set_device(device)

    def supports_stream(self) -> bool:
        """Validate if the device type support to create a stream"""
        return True

    def create_stream(self, device):
        """Create a stream on NPU device"""
        return torch.npu.Stream(device)

    def get_stream_context(self, stream):
        """Get a torch.stream context on NPU device"""
        return torch.npu.stream(stream)

    def get_current_stream(self):
        """Get current stream for NPU device"""
        return torch.npu.current_stream()
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/nvidia_gpu.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from typing import List, Union

import torch

import ray
from ray.air._internal.device_manager.torch_device_manager import TorchDeviceManager


class CUDATorchDeviceManager(TorchDeviceManager):
    """CUDA device manager"""

    # BUGFIX: the return annotation was ``bool()`` (i.e. the value ``False``)
    # instead of the type ``bool``.
    def is_available(self) -> bool:
        """Return True if CUDA is available to torch."""
        return torch.cuda.is_available()

    def get_devices(self) -> List[torch.device]:
        """Gets the correct torch device list configured for this process.

        Returns a list of torch CUDA devices allocated for the current worker.
        If no GPUs are assigned, then it returns a list with a single CPU device.

        Assumes that `CUDA_VISIBLE_DEVICES` is set and is a
        superset of the `ray.get_gpu_ids()`.

        Raises:
            RuntimeError: If `CUDA_VISIBLE_DEVICES` does not contain one of the
                GPU ids that Ray assigned to this worker.
        """

        # GPU IDs are assigned by Ray after you specify "use_gpu"
        # GPU `ray.get_gpu_ids()` may return ints or may return strings.
        # We should always convert to strings.
        gpu_ids = [str(id) for id in ray.get_gpu_ids()]

        device_ids = []

        if len(gpu_ids) > 0:
            cuda_visible_str = os.environ.get("CUDA_VISIBLE_DEVICES", "")
            if cuda_visible_str and cuda_visible_str != "NoDevFiles":
                cuda_visible_list = cuda_visible_str.split(",")
            else:
                cuda_visible_list = []

            # By default, there should only be one GPU ID if `use_gpu=True`.
            # If there are multiple GPUs, return a list of devices.
            # If using fractional GPUs, these IDs are not guaranteed
            # to be unique across different processes.
            for gpu_id in gpu_ids:
                try:
                    device_ids.append(cuda_visible_list.index(gpu_id))
                # BUGFIX: ``list.index`` raises ValueError (not IndexError)
                # when the id is missing, so the helpful RuntimeError below
                # was unreachable.
                except ValueError:
                    raise RuntimeError(
                        "CUDA_VISIBLE_DEVICES set incorrectly. "
                        f"Got {cuda_visible_str}, expected to include {gpu_id}. "
                        "Did you override the `CUDA_VISIBLE_DEVICES` environment"
                        " variable? If not, please help file an issue on Github."
                    )

        else:
            # If called on the driver or outside of Ray Train, return the
            # 0th device.
            device_ids.append(0)

        return [torch.device(f"cuda:{device_id}") for device_id in device_ids]

    def set_device(self, device: Union[torch.device, int, str, None]):
        """Set the current CUDA device for this process."""
        torch.cuda.set_device(device)

    def supports_stream(self) -> bool:
        """CUDA supports stream creation."""
        return True

    def create_stream(self, device: torch.device) -> torch.cuda.Stream:
        """Create a stream on cuda device"""
        return torch.cuda.Stream(device)

    def get_stream_context(self, stream):
        """Get a stream context for cuda device"""
        return torch.cuda.stream(stream)

    def get_current_stream(self) -> torch.cuda.Stream:
        """Get current stream for cuda device"""
        return torch.cuda.current_stream()
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/device_manager/torch_device_manager.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from abc import ABC
from typing import List, Union

import torch


class TorchDeviceManager(ABC):
    """Interface for supporting an accelerator family in Ray AI Library.

    Concrete subclasses (CPU, CUDA, HPU, NPU, ...) override these methods;
    the base implementations are intentionally no-ops.
    """

    def is_available(self) -> bool:
        """Report whether this accelerator type is usable."""

    def get_devices(self) -> List[torch.device]:
        """Return the torch devices configured for this process."""

    def set_device(self, device: Union[torch.device, int, str, None]):
        """Select the active device for this process."""

    def supports_stream(self) -> bool:
        """Report whether this device type can create streams."""

    def create_stream(self, device: torch.device):
        """Create a stream on the given device."""

    def get_stream_context(self, stream):
        """Return a stream context for the device; implementations without
        stream support should return an empty context manager, never None.
        """

    def get_current_stream(self):
        """Return the device's current stream (cf. torch.cuda.current_stream)."""
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/json.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import numbers

import numpy as np


class SafeFallbackEncoder(json.JSONEncoder):
    """JSON encoder that degrades gracefully on non-serializable values.

    NumPy arrays, scalars and NaNs are converted to plain JSON values;
    anything else falls back to its string representation (ok for logs).
    """

    def __init__(self, nan_str="null", **kwargs):
        super().__init__(**kwargs)
        # String emitted in place of NaN values.
        self.nan_str = nan_str

    def default(self, value):
        try:
            if isinstance(value, np.ndarray) and type(value).__module__ == np.__name__:
                return value.tolist()

            if isinstance(value, np.bool_):
                return bool(value)

            if np.isnan(value):
                return self.nan_str

            value_type = type(value)
            if issubclass(value_type, numbers.Integral):
                return int(value)
            if issubclass(value_type, numbers.Number):
                return float(value)

            return super().default(value)

        except Exception:
            return str(value)  # give up, just stringify it (ok for logs)
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/session.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
import logging

logger = logging.getLogger(__name__)


# TODO(justinvyu): Move this to train
def _get_session(warn: bool = False):
    """Return the Ray Train session singleton (None when no session is active).

    NOTE(review): the ``warn`` flag is currently unused here — presumably kept
    for interface compatibility; confirm against callers.
    """
    from ray.train._internal.session import _session as train_session

    return train_session
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/tensorflow_utils.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union

import numpy as np
import pyarrow
import tensorflow as tf

from ray.air.util.data_batch_conversion import _unwrap_ndarray_object_type_if_needed
from ray.air.util.tensor_extensions.arrow import get_arrow_extension_tensor_types

if TYPE_CHECKING:
    from ray.data._internal.pandas_block import PandasBlockSchema


def convert_ndarray_to_tf_tensor(
    ndarray: np.ndarray,
    dtype: Optional[tf.dtypes.DType] = None,
    type_spec: Optional[tf.TypeSpec] = None,
) -> tf.Tensor:
    """Convert a NumPy ndarray to a TensorFlow Tensor.

    Args:
        ndarray: A NumPy ndarray that we wish to convert to a TensorFlow Tensor.
        dtype: A TensorFlow dtype for the created tensor; if None, the dtype will be
            inferred from the NumPy ndarray data.
        type_spec: A type spec that specifies the shape and dtype of the returned
            tensor. If you specify ``dtype``, the dtype stored in the type spec is
            ignored.

    Returns: A TensorFlow Tensor.
    """
    if dtype is None and type_spec is not None:
        dtype = type_spec.dtype

    ndarray = _unwrap_ndarray_object_type_if_needed(ndarray)
    # A ragged spec means per-row tensors of varying shape.
    if isinstance(type_spec, tf.RaggedTensorSpec):
        return tf.ragged.constant(ndarray, dtype=dtype)
    return tf.convert_to_tensor(ndarray, dtype=dtype)


def convert_ndarray_batch_to_tf_tensor_batch(
    ndarrays: Union[np.ndarray, Dict[str, np.ndarray]],
    dtypes: Optional[Union[tf.dtypes.DType, Dict[str, tf.dtypes.DType]]] = None,
) -> Union[tf.Tensor, Dict[str, tf.Tensor]]:
    """Convert a NumPy ndarray batch to a TensorFlow Tensor batch.

    Args:
        ndarrays: A single ndarray or a dict of column name to ndarray.
        dtypes: A single dtype or a dict of column name to dtype; if None, the
            dtype will be inferred from the NumPy ndarray data.

    Returns: A single Tensor or a dict of column name to Tensor, mirroring the
    structure of ``ndarrays``.
    """
    if isinstance(ndarrays, np.ndarray):
        # Single-tensor case: a dict of dtypes must hold exactly one entry.
        if isinstance(dtypes, dict):
            if len(dtypes) != 1:
                raise ValueError(
                    "When constructing a single-tensor batch, only a single dtype "
                    f"should be given, instead got: {dtypes}"
                )
            dtypes = next(iter(dtypes.values()))
        return convert_ndarray_to_tf_tensor(ndarrays, dtypes)

    # Multi-tensor case: convert column by column.
    return {
        name: convert_ndarray_to_tf_tensor(
            arr,
            dtype=dtypes[name] if isinstance(dtypes, dict) else dtypes,
        )
        for name, arr in ndarrays.items()
    }


def get_type_spec(
    schema: Union["pyarrow.lib.Schema", "PandasBlockSchema"],
    columns: Union[str, List[str]],
) -> Union[tf.TypeSpec, Dict[str, tf.TypeSpec]]:
    """Build tf.TypeSpec(s) for the given column(s) of a block schema."""
    import pyarrow as pa

    from ray.data.extensions import TensorDtype

    tensor_extension_types = get_arrow_extension_tensor_types()

    assert not isinstance(schema, type)

    dtypes: Dict[str, Union[np.dtype, pa.DataType]] = dict(
        zip(schema.names, schema.types)
    )

    def to_tf_dtype(dtype: Union[np.dtype, pa.DataType]) -> tf.dtypes.DType:
        if isinstance(dtype, pa.DataType):
            dtype = dtype.to_pandas_dtype()
        if isinstance(dtype, TensorDtype):
            dtype = dtype.element_dtype
        return tf.dtypes.as_dtype(dtype)

    def to_shape(dtype: Union[np.dtype, pa.DataType]) -> Tuple[int, ...]:
        shape = (None,)
        if isinstance(dtype, tensor_extension_types):
            dtype = dtype.to_pandas_dtype()
        if isinstance(dtype, TensorDtype):
            shape += dtype.element_shape
        return shape

    def make_spec(
        dtype: Union[np.dtype, pa.DataType], *, name: str
    ) -> tf.TypeSpec:
        shape, tf_dtype = to_shape(dtype), to_tf_dtype(dtype)
        # Batch dimension is always `None`. So, if there's more than one
        # `None`-valued dimension, then the tensor is ragged.
        if sum(dim is None for dim in shape) > 1:
            return tf.RaggedTensorSpec(shape, dtype=tf_dtype)
        return tf.TensorSpec(shape, dtype=tf_dtype, name=name)

    if isinstance(columns, str):
        return make_spec(dtypes[columns], name=columns)

    return {
        name: make_spec(dtype, name=name)
        for name, dtype in dtypes.items()
        if name in columns
    }
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/torch_utils.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from typing import Any, Dict, List, Optional, Union
3
+
4
+ import numpy as np
5
+ import pandas as pd
6
+ import torch
7
+
8
+ from ray.air._internal.device_manager import get_torch_device_manager_by_context
9
+ from ray.air.util.data_batch_conversion import _unwrap_ndarray_object_type_if_needed
10
+
11
+
12
def get_devices() -> List[torch.device]:
    """Return the torch devices assigned to the current worker process.

    The device manager resolves any accelerators (GPU, HPU, NPU, ...)
    allocated to this worker; when no accelerator is assigned, the result
    is a single-element list containing the CPU device.
    """
    device_manager = get_torch_device_manager_by_context()
    return device_manager.get_devices()
20
+
21
+
22
def convert_pandas_to_torch_tensor(
    data_batch: pd.DataFrame,
    columns: Optional[Union[List[str], List[List[str]]]] = None,
    column_dtypes: Optional[Union[torch.dtype, List[torch.dtype]]] = None,
    unsqueeze: bool = True,
) -> Union[torch.Tensor, List[torch.Tensor]]:
    """Converts a Pandas dataframe to a torch Tensor or list of torch Tensors.

    The shape of the return value mirrors the shape of ``columns``: a flat
    list of column names yields one tensor, while a list of lists of column
    names yields one tensor per sublist (useful for multi-input models).

    Args:
        data_batch: The pandas dataframe to convert.
        columns: Column names to include. ``None`` selects every column of
            ``data_batch``.
        column_dtypes: Torch dtype(s) for the produced tensor(s); when
            ``None``, the dtype is inferred automatically.
        unsqueeze: When ``True`` (the default), each column tensor is
            reshaped from ``(N,)`` to ``(N, 1)`` before concatenation.

    Returns:
        A tensor of shape ``(N, len(columns))`` where ``N`` is the number of
        rows in ``data_batch``, or a list of tensors where item ``i`` has
        shape ``(N, len(columns[i]))``.

    Raises:
        TypeError: If ``columns`` is flat but ``column_dtypes`` is not a
            single ``torch.dtype``.
        ValueError: If a column cannot be converted to a tensor.
    """

    multi_input = bool(columns) and isinstance(columns[0], (list, tuple))

    if not multi_input and column_dtypes and not isinstance(column_dtypes, torch.dtype):
        raise TypeError(
            "If `columns` is a list of strings, "
            "`column_dtypes` must be None or a single `torch.dtype`."
            f"Got {type(column_dtypes)} instead."
        )

    if not columns:
        columns = []

    def _to_tensor(values, dtype):
        """Recursively convert column values (possibly pyarrow List dtypes)
        into a (multi-dimensional) torch tensor."""
        if isinstance(values, pd.api.extensions.ExtensionArray):
            # torch.as_tensor() does not yet support the __array__ protocol,
            # so extension arrays must be materialized as ndarrays before
            # conversion. See https://github.com/pytorch/pytorch/issues/51156.
            values = values.to_numpy()

        if values.dtype.type is not np.object_:
            return torch.as_tensor(values, dtype=dtype)

        # Object dtype: torch can't ingest it directly, so tensorize each
        # element and stack the resulting tensors.
        element_tensors = [_to_tensor(item, dtype) for item in values]
        try:
            return torch.stack(element_tensors)
        except RuntimeError:
            # RuntimeError signals ragged elements; fall back to a nested
            # tensor. Any failure here propagates to the caller.
            return torch.nested_tensor(element_tensors)

    def _columns_to_tensor(selected, dtype):
        # Convert the selected columns (or the whole frame) into one tensor.
        frame = data_batch[selected] if selected else data_batch

        per_column = []
        for col in frame.columns:
            try:
                tensor = _to_tensor(frame[col].values, dtype=dtype)
            except Exception as e:
                raise ValueError(
                    f"Failed to convert column {col} to a Torch Tensor of dtype "
                    f"{dtype}. See above exception chain for the exact failure."
                ) from e
            if unsqueeze:
                tensor = tensor.unsqueeze(1)
            per_column.append(tensor)

        if len(per_column) > 1:
            return torch.cat(per_column, dim=1)
        return per_column[0]

    if multi_input:
        if type(column_dtypes) not in [list, tuple]:
            # Broadcast a single dtype across every column group.
            column_dtypes = [column_dtypes] * len(columns)
        return [
            _columns_to_tensor(subcolumns, dtype)
            for subcolumns, dtype in zip(columns, column_dtypes)
        ]
    return _columns_to_tensor(columns, column_dtypes)
129
+
130
+
131
def convert_ndarray_to_torch_tensor(
    ndarray: np.ndarray,
    dtype: Optional[torch.dtype] = None,
    device: Optional[str] = None,
) -> torch.Tensor:
    """Convert a NumPy ndarray to a Torch Tensor.

    Args:
        ndarray: The NumPy ndarray to convert.
        dtype: Torch dtype for the created tensor; when ``None`` the dtype is
            inferred from the ndarray data.
        device: Target device for the tensor; CPU when ``None``.

    Returns:
        The resulting Torch Tensor.

    Raises:
        RuntimeError: If the ndarray has object dtype (e.g. a ragged array),
            which Torch cannot represent.
    """
    ndarray = _unwrap_ndarray_object_type_if_needed(ndarray)

    if ndarray.dtype.type is np.object_:
        # Object dtype cannot be converted into PyTorch Tensor.
        raise RuntimeError(
            "Numpy array of object dtype cannot be converted to a Torch Tensor. This "
            "may because the numpy array is a ragged tensor--it contains items of "
            "different sizes. If using `iter_torch_batches()` API, you can pass in a "
            "`collate_fn` argument to specify custom logic to convert the Numpy array "
            "batch to a Torch tensor batch."
        )

    # Arrays coming out of the Ray object store may be read-only. Torch warns
    # verbosely about non-writable inputs, but we neither write to the tensor
    # nor want to pay for a copy, so the warning is suppressed.
    # Original warning: https://github.com/pytorch/pytorch/blob/v1.13.0/
    # torch/csrc/utils/tensor_numpy.cpp#L198-L206
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return torch.as_tensor(ndarray, dtype=dtype, device=device)
167
+
168
+
169
def convert_ndarray_batch_to_torch_tensor_batch(
    ndarrays: Union[np.ndarray, Dict[str, np.ndarray]],
    dtypes: Optional[Union[torch.dtype, Dict[str, torch.dtype]]] = None,
    device: Optional[str] = None,
) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
    """Convert a NumPy ndarray batch to a Torch Tensor batch.

    Args:
        ndarrays: A single ndarray, or a dict mapping column names to
            ndarrays, to convert.
        dtypes: Torch dtype(s) for the created tensor(s); inferred from the
            data when ``None``. For a dict batch this may be a per-column
            dict of dtypes.
        device: Target device for the tensor(s); CPU when ``None``.

    Returns:
        A Torch Tensor for a single-ndarray input, otherwise a dict mapping
        each column name to its Torch Tensor.

    Raises:
        ValueError: If ``ndarrays`` is a single ndarray but ``dtypes`` is a
            dict with more than one entry.
    """
    if not isinstance(ndarrays, np.ndarray):
        # Multi-tensor (dict) case: convert each column independently,
        # resolving a per-column dtype when a dtype dict was given.
        def _dtype_for(col_name):
            return dtypes[col_name] if isinstance(dtypes, dict) else dtypes

        return {
            col_name: convert_ndarray_to_torch_tensor(
                col_ndarray, dtype=_dtype_for(col_name), device=device
            )
            for col_name, col_ndarray in ndarrays.items()
        }

    # Single-tensor case: a dtype dict is only acceptable if it has exactly
    # one entry, which is then unwrapped.
    if isinstance(dtypes, dict):
        if len(dtypes) != 1:
            raise ValueError(
                "When constructing a single-tensor batch, only a single dtype "
                f"should be given, instead got: {dtypes}"
            )
        dtypes = next(iter(dtypes.values()))
    return convert_ndarray_to_torch_tensor(ndarrays, dtype=dtypes, device=device)
207
+
208
+
209
def load_torch_model(
    saved_model: Union[torch.nn.Module, Dict],
    model_definition: Optional[torch.nn.Module] = None,
) -> torch.nn.Module:
    """Loads a PyTorch model from the provided ``saved_model``.

    ``model_definition`` is only used when ``saved_model`` is
    a torch state dict, which will be loaded into ``model_definition``.
    Otherwise, ``model_definition`` is discarded.

    Raises:
        ValueError: If ``saved_model`` is a state dict but no
            ``model_definition`` was given, or if it is neither a
            ``torch.nn.Module`` nor a dict.
    """
    if isinstance(saved_model, torch.nn.Module):
        # Already a full model; return it untouched.
        return saved_model

    if isinstance(saved_model, dict):
        # A state dict needs a module instance to be loaded into.
        if not model_definition:
            raise ValueError(
                "Attempting to load torch model from a "
                "state_dict, but no `model_definition` was "
                "provided."
            )
        model_definition.load_state_dict(saved_model)
        return model_definition

    raise ValueError(
        f"Saved model is of type {type(saved_model)}. "
        f"The model saved in the checkpoint is expected "
        f"to be of type `torch.nn.Module`, or a model "
        f"state dict of type dict."
    )
237
+
238
+
239
def contains_tensor(obj):
    """Return True if ``obj`` is, or recursively contains, a torch Tensor.

    Dicts are searched over both keys and values; lists and tuples over
    their elements. Any other type is treated as tensor-free.
    """
    if isinstance(obj, torch.Tensor):
        return True
    if isinstance(obj, dict):
        return any(
            contains_tensor(key) or contains_tensor(value)
            for key, value in obj.items()
        )
    if isinstance(obj, (list, tuple)):
        return any(contains_tensor(element) for element in obj)
    return False
253
+
254
+
255
# Not present in torch<=1.7.0
# Adapted from https://github.com/pytorch/pytorch/blob/\
# c18da597e0bb1c1aecc97c77a73fed1849057fa4/torch/nn/modules/utils.py
def consume_prefix_in_state_dict_if_present_not_in_place(
    state_dict: Dict[str, Any], prefix: str
) -> Dict[str, Any]:
    """Strip the prefix in state_dict, if any and return a new dict.

    Adapted from https://github.com/pytorch/pytorch/blob/\
    c18da597e0bb1c1aecc97c77a73fed1849057fa4/torch/nn/modules/utils.py
    The original method modified the dict in-place.

    Args:
        state_dict: a state-dict to be loaded to the model.
        prefix: prefix.

    Returns:
        The input ``state_dict`` unchanged when nothing carries the prefix,
        otherwise a shallow copy with the prefix stripped from matching keys
        (including keys of the nested ``_metadata`` dict, which is copied
        before modification as well).
    """
    copied = False

    def _ensure_copied():
        # Shallow-copy lazily: untouched inputs are returned as-is and the
        # caller's dict is never mutated. Shallow copies are cheap, so this
        # is only a minor optimization.
        nonlocal state_dict, copied
        if not copied:
            state_dict = state_dict.copy()
            copied = True

    # Iterate over a snapshot of the keys since the (copied) dict is
    # mutated inside the loop.
    for key in list(state_dict.keys()):
        if key.startswith(prefix):
            _ensure_copied()
            state_dict[key[len(prefix) :]] = state_dict.pop(key)

    if "_metadata" in state_dict:
        metadata = state_dict["_metadata"]
        # Only strip keys that actually carry the prefix; the empty key
        # refers to the root module and is always left alone. The previous
        # implementation stripped `len(prefix)` characters unconditionally
        # (corrupting non-matching keys), mutated the metadata dict while
        # iterating it, and replaced `_metadata` on the *original* dict even
        # when no state key matched.
        prefixed_keys = [
            k for k in metadata if len(k) > 0 and k.startswith(prefix)
        ]
        if prefixed_keys:
            _ensure_copied()
            metadata = metadata.copy()
            state_dict["_metadata"] = metadata
            for key in prefixed_keys:
                metadata[key[len(prefix) :]] = metadata.pop(key)

    return state_dict
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/uri_utils.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import urllib.parse
3
+ from pathlib import Path
4
+ from typing import Union
5
+
6
+
7
class URI:
    """Represents a URI, supporting path appending and retrieving parent URIs.

    Example Usage:

    >>> s3_uri = URI("s3://bucket/a?scheme=http&param=1")
    >>> s3_uri
    URI<s3://bucket/a?scheme=http&param=1>
    >>> str(s3_uri / "b" / "c")
    's3://bucket/a/b/c?scheme=http&param=1'
    >>> str(s3_uri.parent)
    's3://bucket?scheme=http&param=1'
    >>> str(s3_uri)
    's3://bucket/a?scheme=http&param=1'
    >>> s3_uri.parent.name, s3_uri.name
    ('bucket', 'a')
    >>> local_path = URI("/tmp/local")
    >>> str(local_path)
    '/tmp/local'
    >>> str(local_path.parent)
    '/tmp'
    >>> str(local_path / "b" / "c")
    '/tmp/local/b/c'

    Args:
        uri: The URI to represent.
            Ex: s3://bucket?scheme=http&endpoint_override=localhost%3A900
            Ex: file:///a/b/c/d
    """

    def __init__(self, uri: str):
        self._parsed = urllib.parse.urlparse(uri)
        if not self._parsed.scheme:
            # Just treat this as a regular path
            self._path = Path(uri)
        else:
            # Fold netloc + path into one normalized Path; query/scheme are
            # kept on `_parsed` and re-attached on stringification.
            self._path = Path(os.path.normpath(self._parsed.netloc + self._parsed.path))

    def rstrip_subpath(self, subpath: Path) -> "URI":
        """Returns a new URI that strips the given subpath from the end of this URI.

        Example:
            >>> uri = URI("s3://bucket/a/b/c/?param=1")
            >>> str(uri.rstrip_subpath(Path("b/c")))
            's3://bucket/a?param=1'

            >>> uri = URI("/tmp/a/b/c/")
            >>> str(uri.rstrip_subpath(Path("/b/c/.//")))
            '/tmp/a'

        """
        path_str = str(self._path)
        subpath_str = str(subpath)
        assert path_str.endswith(subpath_str), (self._path, subpath)
        # Slice the suffix off rather than using `str.replace`, which would
        # also remove *earlier* occurrences of the subpath (e.g. stripping
        # "b/c" from "/tmp/b/c/x/b/c" must keep the leading "/tmp/b/c/x").
        stripped_path = path_str[: len(path_str) - len(subpath_str)]
        return URI(self._get_str_representation(self._parsed, stripped_path))

    @property
    def name(self) -> str:
        # Final path component, e.g. 'a' for s3://bucket/a.
        return self._path.name

    @property
    def parent(self) -> "URI":
        assert self._path.parent != ".", f"{str(self)} has no valid parent URI"
        return URI(self._get_str_representation(self._parsed, self._path.parent))

    @property
    def scheme(self) -> str:
        # '' for plain filesystem paths.
        return self._parsed.scheme

    @property
    def path(self) -> str:
        return str(self._path)

    def __truediv__(self, path_to_append):
        assert isinstance(path_to_append, str)
        return URI(
            self._get_str_representation(self._parsed, self._path / path_to_append)
        )

    @classmethod
    def _get_str_representation(
        cls, parsed_uri: urllib.parse.ParseResult, path: Union[str, Path]
    ) -> str:
        if not parsed_uri.scheme:
            return str(path)
        # Stuff the whole path into netloc so query params survive geturl().
        return parsed_uri._replace(netloc=str(path), path="").geturl()

    def __repr__(self):
        return f"URI<{str(self)}>"

    def __str__(self):
        return self._get_str_representation(self._parsed, self._path)
98
+
99
+
100
def is_uri(path: str) -> bool:
    """Return True if ``path`` carries a URI scheme (e.g. ``s3://``)."""
    scheme = urllib.parse.urlparse(path).scheme
    return bool(scheme)
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/util.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ import logging
3
+ import os
4
+ import queue
5
+ import socket
6
+ import threading
7
+ from contextlib import closing
8
+ from typing import Optional
9
+
10
+ import numpy as np
11
+
12
+ from ray.air.constants import _ERROR_REPORT_TIMEOUT
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
def find_free_port():
    """Ask the OS for a currently-free TCP port and return its number.

    Note that the socket is closed before returning, so another process
    may grab the port in the meantime.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(("", 0))
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _, port = sock.getsockname()
        return port
22
+
23
+
24
def is_nan(value):
    """Return whether ``value`` is NaN (delegates to ``np.isnan``)."""
    return np.isnan(value)
26
+
27
+
28
def is_nan_or_inf(value):
    """Return whether ``value`` is NaN or (positive/negative) infinity."""
    return np.isnan(value) or np.isinf(value)
30
+
31
+
32
class StartTraceback(Exception):
    """These exceptions (and their tracebacks) can be skipped with `skip_exceptions`"""

    # Marker exception: wrapping an error in StartTraceback lets
    # `skip_exceptions` drop the wrapper (and its traceback frames) and
    # surface only the underlying cause chain to the user.
    pass
36
+
37
+
38
# StartTraceback variant that records which distributed worker raised the
# wrapped error.
class StartTracebackWithWorkerRank(StartTraceback):
    def __init__(self, worker_rank: int) -> None:
        super().__init__()
        # Rank of the training worker the wrapped exception originated from.
        self.worker_rank = worker_rank

    def __reduce__(self):
        # Custom pickling so the exception survives cross-process
        # serialization (e.g. through the Ray object store) with its
        # worker rank intact.
        return (self.__class__, (self.worker_rank,))
45
+
46
+
47
def skip_exceptions(exc: Optional[Exception]) -> Exception:
    """Skip all contained `StartTracebacks` to reduce traceback output.

    Returns a shallow copy of the exception with all `StartTracebacks` removed.

    If the RAY_AIR_FULL_TRACEBACKS environment variable is set,
    the original exception (not a copy) is returned.
    """
    if bool(int(os.environ.get("RAY_AIR_FULL_TRACEBACKS", "0"))):
        # User explicitly asked for the unabridged traceback.
        return exc

    if isinstance(exc, StartTraceback):
        # Drop this wrapper entirely and continue with whatever caused it.
        return skip_exceptions(exc.__cause__)

    # Shallow copy so we can rewrite __cause__ without creating recursive
    # __cause__/__context__ links on the original exception.
    shortened = copy.copy(exc).with_traceback(exc.__traceback__)

    # Recurse so nested causes are shortened as well.
    cause = getattr(exc, "__cause__", None)
    if cause:
        shortened.__cause__ = skip_exceptions(cause)

    return shortened
73
+
74
+
75
def exception_cause(exc: Optional[Exception]) -> Optional[Exception]:
    """Return ``exc.__cause__`` when available, else None (also for ``None`` input)."""
    return getattr(exc, "__cause__", None) if exc else None
80
+
81
+
82
class RunnerThread(threading.Thread):
    """Supervisor thread that runs your script.

    The target callable's return value is captured and handed back from
    ``join()``; exceptions raised by the target are shipped to the main
    thread through ``error_queue`` instead of dying silently.
    """

    def __init__(self, *args, error_queue, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        # Queue the main thread polls for errors raised inside this thread.
        self._error_queue = error_queue
        # Return value of the target callable, surfaced via `join()`.
        self._ret = None

    def _propagate_exception(self, e: BaseException):
        # Hand the exception off to the main thread via the error queue.
        try:
            # report the error but avoid indefinite blocking which would
            # prevent the exception from being propagated in the unlikely
            # case that something went terribly wrong
            self._error_queue.put(e, block=True, timeout=_ERROR_REPORT_TIMEOUT)
        except queue.Full:
            logger.critical(
                (
                    "Runner Thread was unable to report error to main "
                    "function runner thread. This means a previous error "
                    "was not processed. This should never happen."
                )
            )

    def run(self):
        # NOTE(review): relies on Thread's private `_target`/`_args`/`_kwargs`
        # attributes, which is CPython-implementation-specific — confirm if
        # alternative interpreters must be supported.
        try:
            self._ret = self._target(*self._args, **self._kwargs)
        except StopIteration:
            # StopIteration is treated as a clean, deliberate shutdown signal.
            logger.debug(
                (
                    "Thread runner raised StopIteration. Interpreting it as a "
                    "signal to terminate the thread without error."
                )
            )
        except SystemExit as e:
            # Do not propagate up for graceful termination.
            if e.code == 0:
                logger.debug(
                    (
                        "Thread runner raised SystemExit with error code 0. "
                        "Interpreting it as a signal to terminate the thread "
                        "without error."
                    )
                )
            else:
                # If non-zero exit code, then raise exception to main thread.
                self._propagate_exception(e)
        except BaseException as e:
            # Propagate all other exceptions to the main thread.
            self._propagate_exception(e)

    def join(self, timeout=None):
        # Wait for the thread to finish, then return the captured result
        # (None if the target raised or returned nothing).
        super(RunnerThread, self).join(timeout)
        return self._ret
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/check_ingest.cpython-310.pyc ADDED
Binary file (5.82 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__init__.py ADDED
File without changes
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__init__.py ADDED
File without changes
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (188 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/arrow.cpython-310.pyc ADDED
Binary file (34.5 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/pandas.cpython-310.pyc ADDED
Binary file (44.6 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/arrow.py ADDED
@@ -0,0 +1,1216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import itertools
3
+ import json
4
+ import logging
5
+ import sys
6
+ from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
7
+
8
+ import numpy as np
9
+ import pyarrow as pa
10
+ from packaging.version import parse as parse_version
11
+
12
+ from ray._private.utils import _get_pyarrow_version
13
+ from ray.air.constants import TENSOR_COLUMN_NAME
14
+ from ray.air.util.tensor_extensions.utils import (
15
+ _is_ndarray_tensor,
16
+ _is_ndarray_variable_shaped_tensor,
17
+ create_ragged_ndarray,
18
+ )
19
+ from ray.data._internal.util import GiB
20
+ from ray.util import log_once
21
+ from ray.util.annotations import DeveloperAPI, PublicAPI
22
+
23
# Parsed pyarrow version; left as None when the installed version cannot
# be determined.
PYARROW_VERSION = _get_pyarrow_version()
if PYARROW_VERSION is not None:
    PYARROW_VERSION = parse_version(PYARROW_VERSION)
# Minimum version of Arrow that supports ExtensionScalars.
# TODO(Clark): Remove conditional definition once we only support Arrow 8.0.0+.
MIN_PYARROW_VERSION_SCALAR = parse_version("8.0.0")
# Minimum version of Arrow that supports subclassable ExtensionScalars.
# TODO(Clark): Remove conditional definition once we only support Arrow 9.0.0+.
MIN_PYARROW_VERSION_SCALAR_SUBCLASS = parse_version("9.0.0")
# Minimum version supporting `zero_copy_only` flag in `ChunkedArray.to_numpy`
MIN_PYARROW_VERSION_CHUNKED_ARRAY_TO_NUMPY_ZERO_COPY_ONLY = parse_version("13.0.0")

# Upper bound on the byte width of a single unicode character (UTF-32).
NUM_BYTES_PER_UNICODE_CHAR = 4

# NOTE: Overflow threshold in bytes for most Arrow types using int32 as
# its offsets
INT32_OVERFLOW_THRESHOLD = 2 * GiB

logger = logging.getLogger(__name__)
42
+
43
+
44
@DeveloperAPI
class ArrowConversionError(Exception):
    """Error raised when there is an issue converting data to Arrow."""

    # Cap on how much of the offending data is echoed into the message.
    MAX_DATA_STR_LEN = 200

    def __init__(self, data_str: str):
        # Truncate huge payloads so the error message stays readable.
        truncated = data_str
        if len(truncated) > self.MAX_DATA_STR_LEN:
            truncated = truncated[: self.MAX_DATA_STR_LEN] + "..."
        super().__init__(f"Error converting data to Arrow: {truncated}")
55
+
56
+
57
def _arrow_supports_extension_scalars():
    """
    Whether Arrow ExtensionScalars are supported in the current pyarrow version.

    This returns True if the pyarrow version is 8.0.0+, or if the pyarrow version is
    unknown.
    """
    # TODO(Clark): Remove utility once we only support Arrow 8.0.0+.
    if PYARROW_VERSION is None:
        # Unknown version: optimistically assume support.
        return True
    return PYARROW_VERSION >= MIN_PYARROW_VERSION_SCALAR
66
+
67
+
68
def _arrow_extension_scalars_are_subclassable():
    """
    Whether Arrow ExtensionScalars support subclassing in the current pyarrow version.

    This returns True if the pyarrow version is 9.0.0+, or if the pyarrow version is
    unknown.
    """
    # TODO(Clark): Remove utility once we only support Arrow 9.0.0+.
    if PYARROW_VERSION is None:
        # Unknown version: optimistically assume support.
        return True
    return PYARROW_VERSION >= MIN_PYARROW_VERSION_SCALAR_SUBCLASS
80
+
81
+
82
@DeveloperAPI
def pyarrow_table_from_pydict(
    pydict: Dict[str, Union[List[Any], pa.Array]],
) -> pa.Table:
    """
    Convert a Python dictionary to a pyarrow Table.

    Args:
        pydict: Mapping of column name to column values (a list or an
            already-built ``pa.Array``).

    Raises:
        ArrowConversionError: if the conversion fails.
    """
    try:
        return pa.Table.from_pydict(pydict)
    except Exception as e:
        # Wrap any failure in ArrowConversionError so callers get a uniform
        # error type that carries (a truncated view of) the offending data.
        raise ArrowConversionError(str(pydict)) from e
96
+
97
+
98
@DeveloperAPI(stability="alpha")
def convert_to_pyarrow_array(column_values: np.ndarray, column_name: str) -> pa.Array:
    """Converts provided NumPy `ndarray` into PyArrow's `array` while utilizing
    both Arrow's natively supported types as well as custom extension types:

    - ArrowTensorArray (for tensors)
    - ArrowPythonObjectArray (for user-defined python class objects, as well as
    any python object that aren't represented by a corresponding Arrow's native
    scalar type)

    Raises:
        ArrowConversionError: if native conversion fails and the fallback to
            pickled-python-object serialization is unavailable (unsupported
            Arrow version) or disabled via ``DataContext``.
    """

    try:
        # Since Arrow does NOT support tensors (aka multidimensional arrays) natively,
        # we have to make sure that we handle this case utilizing `ArrowTensorArray`
        # extension type
        if column_name == TENSOR_COLUMN_NAME or _is_ndarray_tensor(column_values):
            from ray.data.extensions.tensor_extension import ArrowTensorArray

            return ArrowTensorArray.from_numpy(column_values, column_name)
        else:
            return _convert_to_pyarrow_native_array(column_values, column_name)

    except ArrowConversionError as ace:
        from ray.data.extensions.object_extension import (
            ArrowPythonObjectArray,
            _object_extension_type_allowed,
        )

        # Decide whether the failed column may instead be serialized as
        # pickled python objects, and build the matching log detail.
        if not _object_extension_type_allowed():
            should_serialize_as_object_ext_type = False
            object_ext_type_detail = (
                "skipping fallback to serialize as pickled python"
                f" objects (due to unsupported Arrow version {PYARROW_VERSION}, "
                f"min required version is {MIN_PYARROW_VERSION_SCALAR_SUBCLASS})"
            )
        else:
            from ray.data import DataContext

            if not DataContext.get_current().enable_fallback_to_arrow_object_ext_type:
                should_serialize_as_object_ext_type = False
                object_ext_type_detail = (
                    "skipping fallback to serialize as pickled python objects "
                    "(due to DataContext.enable_fallback_to_arrow_object_ext_type "
                    "= False)"
                )
            else:
                should_serialize_as_object_ext_type = True
                object_ext_type_detail = (
                    "falling back to serialize as pickled python objects"
                )

        # NOTE: To avoid logging following warning for every block it's
        # only going to be logged in following cases
        # - When fallback is disabled, or
        # - It's being logged for the first time
        if not should_serialize_as_object_ext_type or log_once(
            "_fallback_to_arrow_object_extension_type_warning"
        ):
            logger.warning(
                f"Failed to convert column '{column_name}' into pyarrow "
                f"array due to: {ace}; {object_ext_type_detail}",
                exc_info=ace,
            )

        # If `ArrowPythonObjectType` is not supported raise original exception
        if not should_serialize_as_object_ext_type:
            raise

        # Otherwise, attempt to fall back to serialize as python objects
        return ArrowPythonObjectArray.from_objects(column_values)
168
+
169
+
170
def _convert_to_pyarrow_native_array(
    column_values: np.ndarray, column_name: str
) -> pa.Array:
    """Converts provided NumPy `ndarray` into PyArrow's `array` while only utilizing
    Arrow's natively supported types (ie no custom extension types)

    Raises:
        ArrowConversionError: if Arrow cannot represent the provided values.
    """

    try:
        # NOTE: We explicitly infer PyArrow `DataType` so that
        # we can perform upcasting to be able to accommodate
        # blocks that are larger than 2Gb in size (limited
        # by int32 offsets used by Arrow internally)
        dtype = _infer_pyarrow_type(column_values)

        # NOTE(review): assumes a "TRACE" level has been registered with the
        # `logging` module elsewhere; otherwise `getLevelName` returns a
        # string -- confirm against Ray's logging setup.
        logger.log(
            logging.getLevelName("TRACE"),
            f"Inferred dtype of '{dtype}' for column '{column_name}'",
        )

        return pa.array(column_values, type=dtype)
    except Exception as e:
        raise ArrowConversionError(str(column_values)) from e
191
+
192
+
193
def _infer_pyarrow_type(column_values: np.ndarray) -> Optional[pa.DataType]:
    """Infers target Pyarrow `DataType` based on the provided
    columnar values.

    NOTE: This is a wrapper on top of `pa.infer_type(...)` utility
    performing up-casting of `binary` and `string` types to
    corresponding `large_binary` and `large_string` types in case
    any of the array elements exceeds 2Gb in size therefore
    making it impossible for original types to accommodate such
    values.

    Unfortunately, for unknown reasons PA doesn't perform
    that upcasting itself henceforth we have to do perform
    it manually

    Args:
        column_values: List of columnar values

    Returns:
        Instance of PyArrow's `DataType` based on the provided
        column values (None for an empty input)
    """

    if len(column_values) == 0:
        return None

    inferred_pa_dtype = pa.infer_type(column_values)

    def _len_gt_overflow_threshold(obj: Any) -> bool:
        # NOTE: This utility could be seeing objects other than strings or bytes in
        # cases when column contains non-scalar non-homogeneous object types as
        # column values, therefore making Arrow unable to infer corresponding
        # column type appropriately, therefore falling back to assume the type
        # of the first element in the list.
        #
        # Check out test cases for this method for an additional context.
        if isinstance(obj, (str, bytes)):
            return len(obj) > INT32_OVERFLOW_THRESHOLD

        return False

    # NOTE: Generator expressions (instead of list comprehensions) let `any`
    # short-circuit on the first oversized element instead of first
    # materializing a boolean list for the whole column.
    if pa.types.is_binary(inferred_pa_dtype) and any(
        _len_gt_overflow_threshold(v) for v in column_values
    ):
        return pa.large_binary()
    elif pa.types.is_string(inferred_pa_dtype) and any(
        _len_gt_overflow_threshold(v) for v in column_values
    ):
        return pa.large_string()

    return inferred_pa_dtype
244
+
245
+
246
@DeveloperAPI
def get_arrow_extension_tensor_types():
    """Returns list of extension types of Arrow Array holding
    multidimensional tensors
    """
    fixed_shape_types = get_arrow_extension_fixed_shape_tensor_types()
    variable_shape_types = get_arrow_extension_variable_shape_tensor_types()
    return (*fixed_shape_types, *variable_shape_types)
255
+
256
+
257
@DeveloperAPI
def get_arrow_extension_fixed_shape_tensor_types():
    """Return the tuple of Arrow extension types holding multidimensional
    tensors of *fixed* shape.
    """
    return (ArrowTensorType, ArrowTensorTypeV2)
+
264
+
265
@DeveloperAPI
def get_arrow_extension_variable_shape_tensor_types():
    """Returns list of Arrow extension types holding multidimensional
    tensors of *variable* shape
    """
    # NOTE: The docstring previously said "*fixed* shape" — a copy-paste
    # artifact from the sibling helper; this function returns the
    # variable-shape extension type.
    return (ArrowVariableShapedTensorType,)
+
272
+
273
class _BaseFixedShapeArrowTensorType(pa.ExtensionType, abc.ABC):
    """
    Arrow ExtensionType for an array of fixed-shaped, homogeneous-typed
    tensors.

    This is the Arrow side of TensorDtype.

    See Arrow extension type docs:
    https://arrow.apache.org/docs/python/extending_types.html#defining-extension-types-user-defined-types
    """

    def __init__(
        self, shape: Tuple[int, ...], tensor_dtype: pa.DataType, ext_type_id: str
    ):
        """Initialize the extension type.

        Args:
            shape: Shape of contained tensors.
            tensor_dtype: Arrow storage type (subclasses pass a list-type
                wrapping the scalar element type).
            ext_type_id: Arrow extension type id string.
        """
        self._shape = shape

        super().__init__(tensor_dtype, ext_type_id)

    @property
    def shape(self):
        """
        Shape of contained tensors.
        """
        return self._shape

    @property
    def scalar_type(self):
        """Returns the type of the underlying tensor elements."""
        return self.storage_type.value_type

    def to_pandas_dtype(self):
        """
        Convert Arrow extension type to corresponding Pandas dtype.

        Returns:
            An instance of pd.api.extensions.ExtensionDtype.
        """
        from ray.air.util.tensor_extensions.pandas import TensorDtype

        return TensorDtype(self._shape, self.scalar_type.to_pandas_dtype())

    def __reduce__(self):
        # Pickling support: reconstruct the extension type from its storage
        # type and the serialized (JSON-encoded shape) payload.
        return self.__arrow_ext_deserialize__, (
            self.storage_type,
            self.__arrow_ext_serialize__(),
        )

    def __arrow_ext_serialize__(self):
        # The only extra state beyond the storage type is the tensor shape.
        return json.dumps(self._shape).encode()

    def __arrow_ext_class__(self):
        """
        ExtensionArray subclass with custom logic for this array of tensors
        type.

        Returns:
            A subclass of pd.api.extensions.ExtensionArray.
        """
        return ArrowTensorArray

    if _arrow_extension_scalars_are_subclassable():
        # TODO(Clark): Remove this version guard once we only support Arrow 9.0.0+.
        def __arrow_ext_scalar_class__(self):
            """
            ExtensionScalar subclass with custom logic for this array of tensors type.
            """
            return ArrowTensorScalar

    if _arrow_supports_extension_scalars():
        # TODO(Clark): Remove this version guard once we only support Arrow 8.0.0+.
        def _extension_scalar_to_ndarray(
            self, scalar: pa.ExtensionScalar
        ) -> np.ndarray:
            """
            Convert an ExtensionScalar to a tensor element.
            """
            raw_values = scalar.value.values
            shape = scalar.type.shape
            value_type = raw_values.type
            offset = raw_values.offset
            # Buffer index 1 of the flat values array holds the raw data
            # (index 0 is the validity bitmap).
            data_buffer = raw_values.buffers()[1]
            return _to_ndarray_helper(shape, value_type, offset, data_buffer)

    def __str__(self) -> str:
        return (
            f"numpy.ndarray(shape={self.shape}, dtype={self.storage_type.value_type})"
        )

    def __repr__(self) -> str:
        return str(self)

    @classmethod
    def _need_variable_shaped_tensor_array(
        cls,
        array_types: Sequence[
            Union[
                "ArrowTensorType", "ArrowTensorTypeV2", "ArrowVariableShapedTensorType"
            ]
        ],
    ) -> bool:
        """
        Whether the provided list of tensor types needs a variable-shaped
        representation (i.e. `ArrowVariableShapedTensorType`) when concatenating
        or chunking. If one or more of the tensor types in `array_types` are
        variable-shaped and/or any of the tensor arrays have a different shape
        than the others, a variable-shaped tensor array representation will be
        required and this method will return True.

        Args:
            array_types: List of tensor types to check if a variable-shaped
                representation is required for concatenation

        Returns:
            True if concatenating arrays with types `array_types` requires
            a variable-shaped representation
        """
        shape = None
        for arr_type in array_types:
            # If at least one of the arrays is variable-shaped, we can immediately
            # short-circuit since we require a variable-shaped representation.
            if isinstance(arr_type, ArrowVariableShapedTensorType):
                return True
            if not isinstance(arr_type, get_arrow_extension_fixed_shape_tensor_types()):
                raise ValueError(
                    "All provided array types must be an instance of either "
                    "ArrowTensorType or ArrowVariableShapedTensorType, but "
                    f"got {arr_type}"
                )
            # We need variable-shaped representation if any of the tensor arrays have
            # different shapes.
            if shape is not None and arr_type.shape != shape:
                return True
            shape = arr_type.shape
        return False
+
408
+
409
@PublicAPI(stability="beta")
class ArrowTensorType(_BaseFixedShapeArrowTensorType):
    """Arrow ExtensionType (v1) for fixed-shape tensors.

    NOTE: Because the underlying Pyarrow ``ListType`` uses int32 offsets,
    this type can *NOT* represent tensors larger than 4Gb (the offsets
    would overflow).
    """

    # Offsets of the underlying list storage are 32-bit.
    OFFSET_DTYPE = np.int32

    def __init__(self, shape: Tuple[int, ...], dtype: pa.DataType):
        """Create the extension type for an array of fixed-shape tensors.

        Args:
            shape: Shape of contained tensors.
            dtype: pyarrow dtype of tensor elements.
        """
        storage_type = pa.list_(dtype)
        super().__init__(shape, storage_type, "ray.data.arrow_tensor")

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        # The serialized payload is the JSON-encoded tensor shape; the scalar
        # element type is recovered from the storage type.
        deserialized_shape = tuple(json.loads(serialized))
        return cls(deserialized_shape, storage_type.value_type)
+
435
+
436
@PublicAPI(stability="alpha")
class ArrowTensorTypeV2(_BaseFixedShapeArrowTensorType):
    """Arrow ExtensionType (v2) for fixed-shape tensors.

    Unlike v1, storage is a ``large_list`` with 64-bit offsets, hence
    tensors larger than 4Gb are supported.
    """

    # Offsets of the underlying large-list storage are 64-bit.
    OFFSET_DTYPE = np.int64

    def __init__(self, shape: Tuple[int, ...], dtype: pa.DataType):
        """Create the extension type for an array of fixed-shape tensors.

        Args:
            shape: Shape of contained tensors.
            dtype: pyarrow dtype of tensor elements.
        """
        storage_type = pa.large_list(dtype)
        super().__init__(shape, storage_type, "ray.data.arrow_tensor_v2")

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        # The serialized payload is the JSON-encoded tensor shape; the scalar
        # element type is recovered from the storage type.
        deserialized_shape = tuple(json.loads(serialized))
        return cls(deserialized_shape, storage_type.value_type)
+
458
+
459
if _arrow_extension_scalars_are_subclassable():
    # TODO(Clark): Remove this version guard once we only support Arrow 9.0.0+.
    @PublicAPI(stability="beta")
    class ArrowTensorScalar(pa.ExtensionScalar):
        """Scalar of a tensor extension array, i.e. a single tensor element.

        Conversion to an ndarray is delegated to the owning extension type's
        ``_extension_scalar_to_ndarray`` helper.
        """

        def as_py(self) -> np.ndarray:
            # Delegate to the extension type, which knows the tensor shape
            # and element dtype.
            return self.type._extension_scalar_to_ndarray(self)

        def __array__(self) -> np.ndarray:
            # NumPy interop: np.asarray(scalar) yields the tensor element.
            return self.as_py()
+
469
+
470
# TODO(Clark): Remove this mixin once we only support Arrow 9.0.0+.
class _ArrowTensorScalarIndexingMixin:
    """
    A mixin providing support for scalar indexing in tensor extension arrays for
    Arrow < 9.0.0, before full ExtensionScalar support was added. This mixin overrides
    __getitem__, __iter__, and to_pylist.
    """

    # This mixin will be a no-op (no methods added) for Arrow 9.0.0+.
    if not _arrow_extension_scalars_are_subclassable():
        # NOTE: These __iter__ and to_pylist definitions are shared for both
        # Arrow < 8.0.0 and Arrow 8.*.
        def __iter__(self):
            # Override pa.Array.__iter__() in order to return an iterator of
            # properly shaped tensors instead of an iterator of flattened tensors.
            # See comment in above __getitem__ method.
            for i in range(len(self)):
                # Use overridden __getitem__ method.
                yield self.__getitem__(i)

        def to_pylist(self):
            # Override pa.Array.to_pylist() due to a lack of ExtensionScalar
            # support (see comment in __getitem__).
            # NOTE: Relies on the overridden __iter__ above, which yields
            # properly shaped tensor elements.
            return list(self)

        if _arrow_supports_extension_scalars():
            # NOTE(Clark): This __getitem__ override is only needed for Arrow 8.*,
            # before ExtensionScalar subclassing support was added.
            # TODO(Clark): Remove these methods once we only support Arrow 9.0.0+.
            def __getitem__(self, key):
                # This __getitem__ hook allows us to support proper indexing when
                # accessing a single tensor (a "scalar" item of the array). Without this
                # hook for integer keys, the indexing will fail on pyarrow < 9.0.0 due
                # to a lack of ExtensionScalar subclassing support.

                # NOTE(Clark): We'd like to override the pa.Array.getitem() helper
                # instead, which would obviate the need for overriding __iter__(), but
                # unfortunately overriding Cython cdef methods with normal Python
                # methods isn't allowed.
                item = super().__getitem__(key)
                if not isinstance(key, slice):
                    # Integer key: unwrap the ExtensionScalar into an ndarray.
                    item = item.type._extension_scalar_to_ndarray(item)
                return item

        else:
            # NOTE(Clark): This __getitem__ override is only needed for Arrow < 8.0.0,
            # before any ExtensionScalar support was added.
            # TODO(Clark): Remove these methods once we only support Arrow 8.0.0+.
            def __getitem__(self, key):
                # This __getitem__ hook allows us to support proper indexing when
                # accessing a single tensor (a "scalar" item of the array). Without this
                # hook for integer keys, the indexing will fail on pyarrow < 8.0.0 due
                # to a lack of ExtensionScalar support.

                # NOTE(Clark): We'd like to override the pa.Array.getitem() helper
                # instead, which would obviate the need for overriding __iter__(), but
                # unfortunately overriding Cython cdef methods with normal Python
                # methods isn't allowed.
                if isinstance(key, slice):
                    return super().__getitem__(key)
                # Integer key: materialize the single tensor element directly
                # from the underlying buffers.
                return self._to_numpy(key)
+
532
+
533
# NOTE: We need to inherit from the mixin before pa.ExtensionArray to ensure that the
# mixin's overriding methods appear first in the MRO.
# TODO(Clark): Remove this mixin once we only support Arrow 9.0.0+.
@PublicAPI(stability="beta")
class ArrowTensorArray(_ArrowTensorScalarIndexingMixin, pa.ExtensionArray):
    """
    An array of fixed-shape, homogeneous-typed tensors.

    This is the Arrow side of TensorArray.

    See Arrow docs for customizing extension arrays:
    https://arrow.apache.org/docs/python/extending_types.html#custom-extension-array-class
    """

    @classmethod
    def from_numpy(
        cls,
        arr: Union[np.ndarray, Iterable[np.ndarray]],
        column_name: Optional[str] = None,
    ) -> Union["ArrowTensorArray", "ArrowVariableShapedTensorArray"]:
        """
        Convert an ndarray or an iterable of ndarrays to an array of homogeneous-typed
        tensors. If given fixed-shape tensor elements, this will return an
        ``ArrowTensorArray``; if given variable-shape tensor elements, this will return
        an ``ArrowVariableShapedTensorArray``.

        Args:
            arr: An ndarray or an iterable of ndarrays.
            column_name: Optional. Used only in logging outputs to provide
                additional details.

        Returns:
            - If fixed-shape tensor elements, an ``ArrowTensorArray`` containing
              ``len(arr)`` tensors of fixed shape.
            - If variable-shaped tensor elements, an ``ArrowVariableShapedTensorArray``
              containing ``len(arr)`` tensors of variable shape.
            - If scalar elements, a ``pyarrow.Array``.

        Raises:
            ArrowConversionError: If the conversion to an Arrow array fails for
                any reason (wraps the underlying exception).
        """
        # Materialize generic iterables so the elements can be inspected and
        # stacked below.
        if not isinstance(arr, np.ndarray) and isinstance(arr, Iterable):
            arr = list(arr)

        if isinstance(arr, (list, tuple)) and arr and isinstance(arr[0], np.ndarray):
            # Stack ndarrays and pass through to ndarray handling logic below.
            try:
                arr = np.stack(arr, axis=0)
            except ValueError as ve:
                logger.warning(
                    f"Failed to stack lists due to: {ve}; "
                    f"falling back to using np.array(..., dtype=object)",
                    exc_info=ve,
                )

                # ndarray stacking may fail if the arrays are heterogeneously-shaped.
                arr = np.array(arr, dtype=object)
        if not isinstance(arr, np.ndarray):
            raise ValueError(
                f"Must give ndarray or iterable of ndarrays, got {type(arr)} {arr}"
            )

        try:
            return cls._from_numpy(arr)
        except Exception as e:
            # Include column name (when known) plus array metadata in the
            # wrapping error to aid debugging.
            data_str = ""
            if column_name:
                data_str += f"column: '{column_name}', "
            data_str += f"shape: {arr.shape}, dtype: {arr.dtype}, data: {arr}"
            raise ArrowConversionError(data_str) from e

    @classmethod
    def _from_numpy(
        cls,
        arr: np.ndarray,
    ) -> Union["ArrowTensorArray", "ArrowVariableShapedTensorArray"]:
        # Core ndarray -> Arrow conversion: dispatches to a plain Arrow array
        # for scalars, to ArrowVariableShapedTensorArray for ragged input, and
        # otherwise builds the fixed-shape extension array from raw buffers.
        if len(arr) > 0 and np.isscalar(arr[0]):
            # Elements are scalar so a plain Arrow Array will suffice.
            return pa.array(arr)
        if _is_ndarray_variable_shaped_tensor(arr):
            # Tensor elements have variable shape, so we delegate to
            # ArrowVariableShapedTensorArray.
            return ArrowVariableShapedTensorArray.from_numpy(arr)
        if not arr.flags.c_contiguous:
            # We only natively support C-contiguous ndarrays.
            arr = np.ascontiguousarray(arr)
        scalar_dtype = pa.from_numpy_dtype(arr.dtype)
        if pa.types.is_string(scalar_dtype):
            if arr.dtype.byteorder == ">" or (
                arr.dtype.byteorder == "=" and sys.byteorder == "big"
            ):
                raise ValueError(
                    "Only little-endian string tensors are supported, "
                    f"but got: {arr.dtype}",
                )
            # Store fixed-width NumPy unicode data as fixed-size binary of the
            # same byte width.
            scalar_dtype = pa.binary(arr.dtype.itemsize)
        outer_len = arr.shape[0]
        element_shape = arr.shape[1:]
        total_num_items = arr.size
        num_items_per_element = np.prod(element_shape) if element_shape else 1

        # Data buffer.
        if pa.types.is_boolean(scalar_dtype):
            # NumPy doesn't represent boolean arrays as bit-packed, so we manually
            # bit-pack the booleans before handing the buffer off to Arrow.
            # NOTE: Arrow expects LSB bit-packed ordering.
            # NOTE: This creates a copy.
            arr = np.packbits(arr, bitorder="little")
        data_buffer = pa.py_buffer(arr)
        data_array = pa.Array.from_buffers(
            scalar_dtype, total_num_items, [None, data_buffer]
        )

        from ray.data import DataContext

        # Pick the extension type version from the (process-wide) DataContext:
        # v2 uses 64-bit offsets (supports tensors > 4Gb), v1 uses 32-bit.
        if DataContext.get_current().use_arrow_tensor_v2:
            pa_type_ = ArrowTensorTypeV2(element_shape, scalar_dtype)
        else:
            pa_type_ = ArrowTensorType(element_shape, scalar_dtype)

        # Create Offset buffer
        offset_buffer = pa.py_buffer(
            pa_type_.OFFSET_DTYPE(
                [i * num_items_per_element for i in range(outer_len + 1)]
            )
        )

        storage = pa.Array.from_buffers(
            pa_type_.storage_type,
            outer_len,
            [None, offset_buffer],
            children=[data_array],
        )

        return pa.ExtensionArray.from_storage(pa_type_, storage)

    def _to_numpy(self, index: Optional[int] = None, zero_copy_only: bool = False):
        """
        Helper for getting either an element of the array of tensors as an
        ndarray, or the entire array of tensors as a single ndarray.

        Args:
            index: The index of the tensor element that we wish to return as
                an ndarray. If not given, the entire array of tensors is
                returned as an ndarray.
            zero_copy_only: If True, an exception will be raised if the
                conversion to a NumPy array would require copying the
                underlying data (e.g. in presence of nulls, or for
                non-primitive types). This argument is currently ignored, so
                zero-copy isn't enforced even if this argument is true.

        Returns:
            The corresponding tensor element as an ndarray if an index was
            given, or the entire array of tensors as an ndarray otherwise.
        """
        # TODO(Clark): Enforce zero_copy_only.
        # TODO(Clark): Support strides?
        # Buffers schema:
        # [None, offset_buffer, None, data_buffer]
        buffers = self.buffers()
        data_buffer = buffers[3]
        storage_list_type = self.storage.type
        value_type = storage_list_type.value_type
        ext_dtype = value_type.to_pandas_dtype()
        shape = self.type.shape
        if pa.types.is_boolean(value_type):
            # Arrow boolean array buffers are bit-packed, with 8 entries per byte,
            # and are accessed via bit offsets.
            buffer_item_width = value_type.bit_width
        else:
            # We assume all other array types are accessed via byte array
            # offsets.
            buffer_item_width = value_type.bit_width // 8
        # Number of items per inner ndarray.
        num_items_per_element = np.prod(shape) if shape else 1
        # Base offset into data buffer, e.g. due to zero-copy slice.
        buffer_offset = self.offset * num_items_per_element
        # Offset of array data in buffer.
        offset = buffer_item_width * buffer_offset
        if index is not None:
            # Getting a single tensor element of the array.
            offset_buffer = buffers[1]
            offset_array = np.ndarray(
                (len(self),), buffer=offset_buffer, dtype=self.type.OFFSET_DTYPE
            )
            # Offset into array to reach logical index.
            index_offset = offset_array[index]
            # Add the index offset to the base offset.
            offset += buffer_item_width * index_offset
        else:
            # Getting the entire array of tensors.
            shape = (len(self),) + shape
        if pa.types.is_boolean(value_type):
            # Special handling for boolean arrays, since Arrow bit-packs boolean arrays
            # while NumPy does not.
            # Cast as uint8 array and let NumPy unpack into a boolean view.
            # Offset into uint8 array, where each element is a bucket for 8 booleans.
            byte_bucket_offset = offset // 8
            # Offset for a specific boolean, within a uint8 array element.
            bool_offset = offset % 8
            # The number of uint8 array elements (buckets) that our slice spans.
            # Note that, due to the offset for a specific boolean, the slice can span
            # byte boundaries even if it contains less than 8 booleans.
            num_boolean_byte_buckets = 1 + ((bool_offset + np.prod(shape) - 1) // 8)
            # Construct the uint8 array view on the buffer.
            arr = np.ndarray(
                (num_boolean_byte_buckets,),
                dtype=np.uint8,
                buffer=data_buffer,
                offset=byte_bucket_offset,
            )
            # Unpack into a byte per boolean, using LSB bit-packed ordering.
            arr = np.unpackbits(arr, bitorder="little")
            # Interpret buffer as boolean array.
            return np.ndarray(shape, dtype=np.bool_, buffer=arr, offset=bool_offset)
        # Special handling of binary/string types. Assumes unicode string tensor columns
        if pa.types.is_fixed_size_binary(value_type):
            ext_dtype = np.dtype(
                f"<U{value_type.byte_width // NUM_BYTES_PER_UNICODE_CHAR}"
            )
        return np.ndarray(shape, dtype=ext_dtype, buffer=data_buffer, offset=offset)

    def to_numpy(self, zero_copy_only: bool = True):
        """
        Convert the entire array of tensors into a single ndarray.

        Args:
            zero_copy_only: If True, an exception will be raised if the
                conversion to a NumPy array would require copying the
                underlying data (e.g. in presence of nulls, or for
                non-primitive types). This argument is currently ignored, so
                zero-copy isn't enforced even if this argument is true.

        Returns:
            A single ndarray representing the entire array of tensors.
        """
        return self._to_numpy(zero_copy_only=zero_copy_only)

    @classmethod
    def _concat_same_type(
        cls,
        to_concat: Sequence[
            Union["ArrowTensorArray", "ArrowVariableShapedTensorArray"]
        ],
    ) -> Union["ArrowTensorArray", "ArrowVariableShapedTensorArray"]:
        """
        Concatenate multiple tensor arrays.

        If one or more of the tensor arrays in to_concat are variable-shaped and/or any
        of the tensor arrays have a different shape than the others, a variable-shaped
        tensor array will be returned.
        """
        to_concat_types = [arr.type for arr in to_concat]
        if ArrowTensorType._need_variable_shaped_tensor_array(to_concat_types):
            # Need variable-shaped tensor array.
            # TODO(Clark): Eliminate this NumPy roundtrip by directly constructing the
            # underlying storage array buffers (NumPy roundtrip will not be zero-copy
            # for e.g. boolean arrays).
            # NOTE(Clark): Iterating over a tensor extension array converts each element
            # to an ndarray view.
            return ArrowVariableShapedTensorArray.from_numpy(
                [e for a in to_concat for e in a]
            )
        else:
            # All arrays share the same fixed shape: concatenate the raw
            # storage arrays directly (no per-element conversion needed).
            storage = pa.concat_arrays([c.storage for c in to_concat])

            return ArrowTensorArray.from_storage(to_concat[0].type, storage)

    @classmethod
    def _chunk_tensor_arrays(
        cls, arrs: Sequence[Union["ArrowTensorArray", "ArrowVariableShapedTensorArray"]]
    ) -> pa.ChunkedArray:
        """
        Create a ChunkedArray from multiple tensor arrays.
        """
        arrs_types = [arr.type for arr in arrs]
        if ArrowTensorType._need_variable_shaped_tensor_array(arrs_types):
            # Normalize all chunks to the variable-shaped representation so the
            # resulting ChunkedArray has a single, consistent type.
            new_arrs = []
            for a in arrs:
                if isinstance(a.type, get_arrow_extension_fixed_shape_tensor_types()):
                    a = a.to_variable_shaped_tensor_array()
                assert isinstance(a.type, ArrowVariableShapedTensorType)
                new_arrs.append(a)
            arrs = new_arrs
        return pa.chunked_array(arrs)

    def to_variable_shaped_tensor_array(self) -> "ArrowVariableShapedTensorArray":
        """
        Convert this tensor array to a variable-shaped tensor array.

        This is primarily used when concatenating multiple chunked tensor arrays where
        at least one chunked array is already variable-shaped and/or the shapes of the
        chunked arrays differ, in which case the resulting concatenated tensor array
        will need to be in the variable-shaped representation.
        """
        # TODO(Clark): Eliminate this NumPy roundtrip by directly constructing the
        # underlying storage array buffers (NumPy roundtrip will not be zero-copy for
        # e.g. boolean arrays).
        return ArrowVariableShapedTensorArray.from_numpy(self.to_numpy())
+
830
+
831
@PublicAPI(stability="alpha")
class ArrowVariableShapedTensorType(pa.ExtensionType):
    """
    Arrow ExtensionType for an array of heterogeneous-shaped, homogeneous-typed
    tensors.

    This is the Arrow side of TensorDtype for tensor elements with different shapes.
    Note that this extension only supports non-ragged tensor elements; i.e., when
    considering each tensor element in isolation, they must have a well-defined,
    non-ragged shape.

    See Arrow extension type docs:
    https://arrow.apache.org/docs/python/extending_types.html#defining-extension-types-user-defined-types
    """

    def __init__(self, dtype: pa.DataType, ndim: int):
        """
        Construct the Arrow extension type for array of heterogeneous-shaped tensors.

        Args:
            dtype: pyarrow dtype of tensor elements.
            ndim: The number of dimensions in the tensor elements.
        """
        self._ndim = ndim
        # Storage is a struct of the raveled tensor data ("data") plus each
        # element's shape ("shape"), since shape varies per element.
        super().__init__(
            pa.struct(
                [("data", pa.large_list(dtype)), ("shape", pa.list_(pa.int64()))]
            ),
            "ray.data.arrow_variable_shaped_tensor",
        )

    def to_pandas_dtype(self):
        """
        Convert Arrow extension type to corresponding Pandas dtype.

        Returns:
            An instance of pd.api.extensions.ExtensionDtype.
        """
        from ray.air.util.tensor_extensions.pandas import TensorDtype

        # Per-element shapes are unknown; only the number of dimensions is
        # fixed, so each dimension is reported as None.
        return TensorDtype(
            (None,) * self.ndim,
            self.storage_type["data"].type.value_type.to_pandas_dtype(),
        )

    @property
    def ndim(self) -> int:
        """Return the number of dimensions in the tensor elements."""
        return self._ndim

    @property
    def scalar_type(self):
        """Returns the type of the underlying tensor elements."""
        data_field_index = self.storage_type.get_field_index("data")
        return self.storage_type[data_field_index].type.value_type

    def __reduce__(self):
        # Pickling support: reconstruct the extension type from its storage
        # type and the serialized (JSON-encoded ndim) payload.
        return self.__arrow_ext_deserialize__, (
            self.storage_type,
            self.__arrow_ext_serialize__(),
        )

    def __arrow_ext_serialize__(self):
        # The only extra state beyond the storage type is ndim.
        return json.dumps(self._ndim).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        ndim = json.loads(serialized)
        # The scalar element type is recovered from the "data" field of the
        # struct storage type.
        dtype = storage_type["data"].type.value_type
        return cls(dtype, ndim)

    def __arrow_ext_class__(self):
        """
        ExtensionArray subclass with custom logic for this array of tensors
        type.

        Returns:
            A subclass of pd.api.extensions.ExtensionArray.
        """
        return ArrowVariableShapedTensorArray

    if _arrow_extension_scalars_are_subclassable():
        # TODO(Clark): Remove this version guard once we only support Arrow 9.0.0+.
        def __arrow_ext_scalar_class__(self):
            """
            ExtensionScalar subclass with custom logic for this array of tensors type.
            """
            return ArrowTensorScalar

    def __str__(self) -> str:
        dtype = self.storage_type["data"].type.value_type
        return f"numpy.ndarray(ndim={self.ndim}, dtype={dtype})"

    def __repr__(self) -> str:
        return str(self)

    if _arrow_supports_extension_scalars():
        # TODO(Clark): Remove this version guard once we only support Arrow 8.0.0+.
        def _extension_scalar_to_ndarray(
            self, scalar: pa.ExtensionScalar
        ) -> np.ndarray:
            """
            Convert an ExtensionScalar to a tensor element.
            """
            data = scalar.value.get("data")
            raw_values = data.values

            # Reconstruct this element's shape from the "shape" struct field.
            shape = tuple(scalar.value.get("shape").as_py())
            value_type = raw_values.type
            offset = raw_values.offset
            # Buffer index 1 of the flat values array holds the raw data
            # (index 0 is the validity bitmap).
            data_buffer = raw_values.buffers()[1]
            return _to_ndarray_helper(shape, value_type, offset, data_buffer)
+
944
+
945
+ # NOTE: We need to inherit from the mixin before pa.ExtensionArray to ensure that the
946
+ # mixin's overriding methods appear first in the MRO.
947
+ # TODO(Clark): Remove this mixin once we only support Arrow 9.0.0+.
948
+ @PublicAPI(stability="alpha")
949
+ class ArrowVariableShapedTensorArray(
950
+ _ArrowTensorScalarIndexingMixin, pa.ExtensionArray
951
+ ):
952
+ """
953
+ An array of heterogeneous-shaped, homogeneous-typed tensors.
954
+
955
+ This is the Arrow side of TensorArray for tensor elements that have differing
956
+ shapes. Note that this extension only supports non-ragged tensor elements; i.e.,
957
+ when considering each tensor element in isolation, they must have a well-defined
958
+ shape. This extension also only supports tensor elements that all have the same
959
+ number of dimensions.
960
+
961
+ See Arrow docs for customizing extension arrays:
962
+ https://arrow.apache.org/docs/python/extending_types.html#custom-extension-array-class
963
+ """
964
+
965
+ @classmethod
966
+ def from_numpy(
967
+ cls, arr: Union[np.ndarray, List[np.ndarray], Tuple[np.ndarray]]
968
+ ) -> "ArrowVariableShapedTensorArray":
969
+ """
970
+ Convert an ndarray or an iterable of heterogeneous-shaped ndarrays to an array
971
+ of heterogeneous-shaped, homogeneous-typed tensors.
972
+
973
+ Args:
974
+ arr: An ndarray or an iterable of heterogeneous-shaped ndarrays.
975
+
976
+ Returns:
977
+ An ArrowVariableShapedTensorArray containing len(arr) tensors of
978
+ heterogeneous shape.
979
+ """
980
+ # Implementation note - Arrow representation of ragged tensors:
981
+ #
982
+ # We represent an array of ragged tensors using a struct array containing two
983
+ # fields:
984
+ # - data: a variable-sized list array, where each element in the array is a
985
+ # tensor element stored in a 1D (raveled) variable-sized list of the
986
+ # underlying scalar data type.
987
+ # - shape: a variable-sized list array containing the shapes of each tensor
988
+ # element.
989
+ if not isinstance(arr, (list, tuple, np.ndarray)):
990
+ raise ValueError(
991
+ "ArrowVariableShapedTensorArray can only be constructed from an "
992
+ f"ndarray or a list/tuple of ndarrays, but got: {type(arr)}"
993
+ )
994
+ if len(arr) == 0:
995
+ # Empty ragged tensor arrays are not supported.
996
+ raise ValueError("Creating empty ragged tensor arrays is not supported.")
997
+
998
+ # Whether all subndarrays are contiguous views of the same ndarray.
999
+ shapes, sizes, raveled = [], [], []
1000
+ ndim = None
1001
+ for a in arr:
1002
+ a = np.asarray(a)
1003
+ if ndim is not None and a.ndim != ndim:
1004
+ raise ValueError(
1005
+ "ArrowVariableShapedTensorArray only supports tensor elements that "
1006
+ "all have the same number of dimensions, but got tensor elements "
1007
+ f"with dimensions: {ndim}, {a.ndim}"
1008
+ )
1009
+ ndim = a.ndim
1010
+ shapes.append(a.shape)
1011
+ sizes.append(a.size)
1012
+ # Convert to 1D array view; this should be zero-copy in the common case.
1013
+ # NOTE: If array is not in C-contiguous order, this will convert it to
1014
+ # C-contiguous order, incurring a copy.
1015
+ a = np.ravel(a, order="C")
1016
+ raveled.append(a)
1017
+ # Get size offsets and total size.
1018
+ sizes = np.array(sizes)
1019
+ size_offsets = np.cumsum(sizes)
1020
+ total_size = size_offsets[-1]
1021
+ # Concatenate 1D views into a contiguous 1D array.
1022
+ if all(_is_contiguous_view(curr, prev) for prev, curr in _pairwise(raveled)):
1023
+ # An optimized zero-copy path if raveled tensor elements are already
1024
+ # contiguous in memory, e.g. if this tensor array has already done a
1025
+ # roundtrip through our Arrow representation.
1026
+ np_data_buffer = raveled[-1].base
1027
+ else:
1028
+ np_data_buffer = np.concatenate(raveled)
1029
+ dtype = np_data_buffer.dtype
1030
+ pa_dtype = pa.from_numpy_dtype(dtype)
1031
+ if pa.types.is_string(pa_dtype):
1032
+ if dtype.byteorder == ">" or (
1033
+ dtype.byteorder == "=" and sys.byteorder == "big"
1034
+ ):
1035
+ raise ValueError(
1036
+ "Only little-endian string tensors are supported, "
1037
+ f"but got: {dtype}"
1038
+ )
1039
+ pa_dtype = pa.binary(dtype.itemsize)
1040
+ if dtype.type is np.bool_:
1041
+ # NumPy doesn't represent boolean arrays as bit-packed, so we manually
1042
+ # bit-pack the booleans before handing the buffer off to Arrow.
1043
+ # NOTE: Arrow expects LSB bit-packed ordering.
1044
+ # NOTE: This creates a copy.
1045
+ np_data_buffer = np.packbits(np_data_buffer, bitorder="little")
1046
+ data_buffer = pa.py_buffer(np_data_buffer)
1047
+ # Construct underlying data array.
1048
+ value_array = pa.Array.from_buffers(pa_dtype, total_size, [None, data_buffer])
1049
+ # Construct array for offsets into the 1D data array, where each offset
1050
+ # corresponds to a tensor element.
1051
+ size_offsets = np.insert(size_offsets, 0, 0)
1052
+ offset_array = pa.array(size_offsets)
1053
+ data_array = pa.LargeListArray.from_arrays(offset_array, value_array)
1054
+ # We store the tensor element shapes so we can reconstruct each tensor when
1055
+ # converting back to NumPy ndarrays.
1056
+ shape_array = pa.array(shapes)
1057
+ # Build storage array containing tensor data and the tensor element shapes.
1058
+ storage = pa.StructArray.from_arrays(
1059
+ [data_array, shape_array],
1060
+ ["data", "shape"],
1061
+ )
1062
+ type_ = ArrowVariableShapedTensorType(pa_dtype, ndim)
1063
+ return pa.ExtensionArray.from_storage(type_, storage)
1064
+
1065
    def _to_numpy(self, index: Optional[int] = None, zero_copy_only: bool = False):
        """
        Helper for getting either an element of the array of tensors as an ndarray, or
        the entire array of tensors as a single ndarray.

        Args:
            index: The index of the tensor element that we wish to return as an
                ndarray. If not given, the entire array of tensors is returned as an
                ndarray.
            zero_copy_only: If True, an exception will be raised if the conversion to a
                NumPy array would require copying the underlying data (e.g. in presence
                of nulls, or for non-primitive types). This argument is currently
                ignored, so zero-copy isn't enforced even if this argument is true.

        Returns:
            The corresponding tensor element as an ndarray if an index was given, or
            the entire array of tensors as an ndarray otherwise.
        """
        # TODO(Clark): Enforce zero_copy_only.
        # TODO(Clark): Support strides?
        if index is None:
            # Get individual ndarrays for each tensor element.
            arrs = [self._to_numpy(i, zero_copy_only) for i in range(len(self))]
            # Return ragged NumPy ndarray in the ndarray of ndarray pointers
            # representation.
            return create_ragged_ndarray(arrs)
        # The storage is a struct array with a "data" LargeList field (flattened
        # tensor values) and a "shape" field (per-element shapes).
        data = self.storage.field("data")
        shapes = self.storage.field("shape")

        # Shape of the single requested tensor element.
        shape = shapes[index].as_py()
        value_type = data.type.value_type
        # Offset of this element within the flattened values, counted in
        # elements (not bytes).
        offset = data.offsets[index].as_py()
        # Buffer index 3 of a LargeListArray is the child values buffer (the
        # list validity, list offsets, and child validity buffers precede it).
        data_buffer = data.buffers()[3]
        return _to_ndarray_helper(shape, value_type, offset, data_buffer)
1099
+
1100
    def to_numpy(self, zero_copy_only: bool = True):
        """
        Convert the entire array of tensors into a single ndarray.

        Args:
            zero_copy_only: If True, an exception will be raised if the conversion to a
                NumPy array would require copying the underlying data (e.g. in presence
                of nulls, or for non-primitive types). This argument is currently
                ignored, so zero-copy isn't enforced even if this argument is true.

        Returns:
            A single ndarray representing the entire array of tensors.
        """
        # Delegates to _to_numpy with no index, which yields one ragged ndarray
        # covering every (variable-shaped) element in this array.
        return self._to_numpy(zero_copy_only=zero_copy_only)
1114
+
1115
+
1116
+ def _is_contiguous_view(curr: np.ndarray, prev: Optional[np.ndarray]) -> bool:
1117
+ """Check if the provided tensor element is contiguous with the previous tensor
1118
+ element.
1119
+
1120
+ Args:
1121
+ curr: The tensor element whose contiguity that we wish to check.
1122
+ prev: The previous tensor element in the tensor array.
1123
+
1124
+ Returns:
1125
+ Whether the provided tensor element is contiguous with the previous tensor
1126
+ element.
1127
+ """
1128
+ if (
1129
+ curr.base is None
1130
+ or not curr.data.c_contiguous
1131
+ or (prev is not None and curr.base is not prev.base)
1132
+ ):
1133
+ # curr is either:
1134
+ # - not a view,
1135
+ # - not in C-contiguous order,
1136
+ # - a view that does not share its base with the other subndarrays.
1137
+ return False
1138
+ else:
1139
+ # curr is a C-contiguous view that shares the same base with the seen
1140
+ # subndarrays, but we need to confirm that it is contiguous with the
1141
+ # previous subndarray.
1142
+ if prev is not None and (
1143
+ _get_buffer_address(curr) - _get_buffer_address(prev)
1144
+ != prev.base.dtype.itemsize * prev.size
1145
+ ):
1146
+ # This view is not contiguous with the previous view.
1147
+ return False
1148
+ else:
1149
+ return True
1150
+
1151
+
1152
+ def _get_buffer_address(arr: np.ndarray) -> int:
1153
+ """Get the address of the buffer underlying the provided NumPy ndarray."""
1154
+ return arr.__array_interface__["data"][0]
1155
+
1156
+
1157
+ def _pairwise(iterable):
1158
+ # pairwise('ABCDEFG') --> AB BC CD DE EF FG
1159
+ # Backport of itertools.pairwise for Python < 3.10.
1160
+ a, b = itertools.tee(iterable)
1161
+ next(b, None)
1162
+ return zip(a, b)
1163
+
1164
+
1165
def _to_ndarray_helper(shape, value_type, offset, data_buffer):
    """Build an ndarray of ``shape`` as a view onto an Arrow data buffer.

    Args:
        shape: Shape of the tensor element to reconstruct.
        value_type: Arrow value type of the flattened data.
        offset: Offset of the element within the flattened values, counted in
            elements (bits for booleans, otherwise multiples of the value width).
        data_buffer: The Arrow buffer holding the flattened values.

    Returns:
        An ndarray of ``shape``; zero-copy except for booleans, which must be
        bit-unpacked into one byte per value.
    """
    if pa.types.is_boolean(value_type):
        # Arrow boolean array buffers are bit-packed, with 8 entries per byte,
        # and are accessed via bit offsets.
        buffer_item_width = value_type.bit_width
    else:
        # We assume all other array types are accessed via byte array
        # offsets.
        buffer_item_width = value_type.bit_width // 8
    data_offset = buffer_item_width * offset

    if pa.types.is_boolean(value_type):
        # Special handling for boolean arrays, since Arrow
        # bit-packs boolean arrays while NumPy does not.
        # Cast as uint8 array and let NumPy unpack into a boolean view.
        # Offset into uint8 array, where each element is
        # a bucket for 8 booleans.
        byte_bucket_offset = data_offset // 8
        # Offset for a specific boolean, within a uint8 array element.
        bool_offset = data_offset % 8
        # The number of uint8 array elements (buckets) that our slice spans.
        # Note that, due to the offset for a specific boolean,
        # the slice can span byte boundaries even if it contains
        # less than 8 booleans.
        num_boolean_byte_buckets = 1 + ((bool_offset + np.prod(shape) - 1) // 8)
        # Construct the uint8 array view on the buffer.
        arr = np.ndarray(
            (num_boolean_byte_buckets,),
            dtype=np.uint8,
            buffer=data_buffer,
            offset=byte_bucket_offset,
        )
        # Unpack into a byte per boolean, using LSB bit-packed ordering.
        arr = np.unpackbits(arr, bitorder="little")
        # Interpret buffer as boolean array.
        return np.ndarray(shape, dtype=np.bool_, buffer=arr, offset=bool_offset)
    ext_dtype = value_type.to_pandas_dtype()
    # Special handling of ragged string tensors: fixed-size binary values are
    # reinterpreted as little-endian unicode strings of the matching width.
    # NUM_BYTES_PER_UNICODE_CHAR is a module-level constant defined elsewhere
    # in this file (presumably 4, for UTF-32 — confirm at its definition).
    if pa.types.is_fixed_size_binary(value_type):
        ext_dtype = np.dtype(f"<U{value_type.byte_width // NUM_BYTES_PER_UNICODE_CHAR}")
    return np.ndarray(shape, dtype=ext_dtype, buffer=data_buffer, offset=data_offset)
1206
+
1207
+
1208
try:
    # Registration needs an extension type instance, but then works for any
    # instance of the same subclass regardless of parametrization of the type.
    # The concrete shape/dtype arguments below are throwaway placeholders.
    pa.register_extension_type(ArrowTensorType((0,), pa.int64()))
    pa.register_extension_type(ArrowTensorTypeV2((0,), pa.int64()))
    pa.register_extension_type(ArrowVariableShapedTensorType(pa.int64(), 0))
except pa.ArrowKeyError:
    # Extension types are already registered (e.g. this module was imported
    # more than once); duplicate registration is safe to ignore.
    pass
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/pandas.py ADDED
@@ -0,0 +1,1451 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from
2
+ # https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/tensor.py
3
+ # and
4
+ # https://github.com/CODAIT/text-extensions-for-pandas/blob/dc03278689fe1c5f131573658ae19815ba25f33e/text_extensions_for_pandas/array/arrow_conversion.py
5
+
6
+ #
7
+ # Copyright (c) 2020 IBM Corp.
8
+ # Licensed under the Apache License, Version 2.0 (the "License");
9
+ # you may not use this file except in compliance with the License.
10
+ # You may obtain a copy of the License at
11
+ #
12
+ # http://www.apache.org/licenses/LICENSE-2.0
13
+ #
14
+ # Unless required by applicable law or agreed to in writing, software
15
+ # distributed under the License is distributed on an "AS IS" BASIS,
16
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17
+ # See the License for the specific language governing permissions and
18
+ # limitations under the License.
19
+
20
+ # Modifications:
21
+ # - Added ArrowTensorType.to_pandas_type()
22
+ # - Added ArrowTensorArray.__getitem__()
23
+ # - Added ArrowTensorArray.__iter__()
24
+ # - Added support for column casts to extension types.
25
+ # - Fleshed out docstrings and examples.
26
+ # - Fixed TensorArray.isna() so it returns an appropriate ExtensionArray.
27
+ # - Added different (more vectorized) TensorArray.take() operation.
28
+ # - Added support for more reducers (agg funcs) to TensorArray.
29
+ # - Added support for logical operators to TensorArray(Element).
30
+ # - Added support for heterogeneously-shaped tensors.
31
+ # - Miscellaneous small bug fixes and optimizations.
32
+
33
+ import numbers
34
+ import os
35
+ from typing import Any, Callable, List, Optional, Sequence, Tuple, Union
36
+
37
+ import numpy as np
38
+ import pandas as pd
39
+ import pyarrow as pa
40
+ from packaging.version import Version
41
+ from pandas._typing import Dtype
42
+ from pandas.compat import set_function_name
43
+ from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
44
+ from pandas.core.indexers import check_array_indexer, validate_indices
45
+
46
+ from ray.air.util.tensor_extensions.utils import (
47
+ _create_possibly_ragged_ndarray,
48
+ _is_ndarray_variable_shaped_tensor,
49
+ )
50
+ from ray.util.annotations import PublicAPI
51
+
52
+ try:
53
+ from pandas.core.dtypes.generic import ABCIndex
54
+ except ImportError:
55
+ # ABCIndexClass changed to ABCIndex in Pandas 1.3
56
+ from pandas.core.dtypes.generic import ABCIndexClass as ABCIndex
57
+
58
+
59
+ #############################################
60
+ # Begin patching of ExtensionArrayFormatter #
61
+ #############################################
62
+
63
+
64
def _format_strings_patched(self) -> List[str]:
    """Patched ``ExtensionArrayFormatter._format_strings`` that renders
    multi-dimensional ``TensorArray`` columns.

    Non-``TensorArray`` values and 1D tensor columns are delegated to the
    original (unpatched) implementation saved as ``_format_strings_orig``.
    Otherwise, the tensor data is flattened, formatted element-wise with this
    formatter's display options, reshaped back to the original shape, and then
    formatted once more as an array of nested strings.
    """
    from pandas.core.construction import extract_array
    from pandas.io.formats.format import format_array

    if not isinstance(self.values, TensorArray):
        # Not a tensor column: fall back to the stock formatter.
        return self._format_strings_orig()

    values = extract_array(self.values, extract_numpy=True)
    array = np.asarray(values)

    if array.ndim == 1:
        # 1D tensor columns render fine with the stock formatter.
        return self._format_strings_orig()

    def format_array_wrap(array_, formatter_):
        # Invoke pandas' format_array with this formatter's display options.
        fmt_values = format_array(
            array_,
            formatter_,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            decimal=self.decimal,
            leading_space=self.leading_space,
            quoting=self.quoting,
        )
        return fmt_values

    flat_formatter = self.formatter
    if flat_formatter is None:
        # Let the extension array supply its element formatter.
        flat_formatter = values._formatter(boxed=True)

    # Flatten array, call function, reshape (use ravel_compat in v1.3.0)
    flat_array = array.ravel("K")
    fmt_flat_array = np.asarray(format_array_wrap(flat_array, flat_formatter))
    order = "F" if array.flags.f_contiguous else "C"
    fmt_array = fmt_flat_array.reshape(array.shape, order=order)

    # Format the array of nested strings, use default formatter
    return format_array_wrap(fmt_array, None)
104
+
105
+
106
def _format_strings_patched_v1_0_0(self) -> List[str]:
    """Variant of :func:`_format_strings_patched` for older pandas versions.

    Differences from the main patch:
    - omits the ``quoting`` keyword when calling ``format_array`` (presumably
      not supported by the older ``format_array`` signature — confirm against
      the targeted pandas version);
    - finishes with a slimmed-down reimplementation of
      ``GenericArrayFormatter`` instead of a second ``format_array`` pass, due
      to https://github.com/pandas-dev/pandas/issues/33770.
    """
    from functools import partial

    from pandas.core.construction import extract_array
    from pandas.io.formats.format import format_array
    from pandas.io.formats.printing import pprint_thing

    if not isinstance(self.values, TensorArray):
        # Not a tensor column: fall back to the stock formatter.
        return self._format_strings_orig()

    values = extract_array(self.values, extract_numpy=True)
    array = np.asarray(values)

    if array.ndim == 1:
        # 1D tensor columns render fine with the stock formatter.
        return self._format_strings_orig()

    def format_array_wrap(array_, formatter_):
        # Invoke pandas' format_array with this formatter's display options.
        fmt_values = format_array(
            array_,
            formatter_,
            float_format=self.float_format,
            na_rep=self.na_rep,
            digits=self.digits,
            space=self.space,
            justify=self.justify,
            decimal=self.decimal,
            leading_space=self.leading_space,
        )
        return fmt_values

    flat_formatter = self.formatter
    if flat_formatter is None:
        # Let the extension array supply its element formatter.
        flat_formatter = values._formatter(boxed=True)

    # Flatten array, call function, reshape (use ravel_compat in v1.3.0)
    flat_array = array.ravel("K")
    fmt_flat_array = np.asarray(format_array_wrap(flat_array, flat_formatter))
    order = "F" if array.flags.f_contiguous else "C"
    fmt_array = fmt_flat_array.reshape(array.shape, order=order)

    # Slimmed down version of GenericArrayFormatter due to:
    # https://github.com/pandas-dev/pandas/issues/33770
    def format_strings_slim(array_, leading_space):
        formatter = partial(
            pprint_thing,
            escape_chars=("\t", "\r", "\n"),
        )

        def _format(x):
            return str(formatter(x))

        fmt_values = []
        for v in array_:
            # Prepend a space unless leading_space is explicitly disabled,
            # mirroring GenericArrayFormatter's padding behavior.
            tpl = "{v}" if leading_space is False else " {v}"
            fmt_values.append(tpl.format(v=_format(v)))
        return fmt_values

    return format_strings_slim(fmt_array, self.leading_space)
164
+
165
+
166
# Env var that can be set to "0" to disable the formatter monkey-patch below.
_FORMATTER_ENABLED_ENV_VAR = "TENSOR_COLUMN_EXTENSION_FORMATTER_ENABLED"

if os.getenv(_FORMATTER_ENABLED_ENV_VAR, "1") == "1":
    # The extension-array formatter class was renamed to a private name in
    # pandas 2.2.0; pick whichever exists for the installed version.
    if Version(pd.__version__) < Version("2.2.0"):
        from pandas.io.formats.format import ExtensionArrayFormatter

        formatter_cls = ExtensionArrayFormatter
    else:
        from pandas.io.formats.format import _ExtensionArrayFormatter

        formatter_cls = _ExtensionArrayFormatter
    # Keep a handle on the original implementation so the patched versions can
    # delegate to it for non-TensorArray values.
    formatter_cls._format_strings_orig = formatter_cls._format_strings
    # NOTE(review): pandas >= 1.3 (including 2.x) falls into the else-branch
    # and receives the v1.0.0-style patch; confirm this is intentional rather
    # than an inverted version check.
    if Version("1.1.0") <= Version(pd.__version__) < Version("1.3.0"):
        formatter_cls._format_strings = _format_strings_patched
    else:
        formatter_cls._format_strings = _format_strings_patched_v1_0_0
    # Marker so other code can detect that the patch has been applied.
    formatter_cls._patched_by_ray_datasets = True
183
+
184
+ ###########################################
185
+ # End patching of ExtensionArrayFormatter #
186
+ ###########################################
187
+
188
+
189
@PublicAPI(stability="beta")
@pd.api.extensions.register_extension_dtype
class TensorDtype(pd.api.extensions.ExtensionDtype):
    """
    Pandas extension type for a column of homogeneous-typed tensors.

    This extension supports tensors in which the elements have different shapes.
    However, each tensor element must be non-ragged, i.e. each tensor element must
    have a well-defined, non-ragged shape.

    See:
    https://github.com/pandas-dev/pandas/blob/master/pandas/core/dtypes/base.py
    for up-to-date interface documentation and the subclassing contract. The
    docstrings of the below properties and methods were copied from the base
    ExtensionDtype.

    Examples:
        >>> # Create a DataFrame with a list of ndarrays as a column.
        >>> import pandas as pd
        >>> import numpy as np
        >>> import ray
        >>> from ray.data.extensions import TensorDtype
        >>> df = pd.DataFrame({
        ...     "one": [1, 2, 3],
        ...     "two": list(np.arange(24).reshape((3, 2, 2, 2)))})
        >>> # Cast the opaque np.object column to our TensorDtype extension type.
        >>> # NOTE: the constructor takes (shape, dtype), in that order.
        >>> df["two"] = df["two"].astype(TensorDtype((3, 2, 2, 2), np.int64))
        >>> df.dtypes  # doctest: +SKIP
        one                                                int64
        two    TensorDtype(shape=(3, 2, 2, 2), dtype=int64)
        dtype: object
        >>> # Pandas is now aware of this tensor column, and typical DataFrame
        >>> # operations work on it. Aggregations returning a single row's
        >>> # value yield a TensorArrayElement, a light ndarray wrapper.
        >>> tensor = (2 * (df["two"] + 10)).mean()  # doctest: +SKIP
        >>> type(tensor.to_numpy())  # doctest: +SKIP
        numpy.ndarray
        >>> # The tensor extension type is preserved along the
        >>> # Pandas --> Arrow --> Parquet --> Arrow --> Pandas
        >>> # conversion chain.
    """

    # NOTE(Clark): This is apparently required to prevent integer indexing
    # errors, but is an undocumented ExtensionDtype attribute. See issue:
    # https://github.com/CODAIT/text-extensions-for-pandas/issues/166
    base = None

    def __init__(self, shape: Tuple[Optional[int], ...], dtype: np.dtype):
        """
        Construct a TensorDtype.

        Args:
            shape: Shape of the tensor elements; a tuple of all-None dimension
                sizes denotes variable-shaped tensor elements.
            dtype: NumPy dtype of the underlying tensor elements.
        """
        self._shape = shape
        self._dtype = dtype

    @property
    def type(self):
        """
        The scalar type for the array, e.g. ``int``.

        It's expected ``ExtensionArray[item]`` returns an instance
        of ``ExtensionDtype.type`` for scalar ``item``, assuming
        that value is valid (not NA). NA values do not need to be
        instances of `type`.
        """
        return TensorArrayElement

    @property
    def element_dtype(self):
        """
        The dtype of the underlying tensor elements.
        """
        return self._dtype

    @property
    def element_shape(self):
        """
        The shape of the underlying tensor elements. This will be a tuple of Nones
        if the corresponding TensorArray for this TensorDtype holds variable-shaped
        tensor elements.
        """
        return self._shape

    @property
    def is_variable_shaped(self):
        """
        Whether the corresponding TensorArray for this TensorDtype holds
        variable-shaped tensor elements.
        """
        # BUG FIX: this previously iterated over ``self.shape``, an attribute
        # that does not exist on this class (pandas' ExtensionDtype defines no
        # ``shape``), raising AttributeError. The stored attribute is
        # ``self._shape`` (also exposed as ``element_shape``).
        return all(dim_size is None for dim_size in self._shape)

    @property
    def name(self) -> str:
        """
        A string identifying the data type.
        Will be used for display in, e.g. ``Series.dtype``
        """
        return f"numpy.ndarray(shape={self._shape}, dtype={self._dtype})"

    @classmethod
    def construct_from_string(cls, string: str):
        r"""
        Construct this type from a string.

        Accepts both the ``TensorDtype(shape=..., dtype=...)`` spelling and the
        ``numpy.ndarray(shape=..., dtype=...)`` spelling produced by
        :attr:`name`.

        Parameters
        ----------
        string : str
            The serialized dtype string, e.g.
            ``'TensorDtype(shape=(1, 2, 3), dtype=int64)'``.

        Returns
        -------
        ExtensionDtype
            Instance of the dtype.

        Raises
        ------
        TypeError
            If a class cannot be constructed from this 'string'.
        """
        import ast
        import re

        if not isinstance(string, str):
            raise TypeError(
                f"'construct_from_string' expects a string, got {type(string)}"
            )
        # Upstream code uses exceptions as part of its normal control flow and
        # will pass this method bogus class names.
        regex = (
            r"^(TensorDtype|numpy.ndarray)"
            r"\(shape=(\((?:(?:\d+|None),?\s?)*\)), dtype=(\w+)\)$"
        )
        m = re.search(regex, string)
        err_msg = (
            f"Cannot construct a '{cls.__name__}' from '{string}'; expected a string "
            "like 'TensorDtype(shape=(1, 2, 3), dtype=int64)'."
        )
        if m is None:
            raise TypeError(err_msg)
        groups = m.groups()
        if len(groups) != 3:
            raise TypeError(err_msg)
        _, shape, dtype = groups
        # Safely parse the shape tuple literal and resolve the dtype by name.
        shape = ast.literal_eval(shape)
        dtype = np.dtype(dtype)
        return cls(shape, dtype)

    @classmethod
    def construct_array_type(cls):
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return TensorArray

    def __from_arrow__(self, array: Union[pa.Array, pa.ChunkedArray]):
        """
        Convert a pyarrow (chunked) array to a TensorArray.

        This and TensorArray.__arrow_array__ make up the
        Pandas extension type + array <--> Arrow extension type + array
        interoperability protocol. See
        https://pandas.pydata.org/pandas-docs/stable/development/extending.html#compatibility-with-apache-arrow
        for more information.
        """
        if isinstance(array, pa.ChunkedArray):
            if array.num_chunks > 1:
                # TODO(Clark): Remove concat and construct from list with
                # shape.
                values = np.concatenate(
                    [chunk.to_numpy() for chunk in array.iterchunks()]
                )
            else:
                values = array.chunk(0).to_numpy()
        else:
            values = array.to_numpy()

        return TensorArray(values)

    def __str__(self) -> str:
        return self.name

    def __repr__(self) -> str:
        return str(self)

    @property
    def _is_boolean(self):
        """
        Whether this extension array should be considered boolean.

        By default, ExtensionArrays are assumed to be non-numeric.
        Setting this to True will affect the behavior of several places,
        e.g.

        * is_bool
        * boolean indexing

        Returns
        -------
        bool
        """
        # This is needed to support returning a TensorArray from .isnan().
        from pandas.core.dtypes.common import is_bool_dtype

        return is_bool_dtype(self._dtype)
475
+
476
+
477
class _TensorOpsMixin(pd.api.extensions.ExtensionScalarOpsMixin):
    """
    Mixin for TensorArray operator support, applying operations on the
    underlying ndarrays.
    """

    @classmethod
    def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None):
        """
        Add support for binary operators by unwrapping, applying, and
        rewrapping.

        Args:
            op: The binary operator (e.g. ``operator.add``) to wrap.
            coerce_to_dtype: Unused here; kept for signature compatibility
                with the pandas ``ExtensionScalarOpsMixin`` hook overridden.
            result_dtype: Unused here; see above.

        Returns:
            The generated dunder method (named ``__<op>__``) bound under
            that name via ``set_function_name``.
        """

        # NOTE(Clark): This overrides, but coerce_to_dtype, result_dtype might
        # not be needed

        def _binop(self, other):
            lvalues = self._tensor

            if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndex)):
                # Rely on Pandas to unbox and dispatch to us.
                return NotImplemented

            # divmod returns a tuple
            if op_name in ["__divmod__", "__rdivmod__"]:
                # TODO(Clark): Add support for divmod and rdivmod.
                # div, mod = result
                raise NotImplementedError

            if isinstance(other, (TensorArray, TensorArrayElement)):
                # Unwrap so the op applies directly to the ndarrays.
                rvalues = other._tensor
            else:
                rvalues = other

            result = op(lvalues, rvalues)

            # Force a TensorArray if rvalue is not a scalar.
            if isinstance(self, TensorArrayElement) and (
                not isinstance(other, TensorArrayElement) or not np.isscalar(other)
            ):
                result_wrapped = TensorArray(result)
            else:
                result_wrapped = cls(result)

            return result_wrapped

        # ``op_name`` is assigned after ``_binop`` is defined but before any
        # call; the closure captures the variable, not its value at def time.
        op_name = f"__{op.__name__}__"
        return set_function_name(_binop, op_name, cls)

    @classmethod
    def _create_logical_method(cls, op):
        # Logical ops (e.g. &, |, ^) reuse the same unwrap/apply/rewrap
        # machinery as the arithmetic ops.
        return cls._create_method(op)
529
+
530
+
531
+ class _TensorScalarCastMixin:
532
+ """
533
+ Mixin for casting scalar tensors to a particular numeric type.
534
+ """
535
+
536
+ def _scalarfunc(self, func: Callable[[Any], Any]):
537
+ return func(self._tensor)
538
+
539
+ def __complex__(self):
540
+ return self._scalarfunc(complex)
541
+
542
+ def __float__(self):
543
+ return self._scalarfunc(float)
544
+
545
+ def __int__(self):
546
+ return self._scalarfunc(int)
547
+
548
+ def __hex__(self):
549
+ return self._scalarfunc(hex)
550
+
551
+ def __oct__(self):
552
+ return self._scalarfunc(oct)
553
+
554
+
555
@PublicAPI(stability="beta")
class TensorArrayElement(_TensorOpsMixin, _TensorScalarCastMixin):
    """
    Single element of a TensorArray, wrapping an underlying ndarray.

    Arithmetic/comparison operators come from ``_TensorOpsMixin`` and numeric
    scalar casts from ``_TensorScalarCastMixin``; both operate on the wrapped
    ``_tensor`` ndarray.
    """

    def __init__(self, values: np.ndarray):
        """
        Construct a TensorArrayElement from a NumPy ndarray.

        Args:
            values: ndarray that underlies this TensorArray element.
        """
        self._tensor = values

    def __repr__(self):
        # Delegate directly to the wrapped ndarray's repr.
        return self._tensor.__repr__()

    def __str__(self):
        return self._tensor.__str__()

    @property
    def numpy_dtype(self):
        """
        Get the dtype of the tensor.
        :return: The numpy dtype of the backing ndarray
        """
        return self._tensor.dtype

    @property
    def numpy_ndim(self):
        """
        Get the number of tensor dimensions.
        :return: integer for the number of dimensions
        """
        return self._tensor.ndim

    @property
    def numpy_shape(self):
        """
        Get the shape of the tensor.
        :return: A tuple of integers for the numpy shape of the backing ndarray
        """
        return self._tensor.shape

    @property
    def numpy_size(self):
        """
        Get the size of the tensor.
        :return: integer for the number of elements in the tensor
        """
        return self._tensor.size

    def to_numpy(self):
        """
        Return the values of this element as a NumPy ndarray.
        """
        return np.asarray(self._tensor)

    def __array__(self, dtype: np.dtype = None, **kwargs) -> np.ndarray:
        # NumPy array protocol hook: lets np.asarray(element) (and NumPy ops
        # that coerce their inputs) see the underlying ndarray directly.
        return np.asarray(self._tensor, dtype=dtype, **kwargs)
616
+
617
+
618
@PublicAPI(stability="beta")
class TensorArray(
    pd.api.extensions.ExtensionArray,
    _TensorOpsMixin,
    _TensorScalarCastMixin,
):
    """
    Pandas `ExtensionArray` representing a tensor column, i.e. a column
    consisting of ndarrays as elements.

    This extension supports tensors in which the elements have different shapes.
    However, each tensor element must be non-ragged, i.e. each tensor element must have
    a well-defined, non-ragged shape.

    Examples:
        >>> # Create a DataFrame with a list of ndarrays as a column.
        >>> import pandas as pd
        >>> import numpy as np
        >>> import ray
        >>> from ray.data.extensions import TensorArray
        >>> df = pd.DataFrame({
        ...     "one": [1, 2, 3],
        ...     "two": TensorArray(np.arange(24).reshape((3, 2, 2, 2)))})
        >>> # Note that the column dtype is TensorDtype.
        >>> df.dtypes # doctest: +SKIP
        one                                     int64
        two    TensorDtype(shape=(3, 2, 2, 2), dtype=int64)
        dtype: object
        >>> # Pandas is aware of this tensor column, and we can do the
        >>> # typical DataFrame operations on this column.
        >>> col = 2 * (df["two"] + 10)
        >>> # The ndarrays underlying the tensor column will be manipulated,
        >>> # but the column itself will continue to be a Pandas type.
        >>> type(col) # doctest: +SKIP
        pandas.core.series.Series
        >>> col # doctest: +SKIP
        0   [[[ 2  4]
              [ 6  8]]
             [[10 12]
              [14 16]]]
        1   [[[18 20]
              [22 24]]
             [[26 28]
              [30 32]]]
        2   [[[34 36]
              [38 40]]
             [[42 44]
              [46 48]]]
        Name: two, dtype: TensorDtype(shape=(3, 2, 2, 2), dtype=int64)
        >>> # Once you do an aggregation on that column that returns a single
        >>> # row's value, you get back our TensorArrayElement type.
        >>> tensor = col.mean() # doctest: +SKIP
        >>> type(tensor) # doctest: +SKIP
        ray.data.extensions.tensor_extension.TensorArrayElement
        >>> tensor # doctest: +SKIP
        array([[[18., 20.],
                [22., 24.]],
               [[26., 28.],
                [30., 32.]]])
        >>> # This is a light wrapper around a NumPy ndarray, and can easily
        >>> # be converted to an ndarray.
        >>> type(tensor.to_numpy()) # doctest: +SKIP
        numpy.ndarray
        >>> # In addition to doing Pandas operations on the tensor column,
        >>> # you can now put the DataFrame into a Dataset.
        >>> ds = ray.data.from_pandas(df) # doctest: +SKIP
        >>> # Internally, this column is represented the corresponding
        >>> # Arrow tensor extension type.
        >>> ds.schema() # doctest: +SKIP
        one: int64
        two: extension<arrow.py_extension_type<ArrowTensorType>>
        >>> # You can write the dataset to Parquet.
        >>> ds.write_parquet("/some/path") # doctest: +SKIP
        >>> # And you can read it back.
        >>> read_ds = ray.data.read_parquet("/some/path") # doctest: +SKIP
        >>> read_ds.schema() # doctest: +SKIP
        one: int64
        two: extension<arrow.py_extension_type<ArrowTensorType>>

        >>> read_df = ray.get(read_ds.to_pandas_refs())[0] # doctest: +SKIP
        >>> read_df.dtypes # doctest: +SKIP
        one                                     int64
        two    TensorDtype(shape=(3, 2, 2, 2), dtype=int64)
        dtype: object
        >>> # The tensor extension type is preserved along the
        >>> # Pandas --> Arrow --> Parquet --> Arrow --> Pandas
        >>> # conversion chain.
        >>> read_df.equals(df) # doctest: +SKIP
        True
    """

    # Maps pandas reduction name -> NumPy reducer; used by _reduce(), which
    # applies the reducer along axis 0 (i.e. across rows of the column).
    SUPPORTED_REDUCERS = {
        "sum": np.sum,
        "all": np.all,
        "any": np.any,
        "min": np.min,
        "max": np.max,
        "mean": np.mean,
        "median": np.median,
        "prod": np.prod,
        "std": np.std,
        "var": np.var,
    }

    # See https://github.com/pandas-dev/pandas/blob/master/pandas/core/arrays/base.py
    # for interface documentation and the subclassing contract.
    def __init__(
        self,
        values: Union[
            np.ndarray,
            ABCSeries,
            Sequence[Union[np.ndarray, TensorArrayElement]],
            TensorArrayElement,
            Any,
        ],
    ):
        """
        Args:
            values: A NumPy ndarray or sequence of NumPy ndarrays of equal
                shape.
        """
        # Try to convert some well-known objects to ndarrays before handing off to
        # ndarray handling logic.
        if isinstance(values, ABCSeries):
            values = _create_possibly_ragged_ndarray(values)
        elif isinstance(values, Sequence):
            # Unwrap TensorArrayElements to their backing ndarrays first.
            values = [
                np.asarray(v) if isinstance(v, TensorArrayElement) else v
                for v in values
            ]
            values = _create_possibly_ragged_ndarray(values)
        elif isinstance(values, TensorArrayElement):
            # A single element becomes a length-1 column.
            # NOTE(review): `np.array(..., copy=False)` raises on NumPy >= 2.0
            # when a copy is required -- confirm the supported NumPy range.
            values = np.array([np.asarray(values)], copy=False)

        if isinstance(values, np.ndarray):
            if values.dtype.type is np.object_:
                if len(values) == 0:
                    # Tensor is empty, pass through to create empty TensorArray.
                    pass
                elif all(
                    isinstance(v, (np.ndarray, TensorArrayElement, Sequence))
                    and not isinstance(v, str)
                    for v in values
                ):
                    values = [np.asarray(v) for v in values]
                    # Try to convert ndarrays of ndarrays/TensorArrayElements with an
                    # opaque object type to a properly typed ndarray of ndarrays.
                    values = _create_possibly_ragged_ndarray(values)
                else:
                    raise TypeError(
                        "Expected a well-typed ndarray or an object-typed ndarray of "
                        "ndarray pointers, but got an object-typed ndarray whose "
                        f"subndarrays are of type {type(values[0])}."
                    )
        elif isinstance(values, TensorArray):
            raise TypeError("Use the copy() method to create a copy of a TensorArray.")
        else:
            raise TypeError(
                "Expected a numpy.ndarray or sequence of numpy.ndarray, "
                f"but received {values} of type {type(values).__name__} instead."
            )
        assert isinstance(values, np.ndarray)
        # Backing storage: either a well-typed ndarray with the row axis first,
        # or an object-dtyped ndarray of ndarray pointers (ragged case).
        self._tensor = values
        # Lazily computed by the `is_variable_shaped` property (None = unknown).
        self._is_variable_shaped = None

    @classmethod
    def _from_sequence(
        cls, scalars, *, dtype: Optional[Dtype] = None, copy: bool = False
    ):
        """
        Construct a new ExtensionArray from a sequence of scalars.

        Parameters
        ----------
        scalars : Sequence
            Each element will be an instance of the scalar type for this
            array, ``cls.dtype.type`` or be converted into this type in this
            method.
        dtype : dtype, optional
            Construct for this particular dtype. This should be a Dtype
            compatible with the ExtensionArray.
        copy : bool, default False
            If True, copy the underlying data.

        Returns
        -------
        ExtensionArray
        """
        if copy and isinstance(scalars, np.ndarray):
            scalars = scalars.copy()
        elif isinstance(scalars, TensorArray):
            scalars = scalars._tensor.copy() if copy else scalars._tensor
        return TensorArray(scalars)

    @classmethod
    def _from_factorized(
        cls, values: np.ndarray, original: pd.api.extensions.ExtensionArray
    ):
        """
        Reconstruct an ExtensionArray after factorization.

        Parameters
        ----------
        values : ndarray
            An integer ndarray with the factorized values.
        original : ExtensionArray
            The original ExtensionArray that factorize was called on.

        See Also
        --------
        factorize : Top-level factorize method that dispatches here.
        ExtensionArray.factorize : Encode the extension array as an enumerated
            type.
        """
        # Factorization is not supported for tensor columns (see
        # _values_for_factorize below).
        raise NotImplementedError

    def __getitem__(
        self, item: Union[int, slice, np.ndarray]
    ) -> Union["TensorArray", "TensorArrayElement"]:
        """
        Select a subset of self.

        Parameters
        ----------
        item : int, slice, or ndarray
            * int: The position in 'self' to get.
            * slice: A slice object, where 'start', 'stop', and 'step' are
              integers or None
            * ndarray: A 1-d boolean NumPy ndarray the same length as 'self'

        Returns
        -------
        item : scalar or ExtensionArray

        Notes
        -----
        For scalar ``item``, return a scalar value suitable for the array's
        type. This should be an instance of ``self.dtype.type``.
        For slice ``key``, return an instance of ``ExtensionArray``, even
        if the slice is length 0 or 1.
        For a boolean mask, return an instance of ``ExtensionArray``, filtered
        to the values where ``item`` is True.
        """
        # Return scalar if single value is selected, a TensorArrayElement for
        # single array element, or TensorArray for slice.
        if isinstance(item, int):
            value = self._tensor[item]
            if np.isscalar(value):
                return value
            else:
                return TensorArrayElement(value)
        else:
            # BEGIN workaround for Pandas issue #42430
            if isinstance(item, tuple) and len(item) > 1 and item[0] == Ellipsis:
                if len(item) > 2:
                    # Hopefully this case is not possible, but can't be sure
                    raise ValueError(
                        "Workaround Pandas issue #42430 not "
                        "implemented for tuple length > 2"
                    )
                item = item[1]
            # END workaround for issue #42430
            if isinstance(item, TensorArray):
                item = np.asarray(item)
            item = check_array_indexer(self, item)
            return TensorArray(self._tensor[item])

    def __len__(self) -> int:
        """
        Length of this array.

        Returns
        -------
        length : int
        """
        return len(self._tensor)

    @property
    def dtype(self) -> pd.api.extensions.ExtensionDtype:
        """
        An instance of 'ExtensionDtype'.
        """
        if self.is_variable_shaped:
            # A tensor is only considered variable-shaped if it's non-empty, so no
            # non-empty check is needed here.
            dtype = self._tensor[0].dtype
            # Unknown per-row dimensions are encoded as None.
            shape = (None,) * self._tensor[0].ndim
        else:
            dtype = self.numpy_dtype
            # Drop the row axis; the element shape is what TensorDtype carries.
            shape = self.numpy_shape[1:]
        return TensorDtype(shape, dtype)

    @property
    def is_variable_shaped(self):
        """
        Whether this TensorArray holds variable-shaped tensor elements.
        """
        # Computed lazily and cached, since the check is O(rows).
        if self._is_variable_shaped is None:
            self._is_variable_shaped = _is_ndarray_variable_shaped_tensor(self._tensor)
        return self._is_variable_shaped

    @property
    def nbytes(self) -> int:
        """
        The number of bytes needed to store this object in memory.
        """
        return self._tensor.nbytes

    def isna(self) -> "TensorArray":
        """
        A 1-D array indicating if each value is missing.

        Returns
        -------
        na_values : Union[np.ndarray, ExtensionArray]
            In most cases, this should return a NumPy ndarray. For
            exceptional cases like ``SparseArray``, where returning
            an ndarray would be expensive, an ExtensionArray may be
            returned.

        Notes
        -----
        If returning an ExtensionArray, then

        * ``na_values._is_boolean`` should be True
        * `na_values` should implement :func:`ExtensionArray._reduce`
        * ``na_values.any`` and ``na_values.all`` should be implemented

        NOTE(review): all three branches below return a plain ``np.ndarray``,
        not a ``TensorArray`` as the annotation suggests -- confirm intent.
        """
        if self._tensor.dtype.type is np.object_:
            # Avoid comparing with __eq__ because the elements of the tensor
            # may do something funny with that operation.
            return np.array(
                [self._tensor[i] is None for i in range(len(self))], dtype=bool
            )
        elif self._tensor.dtype.type is np.str_:
            # A row is NA only if every string element in it is empty.
            return np.all(self._tensor == "", axis=tuple(range(1, self._tensor.ndim)))
        else:
            # A row is NA only if every element in it is NaN.
            return np.all(
                np.isnan(self._tensor), axis=tuple(range(1, self._tensor.ndim))
            )

    def take(
        self, indices: Sequence[int], allow_fill: bool = False, fill_value: Any = None
    ) -> "TensorArray":
        """
        Take elements from an array.

        Parameters
        ----------
        indices : sequence of int
            Indices to be taken.
        allow_fill : bool, default False
            How to handle negative values in `indices`.

            * False: negative values in `indices` indicate positional indices
              from the right (the default). This is similar to
              :func:`numpy.take`.

            * True: negative values in `indices` indicate
              missing values. These values are set to `fill_value`. Any other
              other negative values raise a ``ValueError``.

        fill_value : any, optional
            Fill value to use for NA-indices when `allow_fill` is True.
            This may be ``None``, in which case the default NA value for
            the type, ``self.dtype.na_value``, is used.

            For many ExtensionArrays, there will be two representations of
            `fill_value`: a user-facing "boxed" scalar, and a low-level
            physical NA value. `fill_value` should be the user-facing version,
            and the implementation should handle translating that to the
            physical version for processing the take if necessary.

        Returns
        -------
        ExtensionArray

        Raises
        ------
        IndexError
            When the indices are out of bounds for the array.
        ValueError
            When `indices` contains negative values other than ``-1``
            and `allow_fill` is True.

        See Also
        --------
        numpy.take : Take elements from an array along an axis.
        api.extensions.take : Take elements from an array.

        Notes
        -----
        ExtensionArray.take is called by ``Series.__getitem__``, ``.loc``,
        ``iloc``, when `indices` is a sequence of values. Additionally,
        it's called by :meth:`Series.reindex`, or any other method
        that causes realignment, with a `fill_value`.

        Examples
        --------
        Here's an example implementation, which relies on casting the
        extension array to object dtype. This uses the helper method
        :func:`pandas.api.extensions.take`.

        .. code-block:: python

            def take(self, indices, allow_fill=False, fill_value=None):
                from pandas.core.algorithms import take

                # If the ExtensionArray is backed by an ndarray, then
                # just pass that here instead of coercing to object.
                data = self.astype(object)

                if allow_fill and fill_value is None:
                    fill_value = self.dtype.na_value

                # fill value should always be translated from the scalar
                # type for the array, to the physical storage type for
                # the data, before passing to take.

                result = take(data, indices, fill_value=fill_value,
                              allow_fill=allow_fill)
                return self._from_sequence(result, dtype=self.dtype)
        """
        if allow_fill:
            # With allow_fill being True, negative values in `indices` indicate
            # missing values and should be set to `fill_value`.
            indices = np.asarray(indices, dtype=np.intp)
            validate_indices(indices, len(self._tensor))

            # Check if there are missing indices to fill, otherwise we can
            # delegate to NumPy ndarray .take().
            has_missing = np.any(indices < 0)
            if has_missing:
                if fill_value is None:
                    fill_value = np.nan

                # Create an array populated with fill value.
                values = np.full((len(indices),) + self._tensor.shape[1:], fill_value)

                # Put tensors at the given positive indices into array.
                # NOTE(review): np.put writes into the *flattened* `values`;
                # this looks correct only when tensor elements are scalars --
                # confirm behavior for multi-dimensional elements.
                is_nonneg = indices >= 0
                np.put(values, np.where(is_nonneg)[0], self._tensor[indices[is_nonneg]])

                return TensorArray(values)

        # Delegate take to NumPy array.
        values = self._tensor.take(indices, axis=0)

        return TensorArray(values)

    def copy(self) -> "TensorArray":
        """
        Return a copy of the array.

        Returns
        -------
        ExtensionArray
        """
        # TODO(Clark): Copy cached properties.
        return TensorArray(self._tensor.copy())

    @classmethod
    def _concat_same_type(cls, to_concat: Sequence["TensorArray"]) -> "TensorArray":
        """
        Concatenate multiple array of this dtype.

        Parameters
        ----------
        to_concat : sequence of this type

        Returns
        -------
        ExtensionArray
        """
        # If any input is variable-shaped, or the inputs disagree on element
        # shape, the result must be built as a ragged (object-dtyped) array.
        should_flatten = False
        shape = None
        for a in to_concat:
            if shape is None:
                shape = a.dtype.element_shape
            if a.is_variable_shaped or a.dtype.element_shape != shape:
                should_flatten = True
                break
        if should_flatten:
            concated = TensorArray(
                np.array([e for a in to_concat for e in a._tensor], dtype=object)
            )
        else:
            concated = TensorArray(np.concatenate([a._tensor for a in to_concat]))
        return concated

    def __setitem__(self, key: Union[int, np.ndarray], value: Any) -> None:
        """
        Set one or more values inplace.

        This method is not required to satisfy the pandas extension array
        interface.

        Parameters
        ----------
        key : int, ndarray, or slice
            When called from, e.g. ``Series.__setitem__``, ``key`` will be
            one of

                * scalar int
                * ndarray of integers.
                * boolean ndarray
                * slice object

        value : ExtensionDtype.type, Sequence[ExtensionDtype.type], or object
            value or values to be set of ``key``.

        Returns
        -------
        None
        """
        key = check_array_indexer(self, key)
        # Normalize the value into ndarray form before assignment.
        if isinstance(value, TensorArrayElement) or np.isscalar(value):
            value = np.asarray(value)
        if isinstance(value, list):
            value = [
                np.asarray(v) if isinstance(v, TensorArrayElement) else v for v in value
            ]
        if isinstance(value, ABCSeries) and isinstance(value.dtype, TensorDtype):
            value = value.values
        if value is None or isinstance(value, Sequence) and len(value) == 0:
            # Setting to "nothing" writes NaNs of the existing element shape.
            self._tensor[key] = np.full_like(self._tensor[key], np.nan)
        elif isinstance(key, (int, slice, np.ndarray)):
            self._tensor[key] = value
        else:
            raise NotImplementedError(
                f"__setitem__ with key type '{type(key)}' not implemented"
            )

    def __contains__(self, item) -> bool:
        """
        Return for `item in self`.
        """
        if isinstance(item, TensorArrayElement):
            np_item = np.asarray(item)
            # A single-NaN element is treated as the NA sentinel.
            if np_item.size == 1 and np.isnan(np_item).all():
                return self.isna().any()
        return super().__contains__(item)

    def __repr__(self):
        return self._tensor.__repr__()

    def __str__(self):
        return self._tensor.__str__()

    def _values_for_factorize(self) -> Tuple[np.ndarray, Any]:
        # TODO(Clark): return self._tensor, np.nan
        raise NotImplementedError

    def _reduce(self, name: str, skipna: bool = True, **kwargs):
        """
        Return a scalar result of performing the reduction operation.

        Parameters
        ----------
        name : str
            Name of the function, supported values are:
            { any, all, min, max, sum, mean, median, prod,
            std, var, sem, kurt, skew }.
        skipna : bool, default True
            If True, skip NaN values.
        **kwargs
            Additional keyword arguments passed to the reduction function.
            Currently, `ddof` is the only supported kwarg.

        Returns
        -------
        scalar

        Raises
        ------
        TypeError : subclass does not define reductions
        """
        # NOTE(review): `skipna` is accepted but never consulted below --
        # confirm whether NaN-skipping reductions were intended.
        supported_kwargs = ["ddof"]
        reducer_kwargs = {}
        for kw in supported_kwargs:
            try:
                reducer_kwargs[kw] = kwargs[kw]
            except KeyError:
                pass
        try:
            # Reduce across rows (axis 0), yielding a single tensor element.
            return TensorArrayElement(
                self.SUPPORTED_REDUCERS[name](self._tensor, axis=0, **reducer_kwargs)
            )
        except KeyError:
            raise NotImplementedError(f"'{name}' aggregate not implemented.") from None

    def __array__(self, dtype: np.dtype = None, **kwargs) -> np.ndarray:
        return np.asarray(self._tensor, dtype=dtype, **kwargs)

    def __array_ufunc__(self, ufunc: Callable, method: str, *inputs, **kwargs):
        """
        Supports NumPy ufuncs without requiring sloppy coercion to an
        ndarray.
        """
        out = kwargs.get("out", ())
        for x in inputs + out:
            # Only handle operands we know how to unwrap; defer otherwise.
            if not isinstance(x, (TensorArray, np.ndarray, numbers.Number)):
                return NotImplemented

        # Defer to the implementation of the ufunc on unwrapped values.
        inputs = tuple(x._tensor if isinstance(x, TensorArray) else x for x in inputs)
        if out:
            kwargs["out"] = tuple(
                x._tensor if isinstance(x, TensorArray) else x for x in out
            )
        result = getattr(ufunc, method)(*inputs, **kwargs)

        if type(result) is tuple:
            # Multiple return values.
            return tuple(type(self)(x) for x in result)
        elif method == "at":
            # No return value.
            return None
        else:
            # One return value.
            return type(self)(result)

    def to_numpy(
        self,
        dtype: np.dtype = None,
        copy: bool = False,
        na_value: Any = pd.api.extensions.no_default,
    ):
        """
        Convert to a NumPy ndarray.

        .. versionadded:: 1.0.0

        This is similar to :meth:`numpy.asarray`, but may provide additional
        control over how the conversion is done.

        Parameters
        ----------
        dtype : str or numpy.dtype, optional
            The dtype to pass to :meth:`numpy.asarray`.
        copy : bool, default False
            Whether to ensure that the returned value is a not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the type of the array.

            NOTE(review): `na_value` is accepted but unused below -- confirm.

        Returns
        -------
        numpy.ndarray
        """
        if dtype is not None:
            dtype = pd.api.types.pandas_dtype(dtype)
            if copy:
                values = np.array(self._tensor, dtype=dtype, copy=True)
            else:
                values = self._tensor.astype(dtype)
        elif copy:
            values = self._tensor.copy()
        else:
            values = self._tensor
        return values

    @property
    def numpy_dtype(self):
        """
        Get the dtype of the tensor.
        :return: The numpy dtype of the backing ndarray
        """
        return self._tensor.dtype

    @property
    def numpy_ndim(self):
        """
        Get the number of tensor dimensions.
        :return: integer for the number of dimensions
        """
        return self._tensor.ndim

    @property
    def numpy_shape(self):
        """
        Get the shape of the tensor.
        :return: A tuple of integers for the numpy shape of the backing ndarray
        """
        return self._tensor.shape

    @property
    def numpy_size(self):
        """
        Get the size of the tensor.
        :return: integer for the number of elements in the tensor
        """
        return self._tensor.size

    def astype(self, dtype, copy=True):
        """
        Cast to a NumPy array with 'dtype'.

        Parameters
        ----------
        dtype : str or dtype
            Typecode or data-type to which the array is cast.
        copy : bool, default True
            Whether to copy the data, even if not necessary. If False,
            a copy is made only if the old dtype does not match the
            new dtype.

        Returns
        -------
        array : ndarray
            NumPy ndarray with 'dtype' for its dtype.
        """
        dtype = pd.api.types.pandas_dtype(dtype)

        if isinstance(dtype, TensorDtype):
            values = TensorArray(self._tensor.copy()) if copy else self
        # NOTE(review): `not (is_object and is_string)` matches *every* dtype
        # that is not an object-string dtype (ints, floats, ...), so e.g.
        # astype(int) would be routed through str() conversion and the later
        # branches look unreachable for numeric dtypes. Upstream Ray uses
        # `not is_object_dtype(dtype) and is_string_dtype(dtype)` -- confirm.
        elif not (
            pd.api.types.is_object_dtype(dtype) and pd.api.types.is_string_dtype(dtype)
        ):
            values = np.array([str(t) for t in self._tensor])
            if isinstance(dtype, pd.StringDtype):
                return dtype.construct_array_type()._from_sequence(values, copy=False)
            else:
                return values
        elif pd.api.types.is_object_dtype(dtype):
            # Interpret astype(object) as "cast to an array of numpy arrays"
            values = np.empty(len(self), dtype=object)
            for i in range(len(self)):
                values[i] = self._tensor[i]
        else:
            values = self._tensor.astype(dtype, copy=copy)
        return values

    def any(self, axis=None, out=None, keepdims=False):
        """
        Test whether any array element along a given axis evaluates to True.

        See numpy.any() documentation for more information
        https://numpy.org/doc/stable/reference/generated/numpy.any.html#numpy.any

        :param axis: Axis or axes along which a logical OR reduction is
            performed.
        :param out: Alternate output array in which to place the result.
        :param keepdims: If this is set to True, the axes which are reduced are
            left in the result as dimensions with size one.
        :return: single boolean unless axis is not None else TensorArray
        """
        result = self._tensor.any(axis=axis, out=out, keepdims=keepdims)
        return result if axis is None else TensorArray(result)

    def all(self, axis=None, out=None, keepdims=False):
        """
        Test whether all array elements along a given axis evaluate to True.

        :param axis: Axis or axes along which a logical AND reduction is
            performed.
        :param out: Alternate output array in which to place the result.
        :param keepdims: If this is set to True, the axes which are reduced are
            left in the result as dimensions with size one.
        :return: single boolean unless axis is not None else TensorArray
        """
        result = self._tensor.all(axis=axis, out=out, keepdims=keepdims)
        return result if axis is None else TensorArray(result)

    def __arrow_array__(self, type=None):
        """
        Convert this TensorArray to an ArrowTensorArray extension array.

        This and TensorDtype.__from_arrow__ make up the
        Pandas extension type + array <--> Arrow extension type + array
        interoperability protocol. See
        https://pandas.pydata.org/pandas-docs/stable/development/extending.html#compatibility-with-apache-arrow
        for more information.
        """
        # Imported here to avoid a circular import at module load time.
        from ray.air.util.tensor_extensions.arrow import (
            ArrowTensorArray,
            ArrowVariableShapedTensorArray,
        )

        if self.is_variable_shaped:
            return ArrowVariableShapedTensorArray.from_numpy(self._tensor)
        else:
            return ArrowTensorArray.from_numpy(self._tensor)

    @property
    def _is_boolean(self):
        """
        Whether this extension array should be considered boolean.

        By default, ExtensionArrays are assumed to be non-numeric.
        Setting this to True will affect the behavior of several places,
        e.g.

        * is_bool
        * boolean indexing

        Returns
        -------
        bool
        """
        # This is needed to support returning a TensorArray from .isnan().
        return self.dtype._is_boolean()
1423
+
1424
+
1425
# Attach the arithmetic, comparison, and logical operators provided by the
# ops mixin to both the scalar (TensorArrayElement) and the column
# (TensorArray) types.
for _tensor_cls in (TensorArrayElement, TensorArray):
    _tensor_cls._add_arithmetic_ops()
    _tensor_cls._add_comparison_ops()
    _tensor_cls._add_logical_ops()
del _tensor_cls
1433
+
1434
+
1435
@PublicAPI(stability="beta")
def column_needs_tensor_extension(s: pd.Series) -> bool:
    """Return whether the provided pandas Series column needs a tensor extension
    representation. This tensor extension representation provides more efficient
    slicing and interop with ML frameworks.

    Args:
        s: The pandas Series column that may need to be represented using the
            tensor extension.

    Returns:
        Whether the provided Series needs a tensor extension representation.
    """
    # NOTE: This is an O(1) check: only the dtype and the first element are
    # inspected.
    if s.dtype.type is not np.object_ or s.empty:
        return False
    return isinstance(s.iloc[0], np.ndarray)
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/utils.py ADDED
@@ -0,0 +1,137 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import warnings
2
+ from typing import TYPE_CHECKING, Any, Sequence, Union
3
+
4
+ import numpy as np
5
+
6
+ from ray.util import PublicAPI
7
+
8
+ if TYPE_CHECKING:
9
+ from pandas.core.dtypes.generic import ABCSeries
10
+
11
+
12
+ def _is_ndarray_tensor(arr: np.ndarray) -> bool:
13
+ """Return whether the provided NumPy ndarray is comprised of tensors.
14
+
15
+ NOTE: Tensor is defined as a NumPy array such that `len(arr.shape) > 1`
16
+ """
17
+
18
+ # Case of uniform-shaped (ie non-ragged) tensor
19
+ if arr.ndim > 1:
20
+ return True
21
+
22
+ # Case of ragged tensor (as produced by `create_ragged_ndarray` utility)
23
+ elif (
24
+ arr.dtype.type is np.object_ and len(arr) > 0 and isinstance(arr[0], np.ndarray)
25
+ ):
26
+ return True
27
+
28
+ return False
29
+
30
+
31
+ def _is_ndarray_variable_shaped_tensor(arr: np.ndarray) -> bool:
32
+ """Return whether the provided NumPy ndarray is comprised of variable-shaped
33
+ tensors.
34
+
35
+ NOTE: This is an O(rows) check.
36
+ """
37
+ if arr.dtype.type is not np.object_:
38
+ return False
39
+ if len(arr) == 0:
40
+ return False
41
+ if not isinstance(arr[0], np.ndarray):
42
+ return False
43
+ shape = arr[0].shape
44
+ for a in arr[1:]:
45
+ if not isinstance(a, np.ndarray):
46
+ return False
47
+ if a.shape != shape:
48
+ return True
49
+ return True
50
+
51
+
52
+ def _create_possibly_ragged_ndarray(
53
+ values: Union[np.ndarray, "ABCSeries", Sequence[Any]]
54
+ ) -> np.ndarray:
55
+ """
56
+ Create a possibly ragged ndarray.
57
+ Using the np.array() constructor will fail to construct a ragged ndarray that has a
58
+ uniform first dimension (e.g. uniform channel dimension in imagery). This function
59
+ catches this failure and tries a create-and-fill method to construct the ragged
60
+ ndarray.
61
+ """
62
+ try:
63
+ with warnings.catch_warnings():
64
+ # For NumPy < 1.24, constructing a ragged ndarray directly via
65
+ # `np.array(...)` without the `dtype=object` parameter will raise a
66
+ # VisibleDeprecationWarning which we suppress.
67
+ # More details: https://stackoverflow.com/q/63097829
68
+ warnings.simplefilter("ignore", category=np.VisibleDeprecationWarning)
69
+ return np.array(values, copy=False)
70
+ except ValueError as e:
71
+ # Constructing a ragged ndarray directly via `np.array(...)`
72
+ # without the `dtype=object` parameter will raise a ValueError.
73
+ # For NumPy < 1.24, the message is of the form:
74
+ # "could not broadcast input array from shape..."
75
+ # For NumPy >= 1.24, the message is of the form:
76
+ # "The requested array has an inhomogeneous shape..."
77
+ # More details: https://github.com/numpy/numpy/pull/22004
78
+ error_str = str(e)
79
+ if (
80
+ "could not broadcast input array from shape" in error_str
81
+ or "The requested array has an inhomogeneous shape" in error_str
82
+ ):
83
+ # Fall back to strictly creating a ragged ndarray.
84
+ return create_ragged_ndarray(values)
85
+ else:
86
+ # Re-raise original error if the failure wasn't a broadcast error.
87
+ raise e from None
88
+
89
+
90
@PublicAPI(stability="alpha")
def create_ragged_ndarray(values: Sequence[Any]) -> np.ndarray:
    """Create an array that contains arrays of different length.

    If you're working with variable-length arrays like images, use this function
    to create ragged arrays instead of ``np.array``: ``np.array`` either raises
    or mistypes the elements when the inputs share a uniform first dimension,
    e.g. ``np.array([np.zeros((3, 1)), np.zeros((3, 2))], dtype=object)`` raises
    a broadcast ``ValueError``, and wrapping a single array yields object-typed
    sub-elements.

    This helper sidesteps those limitations by allocating an empty object array
    and filling it with pointers to the variable-length arrays.

    Args:
        values: Sequence of (possibly differently shaped) array-likes.

    Returns:
        A 1D ndarray of dtype ``object`` whose i-th element is ``values[i]``.
    """
    # Allocate an empty object-dtyped 1D array of the right length.
    out = np.empty(len(values), dtype=object)
    # Fill via slice assignment so NumPy stores a pointer to each (ragged)
    # element rather than trying to coerce them into one rectangular block.
    out[:] = list(values)
    return out
infer_4_37_2/lib/python3.10/site-packages/ray/core/__init__.py ADDED
File without changes
infer_4_37_2/lib/python3.10/site-packages/ray/core/generated/agent_manager_pb2.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+ # Generated by the protocol buffer compiler. DO NOT EDIT!
3
+ # source: src/ray/protobuf/agent_manager.proto
4
+ """Generated protocol buffer code."""
5
+ from google.protobuf.internal import enum_type_wrapper
6
+ from google.protobuf import descriptor as _descriptor
7
+ from google.protobuf import descriptor_pool as _descriptor_pool
8
+ from google.protobuf import message as _message
9
+ from google.protobuf import reflection as _reflection
10
+ from google.protobuf import symbol_database as _symbol_database
11
+ # @@protoc_insertion_point(imports)
12
+
13
+ _sym_db = _symbol_database.Default()
14
+
15
+
16
+
17
+
18
+ DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$src/ray/protobuf/agent_manager.proto\x12\x07ray.rpc*F\n\x0e\x41gentRpcStatus\x12\x17\n\x13\x41GENT_RPC_STATUS_OK\x10\x00\x12\x1b\n\x17\x41GENT_RPC_STATUS_FAILED\x10\x01\x42\x03\xf8\x01\x01\x62\x06proto3')
19
+
20
+ _AGENTRPCSTATUS = DESCRIPTOR.enum_types_by_name['AgentRpcStatus']
21
+ AgentRpcStatus = enum_type_wrapper.EnumTypeWrapper(_AGENTRPCSTATUS)
22
+ AGENT_RPC_STATUS_OK = 0
23
+ AGENT_RPC_STATUS_FAILED = 1
24
+
25
+
26
+ if _descriptor._USE_C_DESCRIPTORS == False:
27
+
28
+ DESCRIPTOR._options = None
29
+ DESCRIPTOR._serialized_options = b'\370\001\001'
30
+ _AGENTRPCSTATUS._serialized_start=49
31
+ _AGENTRPCSTATUS._serialized_end=119
32
+ # @@protoc_insertion_point(module_scope)
infer_4_37_2/lib/python3.10/site-packages/ray/core/generated/autoscaler_pb2_grpc.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2
+ """Client and server classes corresponding to protobuf-defined services."""
3
+ import grpc
4
+
5
+ from . import autoscaler_pb2 as src_dot_ray_dot_protobuf_dot_autoscaler__pb2
6
+
7
+
8
+ class AutoscalerStateServiceStub(object):
9
+ """Missing associated documentation comment in .proto file."""
10
+
11
+ def __init__(self, channel):
12
+ """Constructor.
13
+
14
+ Args:
15
+ channel: A grpc.Channel.
16
+ """
17
+ self.GetClusterResourceState = channel.unary_unary(
18
+ '/ray.rpc.autoscaler.AutoscalerStateService/GetClusterResourceState',
19
+ request_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterResourceStateRequest.SerializeToString,
20
+ response_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterResourceStateReply.FromString,
21
+ )
22
+ self.ReportAutoscalingState = channel.unary_unary(
23
+ '/ray.rpc.autoscaler.AutoscalerStateService/ReportAutoscalingState',
24
+ request_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.ReportAutoscalingStateRequest.SerializeToString,
25
+ response_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.ReportAutoscalingStateReply.FromString,
26
+ )
27
+ self.RequestClusterResourceConstraint = channel.unary_unary(
28
+ '/ray.rpc.autoscaler.AutoscalerStateService/RequestClusterResourceConstraint',
29
+ request_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.RequestClusterResourceConstraintRequest.SerializeToString,
30
+ response_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.RequestClusterResourceConstraintReply.FromString,
31
+ )
32
+ self.GetClusterStatus = channel.unary_unary(
33
+ '/ray.rpc.autoscaler.AutoscalerStateService/GetClusterStatus',
34
+ request_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterStatusRequest.SerializeToString,
35
+ response_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterStatusReply.FromString,
36
+ )
37
+ self.DrainNode = channel.unary_unary(
38
+ '/ray.rpc.autoscaler.AutoscalerStateService/DrainNode',
39
+ request_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.DrainNodeRequest.SerializeToString,
40
+ response_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.DrainNodeReply.FromString,
41
+ )
42
+
43
+
44
+ class AutoscalerStateServiceServicer(object):
45
+ """Missing associated documentation comment in .proto file."""
46
+
47
+ def GetClusterResourceState(self, request, context):
48
+ """Missing associated documentation comment in .proto file."""
49
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
50
+ context.set_details('Method not implemented!')
51
+ raise NotImplementedError('Method not implemented!')
52
+
53
+ def ReportAutoscalingState(self, request, context):
54
+ """Missing associated documentation comment in .proto file."""
55
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
56
+ context.set_details('Method not implemented!')
57
+ raise NotImplementedError('Method not implemented!')
58
+
59
+ def RequestClusterResourceConstraint(self, request, context):
60
+ """Missing associated documentation comment in .proto file."""
61
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
62
+ context.set_details('Method not implemented!')
63
+ raise NotImplementedError('Method not implemented!')
64
+
65
+ def GetClusterStatus(self, request, context):
66
+ """Missing associated documentation comment in .proto file."""
67
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
68
+ context.set_details('Method not implemented!')
69
+ raise NotImplementedError('Method not implemented!')
70
+
71
+ def DrainNode(self, request, context):
72
+ """Missing associated documentation comment in .proto file."""
73
+ context.set_code(grpc.StatusCode.UNIMPLEMENTED)
74
+ context.set_details('Method not implemented!')
75
+ raise NotImplementedError('Method not implemented!')
76
+
77
+
78
+ def add_AutoscalerStateServiceServicer_to_server(servicer, server):
79
+ rpc_method_handlers = {
80
+ 'GetClusterResourceState': grpc.unary_unary_rpc_method_handler(
81
+ servicer.GetClusterResourceState,
82
+ request_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterResourceStateRequest.FromString,
83
+ response_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterResourceStateReply.SerializeToString,
84
+ ),
85
+ 'ReportAutoscalingState': grpc.unary_unary_rpc_method_handler(
86
+ servicer.ReportAutoscalingState,
87
+ request_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.ReportAutoscalingStateRequest.FromString,
88
+ response_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.ReportAutoscalingStateReply.SerializeToString,
89
+ ),
90
+ 'RequestClusterResourceConstraint': grpc.unary_unary_rpc_method_handler(
91
+ servicer.RequestClusterResourceConstraint,
92
+ request_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.RequestClusterResourceConstraintRequest.FromString,
93
+ response_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.RequestClusterResourceConstraintReply.SerializeToString,
94
+ ),
95
+ 'GetClusterStatus': grpc.unary_unary_rpc_method_handler(
96
+ servicer.GetClusterStatus,
97
+ request_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterStatusRequest.FromString,
98
+ response_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterStatusReply.SerializeToString,
99
+ ),
100
+ 'DrainNode': grpc.unary_unary_rpc_method_handler(
101
+ servicer.DrainNode,
102
+ request_deserializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.DrainNodeRequest.FromString,
103
+ response_serializer=src_dot_ray_dot_protobuf_dot_autoscaler__pb2.DrainNodeReply.SerializeToString,
104
+ ),
105
+ }
106
+ generic_handler = grpc.method_handlers_generic_handler(
107
+ 'ray.rpc.autoscaler.AutoscalerStateService', rpc_method_handlers)
108
+ server.add_generic_rpc_handlers((generic_handler,))
109
+
110
+
111
+ # This class is part of an EXPERIMENTAL API.
112
+ class AutoscalerStateService(object):
113
+ """Missing associated documentation comment in .proto file."""
114
+
115
+ @staticmethod
116
+ def GetClusterResourceState(request,
117
+ target,
118
+ options=(),
119
+ channel_credentials=None,
120
+ call_credentials=None,
121
+ insecure=False,
122
+ compression=None,
123
+ wait_for_ready=None,
124
+ timeout=None,
125
+ metadata=None):
126
+ return grpc.experimental.unary_unary(request, target, '/ray.rpc.autoscaler.AutoscalerStateService/GetClusterResourceState',
127
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterResourceStateRequest.SerializeToString,
128
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterResourceStateReply.FromString,
129
+ options, channel_credentials,
130
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
131
+
132
+ @staticmethod
133
+ def ReportAutoscalingState(request,
134
+ target,
135
+ options=(),
136
+ channel_credentials=None,
137
+ call_credentials=None,
138
+ insecure=False,
139
+ compression=None,
140
+ wait_for_ready=None,
141
+ timeout=None,
142
+ metadata=None):
143
+ return grpc.experimental.unary_unary(request, target, '/ray.rpc.autoscaler.AutoscalerStateService/ReportAutoscalingState',
144
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.ReportAutoscalingStateRequest.SerializeToString,
145
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.ReportAutoscalingStateReply.FromString,
146
+ options, channel_credentials,
147
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
148
+
149
+ @staticmethod
150
+ def RequestClusterResourceConstraint(request,
151
+ target,
152
+ options=(),
153
+ channel_credentials=None,
154
+ call_credentials=None,
155
+ insecure=False,
156
+ compression=None,
157
+ wait_for_ready=None,
158
+ timeout=None,
159
+ metadata=None):
160
+ return grpc.experimental.unary_unary(request, target, '/ray.rpc.autoscaler.AutoscalerStateService/RequestClusterResourceConstraint',
161
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.RequestClusterResourceConstraintRequest.SerializeToString,
162
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.RequestClusterResourceConstraintReply.FromString,
163
+ options, channel_credentials,
164
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
165
+
166
+ @staticmethod
167
+ def GetClusterStatus(request,
168
+ target,
169
+ options=(),
170
+ channel_credentials=None,
171
+ call_credentials=None,
172
+ insecure=False,
173
+ compression=None,
174
+ wait_for_ready=None,
175
+ timeout=None,
176
+ metadata=None):
177
+ return grpc.experimental.unary_unary(request, target, '/ray.rpc.autoscaler.AutoscalerStateService/GetClusterStatus',
178
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterStatusRequest.SerializeToString,
179
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.GetClusterStatusReply.FromString,
180
+ options, channel_credentials,
181
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
182
+
183
+ @staticmethod
184
+ def DrainNode(request,
185
+ target,
186
+ options=(),
187
+ channel_credentials=None,
188
+ call_credentials=None,
189
+ insecure=False,
190
+ compression=None,
191
+ wait_for_ready=None,
192
+ timeout=None,
193
+ metadata=None):
194
+ return grpc.experimental.unary_unary(request, target, '/ray.rpc.autoscaler.AutoscalerStateService/DrainNode',
195
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.DrainNodeRequest.SerializeToString,
196
+ src_dot_ray_dot_protobuf_dot_autoscaler__pb2.DrainNodeReply.FromString,
197
+ options, channel_credentials,
198
+ insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
infer_4_37_2/lib/python3.10/site-packages/ray/core/generated/common_pb2_grpc.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2
+ """Client and server classes corresponding to protobuf-defined services."""
3
+ import grpc
4
+