ZTWHHH committed on
Commit
e3776ef
·
verified ·
1 Parent(s): 7c29ffd

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc +0 -0
  2. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/aggregate.cpython-310.pyc +0 -0
  3. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/block.cpython-310.pyc +0 -0
  4. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc +0 -0
  5. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/exceptions.cpython-310.pyc +0 -0
  6. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/grouped_data.cpython-310.pyc +0 -0
  7. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc +0 -0
  8. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/preprocessor.cpython-310.pyc +0 -0
  9. infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/random_access_dataset.cpython-310.pyc +0 -0
  10. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py +16 -0
  11. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/optimizer.cpython-310.pyc +0 -0
  12. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/physical_plan.cpython-310.pyc +0 -0
  13. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py +79 -0
  14. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py +31 -0
  15. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py +58 -0
  16. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py +29 -0
  17. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/physical_plan.py +34 -0
  18. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__init__.py +0 -0
  19. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc +0 -0
  20. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc +0 -0
  21. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc +0 -0
  22. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc +0 -0
  23. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc +0 -0
  24. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc +0 -0
  25. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc +0 -0
  26. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc +0 -0
  27. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc +0 -0
  28. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc +0 -0
  29. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py +163 -0
  30. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py +20 -0
  31. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py +105 -0
  32. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py +74 -0
  33. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py +293 -0
  34. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py +60 -0
  35. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py +80 -0
  36. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py +95 -0
  37. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py +35 -0
  38. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc +0 -0
  39. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc +0 -0
  40. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc +0 -0
  41. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc +0 -0
  42. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc +0 -0
  43. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc +0 -0
  44. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc +0 -0
  45. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc +0 -0
  46. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py +30 -0
  47. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py +133 -0
  48. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py +464 -0
  49. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/randomize_blocks.py +77 -0
  50. infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/set_read_parallelism.py +132 -0
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.74 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/aggregate.cpython-310.pyc ADDED
Binary file (3.43 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/block.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc ADDED
Binary file (15.5 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (3.36 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/grouped_data.cpython-310.pyc ADDED
Binary file (21.4 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc ADDED
Binary file (37.2 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/preprocessor.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/random_access_dataset.cpython-310.pyc ADDED
Binary file (10.8 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .logical_operator import LogicalOperator
2
+ from .logical_plan import LogicalPlan
3
+ from .operator import Operator
4
+ from .optimizer import Optimizer, Rule
5
+ from .physical_plan import PhysicalPlan
6
+ from .plan import Plan
7
+
8
+ __all__ = [
9
+ "LogicalOperator",
10
+ "LogicalPlan",
11
+ "Operator",
12
+ "Optimizer",
13
+ "PhysicalPlan",
14
+ "Plan",
15
+ "Rule",
16
+ ]
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/optimizer.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/physical_plan.cpython-310.pyc ADDED
Binary file (1.42 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING, Iterator, List, Optional
2
+
3
+ from .operator import Operator
4
+ from ray.data.block import BlockMetadata
5
+
6
+ if TYPE_CHECKING:
7
+ from ray.data._internal.execution.interfaces import RefBundle
8
+
9
+
10
class LogicalOperator(Operator):
    """Abstract class for logical operators.

    A logical operator describes a transformation and is later converted into a
    physical operator.
    """

    def __init__(
        self,
        name: str,
        input_dependencies: List["LogicalOperator"],
        num_outputs: Optional[int] = None,
    ):
        super().__init__(
            name,
            input_dependencies,
        )
        for dep in input_dependencies:
            assert isinstance(dep, LogicalOperator), dep
        self._num_outputs = num_outputs

    def estimated_num_outputs(self) -> Optional[int]:
        """Return the estimated number of blocks this operator would output.

        This method does not execute the plan, so it does not take block
        splitting into consideration; it only considers high-level block
        constraints like `Dataset.repartition(num_blocks=X)`. A more accurate
        estimation can be given by `PhysicalOperator.num_outputs_total()`
        during execution.
        """
        if self._num_outputs is not None:
            return self._num_outputs
        # With a single upstream operator, pass its estimate through.
        if len(self._input_dependencies) == 1:
            return self._input_dependencies[0].estimated_num_outputs()
        return None

    # Override the following 3 methods to correct type hints.

    @property
    def input_dependencies(self) -> List["LogicalOperator"]:
        return super().input_dependencies  # type: ignore

    @property
    def output_dependencies(self) -> List["LogicalOperator"]:
        return super().output_dependencies  # type: ignore

    def post_order_iter(self) -> Iterator["LogicalOperator"]:
        return super().post_order_iter()  # type: ignore

    def output_data(self) -> Optional[List["RefBundle"]]:
        """The output data of this operator, or ``None`` if not known."""
        return None

    def aggregate_output_metadata(self) -> BlockMetadata:
        """A ``BlockMetadata`` that represents the aggregate metadata of the outputs.

        This method is used by methods like :meth:`~ray.data.Dataset.schema` to
        efficiently return metadata.
        """
        # All fields unknown by default; subclasses with known inputs override.
        return BlockMetadata(None, None, None, None, None)

    def is_lineage_serializable(self) -> bool:
        """Returns whether the lineage of this operator can be serialized.

        An operator is lineage serializable if you can serialize it on one
        machine and deserialize it on another without losing information.
        Operators that store object references (e.g., ``InputData``) aren't
        lineage serializable because the objects aren't available on the
        deserialized machine.
        """
        return True
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING, List
2
+
3
+ from .logical_operator import LogicalOperator
4
+ from .plan import Plan
5
+
6
+ if TYPE_CHECKING:
7
+ from ray.data import DataContext
8
+
9
+
10
class LogicalPlan(Plan):
    """The plan with a DAG of logical operators."""

    def __init__(self, dag: LogicalOperator, context: "DataContext"):
        super().__init__(context)
        self._dag = dag

    @property
    def dag(self) -> LogicalOperator:
        """Get the DAG of logical operators."""
        return self._dag

    def sources(self) -> List[LogicalOperator]:
        """List of operators that are sources for this plan's DAG."""
        upstream = self._dag.input_dependencies
        # An operator with no input dependencies is itself a source.
        if not any(upstream):
            return [self._dag]

        # Otherwise, recursively collect the sources of each upstream subtree.
        result: List[LogicalOperator] = []
        for op in upstream:
            result.extend(LogicalPlan(op, self._context).sources())
        return result
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Iterator, List
2
+
3
+
4
class Operator:
    """Abstract base class for operators.

    Operators live on the driver side of the Dataset only. Instances form a
    DAG: each operator records the operators it consumes from (inputs) and
    registers itself with each of them as a consumer (outputs).
    """

    def __init__(
        self,
        name: str,
        input_dependencies: List["Operator"],
    ):
        self._name = name
        self._input_dependencies = input_dependencies
        self._output_dependencies = []
        for dep in input_dependencies:
            assert isinstance(dep, Operator), dep
            # Register this operator as a downstream consumer of each input.
            dep._output_dependencies.append(self)

    @property
    def name(self) -> str:
        return self._name

    @property
    def input_dependencies(self) -> List["Operator"]:
        """List of operators that provide inputs for this operator."""
        assert hasattr(
            self, "_input_dependencies"
        ), "Operator.__init__() was not called."
        return self._input_dependencies

    @property
    def output_dependencies(self) -> List["Operator"]:
        """List of operators that consume outputs from this operator."""
        assert hasattr(
            self, "_output_dependencies"
        ), "Operator.__init__() was not called."
        return self._output_dependencies

    def post_order_iter(self) -> Iterator["Operator"]:
        """Depth-first traversal of this operator and its input dependencies."""
        for upstream in self.input_dependencies:
            yield from upstream.post_order_iter()
        yield self

    def __repr__(self) -> str:
        prefix = ""
        if self.input_dependencies:
            prefix = ", ".join(str(dep) for dep in self.input_dependencies)
            prefix += " -> "
        return prefix + f"{self.__class__.__name__}[{self._name}]"

    def __str__(self) -> str:
        return repr(self)
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ from .plan import Plan
4
+
5
+
6
class Rule:
    """Abstract class for a single optimization rule."""

    def apply(self, plan: Plan) -> Plan:
        """Apply the optimization rule to the execution plan.

        Subclasses must override this; the base implementation is abstract.
        """
        raise NotImplementedError
12
+
13
+
14
class Optimizer:
    """Abstract class for optimizers.

    An optimizer transforms a DAG of operators by applying a list of
    predefined rules, in order.
    """

    @property
    def rules(self) -> List[Rule]:
        """List of predefined rules for this optimizer."""
        raise NotImplementedError

    def optimize(self, plan: Plan) -> Plan:
        """Run every rule over the plan, feeding each result to the next."""
        optimized = plan
        for rule in self.rules:
            optimized = rule.apply(optimized)
        return optimized
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/physical_plan.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import TYPE_CHECKING, Dict
2
+
3
+ from .logical_operator import LogicalOperator
4
+ from .plan import Plan
5
+
6
+ if TYPE_CHECKING:
7
+ from ray.data import DataContext
8
+ from ray.data._internal.execution.interfaces import PhysicalOperator
9
+
10
+
11
class PhysicalPlan(Plan):
    """The plan with a DAG of physical operators."""

    def __init__(
        self,
        dag: "PhysicalOperator",
        op_map: Dict["PhysicalOperator", LogicalOperator],
        context: "DataContext",
    ):
        super().__init__(context)
        self._dag = dag
        # Tracks which logical operator each physical operator came from.
        self._op_map = op_map

    @property
    def dag(self) -> "PhysicalOperator":
        """Get the DAG of physical operators."""
        return self._dag

    @property
    def op_map(self) -> Dict["PhysicalOperator", LogicalOperator]:
        """Mapping from physical operators to their corresponding logical operator."""
        return self._op_map
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__init__.py ADDED
File without changes
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (194 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc ADDED
Binary file (5.46 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc ADDED
Binary file (1.06 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc ADDED
Binary file (4.62 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc ADDED
Binary file (3.54 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc ADDED
Binary file (9.9 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc ADDED
Binary file (2.24 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc ADDED
Binary file (3.17 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc ADDED
Binary file (3.59 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py ADDED
@@ -0,0 +1,163 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, List, Optional
2
+
3
+ from ray.data._internal.logical.interfaces import LogicalOperator
4
+ from ray.data._internal.planner.exchange.interfaces import ExchangeTaskSpec
5
+ from ray.data._internal.planner.exchange.shuffle_task_spec import ShuffleTaskSpec
6
+ from ray.data._internal.planner.exchange.sort_task_spec import SortKey, SortTaskSpec
7
+ from ray.data.aggregate import AggregateFn
8
+ from ray.data.block import BlockMetadata
9
+
10
+
11
class AbstractAllToAll(LogicalOperator):
    """Abstract class for logical operators that should be converted to a
    physical AllToAllOperator.
    """

    def __init__(
        self,
        name: str,
        input_op: LogicalOperator,
        num_outputs: Optional[int] = None,
        sub_progress_bar_names: Optional[List[str]] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """
        Args:
            name: Name for this operator. This is the name that will appear when
                inspecting the logical plan of a Dataset.
            input_op: The operator preceding this operator in the plan DAG. The outputs
                of `input_op` will be the inputs to this operator.
            num_outputs: The number of expected output bundles outputted by this
                operator.
            sub_progress_bar_names: Names of the sub-progress bars shown for the
                sub-stages of this operator, in execution order.
            ray_remote_args: Args to provide to ray.remote.
        """
        # LogicalOperator.__init__ already stores num_outputs as
        # self._num_outputs, so the duplicate assignment that used to follow
        # this call has been removed.
        super().__init__(name, [input_op], num_outputs)
        self._ray_remote_args = ray_remote_args or {}
        self._sub_progress_bar_names = sub_progress_bar_names
38
+
39
+
40
class RandomizeBlocks(AbstractAllToAll):
    """Logical operator for randomize_block_order."""

    def __init__(
        self,
        input_op: LogicalOperator,
        seed: Optional[int] = None,
    ):
        super().__init__(
            "RandomizeBlockOrder",
            input_op,
        )
        # Optional seed for deterministic block ordering.
        self._seed = seed

    def aggregate_output_metadata(self) -> BlockMetadata:
        # Only block order changes, so metadata passes straight through from
        # the single upstream operator.
        deps = self._input_dependencies
        assert len(deps) == 1, len(deps)
        return deps[0].aggregate_output_metadata()
57
+
58
+
59
class RandomShuffle(AbstractAllToAll):
    """Logical operator for random_shuffle."""

    def __init__(
        self,
        input_op: LogicalOperator,
        name: str = "RandomShuffle",
        seed: Optional[int] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        # A shuffle executes as a map stage followed by a reduce stage, each
        # with its own sub-progress bar.
        bar_names = [
            ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME,
            ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME,
        ]
        super().__init__(
            name,
            input_op,
            sub_progress_bar_names=bar_names,
            ray_remote_args=ray_remote_args,
        )
        # Optional seed for deterministic shuffling.
        self._seed = seed

    def aggregate_output_metadata(self) -> BlockMetadata:
        # Shuffling rearranges rows without changing aggregate metadata.
        deps = self._input_dependencies
        assert len(deps) == 1, len(deps)
        return deps[0].aggregate_output_metadata()
83
+
84
+
85
class Repartition(AbstractAllToAll):
    """Logical operator for repartition."""

    def __init__(
        self,
        input_op: LogicalOperator,
        num_outputs: int,
        shuffle: bool,
    ):
        # Shuffle-based repartition runs map + reduce sub-stages; the
        # split-based variant has a single sub-stage.
        if shuffle:
            bar_names = [
                ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME,
                ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME,
            ]
        else:
            bar_names = [
                ShuffleTaskSpec.SPLIT_REPARTITION_SUB_PROGRESS_BAR_NAME,
            ]
        super().__init__(
            "Repartition",
            input_op,
            num_outputs=num_outputs,
            sub_progress_bar_names=bar_names,
        )
        self._shuffle = shuffle

    def aggregate_output_metadata(self) -> BlockMetadata:
        # Repartitioning regroups rows into blocks without changing the
        # aggregate metadata of the dataset.
        deps = self._input_dependencies
        assert len(deps) == 1, len(deps)
        return deps[0].aggregate_output_metadata()
114
+
115
+
116
class Sort(AbstractAllToAll):
    """Logical operator for sort."""

    def __init__(
        self,
        input_op: LogicalOperator,
        sort_key: SortKey,
        batch_format: Optional[str] = "default",
    ):
        # Sorting samples boundary values first, then runs map + reduce
        # sub-stages.
        bar_names = [
            SortTaskSpec.SORT_SAMPLE_SUB_PROGRESS_BAR_NAME,
            ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME,
            ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME,
        ]
        super().__init__(
            "Sort",
            input_op,
            sub_progress_bar_names=bar_names,
        )
        self._sort_key = sort_key
        self._batch_format = batch_format

    def aggregate_output_metadata(self) -> BlockMetadata:
        # Sorting reorders rows without changing aggregate metadata.
        deps = self._input_dependencies
        assert len(deps) == 1, len(deps)
        return deps[0].aggregate_output_metadata()
140
+
141
+
142
class Aggregate(AbstractAllToAll):
    """Logical operator for aggregate."""

    def __init__(
        self,
        input_op: LogicalOperator,
        key: Optional[str],
        aggs: List[AggregateFn],
        batch_format: Optional[str] = "default",
    ):
        # Aggregation shares the sample/map/reduce sub-stages with Sort.
        bar_names = [
            SortTaskSpec.SORT_SAMPLE_SUB_PROGRESS_BAR_NAME,
            ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME,
            ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME,
        ]
        super().__init__(
            "Aggregate",
            input_op,
            sub_progress_bar_names=bar_names,
        )
        # Grouping key column (None means a global aggregation).
        self._key = key
        self._aggs = aggs
        self._batch_format = batch_format
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+
3
+ from ray.data._internal.logical.interfaces import LogicalOperator
4
+
5
+
6
class Count(LogicalOperator):
    """Logical operator that represents counting the number of rows in inputs.

    Physical operators that implement this logical operator should produce one
    or more rows with a single column named `Count.COLUMN_NAME`. Summing the
    values in this column yields the total number of rows in the dataset.
    """

    # Name of the single output column holding per-block row counts.
    COLUMN_NAME = "__num_rows"

    def __init__(
        self,
        input_dependencies: List["LogicalOperator"],
    ):
        super().__init__("Count", input_dependencies)
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ import functools
3
+ from typing import TYPE_CHECKING, List, Optional, Union
4
+
5
+ from ray.data._internal.execution.interfaces import RefBundle
6
+ from ray.data._internal.logical.interfaces import LogicalOperator
7
+ from ray.data._internal.util import unify_block_metadata_schema
8
+ from ray.data.block import Block, BlockMetadata
9
+ from ray.types import ObjectRef
10
+
11
+ if TYPE_CHECKING:
12
+ import pyarrow as pa
13
+
14
+ ArrowTable = Union["pa.Table", bytes]
15
+
16
+
17
class AbstractFrom(LogicalOperator, metaclass=abc.ABCMeta):
    """Abstract logical operator for `from_*`."""

    def __init__(
        self,
        input_blocks: List[ObjectRef[Block]],
        input_metadata: List[BlockMetadata],
    ):
        super().__init__(self.__class__.__name__, [], len(input_blocks))
        assert len(input_blocks) == len(input_metadata), (
            len(input_blocks),
            len(input_metadata),
        )
        # `owns_blocks` is False because this op may be shared by multiple Datasets.
        self._input_data = [
            RefBundle([(block, meta)], owns_blocks=False)
            for block, meta in zip(input_blocks, input_metadata)
        ]

    @property
    def input_data(self) -> List[RefBundle]:
        return self._input_data

    def output_data(self) -> Optional[List[RefBundle]]:
        # Inputs pass through unchanged, so the outputs are known up front.
        return self._input_data

    def aggregate_output_metadata(self) -> BlockMetadata:
        return self._cached_output_metadata

    @functools.cached_property
    def _cached_output_metadata(self) -> BlockMetadata:
        # Computed once; the input bundles are fixed at construction time.
        return BlockMetadata(
            num_rows=self._num_rows(),
            size_bytes=self._size_bytes(),
            schema=self._schema(),
            input_files=None,
            exec_stats=None,
        )

    def _num_rows(self):
        # Total row count, or None if any bundle's count is unknown.
        counts = [bundle.num_rows() for bundle in self._input_data]
        if any(count is None for count in counts):
            return None
        return sum(counts)

    def _size_bytes(self):
        # Total byte size, or None if any block's size is unknown.
        sizes = [
            meta.size_bytes
            for bundle in self._input_data
            for meta in bundle.metadata
        ]
        if any(size is None for size in sizes):
            return None
        return sum(sizes)

    def _schema(self):
        all_metadata = [
            meta for bundle in self._input_data for meta in bundle.metadata
        ]
        return unify_block_metadata_schema(all_metadata)

    def is_lineage_serializable(self) -> bool:
        # This operator isn't serializable because it contains ObjectRefs.
        return False
76
+
77
+
78
class FromItems(AbstractFrom):
    """Logical operator for `from_items`."""
82
+
83
+
84
class FromBlocks(AbstractFrom):
    """Logical operator for `from_blocks`."""
88
+
89
+
90
class FromNumpy(AbstractFrom):
    """Logical operator for `from_numpy`."""
94
+
95
+
96
class FromArrow(AbstractFrom):
    """Logical operator for `from_arrow`."""
100
+
101
+
102
class FromPandas(AbstractFrom):
    """Logical operator for `from_pandas`."""
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ from typing import Callable, List, Optional
3
+
4
+ from ray.data._internal.execution.interfaces import RefBundle
5
+ from ray.data._internal.logical.interfaces import LogicalOperator
6
+ from ray.data._internal.util import unify_block_metadata_schema
7
+ from ray.data.block import BlockMetadata
8
+
9
+
10
class InputData(LogicalOperator):
    """Logical operator for input data.

    This may hold cached blocks from a previous Dataset execution, or
    the arguments for read tasks.
    """

    def __init__(
        self,
        input_data: Optional[List[RefBundle]] = None,
        input_data_factory: Optional[Callable[[int], List[RefBundle]]] = None,
    ):
        assert (input_data is None) != (
            input_data_factory is None
        ), "Only one of input_data and input_data_factory should be set."
        # When bundles are provided directly, the output count is known.
        known_outputs = len(input_data) if input_data is not None else None
        super().__init__("InputData", [], known_outputs)
        self.input_data = input_data
        self.input_data_factory = input_data_factory

    def output_data(self) -> Optional[List[RefBundle]]:
        # Factory-based input: outputs are unknown until the factory runs.
        if self.input_data is None:
            return None
        return self.input_data

    def aggregate_output_metadata(self) -> BlockMetadata:
        return self._cached_output_metadata

    @functools.cached_property
    def _cached_output_metadata(self) -> BlockMetadata:
        if self.input_data is None:
            # Nothing is known ahead of execution for factory-based input.
            return BlockMetadata(None, None, None, None, None)

        return BlockMetadata(
            num_rows=self._num_rows(),
            size_bytes=self._size_bytes(),
            schema=self._schema(),
            input_files=None,
            exec_stats=None,
        )

    def _num_rows(self):
        # Total row count, or None if any bundle's count is unknown.
        assert self.input_data is not None
        counts = [bundle.num_rows() for bundle in self.input_data]
        if any(count is None for count in counts):
            return None
        return sum(counts)

    def _size_bytes(self):
        # Total byte size, or None if any block's size is unknown.
        assert self.input_data is not None
        sizes = [
            meta.size_bytes
            for bundle in self.input_data
            for meta in bundle.metadata
        ]
        if any(size is None for size in sizes):
            return None
        return sum(sizes)

    def _schema(self):
        assert self.input_data is not None
        all_metadata = [
            meta for bundle in self.input_data for meta in bundle.metadata
        ]
        return unify_block_metadata_schema(all_metadata)

    def is_lineage_serializable(self) -> bool:
        # This operator isn't serializable because it contains ObjectRefs.
        return False
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py ADDED
@@ -0,0 +1,293 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import inspect
2
+ import logging
3
+ from typing import Any, Callable, Dict, Iterable, List, Optional, Union
4
+
5
+ from ray.data._internal.compute import ComputeStrategy, TaskPoolStrategy
6
+ from ray.data._internal.logical.interfaces import LogicalOperator
7
+ from ray.data._internal.logical.operators.one_to_one_operator import AbstractOneToOne
8
+ from ray.data.block import UserDefinedFunction
9
+ from ray.data.context import DEFAULT_BATCH_SIZE
10
+ from ray.data.preprocessor import Preprocessor
11
+
12
+ logger = logging.getLogger(__name__)
13
+
14
+
15
class AbstractMap(AbstractOneToOne):
    """Base class for logical operators that compile to a physical MapOperator."""

    def __init__(
        self,
        name: str,
        input_op: Optional[LogicalOperator] = None,
        num_outputs: Optional[int] = None,
        *,
        min_rows_per_bundled_input: Optional[int] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
    ):
        """
        Args:
            name: Operator name shown when inspecting a Dataset's logical plan.
            input_op: The upstream operator in the plan DAG; its outputs become
                this operator's inputs.
            num_outputs: Estimated number of output blocks, if known.
            min_rows_per_bundled_input: Target number of rows to pass to
                ``MapOperator._add_bundled_input()``.
            ray_remote_args: Args to provide to ray.remote.
            ray_remote_args_fn: A function that returns a dictionary of remote
                args passed to each map worker. Called before initializing each
                actor/task so that arguments can be generated dynamically;
                returned entries always override ``ray_remote_args``. Note:
                this is an advanced, experimental feature.
        """
        super().__init__(name, input_op, num_outputs)
        self._ray_remote_args_fn = ray_remote_args_fn
        # Normalize "no remote args" to an empty dict so downstream code can
        # merge/iterate without None checks.
        self._ray_remote_args = ray_remote_args or {}
        self._min_rows_per_bundled_input = min_rows_per_bundled_input
50
+
51
+
52
class AbstractUDFMap(AbstractMap):
    """Abstract class for logical operators performing a UDF that should be converted
    to physical MapOperator.
    """

    def __init__(
        self,
        name: str,
        input_op: LogicalOperator,
        fn: UserDefinedFunction,
        fn_args: Optional[Iterable[Any]] = None,
        fn_kwargs: Optional[Dict[str, Any]] = None,
        fn_constructor_args: Optional[Iterable[Any]] = None,
        fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
        min_rows_per_bundled_input: Optional[int] = None,
        compute: Optional[Union[str, ComputeStrategy]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """
        Args:
            name: Name for this operator. This is the name that will appear when
                inspecting the logical plan of a Dataset.
            input_op: The operator preceding this operator in the plan DAG. The outputs
                of `input_op` will be the inputs to this operator.
            fn: User-defined function to be called.
            fn_args: Arguments to `fn`.
            fn_kwargs: Keyword arguments to `fn`.
            fn_constructor_args: Arguments to provide to the initializor of `fn` if
                `fn` is a callable class.
            fn_constructor_kwargs: Keyword Arguments to provide to the initializor of
                `fn` if `fn` is a callable class.
            min_rows_per_bundled_input: The target number of rows to pass to
                ``MapOperator._add_bundled_input()``.
            compute: The compute strategy, either ``"tasks"`` (default) to use Ray
                tasks, or ``"actors"`` to use an autoscaling actor pool.
            ray_remote_args_fn: A function that returns a dictionary of remote args
                passed to each map worker. The purpose of this argument is to generate
                dynamic arguments for each actor/task, and will be called each time
                prior to initializing the worker. Args returned from this dict will
                always override the args in ``ray_remote_args``. Note: this is an
                advanced, experimental feature.
            ray_remote_args: Args to provide to ray.remote.
        """
        name = self._get_operator_name(name, fn)
        # Forward ray_remote_args_fn to the base class rather than assigning
        # self._ray_remote_args_fn after the fact: AbstractMap.__init__ already
        # accepts and stores it, and the duplicate assignment was redundant.
        super().__init__(
            name,
            input_op,
            min_rows_per_bundled_input=min_rows_per_bundled_input,
            ray_remote_args=ray_remote_args,
            ray_remote_args_fn=ray_remote_args_fn,
        )
        self._fn = fn
        self._fn_args = fn_args
        self._fn_kwargs = fn_kwargs
        self._fn_constructor_args = fn_constructor_args
        self._fn_constructor_kwargs = fn_constructor_kwargs
        # Default to task-based execution when no strategy is specified.
        self._compute = compute or TaskPoolStrategy()

    def _get_operator_name(self, op_name: str, fn: UserDefinedFunction):
        """Gets the Operator name including the map `fn` UDF name."""
        # If the input `fn` is a Preprocessor, the
        # name is simply the name of the Preprocessor class.
        if inspect.ismethod(fn) and isinstance(fn.__self__, Preprocessor):
            return fn.__self__.__class__.__name__

        # Otherwise, it takes the form of `<MapOperator class>(<UDF name>)`,
        # e.g. `MapBatches(my_udf)`.
        try:
            if inspect.isclass(fn):
                # callable class
                return f"{op_name}({fn.__name__})"
            elif inspect.ismethod(fn):
                # class method
                return f"{op_name}({fn.__self__.__class__.__name__}.{fn.__name__})"
            elif inspect.isfunction(fn):
                # normal function or lambda function.
                return f"{op_name}({fn.__name__})"
            else:
                # callable object.
                return f"{op_name}({fn.__class__.__name__})"
        except AttributeError as e:
            # Naming is best-effort: fall back rather than failing plan construction.
            logger.error("Failed to get name of UDF %s: %s", fn, e)
            return "<unknown>"
136
+
137
+
138
class MapBatches(AbstractUDFMap):
    """Logical operator for map_batches."""

    def __init__(
        self,
        input_op: LogicalOperator,
        fn: UserDefinedFunction,
        batch_size: Optional[int] = DEFAULT_BATCH_SIZE,
        batch_format: str = "default",
        zero_copy_batch: bool = False,
        fn_args: Optional[Iterable[Any]] = None,
        fn_kwargs: Optional[Dict[str, Any]] = None,
        fn_constructor_args: Optional[Iterable[Any]] = None,
        fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
        min_rows_per_bundled_input: Optional[int] = None,
        compute: Optional[Union[str, ComputeStrategy]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """See ``AbstractUDFMap`` for the shared argument descriptions."""
        super().__init__(
            name="MapBatches",
            input_op=input_op,
            fn=fn,
            fn_args=fn_args,
            fn_kwargs=fn_kwargs,
            fn_constructor_args=fn_constructor_args,
            fn_constructor_kwargs=fn_constructor_kwargs,
            min_rows_per_bundled_input=min_rows_per_bundled_input,
            compute=compute,
            ray_remote_args_fn=ray_remote_args_fn,
            ray_remote_args=ray_remote_args,
        )
        # Batch-shaping options specific to map_batches.
        self._batch_format = batch_format
        self._batch_size = batch_size
        self._zero_copy_batch = zero_copy_batch

    @property
    def can_modify_num_rows(self) -> bool:
        # NOTE(review): declared as row-count preserving here; used by
        # optimizer rules such as limit pushdown.
        return False
177
+
178
+
179
class MapRows(AbstractUDFMap):
    """Logical operator for map."""

    def __init__(
        self,
        input_op: LogicalOperator,
        fn: UserDefinedFunction,
        fn_args: Optional[Iterable[Any]] = None,
        fn_kwargs: Optional[Dict[str, Any]] = None,
        fn_constructor_args: Optional[Iterable[Any]] = None,
        fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
        compute: Optional[Union[str, ComputeStrategy]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """See ``AbstractUDFMap`` for the shared argument descriptions."""
        super().__init__(
            name="Map",
            input_op=input_op,
            fn=fn,
            fn_args=fn_args,
            fn_kwargs=fn_kwargs,
            fn_constructor_args=fn_constructor_args,
            fn_constructor_kwargs=fn_constructor_kwargs,
            compute=compute,
            ray_remote_args_fn=ray_remote_args_fn,
            ray_remote_args=ray_remote_args,
        )

    @property
    def can_modify_num_rows(self) -> bool:
        # map is one row in, one row out.
        return False
210
+
211
+
212
class Filter(AbstractUDFMap):
    """Logical operator for filter."""

    def __init__(
        self,
        input_op: LogicalOperator,
        fn: UserDefinedFunction,
        compute: Optional[Union[str, ComputeStrategy]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """See ``AbstractUDFMap`` for the shared argument descriptions."""
        super().__init__(
            name="Filter",
            input_op=input_op,
            fn=fn,
            compute=compute,
            ray_remote_args_fn=ray_remote_args_fn,
            ray_remote_args=ray_remote_args,
        )

    @property
    def can_modify_num_rows(self) -> bool:
        # Filtering drops rows, so the output row count can differ from the input.
        return True
235
+
236
+
237
class Project(AbstractMap):
    """Logical operator for select_columns."""

    def __init__(
        self,
        input_op: LogicalOperator,
        cols: List[str],
        compute: Optional[Union[str, ComputeStrategy]] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """
        Args:
            input_op: The upstream operator whose outputs are projected.
            cols: Names of the columns to keep.
            compute: Optional compute strategy for the projection.
            ray_remote_args: Args to provide to ray.remote.
        """
        super().__init__("Project", input_op=input_op, ray_remote_args=ray_remote_args)
        self._cols = cols
        self._compute = compute
        # The projection runs as a pyarrow batch transform without copying data.
        self._batch_format = "pyarrow"
        self._batch_size = DEFAULT_BATCH_SIZE
        self._zero_copy_batch = True

    @property
    def cols(self) -> List[str]:
        """The column names selected by this projection."""
        return self._cols

    @property
    def can_modify_num_rows(self) -> bool:
        # Column selection keeps every row.
        return False
261
+
262
+
263
class FlatMap(AbstractUDFMap):
    """Logical operator for flat_map."""

    def __init__(
        self,
        input_op: LogicalOperator,
        fn: UserDefinedFunction,
        fn_args: Optional[Iterable[Any]] = None,
        fn_kwargs: Optional[Dict[str, Any]] = None,
        fn_constructor_args: Optional[Iterable[Any]] = None,
        fn_constructor_kwargs: Optional[Dict[str, Any]] = None,
        compute: Optional[Union[str, ComputeStrategy]] = None,
        ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
    ):
        """See ``AbstractUDFMap`` for the shared argument descriptions."""
        super().__init__(
            name="FlatMap",
            input_op=input_op,
            fn=fn,
            fn_args=fn_args,
            fn_kwargs=fn_kwargs,
            fn_constructor_args=fn_constructor_args,
            fn_constructor_kwargs=fn_constructor_kwargs,
            compute=compute,
            ray_remote_args_fn=ray_remote_args_fn,
            ray_remote_args=ray_remote_args,
        )

    @property
    def can_modify_num_rows(self) -> bool:
        # Each input row may expand to zero or more output rows.
        return True
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from ray.data._internal.logical.interfaces import LogicalOperator
4
+
5
+
6
class NAry(LogicalOperator):
    """Base class for n-ary operators, which take multiple input operators."""

    def __init__(
        self,
        *input_ops: LogicalOperator,
        num_outputs: Optional[int] = None,
    ):
        """
        Args:
            input_ops: The input operators.
            num_outputs: Estimated number of output blocks, if known.
        """
        # The concrete subclass name doubles as the operator name in the plan.
        super().__init__(type(self).__name__, list(input_ops), num_outputs)
19
+
20
+
21
class Zip(NAry):
    """Logical operator for zip."""

    def __init__(
        self,
        left_input_op: LogicalOperator,
        right_input_op: LogicalOperator,
    ):
        """
        Args:
            left_input_op: The input operator on the left-hand side.
            right_input_op: The input operator on the right-hand side.
        """
        super().__init__(left_input_op, right_input_op)

    def estimated_num_outputs(self):
        # Zip produces as many outputs as its larger input; unknown if either
        # side's estimate is unavailable.
        left, right = (
            dep.estimated_num_outputs() for dep in self._input_dependencies
        )
        if left is None or right is None:
            return None
        return max(left, right)
42
+
43
+
44
class Union(NAry):
    """Logical operator for union."""

    def __init__(
        self,
        *input_ops: LogicalOperator,
    ):
        """
        Args:
            input_ops: The operators whose outputs are concatenated.
        """
        super().__init__(*input_ops)

    def estimated_num_outputs(self):
        """Return the sum of all inputs' output estimates, or None if any is unknown."""
        total_num_outputs = 0
        # Renamed the loop variable from ``input``, which shadowed the builtin.
        for input_op in self._input_dependencies:
            num_outputs = input_op.estimated_num_outputs()
            if num_outputs is None:
                return None
            total_num_outputs += num_outputs
        return total_num_outputs
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import abc
2
+ from typing import Optional
3
+
4
+ from ray.data._internal.logical.interfaces import LogicalOperator
5
+ from ray.data.block import BlockMetadata
6
+
7
+
8
class AbstractOneToOne(LogicalOperator):
    """Abstract class for one-to-one logical operators, which
    have one input and one output dependency.
    """

    def __init__(
        self,
        name: str,
        input_op: Optional[LogicalOperator],
        num_outputs: Optional[int] = None,
    ):
        """
        Args:
            name: Name for this operator. This is the name that will appear when
                inspecting the logical plan of a Dataset.
            input_op: The operator preceding this operator in the plan DAG. The outputs
                of `input_op` will be the inputs to this operator.
            num_outputs: Estimated number of output blocks, if known.
        """
        # Compare against None explicitly: truthiness would silently drop an
        # input operator whose class happens to define __bool__/__len__.
        super().__init__(name, [input_op] if input_op is not None else [], num_outputs)

    @property
    def input_dependency(self) -> LogicalOperator:
        """The single upstream operator this operator consumes from."""
        return self._input_dependencies[0]

    @property
    @abc.abstractmethod
    def can_modify_num_rows(self) -> bool:
        """Whether this operator can modify the number of rows,
        i.e. number of input rows != number of output rows."""
37
+
38
+
39
class Limit(AbstractOneToOne):
    """Logical operator for limit."""

    def __init__(
        self,
        input_op: LogicalOperator,
        limit: int,
    ):
        """
        Args:
            input_op: The upstream operator to truncate.
            limit: Maximum number of rows to emit.
        """
        super().__init__(
            f"limit={limit}",
            input_op,
        )
        self._limit = limit

    @property
    def can_modify_num_rows(self) -> bool:
        # Limit truncates its input, so output rows <= input rows.
        return True

    def aggregate_output_metadata(self) -> BlockMetadata:
        """Return aggregate metadata for this operator's outputs.

        Row count is capped at the limit; size is unknown because we can't
        tell which rows are dropped.
        """
        assert len(self._input_dependencies) == 1, len(self._input_dependencies)
        # Fetch the upstream metadata once instead of once per field: the
        # upstream computation may be expensive and is not guaranteed cached.
        input_meta = self._input_dependencies[0].aggregate_output_metadata()
        num_rows = input_meta.num_rows
        if num_rows is not None:
            num_rows = min(num_rows, self._limit)
        return BlockMetadata(
            num_rows=num_rows,
            size_bytes=None,
            schema=input_meta.schema,
            input_files=input_meta.input_files,
            exec_stats=None,
        )

    def _schema(self):
        # Schema is unchanged by a limit.
        assert len(self._input_dependencies) == 1, len(self._input_dependencies)
        return self._input_dependencies[0].aggregate_output_metadata().schema

    def _num_rows(self):
        # Known input row count capped at the limit; unknown otherwise.
        assert len(self._input_dependencies) == 1, len(self._input_dependencies)
        input_rows = self._input_dependencies[0].aggregate_output_metadata().num_rows
        if input_rows is not None:
            return min(input_rows, self._limit)
        else:
            return None

    def _input_files(self):
        # Input files pass through unchanged.
        assert len(self._input_dependencies) == 1, len(self._input_dependencies)
        return self._input_dependencies[0].aggregate_output_metadata().input_files
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ from typing import Any, Dict, Optional, Union
3
+
4
+ from ray.data._internal.logical.operators.map_operator import AbstractMap
5
+ from ray.data._internal.util import unify_block_metadata_schema
6
+ from ray.data.block import BlockMetadata
7
+ from ray.data.datasource.datasource import Datasource, Reader
8
+
9
+
10
class Read(AbstractMap):
    """Logical operator for read."""

    def __init__(
        self,
        datasource: Datasource,
        datasource_or_legacy_reader: Union[Datasource, Reader],
        parallelism: int,
        mem_size: Optional[int],
        num_outputs: Optional[int] = None,
        ray_remote_args: Optional[Dict[str, Any]] = None,
        concurrency: Optional[int] = None,
    ):
        """
        Args:
            datasource: The datasource to read from.
            datasource_or_legacy_reader: Either the datasource itself or a
                legacy ``Reader`` wrapping it.
            parallelism: User-requested read parallelism; the effective value
                may be detected later (see ``set_detected_parallelism``).
            mem_size: Estimated in-memory size of the data, if known.
            num_outputs: Estimated number of output blocks, if known.
            ray_remote_args: Args to provide to ray.remote.
            concurrency: Optional cap on concurrent read tasks.
        """
        # Read is a source operator, so it has no input op (None).
        super().__init__(
            f"Read{datasource.get_name()}",
            None,
            num_outputs,
            ray_remote_args=ray_remote_args,
        )
        self._datasource = datasource
        self._datasource_or_legacy_reader = datasource_or_legacy_reader
        self._parallelism = parallelism
        self._mem_size = mem_size
        self._concurrency = concurrency
        # Filled in later by the optimizer (or the user) via
        # set_detected_parallelism().
        self._detected_parallelism = None

    def set_detected_parallelism(self, parallelism: int):
        """
        Set the true parallelism that should be used during execution. This
        should be specified by the user or detected by the optimizer.
        """
        self._detected_parallelism = parallelism

    def get_detected_parallelism(self) -> int:
        """
        Get the true parallelism that should be used during execution.
        """
        return self._detected_parallelism

    def aggregate_output_metadata(self) -> BlockMetadata:
        """A ``BlockMetadata`` that represents the aggregate metadata of the outputs.

        This method gets metadata from the read tasks. It doesn't trigger any actual
        execution.
        """
        # Delegates to a cached property so the (possibly expensive) read-task
        # inspection below runs at most once per operator instance.
        return self._cached_output_metadata

    @functools.cached_property
    def _cached_output_metadata(self) -> BlockMetadata:
        # Legacy datasources might not implement `get_read_tasks`.
        if self._datasource.should_create_reader:
            # All-None metadata means "unknown".
            return BlockMetadata(None, None, None, None, None)

        # HACK: Try to get a single read task to get the metadata.
        read_tasks = self._datasource.get_read_tasks(1)
        if len(read_tasks) == 0:
            # If there are no read tasks, the dataset is probably empty.
            return BlockMetadata(None, None, None, None, None)

        # `get_read_tasks` isn't guaranteed to return exactly one read task.
        metadata = [read_task.metadata for read_task in read_tasks]

        # Row and byte counts are only meaningful if every task reports one.
        if all(meta.num_rows is not None for meta in metadata):
            num_rows = sum(meta.num_rows for meta in metadata)
        else:
            num_rows = None

        if all(meta.size_bytes is not None for meta in metadata):
            size_bytes = sum(meta.size_bytes for meta in metadata)
        else:
            size_bytes = None

        schema = unify_block_metadata_schema(metadata)

        input_files = []
        for meta in metadata:
            if meta.input_files is not None:
                input_files.extend(meta.input_files)

        return BlockMetadata(
            num_rows=num_rows,
            size_bytes=size_bytes,
            schema=schema,
            input_files=input_files,
            exec_stats=None,
        )
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Dict, Optional, Union
2
+
3
+ from ray.data._internal.logical.interfaces import LogicalOperator
4
+ from ray.data._internal.logical.operators.map_operator import AbstractMap
5
+ from ray.data.datasource.datasink import Datasink
6
+ from ray.data.datasource.datasource import Datasource
7
+
8
+
9
class Write(AbstractMap):
    """Logical operator for write."""

    def __init__(
        self,
        input_op: LogicalOperator,
        datasink_or_legacy_datasource: Union[Datasink, Datasource],
        ray_remote_args: Optional[Dict[str, Any]] = None,
        concurrency: Optional[int] = None,
        **write_args,
    ):
        """
        Args:
            input_op: The upstream operator whose outputs are written.
            datasink_or_legacy_datasource: Destination sink, or a legacy datasource.
            ray_remote_args: Args to provide to ray.remote.
            concurrency: Optional cap on concurrent write tasks.
            write_args: Extra keyword args forwarded to the write.
        """
        # Datasinks may request a minimum row count per write call; legacy
        # datasources carry no such hint.
        if isinstance(datasink_or_legacy_datasource, Datasink):
            min_rows = datasink_or_legacy_datasource.num_rows_per_write
        else:
            min_rows = None

        super().__init__(
            "Write",
            input_op,
            min_rows_per_bundled_input=min_rows,
            ray_remote_args=ray_remote_args,
        )
        self._concurrency = concurrency
        self._write_args = write_args
        self._datasink_or_legacy_datasource = datasink_or_legacy_datasource
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (410 Bytes). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc ADDED
Binary file (1.61 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc ADDED
Binary file (3.5 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc ADDED
Binary file (10.3 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc ADDED
Binary file (2.13 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc ADDED
Binary file (4.27 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc ADDED
Binary file (3.11 kB). View file
 
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+
3
+ from ray.data._internal.execution.interfaces import PhysicalOperator
4
+ from ray.data._internal.logical.interfaces import PhysicalPlan, Rule
5
+
6
+
7
class InheritTargetMaxBlockSizeRule(Rule):
    """For each op that has overridden the default target max block size,
    propagate to upstream ops until we reach an op that has also overridden the
    target max block size."""

    def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
        self._propagate_target_max_block_size_to_upstream_ops(plan.dag)
        return plan

    def _propagate_target_max_block_size_to_upstream_ops(
        self, dag: PhysicalOperator, target_max_block_size: Optional[int] = None
    ):
        # Iterative DFS; each stack entry carries the block size inherited
        # from its downstream ancestors (None = nothing to inherit yet).
        pending = [(dag, target_max_block_size)]
        while pending:
            op, inherited = pending.pop()
            if op.target_max_block_size is not None:
                # This op overrides the setting; its upstream ops inherit it.
                inherited = op.target_max_block_size
            elif inherited is not None:
                # No override here, so adopt the downstream op's value.
                op.set_target_max_block_size(inherited)
            for upstream_op in op.input_dependencies:
                pending.append((upstream_op, inherited))
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py ADDED
@@ -0,0 +1,133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ from collections import deque
3
+ from typing import Iterable, List
4
+
5
+ from ray.data._internal.logical.interfaces import LogicalOperator, LogicalPlan, Rule
6
+ from ray.data._internal.logical.operators.one_to_one_operator import (
7
+ AbstractOneToOne,
8
+ Limit,
9
+ )
10
+ from ray.data._internal.logical.operators.read_operator import Read
11
+
12
+
13
class LimitPushdownRule(Rule):
    """Rule for pushing down the limit operator.

    When a limit operator is present, we apply the limit on the
    most upstream operator that supports it. Notably, we move the
    Limit operator downstream from Read op, any other non-OneToOne operator,
    or any operator which could potentially change the number of output rows.

    In addition, we also fuse consecutive Limit operators into a single
    Limit operator, i.e. `Limit[n] -> Limit[m]` becomes `Limit[min(n, m)]`.
    """

    def apply(self, plan: LogicalPlan) -> LogicalPlan:
        # Pushdown first so that Limits moved next to each other can then be
        # fused by the second pass.
        optimized_dag = self._apply_limit_pushdown(plan.dag)
        optimized_dag = self._apply_limit_fusion(optimized_dag)
        return LogicalPlan(dag=optimized_dag, context=plan.context)

    def _apply_limit_pushdown(self, op: LogicalOperator) -> LogicalOperator:
        """Given a DAG of LogicalOperators, traverse the DAG and push down
        Limit operators, i.e. move Limit operators as far upstream as possible.

        Returns a new LogicalOperator with the Limit operators pushed down."""
        # Post-order traversal.
        nodes: Iterable[LogicalOperator] = deque()
        for node in op.post_order_iter():
            nodes.appendleft(node)

        while len(nodes) > 0:
            current_op = nodes.pop()

            # If we encounter a Limit op, move it upstream until it reaches:
            # - Read operator
            # - A non-AbstractOneToOne operator (e.g. AbstractAllToAll)
            # - An AbstractOneToOne operator that could change the number of output rows

            # TODO(scottjlee): in our current abstraction, we have Read extend
            # AbstractMap (with no input dependency), which extends AbstractOneToOne.
            # So we have to explicitly separate the Read op in its own check.
            # We should remove this case once we refactor Read op to no longer
            # be an AbstractOneToOne op.
            if isinstance(current_op, Limit):
                # Work on a copy so the original DAG nodes aren't mutated
                # while we rewire dependencies.
                limit_op_copy = copy.copy(current_op)

                # Traverse up the DAG until we reach the first operator that meets
                # one of the conditions above, which will serve as the new input
                # into the Limit operator.
                new_input_into_limit = current_op.input_dependency
                ops_between_new_input_and_limit: List[LogicalOperator] = []
                while (
                    isinstance(new_input_into_limit, AbstractOneToOne)
                    and not isinstance(new_input_into_limit, Read)
                    and not getattr(new_input_into_limit, "can_modify_num_rows", False)
                ):
                    new_input_into_limit_copy = copy.copy(new_input_into_limit)
                    ops_between_new_input_and_limit.append(new_input_into_limit_copy)
                    new_input_into_limit = new_input_into_limit.input_dependency

                # Link the Limit operator and its newly designated input op from above.
                limit_op_copy._input_dependencies = [new_input_into_limit]
                new_input_into_limit._output_dependencies = [limit_op_copy]

                # Build the chain of operator dependencies between the new
                # input and the Limit operator, using copies of traversed operators.
                # The list is ordered downstream-to-upstream, so index 0 is the
                # operator closest to the Limit's original position.
                ops_between_new_input_and_limit.append(limit_op_copy)
                for idx in range(len(ops_between_new_input_and_limit) - 1):
                    curr_op, up_op = (
                        ops_between_new_input_and_limit[idx],
                        ops_between_new_input_and_limit[idx + 1],
                    )
                    curr_op._input_dependencies = [up_op]
                    up_op._output_dependencies = [curr_op]
                    # Add the copied operator to the list of nodes to be traversed.
                    nodes.append(curr_op)

                # Link the Limit operator to its new input operator.
                for limit_output_op in current_op.output_dependencies:
                    limit_output_op._input_dependencies = [
                        ops_between_new_input_and_limit[0]
                    ]
                last_op = ops_between_new_input_and_limit[0]
                last_op._output_dependencies = current_op.output_dependencies

        # NOTE: the last node popped from the post-order deque is the DAG root.
        return current_op

    def _apply_limit_fusion(self, op: LogicalOperator) -> LogicalOperator:
        """Given a DAG of LogicalOperators, traverse the DAG and fuse all
        back-to-back Limit operators, i.e.
        Limit[n] -> Limit[m] becomes Limit[min(n, m)].

        Returns a new LogicalOperator with the Limit operators fusion applied."""

        # Post-order traversal.
        nodes: Iterable[LogicalOperator] = deque()
        for node in op.post_order_iter():
            nodes.appendleft(node)

        while len(nodes) > 0:
            current_op = nodes.pop()

            # If we encounter two back-to-back Limit operators, fuse them.
            if isinstance(current_op, Limit):
                upstream_op = current_op.input_dependency
                if isinstance(upstream_op, Limit):
                    # min(n, m) rows survive two stacked limits.
                    new_limit = min(current_op._limit, upstream_op._limit)
                    fused_limit_op = Limit(upstream_op.input_dependency, new_limit)

                    # Link the fused Limit operator to its input and output ops, i.e.:
                    # `upstream_input -> limit_upstream -> limit_downstream -> downstream_output` # noqa: E501
                    # becomes `upstream_input -> fused_limit -> downstream_output`
                    fused_limit_op._input_dependencies = upstream_op.input_dependencies
                    fused_limit_op._output_dependencies = current_op.output_dependencies

                    # Replace occurrences of the upstream Limit operator in
                    # output_dependencies with the newly fused Limit operator.
                    upstream_input = upstream_op.input_dependency
                    upstream_input._output_dependencies = [fused_limit_op]

                    for current_output in current_op.output_dependencies:
                        current_output._input_dependencies = [fused_limit_op]
                    # Re-visit the fused op in case it can fuse again with its
                    # own upstream Limit.
                    nodes.append(fused_limit_op)
        # NOTE: the last node popped from the post-order deque is the DAG root.
        return current_op
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py ADDED
@@ -0,0 +1,464 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional, Tuple
2
+
3
+ # TODO(Clark): Remove compute dependency once we delete the legacy compute.
4
+ from ray.data._internal.compute import get_compute, is_task_compute
5
+ from ray.data._internal.execution.interfaces import (
6
+ PhysicalOperator,
7
+ RefBundle,
8
+ TaskContext,
9
+ )
10
+ from ray.data._internal.execution.operators.actor_pool_map_operator import (
11
+ ActorPoolMapOperator,
12
+ )
13
+ from ray.data._internal.execution.operators.base_physical_operator import (
14
+ AllToAllOperator,
15
+ )
16
+ from ray.data._internal.execution.operators.map_operator import MapOperator
17
+ from ray.data._internal.execution.operators.task_pool_map_operator import (
18
+ TaskPoolMapOperator,
19
+ )
20
+ from ray.data._internal.logical.interfaces import PhysicalPlan, Rule
21
+ from ray.data._internal.logical.operators.all_to_all_operator import (
22
+ AbstractAllToAll,
23
+ RandomShuffle,
24
+ Repartition,
25
+ )
26
+ from ray.data._internal.logical.operators.map_operator import AbstractUDFMap
27
+ from ray.data._internal.stats import StatsDict
28
+ from ray.data.context import DataContext
29
+
30
+ # Scheduling strategy can be inherited from upstream operator if not specified.
31
+ INHERITABLE_REMOTE_ARGS = ["scheduling_strategy"]
32
+
33
+
34
class OperatorFusionRule(Rule):
    """Fuses linear chains of compatible physical operators."""

    def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
        """Return a new PhysicalPlan with compatible operator pairs fused.

        Fusion happens in two passes over the DAG: first back-to-back
        MapOperator pairs, then MapOperator -> AllToAllOperator pairs.
        """
        self._op_map = plan.op_map.copy()
        # Do DFS fusion on compatible pairwise operators in two passes.
        # In the first pass, only fuse back-to-back map operators together.
        fused_dag = self._fuse_map_operators_in_dag(plan.dag)

        # Now that we have fused together all back-to-back map operators,
        # we fuse together MapOperator -> AllToAllOperator pairs.
        fused_dag = self._fuse_all_to_all_operators_in_dag(fused_dag)

        # Update output dependencies after fusion.
        # TODO(hchen): Instead of updating the dependencies manually,
        # we need a better abstraction for manipulating the DAG.
        self._remove_output_depes(fused_dag)
        self._update_output_depes(fused_dag)

        new_plan = PhysicalPlan(fused_dag, self._op_map, plan.context)
        return new_plan

    def _remove_output_depes(self, op: PhysicalOperator) -> None:
        # Clear the output-dependency links of every upstream operator so
        # they can be rebuilt from the (possibly fused) input links.
        for input in op._input_dependencies:
            input._output_dependencies = []
            self._remove_output_depes(input)

    def _update_output_depes(self, op: PhysicalOperator) -> None:
        # Rebuild output-dependency links by mirroring input dependencies.
        for input in op._input_dependencies:
            input._output_dependencies.append(op)
            self._update_output_depes(input)

    def _fuse_map_operators_in_dag(self, dag: PhysicalOperator) -> MapOperator:
        """Starting at the given operator, traverses up the DAG of operators
        and recursively fuses compatible MapOperator -> MapOperator pairs.
        Returns the current (root) operator after completing upstream operator fusions.
        """
        upstream_ops = dag.input_dependencies
        # Repeatedly fold the (single) upstream map op into `dag` until the
        # pair is no longer fusable.
        while (
            len(upstream_ops) == 1
            and isinstance(dag, MapOperator)
            and isinstance(upstream_ops[0], MapOperator)
            and self._can_fuse(dag, upstream_ops[0])
        ):
            # Fuse operator with its upstream op.
            dag = self._get_fused_map_operator(dag, upstream_ops[0])
            upstream_ops = dag.input_dependencies

        # Done fusing back-to-back map operators together here,
        # move up the DAG to find the next map operators to fuse.
        dag._input_dependencies = [
            self._fuse_map_operators_in_dag(upstream_op) for upstream_op in upstream_ops
        ]
        return dag

    def _fuse_all_to_all_operators_in_dag(
        self, dag: AllToAllOperator
    ) -> AllToAllOperator:
        """Starting at the given operator, traverses up the DAG of operators
        and recursively fuses compatible MapOperator -> AllToAllOperator pairs.

        Also, sets the target block size of the immediately upstream map op to
        match the shuffle block size. We use a larger block size for shuffles
        because tiny blocks are bad for I/O performance.

        Returns the current (root) operator after completing upstream operator fusions.
        """
        upstream_ops = dag.input_dependencies
        while (
            len(upstream_ops) == 1
            and isinstance(dag, AllToAllOperator)
            and isinstance(upstream_ops[0], MapOperator)
            and self._can_fuse(dag, upstream_ops[0])
        ):
            # Fuse operator with its upstream op.
            dag = self._get_fused_all_to_all_operator(dag, upstream_ops[0])
            upstream_ops = dag.input_dependencies

        # Done fusing MapOperator -> AllToAllOperator together here,
        # move up the DAG to find the next pair of operators to fuse.
        dag._input_dependencies = [
            self._fuse_all_to_all_operators_in_dag(upstream_op)
            for upstream_op in upstream_ops
        ]
        return dag

    def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool:
        """Returns whether the provided downstream operator can be fused with the given
        upstream operator.

        We currently support fusing two operators if the following are all true:
            * We are fusing either MapOperator -> MapOperator or
              MapOperator -> AllToAllOperator.
            * They either use the same compute configuration, or the upstream operator
              uses a task pool while the downstream operator uses an actor pool.
            * If both operators involve callable classes, the callable classes are
              the same class AND constructor args are the same for both.
            * They have compatible remote arguments.
        """
        # Imported here to avoid a circular import at module load time.
        from ray.data._internal.logical.operators.map_operator import (
            AbstractMap,
            AbstractUDFMap,
        )

        if not up_op.supports_fusion() or not down_op.supports_fusion():
            return False

        # We currently only support fusing for the following cases:
        # - TaskPoolMapOperator -> TaskPoolMapOperator/ActorPoolMapOperator
        # - TaskPoolMapOperator -> AllToAllOperator
        # (only RandomShuffle and Repartition LogicalOperators are currently supported)
        if not (
            (
                isinstance(up_op, TaskPoolMapOperator)
                and isinstance(down_op, (TaskPoolMapOperator, ActorPoolMapOperator))
            )
            or (
                isinstance(up_op, TaskPoolMapOperator)
                and isinstance(down_op, AllToAllOperator)
            )
        ):
            return False

        down_logical_op = self._op_map[down_op]
        up_logical_op = self._op_map[up_op]

        # An upstream op that already splits its output cannot be fused,
        # since fusion would drop the extra splitting.
        if up_op.get_additional_split_factor() > 1:
            return False

        # If the downstream operator takes no input, it cannot be fused with
        # the upstream operator.
        if not down_logical_op._input_dependencies:
            return False

        # We currently only support fusing for the following cases:
        # - AbstractMap -> AbstractMap
        # - AbstractMap -> RandomShuffle
        # - AbstractMap -> Repartition (shuffle=True)
        if not (
            (
                isinstance(up_logical_op, AbstractMap)
                and isinstance(down_logical_op, AbstractMap)
            )
            or (
                isinstance(up_logical_op, AbstractMap)
                and isinstance(down_logical_op, RandomShuffle)
            )
            or (
                isinstance(up_logical_op, AbstractMap)
                and isinstance(down_logical_op, Repartition)
            )
        ):
            return False

        # Do not fuse Repartition operator if shuffle is disabled
        # (i.e. using split shuffle).
        if isinstance(down_logical_op, Repartition) and not down_logical_op._shuffle:
            return False

        if isinstance(down_logical_op, AbstractUDFMap) and isinstance(
            up_logical_op, AbstractUDFMap
        ):
            # Allow fusing tasks->actors if the resources are compatible (read->map),
            # but not the other way around. The latter (downstream op) will be used as
            # the compute if fused.
            if is_task_compute(down_logical_op._compute) and get_compute(
                up_logical_op._compute
            ) != get_compute(down_logical_op._compute):
                return False

        # Only fuse if the ops' remote arguments are compatible.
        if not _are_remote_args_compatible(
            getattr(up_logical_op, "_ray_remote_args", {}),
            getattr(down_logical_op, "_ray_remote_args", {}),
        ):
            return False

        # Do not fuse if either op specifies a `_ray_remote_args_fn`,
        # since it is not known whether the generated args will be compatible.
        if getattr(up_logical_op, "_ray_remote_args_fn", None) or getattr(
            down_logical_op, "_ray_remote_args_fn", None
        ):
            return False

        if not self._can_merge_target_max_block_size(
            up_op.target_max_block_size, down_op.target_max_block_size
        ):
            return False

        # Otherwise, ops are compatible for fusion.
        return True

    def _can_merge_target_max_block_size(
        self,
        up_target_max_block_size: Optional[int],
        down_target_max_block_size: Optional[int],
    ) -> bool:
        """Return whether the two ops' target max block sizes allow fusion."""
        # If the upstream op overrode the target max block size, only fuse if
        # they are equal.
        if up_target_max_block_size is not None:
            if down_target_max_block_size is None:
                # A downstream value of None means "use the context default".
                down_target_max_block_size = (
                    DataContext.get_current().target_max_block_size
                )
            if up_target_max_block_size != down_target_max_block_size:
                return False
        return True

    def _get_merged_target_max_block_size(
        self,
        up_target_max_block_size: Optional[int],
        down_target_max_block_size: Optional[int],
    ) -> Optional[int]:
        """Return the target max block size the fused operator should use."""
        if up_target_max_block_size is not None:
            # If the upstream op overrode the target max block size, we can
            # only merge if the downstream op matches or uses the default.
            assert (
                down_target_max_block_size is None
                or down_target_max_block_size == up_target_max_block_size
            )
            return up_target_max_block_size
        else:
            # Upstream op inherits the downstream op's target max block size,
            # because the downstream op is the one that outputs the final
            # blocks.
            return down_target_max_block_size

    def _get_fused_map_operator(
        self, down_op: MapOperator, up_op: MapOperator
    ) -> MapOperator:
        """Build the fused MapOperator for a MapOperator -> MapOperator pair.

        Also registers a merged logical operator in ``self._op_map`` so the
        fused op can participate in further fusion decisions.
        """
        from ray.data._internal.logical.operators.map_operator import AbstractMap

        assert self._can_fuse(down_op, up_op), (
            "Current rule supports fusing MapOperator->MapOperator, but received: "
            f"{type(up_op).__name__} -> {type(down_op).__name__}"
        )

        # Fuse operator names.
        name = up_op.name + "->" + down_op.name

        # The fused op replaces both originals in the op map.
        down_logical_op = self._op_map.pop(down_op)
        up_logical_op = self._op_map.pop(up_op)

        # Merge minimum block sizes.
        down_min_rows_per_bundled_input = (
            down_logical_op._min_rows_per_bundled_input
            if isinstance(down_logical_op, AbstractMap)
            else None
        )
        up_min_rows_per_bundled_input = (
            up_logical_op._min_rows_per_bundled_input
            if isinstance(up_logical_op, AbstractMap)
            else None
        )
        if (
            down_min_rows_per_bundled_input is not None
            and up_min_rows_per_bundled_input is not None
        ):
            # Both set: the stricter (larger) minimum wins.
            min_rows_per_bundled_input = max(
                down_min_rows_per_bundled_input, up_min_rows_per_bundled_input
            )
        elif up_min_rows_per_bundled_input is not None:
            min_rows_per_bundled_input = up_min_rows_per_bundled_input
        else:
            min_rows_per_bundled_input = down_min_rows_per_bundled_input

        target_max_block_size = self._get_merged_target_max_block_size(
            up_op.target_max_block_size, down_op.target_max_block_size
        )

        # We take the downstream op's compute in case we're fusing upstream tasks with a
        # downstream actor pool (e.g. read->map).
        compute = None
        if isinstance(down_logical_op, AbstractUDFMap):
            compute = get_compute(down_logical_op._compute)
        ray_remote_args = up_logical_op._ray_remote_args
        ray_remote_args_fn = (
            up_logical_op._ray_remote_args_fn or down_logical_op._ray_remote_args_fn
        )
        # Make the upstream operator's inputs the new, fused operator's inputs.
        input_deps = up_op.input_dependencies
        assert len(input_deps) == 1
        input_op = input_deps[0]

        # Fused physical map operator.
        op = MapOperator.create(
            up_op.get_map_transformer().fuse(down_op.get_map_transformer()),
            input_op,
            target_max_block_size=target_max_block_size,
            name=name,
            compute_strategy=compute,
            min_rows_per_bundle=min_rows_per_bundled_input,
            ray_remote_args=ray_remote_args,
            ray_remote_args_fn=ray_remote_args_fn,
        )
        op.set_logical_operators(*up_op._logical_operators, *down_op._logical_operators)

        # Build a map logical operator to be used as a reference for further fusion.
        # TODO(Scott): This is hacky, remove this once we push fusion to be purely based
        # on a lower-level operator spec.
        if isinstance(up_logical_op, AbstractUDFMap):
            input_op = up_logical_op.input_dependency
        else:
            # Bottom out at the source logical op (e.g. Read()).
            input_op = up_logical_op
        if isinstance(down_logical_op, AbstractUDFMap):
            logical_op = AbstractUDFMap(
                name,
                input_op,
                down_logical_op._fn,
                down_logical_op._fn_args,
                down_logical_op._fn_kwargs,
                down_logical_op._fn_constructor_args,
                down_logical_op._fn_constructor_kwargs,
                min_rows_per_bundled_input,
                compute,
                ray_remote_args_fn,
                ray_remote_args,
            )
        else:
            from ray.data._internal.logical.operators.map_operator import AbstractMap

            # The downstream op is AbstractMap instead of AbstractUDFMap.
            logical_op = AbstractMap(
                name,
                input_op,
                min_rows_per_bundled_input=min_rows_per_bundled_input,
                ray_remote_args_fn=ray_remote_args_fn,
                ray_remote_args=ray_remote_args,
            )
        self._op_map[op] = logical_op
        # Return the fused physical operator.
        return op

    def _get_fused_all_to_all_operator(
        self, down_op: AllToAllOperator, up_op: MapOperator
    ) -> AllToAllOperator:
        """Build the fused AllToAllOperator for a MapOperator -> AllToAllOperator pair.

        The upstream map transform is carried into the shuffle via the
        TaskContext so that it runs as part of the all-to-all stage.
        """
        assert self._can_fuse(down_op, up_op), (
            "Current rule supports fusing MapOperator -> AllToAllOperator"
            f", but received: {type(up_op).__name__} -> {type(down_op).__name__}"
        )

        # Fuse operator names.
        name = up_op.name + "->" + down_op.name

        down_logical_op: AbstractAllToAll = self._op_map.pop(down_op)
        up_logical_op: AbstractUDFMap = self._op_map.pop(up_op)

        # Fuse transformation functions.
        ray_remote_args = up_logical_op._ray_remote_args
        down_transform_fn = down_op.get_transformation_fn()
        up_map_transformer = up_op.get_map_transformer()

        def fused_all_to_all_transform_fn(
            blocks: List[RefBundle], ctx: TaskContext
        ) -> Tuple[List[RefBundle], StatsDict]:
            """To fuse MapOperator->AllToAllOperator, we store the map function
            in the TaskContext so that it may be used by the downstream
            AllToAllOperator's transform function."""
            ctx.upstream_map_transformer = up_map_transformer
            ctx.upstream_map_ray_remote_args = ray_remote_args
            return down_transform_fn(blocks, ctx)

        # Make the upstream operator's inputs the new, fused operator's inputs.
        input_deps = up_op.input_dependencies
        assert len(input_deps) == 1
        input_op = input_deps[0]

        target_max_block_size = self._get_merged_target_max_block_size(
            up_op.target_max_block_size, down_op.target_max_block_size
        )

        op = AllToAllOperator(
            fused_all_to_all_transform_fn,
            input_op,
            target_max_block_size=target_max_block_size,
            num_outputs=down_op._num_outputs,
            # Transfer over the existing sub-progress bars from
            # the AllToAllOperator (if any) into the fused operator.
            sub_progress_bar_names=down_op._sub_progress_bar_names,
            name=name,
        )
        # Bottom out at the source logical op (e.g. Read()).
        input_op = up_logical_op

        # NOTE(review): `_can_fuse` only admits RandomShuffle/Repartition
        # downstream ops here, so one of these branches always runs.
        if isinstance(down_logical_op, RandomShuffle):
            logical_op = RandomShuffle(
                input_op,
                name=name,
                ray_remote_args=ray_remote_args,
            )
        elif isinstance(down_logical_op, Repartition):
            logical_op = Repartition(
                input_op,
                num_outputs=down_logical_op._num_outputs,
                shuffle=down_logical_op._shuffle,
            )
        self._op_map[op] = logical_op
        # Return the fused physical operator.
        return op
434
+
435
+
436
def _are_remote_args_compatible(prev_args, next_args):
    """Check if Ray remote arguments are compatible for merging.

    Two arg dicts are compatible when, after canonicalization and after
    carrying inheritable keys forward from the upstream (prev) args into
    the downstream (next) args, both dicts are equal.
    """
    upstream = _canonicalize(prev_args)
    downstream = _canonicalize(next_args)
    merged = dict(downstream)
    # NOTE: An inheritable value is carried over only when the downstream
    # args did not provide it themselves.
    for key in INHERITABLE_REMOTE_ARGS:
        if key not in merged and key in upstream:
            merged[key] = upstream[key]

    return upstream == merged
450
+
451
+
452
+ def _canonicalize(remote_args: dict) -> dict:
453
+ """Returns canonical form of given remote args."""
454
+ remote_args = remote_args.copy()
455
+ if "num_cpus" not in remote_args or remote_args["num_cpus"] is None:
456
+ remote_args["num_cpus"] = 1
457
+ if "num_gpus" not in remote_args or remote_args["num_gpus"] is None:
458
+ remote_args["num_gpus"] = 0
459
+ resources = remote_args.get("resources", {})
460
+ for k, v in list(resources.items()):
461
+ if v is None or v == 0.0:
462
+ del resources[k]
463
+ remote_args["resources"] = resources
464
+ return remote_args
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/randomize_blocks.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import copy
2
+ from collections import deque
3
+
4
+ from ray.data._internal.logical.interfaces import LogicalOperator, LogicalPlan, Rule
5
+ from ray.data._internal.logical.operators.all_to_all_operator import (
6
+ AbstractAllToAll,
7
+ RandomizeBlocks,
8
+ )
9
+
10
+
11
class ReorderRandomizeBlocksRule(Rule):
    """Rule for reordering RandomizeBlocks logical operator.

    Reordering RandomizeBlocks operators is to help fuse multiple
    AbstractUDFMap operators together for better performance.

    1. Dedupes multiple RandomizeBlocks operators if they are not seeded.
    2. Moves RandomizeBlocks operator to the end of a sequence of AbstractUDFMap
    operators. RandomizeBlocks operators are not moved across AbstractAllToAll operator
    boundaries.
    """

    def apply(self, plan: LogicalPlan) -> LogicalPlan:
        """Return a new LogicalPlan with RandomizeBlocks ops reordered."""
        optimized_dag: LogicalOperator = self._apply(plan.dag)
        new_plan = LogicalPlan(dag=optimized_dag, context=plan.context)
        return new_plan

    def _apply(self, op: LogicalOperator) -> LogicalOperator:
        """Rewrite the DAG rooted at ``op`` and return the new root.

        Collects RandomizeBlocks operators while walking the DAG bottom-up
        and re-inserts them either just before the next AbstractAllToAll
        boundary or, failing that, at the very end of the DAG.
        """
        # RandomizeBlocks ops removed from the DAG but not yet re-inserted.
        operators = []

        # Post-order traversal.
        nodes = deque()
        for node in op.post_order_iter():
            nodes.appendleft(node)

        while len(nodes) > 0:
            current_op = nodes.pop()
            upstream_ops = current_op.input_dependencies

            # Iterate through all upstream ops, and remove all RandomizeBlocks
            # operators.
            for i in range(len(upstream_ops)):
                if isinstance(upstream_ops[i], RandomizeBlocks):
                    # If no seeds are provided, then collapse into a single
                    # RandomizeBlocks operator.
                    current_seed = upstream_ops[i]._seed
                    if not operators or current_seed or operators[-1]._seed:
                        # We need to make a copy of the operator.
                        # Because the operator instance may be shared by multiple
                        # Datasets. We shouldn't modify it in place.
                        operators.append(copy.copy(upstream_ops[i]))

                    # Remove RandomizeBlocks operator from the dag and wire in new input
                    # dependencies.
                    assert len(upstream_ops[i].input_dependencies) == 1
                    upstream_ops[i] = upstream_ops[i].input_dependencies[0]
            if isinstance(current_op, AbstractAllToAll) and not isinstance(
                current_op, RandomizeBlocks
            ):
                # If this operator is a an AllToAll Operator, then insert
                # RandomizeBlocks right before this operator rather than the end of the
                # DAG.
                # All-to-all operators can have only 1 input operator.
                assert len(upstream_ops) == 1
                input_op = upstream_ops[0]
                # Chain the collected RandomizeBlocks ops in front of this op.
                for random_op in operators:
                    random_op._input_dependencies = [input_op]
                    input_op = random_op
                upstream_ops[0] = input_op
                operators = []

        # Add RandomizeBlocks operator as the last operator in the DAG if necessary.
        for random_op in operators:
            random_op._input_dependencies = [op]
            op = random_op

        return op
infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/set_read_parallelism.py ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import math
3
+ from typing import Optional, Tuple, Union
4
+
5
+ from ray import available_resources as ray_available_resources
6
+ from ray.data._internal.execution.interfaces import PhysicalOperator
7
+ from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
8
+ from ray.data._internal.logical.interfaces import PhysicalPlan, Rule
9
+ from ray.data._internal.logical.operators.read_operator import Read
10
+ from ray.data._internal.util import _autodetect_parallelism
11
+ from ray.data.context import WARN_PREFIX, DataContext
12
+ from ray.data.datasource.datasource import Datasource, Reader
13
+
14
+ logger = logging.getLogger(__name__)
15
+
16
+
17
def compute_additional_split_factor(
    datasource_or_legacy_reader: Union[Datasource, Reader],
    parallelism: int,
    mem_size: int,
    target_max_block_size: int,
    cur_additional_split_factor: Optional[int] = None,
) -> Tuple[int, str, int, Optional[int]]:
    """Compute the read parallelism and an optional extra output-split factor.

    Args:
        datasource_or_legacy_reader: The datasource (or legacy reader) whose
            read tasks are being planned.
        parallelism: The requested parallelism, or -1 to autodetect it.
        mem_size: Estimated in-memory size of the dataset, if known.
        target_max_block_size: The target maximum size of output blocks.
        cur_additional_split_factor: A split factor already configured on the
            read op, if any; it is multiplied into the size-based splits.

    Returns:
        A tuple of (detected parallelism, reason string for that value,
        estimated number of output blocks, additional split factor to apply
        or None if no extra splitting is needed).
    """
    ctx = DataContext.get_current()
    detected_parallelism, reason, _ = _autodetect_parallelism(
        parallelism, target_max_block_size, ctx, datasource_or_legacy_reader, mem_size
    )
    num_read_tasks = len(
        datasource_or_legacy_reader.get_read_tasks(detected_parallelism)
    )
    expected_block_size = None
    if mem_size:
        expected_block_size = mem_size / num_read_tasks
        logger.debug(
            f"Expected in-memory size {mem_size}," f" block size {expected_block_size}"
        )
        # Split each read task's output so blocks approach the target size.
        size_based_splits = round(max(1, expected_block_size / target_max_block_size))
    else:
        size_based_splits = 1
    if cur_additional_split_factor:
        size_based_splits *= cur_additional_split_factor
    logger.debug(f"Size based split factor {size_based_splits}")
    estimated_num_blocks = num_read_tasks * size_based_splits
    logger.debug(f"Blocks after size splits {estimated_num_blocks}")

    available_cpu_slots = ray_available_resources().get("CPU", 1)
    if (
        parallelism != -1
        and num_read_tasks >= available_cpu_slots * 4
        and num_read_tasks >= 5000
    ):
        # NOTE: Fixed missing space between the two concatenated sentences
        # ("...blocks.You can ignore..." -> "...blocks. You can ignore...").
        logger.warning(
            f"{WARN_PREFIX} The requested number of read blocks of {parallelism} "
            "is more than 4x the number of available CPU slots in the cluster of "
            f"{available_cpu_slots}. This can "
            "lead to slowdowns during the data reading phase due to excessive "
            "task creation. Reduce the value to match with the available "
            "CPU slots in the cluster, or set override_num_blocks to -1 for Ray Data "
            "to automatically determine the number of read tasks blocks. "
            "You can ignore this message if the cluster is expected to autoscale."
        )

    # Add more output splitting for each read task if needed.
    # TODO(swang): For parallelism=-1 (user did not explicitly set
    # parallelism), and if the following operator produces much larger blocks,
    # we should scale down the target max block size here instead of using
    # splitting, which can have higher memory usage.
    if estimated_num_blocks < detected_parallelism and estimated_num_blocks > 0:
        k = math.ceil(detected_parallelism / estimated_num_blocks)
        estimated_num_blocks = estimated_num_blocks * k
        return detected_parallelism, reason, estimated_num_blocks, k

    return detected_parallelism, reason, estimated_num_blocks, None
74
+
75
+
76
class SetReadParallelismRule(Rule):
    """
    This rule sets the read op's task parallelism based on the target block
    size, the requested parallelism, the number of read files, and the
    available resources in the cluster.

    If the parallelism is lower than requested, this rule also sets a split
    factor to split the output blocks of the read task, so that the following
    operator will have the desired parallelism.
    """

    def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
        """Walk the physical DAG and configure every Read operator found."""
        pending = [plan.dag]

        while pending:
            current = pending.pop(0)
            if isinstance(current, InputDataBuffer):
                # Input buffers have no logical counterpart to configure.
                continue
            logical_op = plan.op_map[current]
            if isinstance(logical_op, Read):
                self._apply(current, logical_op)
            pending += current.input_dependencies

        return plan

    def _apply(self, op: PhysicalOperator, logical_op: Read):
        """Compute and apply the parallelism settings for one Read op."""
        result = compute_additional_split_factor(
            logical_op._datasource_or_legacy_reader,
            logical_op._parallelism,
            logical_op._mem_size,
            op.actual_target_max_block_size,
            op._additional_split_factor,
        )
        detected_parallelism, reason, estimated_num_blocks, k = result

        if logical_op._parallelism == -1:
            # Autodetection always comes with a non-empty explanation.
            assert reason != ""
            logger.debug(
                f"Using autodetected parallelism={detected_parallelism} "
                f"for operator {logical_op.name} to satisfy {reason}."
            )
        logical_op.set_detected_parallelism(detected_parallelism)

        if k is not None:
            logger.debug(
                f"To satisfy the requested parallelism of {detected_parallelism}, "
                f"each read task output is split into {k} smaller blocks."
            )
            op.set_additional_split_factor(k)

        logger.debug(f"Estimated num output blocks {estimated_num_blocks}")