diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fcdd771fca1030e3c91d467d2e7590fa00eddd94 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/aggregate.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/aggregate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..afbafc5a3cea8b25cc41fcf7a0b33e173d86aaf5 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/aggregate.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/block.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/block.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f78a5a5847ecf1c01c1935e42a7ddf61661bc98a Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/block.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4144134cdc5b2d9b1a7923e01de1cc91b48e6f8 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/context.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/exceptions.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1494dfca5805dfd47cdc95c0c9ba5ae35db9d0c 
Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/exceptions.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/grouped_data.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/grouped_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e724f41085122802080bf2d1f9559f0e07f46d2 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/grouped_data.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eec64939730f2495e0457fa9d1b47501d1718980 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/iterator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/preprocessor.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/preprocessor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2ae174b17fe9b5f05db1949fdb504783abb60a79 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/preprocessor.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/random_access_dataset.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/random_access_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f720f1442a5260c5af0019aace964b0d44624631 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/__pycache__/random_access_dataset.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py 
b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..92822490b22d6e5685260b7de96a38e11a694181 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py @@ -0,0 +1,16 @@ +from .logical_operator import LogicalOperator +from .logical_plan import LogicalPlan +from .operator import Operator +from .optimizer import Optimizer, Rule +from .physical_plan import PhysicalPlan +from .plan import Plan + +__all__ = [ + "LogicalOperator", + "LogicalPlan", + "Operator", + "Optimizer", + "PhysicalPlan", + "Plan", + "Rule", +] diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/optimizer.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f61f3ad65c436c5741337a8a9f61caff50dfa8fd Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/optimizer.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/physical_plan.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/physical_plan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf288aab8cad0bdea8049923bf8ea3e7c6bcc8a4 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__pycache__/physical_plan.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py new file mode 100644 index 
0000000000000000000000000000000000000000..84535706cd5080061b88aa675944dc80dcdc32bb --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py @@ -0,0 +1,79 @@ +from typing import TYPE_CHECKING, Iterator, List, Optional + +from .operator import Operator +from ray.data.block import BlockMetadata + +if TYPE_CHECKING: + from ray.data._internal.execution.interfaces import RefBundle + + +class LogicalOperator(Operator): + """Abstract class for logical operators. + + A logical operator describes transformation, and later is converted into + physical operator. + """ + + def __init__( + self, + name: str, + input_dependencies: List["LogicalOperator"], + num_outputs: Optional[int] = None, + ): + super().__init__( + name, + input_dependencies, + ) + for x in input_dependencies: + assert isinstance(x, LogicalOperator), x + self._num_outputs = num_outputs + + def estimated_num_outputs(self) -> Optional[int]: + """Returns the estimated number of blocks that + would be outputted by this logical operator. + + This method does not execute the plan, so it does not take into consideration + block splitting. This method only considers high-level block constraints like + `Dataset.repartition(num_blocks=X)`. A more accurate estimation can be given by + `PhysicalOperator.num_outputs_total()` during execution. + """ + if self._num_outputs is not None: + return self._num_outputs + elif len(self._input_dependencies) == 1: + return self._input_dependencies[0].estimated_num_outputs() + return None + + # Override the following 3 methods to correct type hints. 
+ + @property + def input_dependencies(self) -> List["LogicalOperator"]: + return super().input_dependencies # type: ignore + + @property + def output_dependencies(self) -> List["LogicalOperator"]: + return super().output_dependencies # type: ignore + + def post_order_iter(self) -> Iterator["LogicalOperator"]: + return super().post_order_iter() # type: ignore + + def output_data(self) -> Optional[List["RefBundle"]]: + """The output data of this operator, or ``None`` if not known.""" + return None + + def aggregate_output_metadata(self) -> BlockMetadata: + """A ``BlockMetadata`` that represents the aggregate metadata of the outputs. + + This method is used by methods like :meth:`~ray.data.Dataset.schema` to + efficiently return metadata. + """ + return BlockMetadata(None, None, None, None, None) + + def is_lineage_serializable(self) -> bool: + """Returns whether the lineage of this operator can be serialized. + + An operator is lineage serializable if you can serialize it on one machine and + deserialize it on another without losing information. Operators that store + object references (e.g., ``InputData``) aren't lineage serializable because the + objects aren't available on the deserialized machine. 
+ """ + return True diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py new file mode 100644 index 0000000000000000000000000000000000000000..3e0196bb440bbdb02ff2b112e2b68d8d2be58088 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py @@ -0,0 +1,31 @@ +from typing import TYPE_CHECKING, List + +from .logical_operator import LogicalOperator +from .plan import Plan + +if TYPE_CHECKING: + from ray.data import DataContext + + +class LogicalPlan(Plan): + """The plan with a DAG of logical operators.""" + + def __init__(self, dag: LogicalOperator, context: "DataContext"): + super().__init__(context) + self._dag = dag + + @property + def dag(self) -> LogicalOperator: + """Get the DAG of logical operators.""" + return self._dag + + def sources(self) -> List[LogicalOperator]: + """List of operators that are sources for this plan's DAG.""" + # If an operator has no input dependencies, it's a source. + if not any(self._dag.input_dependencies): + return [self._dag] + + sources = [] + for op in self._dag.input_dependencies: + sources.extend(LogicalPlan(op, self._context).sources()) + return sources diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py new file mode 100644 index 0000000000000000000000000000000000000000..76a320ef815a23cb319146221d32c7be10e5be52 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py @@ -0,0 +1,58 @@ +from typing import Iterator, List + + +class Operator: + """Abstract class for operators. + + Operators live on the driver side of the Dataset only. 
+ """ + + def __init__( + self, + name: str, + input_dependencies: List["Operator"], + ): + self._name = name + self._input_dependencies = input_dependencies + self._output_dependencies = [] + for x in input_dependencies: + assert isinstance(x, Operator), x + x._output_dependencies.append(self) + + @property + def name(self) -> str: + return self._name + + @property + def input_dependencies(self) -> List["Operator"]: + """List of operators that provide inputs for this operator.""" + assert hasattr( + self, "_input_dependencies" + ), "Operator.__init__() was not called." + return self._input_dependencies + + @property + def output_dependencies(self) -> List["Operator"]: + """List of operators that consume outputs from this operator.""" + assert hasattr( + self, "_output_dependencies" + ), "Operator.__init__() was not called." + return self._output_dependencies + + def post_order_iter(self) -> Iterator["Operator"]: + """Depth-first traversal of this operator and its input dependencies.""" + for op in self.input_dependencies: + yield from op.post_order_iter() + yield self + + def __repr__(self) -> str: + if self.input_dependencies: + out_str = ", ".join([str(x) for x in self.input_dependencies]) + out_str += " -> " + else: + out_str = "" + out_str += f"{self.__class__.__name__}[{self._name}]" + return out_str + + def __str__(self) -> str: + return repr(self) diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..6a25a44afe624b2e3ad6182739f68ed59b5cf720 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py @@ -0,0 +1,29 @@ +from typing import List + +from .plan import Plan + + +class Rule: + """Abstract class for optimization rule.""" + + def apply(self, plan: Plan) -> Plan: + """Apply the 
optimization rule to the execution plan.""" + raise NotImplementedError + + +class Optimizer: + """Abstract class for optimizers. + + An optimizers transforms a DAG of operators with a list of predefined rules. + """ + + @property + def rules(self) -> List[Rule]: + """List of predefined rules for this optimizer.""" + raise NotImplementedError + + def optimize(self, plan: Plan) -> Plan: + """Optimize operators with a list of rules.""" + for rule in self.rules: + plan = rule.apply(plan) + return plan diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/physical_plan.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/physical_plan.py new file mode 100644 index 0000000000000000000000000000000000000000..29503831db85e7d87f1f044d7c910826fa970515 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/physical_plan.py @@ -0,0 +1,34 @@ +from typing import TYPE_CHECKING, Dict + +from .logical_operator import LogicalOperator +from .plan import Plan + +if TYPE_CHECKING: + from ray.data import DataContext + from ray.data._internal.execution.interfaces import PhysicalOperator + + +class PhysicalPlan(Plan): + """The plan with a DAG of physical operators.""" + + def __init__( + self, + dag: "PhysicalOperator", + op_map: Dict["PhysicalOperator", LogicalOperator], + context: "DataContext", + ): + super().__init__(context) + self._dag = dag + self._op_map = op_map + + @property + def dag(self) -> "PhysicalOperator": + """Get the DAG of physical operators.""" + return self._dag + + @property + def op_map(self) -> Dict["PhysicalOperator", LogicalOperator]: + """ + Get a mapping from physical operators to their corresponding logical operator. 
+ """ + return self._op_map diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__init__.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..450c513184cfe8120df360e9c7050982dc3af4b7 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44094bdc3c8f198a6ee5943707808e014e5f8024 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/all_to_all_operator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32e00b13bb9e9078cf8dc949b5fb1243802ca153 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc differ diff --git 
a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3db4b7133bfc7c8a4538570243d74ab733c54d06 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/from_operators.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d94c1b498eb83dc58fcafc2b3280eb14acf12924 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69a707afde8d792249fa1fd99b59fb475b2ff0e7 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/map_operator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f45f62a2333a27c29866e8a49850b64529b7af03 Binary files /dev/null and 
b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48bf32ed4fcafedd7021cc59d65d9e8744bf258e Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22b1d3ff914395229f00c2d5fcd5bdaf3fc7548a Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66039a6cc369a3c4b0abc229c6ff6a4888a82e6f Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/write_operator.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py new file mode 100644 index 
0000000000000000000000000000000000000000..745103f0036fee1de56fcd0ef645aac438af33d6 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py @@ -0,0 +1,163 @@ +from typing import Any, Dict, List, Optional + +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.planner.exchange.interfaces import ExchangeTaskSpec +from ray.data._internal.planner.exchange.shuffle_task_spec import ShuffleTaskSpec +from ray.data._internal.planner.exchange.sort_task_spec import SortKey, SortTaskSpec +from ray.data.aggregate import AggregateFn +from ray.data.block import BlockMetadata + + +class AbstractAllToAll(LogicalOperator): + """Abstract class for logical operators should be converted to physical + AllToAllOperator. + """ + + def __init__( + self, + name: str, + input_op: LogicalOperator, + num_outputs: Optional[int] = None, + sub_progress_bar_names: Optional[List[str]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. + num_outputs: The number of expected output bundles outputted by this + operator. + ray_remote_args: Args to provide to ray.remote. 
+ """ + super().__init__(name, [input_op], num_outputs) + self._num_outputs = num_outputs + self._ray_remote_args = ray_remote_args or {} + self._sub_progress_bar_names = sub_progress_bar_names + + +class RandomizeBlocks(AbstractAllToAll): + """Logical operator for randomize_block_order.""" + + def __init__( + self, + input_op: LogicalOperator, + seed: Optional[int] = None, + ): + super().__init__( + "RandomizeBlockOrder", + input_op, + ) + self._seed = seed + + def aggregate_output_metadata(self) -> BlockMetadata: + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class RandomShuffle(AbstractAllToAll): + """Logical operator for random_shuffle.""" + + def __init__( + self, + input_op: LogicalOperator, + name: str = "RandomShuffle", + seed: Optional[int] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + name, + input_op, + sub_progress_bar_names=[ + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ], + ray_remote_args=ray_remote_args, + ) + self._seed = seed + + def aggregate_output_metadata(self) -> BlockMetadata: + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class Repartition(AbstractAllToAll): + """Logical operator for repartition.""" + + def __init__( + self, + input_op: LogicalOperator, + num_outputs: int, + shuffle: bool, + ): + if shuffle: + sub_progress_bar_names = [ + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ] + else: + sub_progress_bar_names = [ + ShuffleTaskSpec.SPLIT_REPARTITION_SUB_PROGRESS_BAR_NAME, + ] + super().__init__( + "Repartition", + input_op, + num_outputs=num_outputs, + sub_progress_bar_names=sub_progress_bar_names, + ) + self._shuffle = shuffle + + def aggregate_output_metadata(self) -> BlockMetadata: + assert 
len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class Sort(AbstractAllToAll): + """Logical operator for sort.""" + + def __init__( + self, + input_op: LogicalOperator, + sort_key: SortKey, + batch_format: Optional[str] = "default", + ): + super().__init__( + "Sort", + input_op, + sub_progress_bar_names=[ + SortTaskSpec.SORT_SAMPLE_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ], + ) + self._sort_key = sort_key + self._batch_format = batch_format + + def aggregate_output_metadata(self) -> BlockMetadata: + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class Aggregate(AbstractAllToAll): + """Logical operator for aggregate.""" + + def __init__( + self, + input_op: LogicalOperator, + key: Optional[str], + aggs: List[AggregateFn], + batch_format: Optional[str] = "default", + ): + super().__init__( + "Aggregate", + input_op, + sub_progress_bar_names=[ + SortTaskSpec.SORT_SAMPLE_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ], + ) + self._key = key + self._aggs = aggs + self._batch_format = batch_format diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..409c99e3c000698622be3f6294d419d1c59ff2d8 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py @@ -0,0 +1,20 @@ +from typing import List + +from ray.data._internal.logical.interfaces import LogicalOperator + + +class Count(LogicalOperator): + """Logical operator that represents counting the number of rows in 
inputs. + + Physical operators that implement this logical operator should produce one or more + rows with a single column named `Count.COLUMN_NAME`. When you sum the values in + this column, you should get the total number of rows in the dataset. + """ + + COLUMN_NAME = "__num_rows" + + def __init__( + self, + input_dependencies: List["LogicalOperator"], + ): + super().__init__("Count", input_dependencies) diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py new file mode 100644 index 0000000000000000000000000000000000000000..afe5e8200bb14040e712e4b2faef12e79516f8df --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py @@ -0,0 +1,105 @@ +import abc +import functools +from typing import TYPE_CHECKING, List, Optional, Union + +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.util import unify_block_metadata_schema +from ray.data.block import Block, BlockMetadata +from ray.types import ObjectRef + +if TYPE_CHECKING: + import pyarrow as pa + + ArrowTable = Union["pa.Table", bytes] + + +class AbstractFrom(LogicalOperator, metaclass=abc.ABCMeta): + """Abstract logical operator for `from_*`.""" + + def __init__( + self, + input_blocks: List[ObjectRef[Block]], + input_metadata: List[BlockMetadata], + ): + super().__init__(self.__class__.__name__, [], len(input_blocks)) + assert len(input_blocks) == len(input_metadata), ( + len(input_blocks), + len(input_metadata), + ) + # `owns_blocks` is False because this op may be shared by multiple Datasets. 
+ self._input_data = [ + RefBundle([(input_blocks[i], input_metadata[i])], owns_blocks=False) + for i in range(len(input_blocks)) + ] + + @property + def input_data(self) -> List[RefBundle]: + return self._input_data + + def output_data(self) -> Optional[List[RefBundle]]: + return self._input_data + + def aggregate_output_metadata(self) -> BlockMetadata: + return self._cached_output_metadata + + @functools.cached_property + def _cached_output_metadata(self) -> BlockMetadata: + return BlockMetadata( + num_rows=self._num_rows(), + size_bytes=self._size_bytes(), + schema=self._schema(), + input_files=None, + exec_stats=None, + ) + + def _num_rows(self): + if all(bundle.num_rows() is not None for bundle in self._input_data): + return sum(bundle.num_rows() for bundle in self._input_data) + else: + return None + + def _size_bytes(self): + metadata = [m for bundle in self._input_data for m in bundle.metadata] + if all(m.size_bytes is not None for m in metadata): + return sum(m.size_bytes for m in metadata) + else: + return None + + def _schema(self): + metadata = [m for bundle in self._input_data for m in bundle.metadata] + return unify_block_metadata_schema(metadata) + + def is_lineage_serializable(self) -> bool: + # This operator isn't serializable because it contains ObjectRefs. 
+ return False + + +class FromItems(AbstractFrom): + """Logical operator for `from_items`.""" + + pass + + +class FromBlocks(AbstractFrom): + """Logical operator for `from_blocks`.""" + + pass + + +class FromNumpy(AbstractFrom): + """Logical operator for `from_numpy`.""" + + pass + + +class FromArrow(AbstractFrom): + """Logical operator for `from_arrow`.""" + + pass + + +class FromPandas(AbstractFrom): + """Logical operator for `from_pandas`.""" + + pass diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..2296b0ee315441bcdb0a3acd49791576aea3e191 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py @@ -0,0 +1,74 @@ +import functools +from typing import Callable, List, Optional + +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.util import unify_block_metadata_schema +from ray.data.block import BlockMetadata + + +class InputData(LogicalOperator): + """Logical operator for input data. + + This may hold cached blocks from a previous Dataset execution, or + the arguments for read tasks. + """ + + def __init__( + self, + input_data: Optional[List[RefBundle]] = None, + input_data_factory: Optional[Callable[[int], List[RefBundle]]] = None, + ): + assert (input_data is None) != ( + input_data_factory is None + ), "Only one of input_data and input_data_factory should be set." 
+ super().__init__( + "InputData", [], len(input_data) if input_data is not None else None + ) + self.input_data = input_data + self.input_data_factory = input_data_factory + + def output_data(self) -> Optional[List[RefBundle]]: + if self.input_data is None: + return None + return self.input_data + + def aggregate_output_metadata(self) -> BlockMetadata: + return self._cached_output_metadata + + @functools.cached_property + def _cached_output_metadata(self) -> BlockMetadata: + if self.input_data is None: + return BlockMetadata(None, None, None, None, None) + + return BlockMetadata( + num_rows=self._num_rows(), + size_bytes=self._size_bytes(), + schema=self._schema(), + input_files=None, + exec_stats=None, + ) + + def _num_rows(self): + assert self.input_data is not None + if all(bundle.num_rows() is not None for bundle in self.input_data): + return sum(bundle.num_rows() for bundle in self.input_data) + else: + return None + + def _size_bytes(self): + assert self.input_data is not None + metadata = [m for bundle in self.input_data for m in bundle.metadata] + if all(m.size_bytes is not None for m in metadata): + return sum(m.size_bytes for m in metadata) + else: + return None + + def _schema(self): + assert self.input_data is not None + metadata = [m for bundle in self.input_data for m in bundle.metadata] + return unify_block_metadata_schema(metadata) + + def is_lineage_serializable(self) -> bool: + # This operator isn't serializable because it contains ObjectRefs. 
+ return False diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..fd7ad71e5a885bf512b08a247de4c51346ca9de5 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py @@ -0,0 +1,293 @@ +import inspect +import logging +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + +from ray.data._internal.compute import ComputeStrategy, TaskPoolStrategy +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.logical.operators.one_to_one_operator import AbstractOneToOne +from ray.data.block import UserDefinedFunction +from ray.data.context import DEFAULT_BATCH_SIZE +from ray.data.preprocessor import Preprocessor + +logger = logging.getLogger(__name__) + + +class AbstractMap(AbstractOneToOne): + """Abstract class for logical operators that should be converted to physical + MapOperator. + """ + + def __init__( + self, + name: str, + input_op: Optional[LogicalOperator] = None, + num_outputs: Optional[int] = None, + *, + min_rows_per_bundled_input: Optional[int] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. + min_rows_per_bundled_input: The target number of rows to pass to + ``MapOperator._add_bundled_input()``. + ray_remote_args: Args to provide to ray.remote. + ray_remote_args_fn: A function that returns a dictionary of remote args + passed to each map worker. 
The purpose of this argument is to generate + dynamic arguments for each actor/task, and will be called each time + prior to initializing the worker. Args returned from this dict will + always override the args in ``ray_remote_args``. Note: this is an + advanced, experimental feature. + """ + super().__init__(name, input_op, num_outputs) + self._min_rows_per_bundled_input = min_rows_per_bundled_input + self._ray_remote_args = ray_remote_args or {} + self._ray_remote_args_fn = ray_remote_args_fn + + +class AbstractUDFMap(AbstractMap): + """Abstract class for logical operators performing a UDF that should be converted + to physical MapOperator. + """ + + def __init__( + self, + name: str, + input_op: LogicalOperator, + fn: UserDefinedFunction, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + min_rows_per_bundled_input: Optional[int] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. + fn: User-defined function to be called. + fn_args: Arguments to `fn`. + fn_kwargs: Keyword arguments to `fn`. + fn_constructor_args: Arguments to provide to the initializor of `fn` if + `fn` is a callable class. + fn_constructor_kwargs: Keyword Arguments to provide to the initializor of + `fn` if `fn` is a callable class. + min_rows_per_bundled_input: The target number of rows to pass to + ``MapOperator._add_bundled_input()``. 
+ compute: The compute strategy, either ``"tasks"`` (default) to use Ray + tasks, or ``"actors"`` to use an autoscaling actor pool. + ray_remote_args_fn: A function that returns a dictionary of remote args + passed to each map worker. The purpose of this argument is to generate + dynamic arguments for each actor/task, and will be called each time + prior to initializing the worker. Args returned from this dict will + always override the args in ``ray_remote_args``. Note: this is an + advanced, experimental feature. + ray_remote_args: Args to provide to ray.remote. + """ + name = self._get_operator_name(name, fn) + super().__init__( + name, + input_op, + min_rows_per_bundled_input=min_rows_per_bundled_input, + ray_remote_args=ray_remote_args, + ) + self._fn = fn + self._fn_args = fn_args + self._fn_kwargs = fn_kwargs + self._fn_constructor_args = fn_constructor_args + self._fn_constructor_kwargs = fn_constructor_kwargs + self._compute = compute or TaskPoolStrategy() + self._ray_remote_args_fn = ray_remote_args_fn + + def _get_operator_name(self, op_name: str, fn: UserDefinedFunction): + """Gets the Operator name including the map `fn` UDF name.""" + # If the input `fn` is a Preprocessor, the + # name is simply the name of the Preprocessor class. + if inspect.ismethod(fn) and isinstance(fn.__self__, Preprocessor): + return fn.__self__.__class__.__name__ + + # Otherwise, it takes the form of `()`, + # e.g. `MapBatches(my_udf)`. + try: + if inspect.isclass(fn): + # callable class + return f"{op_name}({fn.__name__})" + elif inspect.ismethod(fn): + # class method + return f"{op_name}({fn.__self__.__class__.__name__}.{fn.__name__})" + elif inspect.isfunction(fn): + # normal function or lambda function. + return f"{op_name}({fn.__name__})" + else: + # callable object. 
+ return f"{op_name}({fn.__class__.__name__})" + except AttributeError as e: + logger.error("Failed to get name of UDF %s: %s", fn, e) + return "" + + +class MapBatches(AbstractUDFMap): + """Logical operator for map_batches.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + batch_size: Optional[int] = DEFAULT_BATCH_SIZE, + batch_format: str = "default", + zero_copy_batch: bool = False, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + min_rows_per_bundled_input: Optional[int] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + "MapBatches", + input_op, + fn, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + min_rows_per_bundled_input=min_rows_per_bundled_input, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + self._batch_size = batch_size + self._batch_format = batch_format + self._zero_copy_batch = zero_copy_batch + + @property + def can_modify_num_rows(self) -> bool: + return False + + +class MapRows(AbstractUDFMap): + """Logical operator for map.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + "Map", + input_op, + fn, + fn_args=fn_args, + 
fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + + @property + def can_modify_num_rows(self) -> bool: + return False + + +class Filter(AbstractUDFMap): + """Logical operator for filter.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + "Filter", + input_op, + fn, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + + @property + def can_modify_num_rows(self) -> bool: + return True + + +class Project(AbstractMap): + """Logical operator for select_columns.""" + + def __init__( + self, + input_op: LogicalOperator, + cols: List[str], + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__("Project", input_op=input_op, ray_remote_args=ray_remote_args) + self._compute = compute + self._batch_size = DEFAULT_BATCH_SIZE + self._cols = cols + self._batch_format = "pyarrow" + self._zero_copy_batch = True + + @property + def cols(self) -> List[str]: + return self._cols + + @property + def can_modify_num_rows(self) -> bool: + return False + + +class FlatMap(AbstractUDFMap): + """Logical operator for flat_map.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = 
None, + ): + super().__init__( + "FlatMap", + input_op, + fn, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + + @property + def can_modify_num_rows(self) -> bool: + return True diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..f2f062578e2c1d1f31b42fe5f96dfbae6cd13654 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py @@ -0,0 +1,60 @@ +from typing import Optional + +from ray.data._internal.logical.interfaces import LogicalOperator + + +class NAry(LogicalOperator): + """Base class for n-ary operators, which take multiple input operators.""" + + def __init__( + self, + *input_ops: LogicalOperator, + num_outputs: Optional[int] = None, + ): + """ + Args: + input_ops: The input operators. + """ + super().__init__(self.__class__.__name__, list(input_ops), num_outputs) + + +class Zip(NAry): + """Logical operator for zip.""" + + def __init__( + self, + left_input_op: LogicalOperator, + right_input_op: LogicalOperator, + ): + """ + Args: + left_input_ops: The input operator at left hand side. + right_input_op: The input operator at right hand side. 
+ """ + super().__init__(left_input_op, right_input_op) + + def estimated_num_outputs(self): + left_num_outputs = self._input_dependencies[0].estimated_num_outputs() + right_num_outputs = self._input_dependencies[1].estimated_num_outputs() + if left_num_outputs is None or right_num_outputs is None: + return None + return max(left_num_outputs, right_num_outputs) + + +class Union(NAry): + """Logical operator for union.""" + + def __init__( + self, + *input_ops: LogicalOperator, + ): + super().__init__(*input_ops) + + def estimated_num_outputs(self): + total_num_outputs = 0 + for input in self._input_dependencies: + num_outputs = input.estimated_num_outputs() + if num_outputs is None: + return None + total_num_outputs += num_outputs + return total_num_outputs diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..052d0b23ecda047b1876c8ca6adfa9bf6fd8dfa5 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py @@ -0,0 +1,80 @@ +import abc +from typing import Optional + +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data.block import BlockMetadata + + +class AbstractOneToOne(LogicalOperator): + """Abstract class for one-to-one logical operators, which + have one input and one output dependency. + """ + + def __init__( + self, + name: str, + input_op: Optional[LogicalOperator], + num_outputs: Optional[int] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. 
+ """ + super().__init__(name, [input_op] if input_op else [], num_outputs) + + @property + def input_dependency(self) -> LogicalOperator: + return self._input_dependencies[0] + + @property + @abc.abstractmethod + def can_modify_num_rows(self) -> bool: + """Whether this operator can modify the number of rows, + i.e. number of input rows != number of output rows.""" + + +class Limit(AbstractOneToOne): + """Logical operator for limit.""" + + def __init__( + self, + input_op: LogicalOperator, + limit: int, + ): + super().__init__( + f"limit={limit}", + input_op, + ) + self._limit = limit + + @property + def can_modify_num_rows(self) -> bool: + return True + + def aggregate_output_metadata(self) -> BlockMetadata: + return BlockMetadata( + num_rows=self._num_rows(), + size_bytes=None, + schema=self._schema(), + input_files=self._input_files(), + exec_stats=None, + ) + + def _schema(self): + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata().schema + + def _num_rows(self): + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + input_rows = self._input_dependencies[0].aggregate_output_metadata().num_rows + if input_rows is not None: + return min(input_rows, self._limit) + else: + return None + + def _input_files(self): + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata().input_files diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..5d958dbc59fbd5c0c09312d2c50a59de9f689d41 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py @@ -0,0 +1,95 @@ +import functools +from typing import Any, Dict, 
Optional, Union + +from ray.data._internal.logical.operators.map_operator import AbstractMap +from ray.data._internal.util import unify_block_metadata_schema +from ray.data.block import BlockMetadata +from ray.data.datasource.datasource import Datasource, Reader + + +class Read(AbstractMap): + """Logical operator for read.""" + + def __init__( + self, + datasource: Datasource, + datasource_or_legacy_reader: Union[Datasource, Reader], + parallelism: int, + mem_size: Optional[int], + num_outputs: Optional[int] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + ): + super().__init__( + f"Read{datasource.get_name()}", + None, + num_outputs, + ray_remote_args=ray_remote_args, + ) + self._datasource = datasource + self._datasource_or_legacy_reader = datasource_or_legacy_reader + self._parallelism = parallelism + self._mem_size = mem_size + self._concurrency = concurrency + self._detected_parallelism = None + + def set_detected_parallelism(self, parallelism: int): + """ + Set the true parallelism that should be used during execution. This + should be specified by the user or detected by the optimizer. + """ + self._detected_parallelism = parallelism + + def get_detected_parallelism(self) -> int: + """ + Get the true parallelism that should be used during execution. + """ + return self._detected_parallelism + + def aggregate_output_metadata(self) -> BlockMetadata: + """A ``BlockMetadata`` that represents the aggregate metadata of the outputs. + + This method gets metadata from the read tasks. It doesn't trigger any actual + execution. + """ + return self._cached_output_metadata + + @functools.cached_property + def _cached_output_metadata(self) -> BlockMetadata: + # Legacy datasources might not implement `get_read_tasks`. + if self._datasource.should_create_reader: + return BlockMetadata(None, None, None, None, None) + + # HACK: Try to get a single read task to get the metadata. 
+ read_tasks = self._datasource.get_read_tasks(1) + if len(read_tasks) == 0: + # If there are no read tasks, the dataset is probably empty. + return BlockMetadata(None, None, None, None, None) + + # `get_read_tasks` isn't guaranteed to return exactly one read task. + metadata = [read_task.metadata for read_task in read_tasks] + + if all(meta.num_rows is not None for meta in metadata): + num_rows = sum(meta.num_rows for meta in metadata) + else: + num_rows = None + + if all(meta.size_bytes is not None for meta in metadata): + size_bytes = sum(meta.size_bytes for meta in metadata) + else: + size_bytes = None + + schema = unify_block_metadata_schema(metadata) + + input_files = [] + for meta in metadata: + if meta.input_files is not None: + input_files.extend(meta.input_files) + + return BlockMetadata( + num_rows=num_rows, + size_bytes=size_bytes, + schema=schema, + input_files=input_files, + exec_stats=None, + ) diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..cee1930b788f9ddc8bcead1150168f635a374658 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py @@ -0,0 +1,35 @@ +from typing import Any, Dict, Optional, Union + +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.logical.operators.map_operator import AbstractMap +from ray.data.datasource.datasink import Datasink +from ray.data.datasource.datasource import Datasource + + +class Write(AbstractMap): + """Logical operator for write.""" + + def __init__( + self, + input_op: LogicalOperator, + datasink_or_legacy_datasource: Union[Datasink, Datasource], + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + **write_args, + ): + if 
isinstance(datasink_or_legacy_datasource, Datasink): + min_rows_per_bundled_input = ( + datasink_or_legacy_datasource.num_rows_per_write + ) + else: + min_rows_per_bundled_input = None + + super().__init__( + "Write", + input_op, + min_rows_per_bundled_input=min_rows_per_bundled_input, + ray_remote_args=ray_remote_args, + ) + self._datasink_or_legacy_datasource = datasink_or_legacy_datasource + self._write_args = write_args + self._concurrency = concurrency diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7ea8366e4b2211360e0644b966056ed3d4bf3e3 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0d0cb0e850c8b292e027ee67172c003863dd9db Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc2955e704f0a0253da301d73181d283d25cc07e Binary files /dev/null and 
b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58f0cf6e53ff65f6b91bba3accb7b713483ae46d Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c1818316e957ea7a26961ba9b743b0f6be0e30b Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7b946362267c32d81f52e0cb365dae4ea2298af5 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..128affd6c03f2f4fa50024bac13bbadbbe06ad38 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41b7b5a7f9d1b0c2d83aa9f7dc6e2a2fb5fb0f7e Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py new file mode 100644 index 0000000000000000000000000000000000000000..298ff6c4edbff9cfab6ea14418dc61b81c93e1e8 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py @@ -0,0 +1,30 @@ +from typing import Optional + +from ray.data._internal.execution.interfaces import PhysicalOperator +from ray.data._internal.logical.interfaces import PhysicalPlan, Rule + + +class InheritTargetMaxBlockSizeRule(Rule): + """For each op that has overridden the default target max block size, + propagate to upstream ops until we reach an op that has also overridden the + target max block size.""" + + def apply(self, plan: PhysicalPlan) -> PhysicalPlan: + self._propagate_target_max_block_size_to_upstream_ops(plan.dag) + return plan + + def _propagate_target_max_block_size_to_upstream_ops( + self, dag: PhysicalOperator, target_max_block_size: Optional[int] = None + ): + if dag.target_max_block_size is not None: + # Set the target 
block size to inherit for + # upstream ops. + target_max_block_size = dag.target_max_block_size + elif target_max_block_size is not None: + # Inherit from downstream op. + dag.set_target_max_block_size(target_max_block_size) + + for upstream_op in dag.input_dependencies: + self._propagate_target_max_block_size_to_upstream_ops( + upstream_op, target_max_block_size + ) diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py new file mode 100644 index 0000000000000000000000000000000000000000..a13378eb991eb6bdc7e389cf6842ae3df91e66a2 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py @@ -0,0 +1,133 @@ +import copy +from collections import deque +from typing import Iterable, List + +from ray.data._internal.logical.interfaces import LogicalOperator, LogicalPlan, Rule +from ray.data._internal.logical.operators.one_to_one_operator import ( + AbstractOneToOne, + Limit, +) +from ray.data._internal.logical.operators.read_operator import Read + + +class LimitPushdownRule(Rule): + """Rule for pushing down the limit operator. + + When a limit operator is present, we apply the limit on the + most upstream operator that supports it. Notably, we move the + Limit operator downstream from Read op, any other non-OneToOne operator, + or any operator which could potentially change the number of output rows. + + In addition, we also fuse consecutive Limit operators into a single + Limit operator, i.e. `Limit[n] -> Limit[m]` becomes `Limit[min(n, m)]`. 
+ """ + + def apply(self, plan: LogicalPlan) -> LogicalPlan: + optimized_dag = self._apply_limit_pushdown(plan.dag) + optimized_dag = self._apply_limit_fusion(optimized_dag) + return LogicalPlan(dag=optimized_dag, context=plan.context) + + def _apply_limit_pushdown(self, op: LogicalOperator) -> LogicalOperator: + """Given a DAG of LogicalOperators, traverse the DAG and push down + Limit operators, i.e. move Limit operators as far upstream as possible. + + Returns a new LogicalOperator with the Limit operators pushed down.""" + # Post-order traversal. + nodes: Iterable[LogicalOperator] = deque() + for node in op.post_order_iter(): + nodes.appendleft(node) + + while len(nodes) > 0: + current_op = nodes.pop() + + # If we encounter a Limit op, move it upstream until it reaches: + # - Read operator + # - A non-AbstractOneToOne operator (e.g. AbstractAllToAll) + # - An AbstractOneToOne operator that could change the number of output rows + + # TODO(scottjlee): in our current abstraction, we have Read extend + # AbstractMap (with no input dependency), which extends AbstractOneToOne. + # So we have to explicitly separate the Read op in its own check. + # We should remove this case once we refactor Read op to no longer + # be an AbstractOneToOne op. + if isinstance(current_op, Limit): + limit_op_copy = copy.copy(current_op) + + # Traverse up the DAG until we reach the first operator that meets + # one of the conditions above, which will serve as the new input + # into the Limit operator. 
+ new_input_into_limit = current_op.input_dependency + ops_between_new_input_and_limit: List[LogicalOperator] = [] + while ( + isinstance(new_input_into_limit, AbstractOneToOne) + and not isinstance(new_input_into_limit, Read) + and not getattr(new_input_into_limit, "can_modify_num_rows", False) + ): + new_input_into_limit_copy = copy.copy(new_input_into_limit) + ops_between_new_input_and_limit.append(new_input_into_limit_copy) + new_input_into_limit = new_input_into_limit.input_dependency + + # Link the Limit operator and its newly designated input op from above. + limit_op_copy._input_dependencies = [new_input_into_limit] + new_input_into_limit._output_dependencies = [limit_op_copy] + + # Build the chain of operator dependencies between the new + # input and the Limit operator, using copies of traversed operators. + ops_between_new_input_and_limit.append(limit_op_copy) + for idx in range(len(ops_between_new_input_and_limit) - 1): + curr_op, up_op = ( + ops_between_new_input_and_limit[idx], + ops_between_new_input_and_limit[idx + 1], + ) + curr_op._input_dependencies = [up_op] + up_op._output_dependencies = [curr_op] + # Add the copied operator to the list of nodes to be traversed. + nodes.append(curr_op) + + # Link the Limit operator to its new input operator. + for limit_output_op in current_op.output_dependencies: + limit_output_op._input_dependencies = [ + ops_between_new_input_and_limit[0] + ] + last_op = ops_between_new_input_and_limit[0] + last_op._output_dependencies = current_op.output_dependencies + + return current_op + + def _apply_limit_fusion(self, op: LogicalOperator) -> LogicalOperator: + """Given a DAG of LogicalOperators, traverse the DAG and fuse all + back-to-back Limit operators, i.e. + Limit[n] -> Limit[m] becomes Limit[min(n, m)]. + + Returns a new LogicalOperator with the Limit operators fusion applied.""" + + # Post-order traversal. 
+ nodes: Iterable[LogicalOperator] = deque() + for node in op.post_order_iter(): + nodes.appendleft(node) + + while len(nodes) > 0: + current_op = nodes.pop() + + # If we encounter two back-to-back Limit operators, fuse them. + if isinstance(current_op, Limit): + upstream_op = current_op.input_dependency + if isinstance(upstream_op, Limit): + new_limit = min(current_op._limit, upstream_op._limit) + fused_limit_op = Limit(upstream_op.input_dependency, new_limit) + + # Link the fused Limit operator to its input and output ops, i.e.: + # `upstream_input -> limit_upstream -> limit_downstream -> downstream_output` # noqa: E501 + # becomes `upstream_input -> fused_limit -> downstream_output` + fused_limit_op._input_dependencies = upstream_op.input_dependencies + fused_limit_op._output_dependencies = current_op.output_dependencies + + # Replace occurrences of the upstream Limit operator in + # output_dependencies with the newly fused Limit operator. + upstream_input = upstream_op.input_dependency + upstream_input._output_dependencies = [fused_limit_op] + + for current_output in current_op.output_dependencies: + current_output._input_dependencies = [fused_limit_op] + nodes.append(fused_limit_op) + return current_op diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py new file mode 100644 index 0000000000000000000000000000000000000000..fef82a3b1f935a2b989c199c9888cc69abe12ec5 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py @@ -0,0 +1,464 @@ +from typing import List, Optional, Tuple + +# TODO(Clark): Remove compute dependency once we delete the legacy compute. 
from ray.data._internal.compute import get_compute, is_task_compute
from ray.data._internal.execution.interfaces import (
    PhysicalOperator,
    RefBundle,
    TaskContext,
)
from ray.data._internal.execution.operators.actor_pool_map_operator import (
    ActorPoolMapOperator,
)
from ray.data._internal.execution.operators.base_physical_operator import (
    AllToAllOperator,
)
from ray.data._internal.execution.operators.map_operator import MapOperator
from ray.data._internal.execution.operators.task_pool_map_operator import (
    TaskPoolMapOperator,
)
from ray.data._internal.logical.interfaces import PhysicalPlan, Rule
from ray.data._internal.logical.operators.all_to_all_operator import (
    AbstractAllToAll,
    RandomShuffle,
    Repartition,
)
from ray.data._internal.logical.operators.map_operator import AbstractUDFMap
from ray.data._internal.stats import StatsDict
from ray.data.context import DataContext

# Scheduling strategy can be inherited from upstream operator if not specified.
INHERITABLE_REMOTE_ARGS = ["scheduling_strategy"]


class OperatorFusionRule(Rule):
    """Fuses linear chains of compatible physical operators."""

    def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
        # Copy the physical->logical op map; fused entries are popped and the
        # new fused ops are re-inserted as fusion proceeds.
        self._op_map = plan.op_map.copy()
        # Do DFS fusion on compatible pairwise operators in two passes.
        # In the first pass, only fuse back-to-back map operators together.
        fused_dag = self._fuse_map_operators_in_dag(plan.dag)

        # Now that we have fused together all back-to-back map operators,
        # we fuse together MapOperator -> AllToAllOperator pairs.
        fused_dag = self._fuse_all_to_all_operators_in_dag(fused_dag)

        # Update output dependencies after fusion.
        # TODO(hchen): Instead of updating the depdencies manually,
        # we need a better abstraction for manipulating the DAG.
        self._remove_output_depes(fused_dag)
        self._update_output_depes(fused_dag)

        new_plan = PhysicalPlan(fused_dag, self._op_map, plan.context)
        return new_plan

    def _remove_output_depes(self, op: PhysicalOperator) -> None:
        # Clear every upstream operator's output dependencies; they are
        # rebuilt from scratch by `_update_output_depes`.
        for input in op._input_dependencies:
            input._output_dependencies = []
            self._remove_output_depes(input)

    def _update_output_depes(self, op: PhysicalOperator) -> None:
        # Re-register `op` as an output dependency of each of its inputs,
        # recursing up the (already fused) DAG.
        for input in op._input_dependencies:
            input._output_dependencies.append(op)
            self._update_output_depes(input)

    def _fuse_map_operators_in_dag(self, dag: PhysicalOperator) -> MapOperator:
        """Starting at the given operator, traverses up the DAG of operators
        and recursively fuses compatible MapOperator -> MapOperator pairs.
        Returns the current (root) operator after completing upstream operator fusions.
        """
        upstream_ops = dag.input_dependencies
        while (
            len(upstream_ops) == 1
            and isinstance(dag, MapOperator)
            and isinstance(upstream_ops[0], MapOperator)
            and self._can_fuse(dag, upstream_ops[0])
        ):
            # Fuse operator with its upstream op.
            dag = self._get_fused_map_operator(dag, upstream_ops[0])
            upstream_ops = dag.input_dependencies

        # Done fusing back-to-back map operators together here,
        # move up the DAG to find the next map operators to fuse.
        dag._input_dependencies = [
            self._fuse_map_operators_in_dag(upstream_op) for upstream_op in upstream_ops
        ]
        return dag

    def _fuse_all_to_all_operators_in_dag(
        self, dag: AllToAllOperator
    ) -> AllToAllOperator:
        """Starting at the given operator, traverses up the DAG of operators
        and recursively fuses compatible MapOperator -> AllToAllOperator pairs.

        Also, sets the target block size of the immediately upstream map op to
        match the shuffle block size. We use a larger block size for shuffles
        because tiny blocks are bad for I/O performance.

        Returns the current (root) operator after completing upstream operator fusions.
        """
        upstream_ops = dag.input_dependencies
        while (
            len(upstream_ops) == 1
            and isinstance(dag, AllToAllOperator)
            and isinstance(upstream_ops[0], MapOperator)
            and self._can_fuse(dag, upstream_ops[0])
        ):
            # Fuse operator with its upstream op.
            dag = self._get_fused_all_to_all_operator(dag, upstream_ops[0])
            upstream_ops = dag.input_dependencies

        # Done fusing MapOperator -> AllToAllOperator together here,
        # move up the DAG to find the next pair of operators to fuse.
        dag._input_dependencies = [
            self._fuse_all_to_all_operators_in_dag(upstream_op)
            for upstream_op in upstream_ops
        ]
        return dag

    def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool:
        """Returns whether the provided downstream operator can be fused with the given
        upstream operator.

        We currently support fusing two operators if the following are all true:
            * We are fusing either MapOperator -> MapOperator or
              MapOperator -> AllToAllOperator.
            * They either use the same compute configuration, or the upstream operator
              uses a task pool while the downstream operator uses an actor pool.
            * If both operators involve callable classes, the callable classes are
              the same class AND constructor args are the same for both.
            * They have compatible remote arguments.
        """
        from ray.data._internal.logical.operators.map_operator import (
            AbstractMap,
            AbstractUDFMap,
        )

        if not up_op.supports_fusion() or not down_op.supports_fusion():
            return False

        # We currently only support fusing for the following cases:
        # - TaskPoolMapOperator -> TaskPoolMapOperator/ActorPoolMapOperator
        # - TaskPoolMapOperator -> AllToAllOperator
        # (only RandomShuffle and Repartition LogicalOperators are currently supported)
        if not (
            (
                isinstance(up_op, TaskPoolMapOperator)
                and isinstance(down_op, (TaskPoolMapOperator, ActorPoolMapOperator))
            )
            or (
                isinstance(up_op, TaskPoolMapOperator)
                and isinstance(down_op, AllToAllOperator)
            )
        ):
            return False

        down_logical_op = self._op_map[down_op]
        up_logical_op = self._op_map[up_op]

        # An upstream op that already splits its outputs cannot be fused.
        if up_op.get_additional_split_factor() > 1:
            return False

        # If the downstream operator takes no input, it cannot be fused with
        # the upstream operator.
        if not down_logical_op._input_dependencies:
            return False

        # We currently only support fusing for the following cases:
        # - AbstractMap -> AbstractMap
        # - AbstractMap -> RandomShuffle
        # - AbstractMap -> Repartition (shuffle=True)
        if not (
            (
                isinstance(up_logical_op, AbstractMap)
                and isinstance(down_logical_op, AbstractMap)
            )
            or (
                isinstance(up_logical_op, AbstractMap)
                and isinstance(down_logical_op, RandomShuffle)
            )
            or (
                isinstance(up_logical_op, AbstractMap)
                and isinstance(down_logical_op, Repartition)
            )
        ):
            return False

        # Do not fuse Repartition operator if shuffle is disabled
        # (i.e. using split shuffle).
        if isinstance(down_logical_op, Repartition) and not down_logical_op._shuffle:
            return False

        if isinstance(down_logical_op, AbstractUDFMap) and isinstance(
            up_logical_op, AbstractUDFMap
        ):
            # Allow fusing tasks->actors if the resources are compatible (read->map),
            # but not the other way around. The latter (downstream op) will be used as
            # the compute if fused.
            if is_task_compute(down_logical_op._compute) and get_compute(
                up_logical_op._compute
            ) != get_compute(down_logical_op._compute):
                return False

        # Only fuse if the ops' remote arguments are compatible.
        if not _are_remote_args_compatible(
            getattr(up_logical_op, "_ray_remote_args", {}),
            getattr(down_logical_op, "_ray_remote_args", {}),
        ):
            return False

        # Do not fuse if either op specifies a `_ray_remote_args_fn`,
        # since it is not known whether the generated args will be compatible.
        if getattr(up_logical_op, "_ray_remote_args_fn", None) or getattr(
            down_logical_op, "_ray_remote_args_fn", None
        ):
            return False

        if not self._can_merge_target_max_block_size(
            up_op.target_max_block_size, down_op.target_max_block_size
        ):
            return False

        # Otherwise, ops are compatible for fusion.
        return True

    def _can_merge_target_max_block_size(
        self,
        up_target_max_block_size: Optional[int],
        down_target_max_block_size: Optional[int],
    ):
        """Returns whether two ops' target max block sizes are mergeable.

        `None` means "use the context default"; an explicit upstream override
        must match the (defaulted) downstream value.
        """
        # If the upstream op overrode the target max block size, only fuse if
        # they are equal.
        if up_target_max_block_size is not None:
            if down_target_max_block_size is None:
                down_target_max_block_size = (
                    DataContext.get_current().target_max_block_size
                )
            if up_target_max_block_size != down_target_max_block_size:
                return False
        return True

    def _get_merged_target_max_block_size(
        self,
        up_target_max_block_size: Optional[int],
        down_target_max_block_size: Optional[int],
    ):
        """Returns the target max block size for the fused operator.

        Precondition: `_can_merge_target_max_block_size` returned True for
        these two values.
        """
        if up_target_max_block_size is not None:
            # If the upstream op overrode the target max block size, we can
            # only merge if the downstream op matches or uses the default.
            assert (
                down_target_max_block_size is None
                or down_target_max_block_size == up_target_max_block_size
            )
            return up_target_max_block_size
        else:
            # Upstream op inherits the downstream op's target max block size,
            # because the downstream op is the one that outputs the final
            # blocks.
            return down_target_max_block_size

    def _get_fused_map_operator(
        self, down_op: MapOperator, up_op: MapOperator
    ) -> MapOperator:
        """Builds the fused physical MapOperator for `up_op -> down_op` and
        records a corresponding fused logical op in `self._op_map`.
        """
        from ray.data._internal.logical.operators.map_operator import AbstractMap

        assert self._can_fuse(down_op, up_op), (
            "Current rule supports fusing MapOperator->MapOperator, but received: "
            f"{type(up_op).__name__} -> {type(down_op).__name__}"
        )

        # Fuse operator names.
        name = up_op.name + "->" + down_op.name

        down_logical_op = self._op_map.pop(down_op)
        up_logical_op = self._op_map.pop(up_op)

        # Merge minimum block sizes.
        down_min_rows_per_bundled_input = (
            down_logical_op._min_rows_per_bundled_input
            if isinstance(down_logical_op, AbstractMap)
            else None
        )
        up_min_rows_per_bundled_input = (
            up_logical_op._min_rows_per_bundled_input
            if isinstance(up_logical_op, AbstractMap)
            else None
        )
        if (
            down_min_rows_per_bundled_input is not None
            and up_min_rows_per_bundled_input is not None
        ):
            min_rows_per_bundled_input = max(
                down_min_rows_per_bundled_input, up_min_rows_per_bundled_input
            )
        elif up_min_rows_per_bundled_input is not None:
            min_rows_per_bundled_input = up_min_rows_per_bundled_input
        else:
            min_rows_per_bundled_input = down_min_rows_per_bundled_input

        target_max_block_size = self._get_merged_target_max_block_size(
            up_op.target_max_block_size, down_op.target_max_block_size
        )

        # We take the downstream op's compute in case we're fusing upstream tasks with a
        # downstream actor pool (e.g. read->map).
        compute = None
        if isinstance(down_logical_op, AbstractUDFMap):
            compute = get_compute(down_logical_op._compute)
        ray_remote_args = up_logical_op._ray_remote_args
        # NOTE(review): `_can_fuse` rejects fusion when either op sets
        # `_ray_remote_args_fn`, so this is effectively always None — confirm.
        ray_remote_args_fn = (
            up_logical_op._ray_remote_args_fn or down_logical_op._ray_remote_args_fn
        )
        # Make the upstream operator's inputs the new, fused operator's inputs.
        input_deps = up_op.input_dependencies
        assert len(input_deps) == 1
        input_op = input_deps[0]

        # Fused physical map operator.
        op = MapOperator.create(
            up_op.get_map_transformer().fuse(down_op.get_map_transformer()),
            input_op,
            target_max_block_size=target_max_block_size,
            name=name,
            compute_strategy=compute,
            min_rows_per_bundle=min_rows_per_bundled_input,
            ray_remote_args=ray_remote_args,
            ray_remote_args_fn=ray_remote_args_fn,
        )
        op.set_logical_operators(*up_op._logical_operators, *down_op._logical_operators)

        # Build a map logical operator to be used as a reference for further fusion.
        # TODO(Scott): This is hacky, remove this once we push fusion to be purely based
        # on a lower-level operator spec.
        if isinstance(up_logical_op, AbstractUDFMap):
            input_op = up_logical_op.input_dependency
        else:
            # Bottom out at the source logical op (e.g. Read()).
            input_op = up_logical_op
        if isinstance(down_logical_op, AbstractUDFMap):
            logical_op = AbstractUDFMap(
                name,
                input_op,
                down_logical_op._fn,
                down_logical_op._fn_args,
                down_logical_op._fn_kwargs,
                down_logical_op._fn_constructor_args,
                down_logical_op._fn_constructor_kwargs,
                min_rows_per_bundled_input,
                compute,
                ray_remote_args_fn,
                ray_remote_args,
            )
        else:
            from ray.data._internal.logical.operators.map_operator import AbstractMap

            # The downstream op is AbstractMap instead of AbstractUDFMap.
            logical_op = AbstractMap(
                name,
                input_op,
                min_rows_per_bundled_input=min_rows_per_bundled_input,
                ray_remote_args_fn=ray_remote_args_fn,
                ray_remote_args=ray_remote_args,
            )
        self._op_map[op] = logical_op
        # Return the fused physical operator.
        return op

    def _get_fused_all_to_all_operator(
        self, down_op: AllToAllOperator, up_op: MapOperator
    ) -> AllToAllOperator:
        """Builds the fused physical AllToAllOperator for `up_op -> down_op`.

        The upstream map transform is stashed on the TaskContext so the
        shuffle implementation can apply it to each input block.
        """
        assert self._can_fuse(down_op, up_op), (
            "Current rule supports fusing MapOperator -> AllToAllOperator"
            f", but received: {type(up_op).__name__} -> {type(down_op).__name__}"
        )

        # Fuse operator names.
        name = up_op.name + "->" + down_op.name

        down_logical_op: AbstractAllToAll = self._op_map.pop(down_op)
        up_logical_op: AbstractUDFMap = self._op_map.pop(up_op)

        # Fuse transformation functions.
        ray_remote_args = up_logical_op._ray_remote_args
        down_transform_fn = down_op.get_transformation_fn()
        up_map_transformer = up_op.get_map_transformer()

        def fused_all_to_all_transform_fn(
            blocks: List[RefBundle], ctx: TaskContext
        ) -> Tuple[List[RefBundle], StatsDict]:
            """To fuse MapOperator->AllToAllOperator, we store the map function
            in the TaskContext so that it may be used by the downstream
            AllToAllOperator's transform function."""
            ctx.upstream_map_transformer = up_map_transformer
            ctx.upstream_map_ray_remote_args = ray_remote_args
            return down_transform_fn(blocks, ctx)

        # Make the upstream operator's inputs the new, fused operator's inputs.
        input_deps = up_op.input_dependencies
        assert len(input_deps) == 1
        input_op = input_deps[0]

        target_max_block_size = self._get_merged_target_max_block_size(
            up_op.target_max_block_size, down_op.target_max_block_size
        )

        op = AllToAllOperator(
            fused_all_to_all_transform_fn,
            input_op,
            target_max_block_size=target_max_block_size,
            num_outputs=down_op._num_outputs,
            # Transfer over the existing sub-progress bars from
            # the AllToAllOperator (if any) into the fused operator.
            sub_progress_bar_names=down_op._sub_progress_bar_names,
            name=name,
        )
        # Bottom out at the source logical op (e.g. Read()).
def _are_remote_args_compatible(prev_args, next_args):
    """Check if Ray remote arguments are compatible for merging.

    The downstream (``next``) args win; inheritable keys (currently only
    ``scheduling_strategy``) flow downstream when the downstream op leaves
    them unset. Compatibility means the upstream args, after canonicalization,
    equal the merged downstream args.
    """
    upstream = _canonicalize(prev_args)
    downstream = _canonicalize(next_args)
    merged = downstream.copy()
    for key in INHERITABLE_REMOTE_ARGS:
        # NOTE: We only carry over inheritable value in case
        # of it not being provided in the remote args
        if key not in merged and key in upstream:
            merged[key] = upstream[key]

    return upstream == merged


def _canonicalize(remote_args: dict) -> dict:
    """Returns canonical form of given remote args."""
    canonical = remote_args.copy()
    # Missing or None CPU/GPU requests default to 1 CPU / 0 GPUs.
    if canonical.get("num_cpus") is None:
        canonical["num_cpus"] = 1
    if canonical.get("num_gpus") is None:
        canonical["num_gpus"] = 0
    # Drop zero/None custom-resource requests (mutates the nested dict in
    # place, matching the original shallow-copy semantics).
    resources = canonical.get("resources", {})
    for name in [n for n, amount in list(resources.items()) if amount is None or amount == 0.0]:
        del resources[name]
    canonical["resources"] = resources
    return canonical
import copy
from collections import deque

from ray.data._internal.logical.interfaces import LogicalOperator, LogicalPlan, Rule
from ray.data._internal.logical.operators.all_to_all_operator import (
    AbstractAllToAll,
    RandomizeBlocks,
)


class ReorderRandomizeBlocksRule(Rule):
    """Rule for reordering RandomizeBlocks logical operator.

    Reordering RandomizeBlocks operators is to help fuse multiple
    AbstractUDFMap operators together for better performance.

    1. Dedupes multiple RandomizeBlocks operators if they are not seeded.
    2. Moves RandomizeBlocks operator to the end of a sequence of AbstractUDFMap
    operators. RandomizeBlocks operators are not moved across AbstractAllToAll operator
    boundaries.
    """

    def apply(self, plan: LogicalPlan) -> LogicalPlan:
        optimized_dag: LogicalOperator = self._apply(plan.dag)
        new_plan = LogicalPlan(dag=optimized_dag, context=plan.context)
        return new_plan

    def _apply(self, op: LogicalOperator) -> LogicalOperator:
        # Pending RandomizeBlocks copies that still need to be re-inserted
        # further downstream.
        operators = []

        # Post-order traversal.
        nodes = deque()
        for node in op.post_order_iter():
            nodes.appendleft(node)

        while len(nodes) > 0:
            current_op = nodes.pop()
            upstream_ops = current_op.input_dependencies

            # Iterate through all upstream ops, and remove all RandomizeBlocks
            # operators.
            for i in range(len(upstream_ops)):
                if isinstance(upstream_ops[i], RandomizeBlocks):
                    # If no seeds are provided, then collapse into a single
                    # RandomizeBlocks operator.
                    current_seed = upstream_ops[i]._seed
                    # Collect this op unless it can be deduped: an unseeded
                    # RandomizeBlocks collapses into the previously collected
                    # one if that one is also unseeded.
                    if not operators or current_seed or operators[-1]._seed:
                        # We need to make a copy of the operator.
                        # Because the operator instance may be shared by multiple
                        # Datasets. We shouldn't modify it in place.
                        operators.append(copy.copy(upstream_ops[i]))

                    # Remove RandomizeBlocks operator from the dag and wire in new input
                    # dependencies.
                    assert len(upstream_ops[i].input_dependencies) == 1
                    # Splice the RandomizeBlocks out of the DAG by replacing it
                    # with its sole input (mutates current_op's input list).
                    upstream_ops[i] = upstream_ops[i].input_dependencies[0]
            if isinstance(current_op, AbstractAllToAll) and not isinstance(
                current_op, RandomizeBlocks
            ):
                # If this operator is a an AllToAll Operator, then insert
                # RandomizeBlocks right before this operator rather than the end of the
                # DAG.
                # All-to-all operators can have only 1 input operator.
                assert len(upstream_ops) == 1
                input_op = upstream_ops[0]
                for random_op in operators:
                    random_op._input_dependencies = [input_op]
                    input_op = random_op
                upstream_ops[0] = input_op
                operators = []

        # Add RandomizeBlocks operator as the last operator in the DAG if necessary.
        for random_op in operators:
            random_op._input_dependencies = [op]
            op = random_op

        return op
import logging
import math
from typing import Optional, Tuple, Union

from ray import available_resources as ray_available_resources
from ray.data._internal.execution.interfaces import PhysicalOperator
from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
from ray.data._internal.logical.interfaces import PhysicalPlan, Rule
from ray.data._internal.logical.operators.read_operator import Read
from ray.data._internal.util import _autodetect_parallelism
from ray.data.context import WARN_PREFIX, DataContext
from ray.data.datasource.datasource import Datasource, Reader

logger = logging.getLogger(__name__)


def compute_additional_split_factor(
    datasource_or_legacy_reader: Union[Datasource, Reader],
    parallelism: int,
    mem_size: int,
    target_max_block_size: int,
    cur_additional_split_factor: Optional[int] = None,
) -> Tuple[int, str, int, Optional[int]]:
    """Compute the read parallelism and an optional per-task output split factor.

    Args:
        datasource_or_legacy_reader: The datasource/reader to plan read tasks for.
        parallelism: The user-requested parallelism, or -1 for auto-detection.
        mem_size: Estimated in-memory size of the data (may be falsy if unknown).
        target_max_block_size: Target max size for output blocks.
        cur_additional_split_factor: An already-applied split factor, if any.

    Returns:
        A tuple of (detected parallelism, reason string for the detected value,
        estimated number of output blocks, and the additional split factor to
        apply per read task, or None if no extra splitting is needed).
    """
    ctx = DataContext.get_current()
    detected_parallelism, reason, _ = _autodetect_parallelism(
        parallelism, target_max_block_size, ctx, datasource_or_legacy_reader, mem_size
    )
    # NOTE(review): assumes get_read_tasks() returns at least one task when
    # mem_size is truthy; a zero-task datasource would divide by zero below.
    num_read_tasks = len(
        datasource_or_legacy_reader.get_read_tasks(detected_parallelism)
    )
    expected_block_size = None
    if mem_size:
        expected_block_size = mem_size / num_read_tasks
        logger.debug(
            f"Expected in-memory size {mem_size}," f" block size {expected_block_size}"
        )
        size_based_splits = round(max(1, expected_block_size / target_max_block_size))
    else:
        size_based_splits = 1
    if cur_additional_split_factor:
        size_based_splits *= cur_additional_split_factor
    logger.debug(f"Size based split factor {size_based_splits}")
    estimated_num_blocks = num_read_tasks * size_based_splits
    logger.debug(f"Blocks after size splits {estimated_num_blocks}")

    available_cpu_slots = ray_available_resources().get("CPU", 1)
    if (
        parallelism != -1
        and num_read_tasks >= available_cpu_slots * 4
        and num_read_tasks >= 5000
    ):
        logger.warning(
            f"{WARN_PREFIX} The requested number of read blocks of {parallelism} "
            "is more than 4x the number of available CPU slots in the cluster of "
            f"{available_cpu_slots}. This can "
            "lead to slowdowns during the data reading phase due to excessive "
            "task creation. Reduce the value to match with the available "
            "CPU slots in the cluster, or set override_num_blocks to -1 for Ray Data "
            # Fixed: trailing space so the concatenated sentences don't run
            # together ("...blocks.You can ignore...").
            "to automatically determine the number of read tasks blocks. "
            "You can ignore this message if the cluster is expected to autoscale."
        )

    # Add more output splitting for each read task if needed.
    # TODO(swang): For parallelism=-1 (user did not explicitly set
    # parallelism), and if the following operator produces much larger blocks,
    # we should scale down the target max block size here instead of using
    # splitting, which can have higher memory usage.
    if estimated_num_blocks < detected_parallelism and estimated_num_blocks > 0:
        k = math.ceil(detected_parallelism / estimated_num_blocks)
        estimated_num_blocks = estimated_num_blocks * k
        return detected_parallelism, reason, estimated_num_blocks, k

    return detected_parallelism, reason, estimated_num_blocks, None


class SetReadParallelismRule(Rule):
    """
    This rule sets the read op's task parallelism based on the target block
    size, the requested parallelism, the number of read files, and the
    available resources in the cluster.

    If the parallelism is lower than requested, this rule also sets a split
    factor to split the output blocks of the read task, so that the following
    operator will have the desired parallelism.
    """

    def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
        # Breadth-first walk over the physical DAG, applying the rule to
        # every Read operator found.
        ops = [plan.dag]

        while len(ops) > 0:
            op = ops.pop(0)
            if isinstance(op, InputDataBuffer):
                # Input buffers have no logical counterpart (and no inputs).
                continue
            logical_op = plan.op_map[op]
            if isinstance(logical_op, Read):
                self._apply(op, logical_op)
            ops += op.input_dependencies

        return plan

    def _apply(self, op: PhysicalOperator, logical_op: Read):
        """Set detected parallelism (and split factor, if needed) on one read op."""
        (
            detected_parallelism,
            reason,
            estimated_num_blocks,
            k,
        ) = compute_additional_split_factor(
            logical_op._datasource_or_legacy_reader,
            logical_op._parallelism,
            logical_op._mem_size,
            op.actual_target_max_block_size,
            op._additional_split_factor,
        )

        if logical_op._parallelism == -1:
            assert reason != ""
            logger.debug(
                f"Using autodetected parallelism={detected_parallelism} "
                f"for operator {logical_op.name} to satisfy {reason}."
            )
        logical_op.set_detected_parallelism(detected_parallelism)

        # Merged the two identical `k is not None` checks into one branch.
        if k is not None:
            logger.debug(
                f"To satisfy the requested parallelism of {detected_parallelism}, "
                f"each read task output is split into {k} smaller blocks."
            )
            op.set_additional_split_factor(k)

        logger.debug(f"Estimated num output blocks {estimated_num_blocks}")
+ """ + + def apply(self, plan: PhysicalPlan) -> PhysicalPlan: + self._traverse(plan.dag) + return plan + + def _traverse(self, op): + """Traverse the DAG and apply the optimization to each MapOperator.""" + if isinstance(op, MapOperator): + map_transformer = op.get_map_transformer() + transform_fns = map_transformer.get_transform_fns() + new_transform_fns = self._optimize(transform_fns) + # Physical operators won't be shared, + # so it's safe to modify the transform_fns in place. + map_transformer.set_transform_fns(new_transform_fns) + + for input_op in op.input_dependencies: + self._traverse(input_op) + + @abstractmethod + def _optimize(self, transform_fns: List[MapTransformFn]) -> List[MapTransformFn]: + """Optimize the transform_fns chain of a MapOperator. + + Args: + transform_fns: The old transform_fns chain. + Returns: + The optimized transform_fns chain. + """ + ... + + +class EliminateBuildOutputBlocks(ZeroCopyMapFusionRule): + """This rule eliminates unnecessary BuildOutputBlocksMapTransformFn, + if the previous fn already outputs blocks. + + This happens for the "Read -> Map/Write" fusion. + """ + + def _optimize(self, transform_fns: List[MapTransformFn]) -> List[MapTransformFn]: + # For the following subsquence, + # 1. Any MapTransformFn with block output. + # 2. BuildOutputBlocksMapTransformFn + # 3. Any MapTransformFn with block input. + # We drop the BuildOutputBlocksMapTransformFn in the middle. 
+ new_transform_fns = [] + + for i in range(len(transform_fns)): + cur_fn = transform_fns[i] + drop = False + if ( + i > 0 + and i < len(transform_fns) - 1 + and isinstance(cur_fn, BuildOutputBlocksMapTransformFn) + ): + prev_fn = transform_fns[i - 1] + next_fn = transform_fns[i + 1] + if ( + prev_fn.output_type == MapTransformFnDataType.Block + and next_fn.input_type == MapTransformFnDataType.Block + ): + drop = True + if not drop: + new_transform_fns.append(cur_fn) + + return new_transform_fns diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ef99a58ee113934da0162c86945c1275cb85455b --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/__init__.py @@ -0,0 +1,50 @@ +from ray.data.preprocessors.chain import Chain +from ray.data.preprocessors.concatenator import Concatenator +from ray.data.preprocessors.discretizer import ( + CustomKBinsDiscretizer, + UniformKBinsDiscretizer, +) +from ray.data.preprocessors.encoder import ( + Categorizer, + LabelEncoder, + MultiHotEncoder, + OneHotEncoder, + OrdinalEncoder, +) +from ray.data.preprocessors.hasher import FeatureHasher +from ray.data.preprocessors.imputer import SimpleImputer +from ray.data.preprocessors.normalizer import Normalizer +from ray.data.preprocessors.scaler import ( + MaxAbsScaler, + MinMaxScaler, + RobustScaler, + StandardScaler, +) +from ray.data.preprocessors.tokenizer import Tokenizer +from ray.data.preprocessors.torch import TorchVisionPreprocessor +from ray.data.preprocessors.transformer import PowerTransformer +from ray.data.preprocessors.vectorizer import CountVectorizer, HashingVectorizer + +__all__ = [ + "Categorizer", + "CountVectorizer", + "Chain", + "FeatureHasher", + "HashingVectorizer", + "LabelEncoder", + "MaxAbsScaler", + "MinMaxScaler", + "MultiHotEncoder", + "Normalizer", + 
"OneHotEncoder", + "OrdinalEncoder", + "PowerTransformer", + "RobustScaler", + "SimpleImputer", + "StandardScaler", + "Concatenator", + "Tokenizer", + "TorchVisionPreprocessor", + "CustomKBinsDiscretizer", + "UniformKBinsDiscretizer", +] diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da0f3ae31e584a1c043866a61fc9fef6909c8ba1 Binary files /dev/null and b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/__pycache__/encoder.cpython-310.pyc differ diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/chain.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/chain.py new file mode 100644 index 0000000000000000000000000000000000000000..e608f8cf2f86aaf86b418e01dd182fa1bb56365d --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/chain.py @@ -0,0 +1,101 @@ +from typing import TYPE_CHECKING + +from ray.air.util.data_batch_conversion import BatchFormat +from ray.data import Dataset +from ray.data.preprocessor import Preprocessor + +if TYPE_CHECKING: + from ray.air.data_batch_type import DataBatchType + + +class Chain(Preprocessor): + """Combine multiple preprocessors into a single :py:class:`Preprocessor`. + + When you call ``fit``, each preprocessor is fit on the dataset produced by the + preceeding preprocessor's ``fit_transform``. + + Example: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import * + >>> + >>> df = pd.DataFrame({ + ... "X0": [0, 1, 2], + ... "X1": [3, 4, 5], + ... "Y": ["orange", "blue", "orange"], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> + >>> preprocessor = Chain( + ... StandardScaler(columns=["X0", "X1"]), + ... 
Concatenator(columns=["X0", "X1"], output_column_name="X"), + ... LabelEncoder(label_column="Y") + ... ) + >>> preprocessor.fit_transform(ds).to_pandas() # doctest: +SKIP + Y X + 0 1 [-1.224744871391589, -1.224744871391589] + 1 0 [0.0, 0.0] + 2 1 [1.224744871391589, 1.224744871391589] + + Args: + preprocessors: The preprocessors to sequentially compose. + """ + + def fit_status(self): + fittable_count = 0 + fitted_count = 0 + for p in self.preprocessors: + if p.fit_status() == Preprocessor.FitStatus.FITTED: + fittable_count += 1 + fitted_count += 1 + elif p.fit_status() in ( + Preprocessor.FitStatus.NOT_FITTED, + Preprocessor.FitStatus.PARTIALLY_FITTED, + ): + fittable_count += 1 + else: + assert p.fit_status() == Preprocessor.FitStatus.NOT_FITTABLE + if fittable_count > 0: + if fitted_count == fittable_count: + return Preprocessor.FitStatus.FITTED + elif fitted_count > 0: + return Preprocessor.FitStatus.PARTIALLY_FITTED + else: + return Preprocessor.FitStatus.NOT_FITTED + else: + return Preprocessor.FitStatus.NOT_FITTABLE + + def __init__(self, *preprocessors: Preprocessor): + self.preprocessors = preprocessors + + def _fit(self, ds: Dataset) -> Preprocessor: + for preprocessor in self.preprocessors[:-1]: + ds = preprocessor.fit_transform(ds) + self.preprocessors[-1].fit(ds) + return self + + def fit_transform(self, ds: Dataset) -> Dataset: + for preprocessor in self.preprocessors: + ds = preprocessor.fit_transform(ds) + return ds + + def _transform(self, ds: Dataset) -> Dataset: + for preprocessor in self.preprocessors: + ds = preprocessor.transform(ds) + return ds + + def _transform_batch(self, df: "DataBatchType") -> "DataBatchType": + for preprocessor in self.preprocessors: + df = preprocessor.transform_batch(df) + return df + + def __repr__(self): + arguments = ", ".join(repr(preprocessor) for preprocessor in self.preprocessors) + return f"{self.__class__.__name__}({arguments})" + + def _determine_transform_to_use(self) -> BatchFormat: + # This is relevant 
import logging
from typing import List, Optional

import numpy as np
import pandas as pd

from ray.data.preprocessor import Preprocessor
from ray.util.annotations import PublicAPI

logger = logging.getLogger(__name__)


@PublicAPI(stability="alpha")
class Concatenator(Preprocessor):
    """Combine numeric columns into a column of type
    :class:`~ray.air.util.tensor_extensions.pandas.TensorDtype`. Only columns
    specified in ``columns`` will be concatenated.

    This preprocessor concatenates numeric columns and stores the result in a new
    column. The new column contains
    :class:`~ray.air.util.tensor_extensions.pandas.TensorArrayElement` objects of
    shape :math:`(m,)`, where :math:`m` is the number of columns concatenated.
    The :math:`m` concatenated columns are dropped after concatenation.
    The preprocessor preserves the order of the columns provided in the ``columns``
    argument and will use that order when calling ``transform()`` and ``transform_batch()``.

    Examples:
        >>> import numpy as np
        >>> import pandas as pd
        >>> import ray
        >>> from ray.data.preprocessors import Concatenator

        :py:class:`Concatenator` combines numeric columns into a column of
        :py:class:`~ray.air.util.tensor_extensions.pandas.TensorDtype`.

        >>> df = pd.DataFrame({"X0": [0, 3, 1], "X1": [0.5, 0.2, 0.9]})
        >>> ds = ray.data.from_pandas(df)  # doctest: +SKIP
        >>> concatenator = Concatenator(columns=["X0", "X1"])
        >>> concatenator.transform(ds).to_pandas()  # doctest: +SKIP
           concat_out
        0  [0.0, 0.5]
        1  [3.0, 0.2]
        2  [1.0, 0.9]

        By default, the created column is called `"concat_out"`, but you can specify
        a different name.

        >>> concatenator = Concatenator(columns=["X0", "X1"], output_column_name="tensor")
        >>> concatenator.transform(ds).to_pandas()  # doctest: +SKIP
               tensor
        0  [0.0, 0.5]
        1  [3.0, 0.2]
        2  [1.0, 0.9]

        >>> concatenator = Concatenator(columns=["X0", "X1"], dtype=np.float32)
        >>> concatenator.transform(ds)  # doctest: +SKIP
        Dataset(num_rows=3, schema={concat_out: TensorDtype(shape=(2,), dtype=float32)})

    Args:
        output_column_name: The desired name for the new column.
            Defaults to ``"concat_out"``.
        columns: A list of columns to concatenate. The provided order of the columns
            will be retained during concatenation.
        dtype: The ``dtype`` to convert the output tensors to. If unspecified,
            the ``dtype`` is determined by standard coercion rules.
        raise_if_missing: If ``True``, an error is raised if any
            of the columns in ``columns`` don't exist.
            Defaults to ``False``.

    Raises:
        ValueError: if ``raise_if_missing`` is ``True`` and a column in
            ``columns`` doesn't exist in the dataset.
    """  # noqa: E501

    # No statistics need to be computed from data; transform-only.
    _is_fittable = False

    def __init__(
        self,
        columns: List[str],
        output_column_name: str = "concat_out",
        dtype: Optional[np.dtype] = None,
        raise_if_missing: bool = False,
    ):
        self.columns = columns

        self.output_column_name = output_column_name
        self.dtype = dtype
        self.raise_if_missing = raise_if_missing

    def _validate(self, df: pd.DataFrame) -> None:
        # Warn (or raise, per `raise_if_missing`) about requested columns that
        # are absent from the batch.
        missing_columns = set(self.columns) - set(df)
        if missing_columns:
            message = (
                f"Missing columns specified in '{self.columns}': {missing_columns}"
            )
            if self.raise_if_missing:
                raise ValueError(message)
            else:
                logger.warning(message)

    def _transform_pandas(self, df: pd.DataFrame):
        self._validate(df)

        # Stack the selected columns row-wise into one ndarray, then replace
        # them with a single list-of-rows column.
        concatenated = df[self.columns].to_numpy(dtype=self.dtype)
        df = df.drop(columns=self.columns)
        # Use a Pandas Series for column assignment to get more consistent
        # behavior across Pandas versions.
        df.loc[:, self.output_column_name] = pd.Series(list(concatenated))
        return df

    def __repr__(self):
        # Show only arguments that differ from their defaults.
        default_values = {
            "output_column_name": "concat_out",
            "columns": None,
            "dtype": None,
            "raise_if_missing": False,
        }

        non_default_arguments = []
        for parameter, default_value in default_values.items():
            value = getattr(self, parameter)
            if value != default_value:
                non_default_arguments.append(f"{parameter}={value}")

        return f"{self.__class__.__name__}({', '.join(non_default_arguments)})"
ray.data._internal.aggregate import Max, Min +from ray.data.preprocessor import Preprocessor +from ray.util.annotations import PublicAPI + + +class _AbstractKBinsDiscretizer(Preprocessor): + """Abstract base class for all KBinsDiscretizers. + + Essentially a thin wraper around ``pd.cut``. + + Expects either ``self.stats_`` or ``self.bins`` to be set and + contain {column:list_of_bin_intervals}. + """ + + def _transform_pandas(self, df: pd.DataFrame): + def bin_values(s: pd.Series) -> pd.Series: + if s.name not in self.columns: + return s + labels = self.dtypes.get(s.name) if self.dtypes else False + ordered = True + if labels: + if isinstance(labels, pd.CategoricalDtype): + ordered = labels.ordered + labels = list(labels.categories) + else: + labels = False + + bins = self.stats_ if self._is_fittable else self.bins + return pd.cut( + s, + bins[s.name] if isinstance(bins, dict) else bins, + right=self.right, + labels=labels, + ordered=ordered, + retbins=False, + include_lowest=self.include_lowest, + duplicates=self.duplicates, + ) + + return df.apply(bin_values, axis=0) + + def _validate_bins_columns(self): + if isinstance(self.bins, dict) and not all( + col in self.bins for col in self.columns + ): + raise ValueError( + "If `bins` is a dictionary, all elements of `columns` must be present " + "in it." + ) + + def __repr__(self): + attr_str = ", ".join( + [ + f"{attr_name}={attr_value!r}" + for attr_name, attr_value in vars(self).items() + if not attr_name.startswith("_") + ] + ) + return f"{self.__class__.__name__}({attr_str})" + + +@PublicAPI(stability="alpha") +class CustomKBinsDiscretizer(_AbstractKBinsDiscretizer): + """Bin values into discrete intervals using custom bin edges. + + Columns must contain numerical values. + + Examples: + Use :class:`CustomKBinsDiscretizer` to bin continuous features. + + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import CustomKBinsDiscretizer + >>> df = pd.DataFrame({ + ... 
"value_1": [0.2, 1.4, 2.5, 6.2, 9.7, 2.1], + ... "value_2": [10, 15, 13, 12, 23, 25], + ... }) + >>> ds = ray.data.from_pandas(df) + >>> discretizer = CustomKBinsDiscretizer( + ... columns=["value_1", "value_2"], + ... bins=[0, 1, 4, 10, 25] + ... ) + >>> discretizer.transform(ds).to_pandas() + value_1 value_2 + 0 0 2 + 1 1 3 + 2 1 3 + 3 2 3 + 4 2 3 + 5 1 3 + + You can also specify different bin edges per column. + + >>> discretizer = CustomKBinsDiscretizer( + ... columns=["value_1", "value_2"], + ... bins={"value_1": [0, 1, 4], "value_2": [0, 18, 35, 70]}, + ... ) + >>> discretizer.transform(ds).to_pandas() + value_1 value_2 + 0 0.0 0 + 1 1.0 0 + 2 1.0 0 + 3 NaN 0 + 4 NaN 1 + 5 1.0 1 + + + Args: + columns: The columns to discretize. + bins: Defines custom bin edges. Can be an iterable of numbers, + a ``pd.IntervalIndex``, or a dict mapping columns to either of them. + Note that ``pd.IntervalIndex`` for bins must be non-overlapping. + right: Indicates whether bins include the rightmost edge. + include_lowest: Indicates whether the first interval should be left-inclusive. + duplicates: Can be either 'raise' or 'drop'. If bin edges are not unique, + raise ``ValueError`` or drop non-uniques. + dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype`` + objects or ``np.integer`` types. If you don't include a column in ``dtypes`` + or specify it as an integer dtype, the outputted column will consist of + ordered integers corresponding to bins. If you use a + ``pd.CategoricalDtype``, the outputted column will be a + ``pd.CategoricalDtype`` with the categories being mapped to bins. + You can use ``pd.CategoricalDtype(categories, ordered=True)`` to + preserve information about bin order. + + .. seealso:: + + :class:`UniformKBinsDiscretizer` + If you want to bin data into uniform width bins. 
+ """ + + def __init__( + self, + columns: List[str], + bins: Union[ + Iterable[float], + pd.IntervalIndex, + Dict[str, Union[Iterable[float], pd.IntervalIndex]], + ], + *, + right: bool = True, + include_lowest: bool = False, + duplicates: str = "raise", + dtypes: Optional[ + Dict[str, Union[pd.CategoricalDtype, Type[np.integer]]] + ] = None, + ): + self.columns = columns + self.bins = bins + self.right = right + self.include_lowest = include_lowest + self.duplicates = duplicates + self.dtypes = dtypes + + self._validate_bins_columns() + + _is_fittable = False + + +@PublicAPI(stability="alpha") +class UniformKBinsDiscretizer(_AbstractKBinsDiscretizer): + """Bin values into discrete intervals (bins) of uniform width. + + Columns must contain numerical values. + + Examples: + Use :class:`UniformKBinsDiscretizer` to bin continuous features. + + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import UniformKBinsDiscretizer + >>> df = pd.DataFrame({ + ... "value_1": [0.2, 1.4, 2.5, 6.2, 9.7, 2.1], + ... "value_2": [10, 15, 13, 12, 23, 25], + ... }) + >>> ds = ray.data.from_pandas(df) + >>> discretizer = UniformKBinsDiscretizer( + ... columns=["value_1", "value_2"], bins=4 + ... ) + >>> discretizer.fit_transform(ds).to_pandas() + value_1 value_2 + 0 0 0 + 1 0 1 + 2 0 0 + 3 2 0 + 4 3 3 + 5 0 3 + + You can also specify different number of bins per column. + + >>> discretizer = UniformKBinsDiscretizer( + ... columns=["value_1", "value_2"], bins={"value_1": 4, "value_2": 3} + ... ) + >>> discretizer.fit_transform(ds).to_pandas() + value_1 value_2 + 0 0 0 + 1 0 0 + 2 0 0 + 3 2 0 + 4 3 2 + 5 0 2 + + + Args: + columns: The columns to discretize. + bins: Defines the number of equal-width bins. + Can be either an integer (which will be applied to all columns), + or a dict that maps columns to integers. + The range is extended by .1% on each side to include + the minimum and maximum values. 
+ right: Indicates whether bins includes the rightmost edge or not. + include_lowest: Whether the first interval should be left-inclusive + or not. + duplicates: Can be either 'raise' or 'drop'. If bin edges are not unique, + raise ``ValueError`` or drop non-uniques. + dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype`` + objects or ``np.integer`` types. If you don't include a column in ``dtypes`` + or specify it as an integer dtype, the outputted column will consist of + ordered integers corresponding to bins. If you use a + ``pd.CategoricalDtype``, the outputted column will be a + ``pd.CategoricalDtype`` with the categories being mapped to bins. + You can use ``pd.CategoricalDtype(categories, ordered=True)`` to + preserve information about bin order. + + .. seealso:: + + :class:`CustomKBinsDiscretizer` + If you want to specify your own bin edges. + """ + + def __init__( + self, + columns: List[str], + bins: Union[int, Dict[str, int]], + *, + right: bool = True, + include_lowest: bool = False, + duplicates: str = "raise", + dtypes: Optional[ + Dict[str, Union[pd.CategoricalDtype, Type[np.integer]]] + ] = None, + ): + self.columns = columns + self.bins = bins + self.right = right + self.include_lowest = include_lowest + self.duplicates = duplicates + self.dtypes = dtypes + + def _fit(self, dataset: Dataset) -> Preprocessor: + self._validate_on_fit() + stats = {} + aggregates = [] + if isinstance(self.bins, dict): + columns = self.bins.keys() + else: + columns = self.columns + + for column in columns: + aggregates.extend( + self._fit_uniform_covert_bin_to_aggregate_if_needed(column) + ) + + aggregate_stats = dataset.aggregate(*aggregates) + mins = {} + maxes = {} + for key, value in aggregate_stats.items(): + column_name = key[4:-1] # min(column) -> column + if key.startswith("min"): + mins[column_name] = value + if key.startswith("max"): + maxes[column_name] = value + + for column in mins.keys(): + bins = self.bins[column] if 
isinstance(self.bins, dict) else self.bins + stats[column] = _translate_min_max_number_of_bins_to_bin_edges( + mins[column], maxes[column], bins, self.right + ) + + self.stats_ = stats + return self + + def _validate_on_fit(self): + self._validate_bins_columns() + + def _fit_uniform_covert_bin_to_aggregate_if_needed(self, column: str): + bins = self.bins[column] if isinstance(self.bins, dict) else self.bins + if isinstance(bins, int): + return (Min(column), Max(column)) + else: + raise TypeError( + f"`bins` must be an integer or a dict of integers, got {bins}" + ) + + +# Copied from +# https://github.com/pandas-dev/pandas/blob/v1.4.4/pandas/core/reshape/tile.py#L257 +# under +# BSD 3-Clause License +# +# Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. +# and PyData Development Team +# All rights reserved. +# +# Copyright (c) 2011-2022, Open source contributors. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +def _translate_min_max_number_of_bins_to_bin_edges( + mn: float, mx: float, bins: int, right: bool +) -> List[float]: + """Translates a range and desired number of bins into list of bin edges.""" + rng = (mn, mx) + mn, mx = (mi + 0.0 for mi in rng) + + if np.isinf(mn) or np.isinf(mx): + raise ValueError( + "Cannot specify integer `bins` when input data contains infinity." + ) + elif mn == mx: # adjust end points before binning + mn -= 0.001 * abs(mn) if mn != 0 else 0.001 + mx += 0.001 * abs(mx) if mx != 0 else 0.001 + bins = np.linspace(mn, mx, bins + 1, endpoint=True) + else: # adjust end points after binning + bins = np.linspace(mn, mx, bins + 1, endpoint=True) + adj = (mx - mn) * 0.001 # 0.1% of the range + if right: + bins[0] -= adj + else: + bins[-1] += adj + return bins + + +# TODO(ml-team) +# Add QuantileKBinsDiscretizer diff --git a/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/encoder.py b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..8bd6af80f6b19ab7ac0fa23bb436a2410ec6d946 --- /dev/null +++ b/infer_4_37_2/lib/python3.10/site-packages/ray/data/preprocessors/encoder.py @@ -0,0 +1,661 @@ +from collections import Counter, OrderedDict +from functools import partial +from typing import Dict, List, Optional + +import numpy as np +import pandas as pd +import pandas.api.types + +from ray.air.util.data_batch_conversion 
import BatchFormat +from ray.data import Dataset +from ray.data.preprocessor import Preprocessor, PreprocessorNotFittedException +from ray.util.annotations import PublicAPI + + +@PublicAPI(stability="alpha") +class OrdinalEncoder(Preprocessor): + """Encode values within columns as ordered integer values. + + :class:`OrdinalEncoder` encodes categorical features as integers that range from + :math:`0` to :math:`n - 1`, where :math:`n` is the number of categories. + + If you transform a value that isn't in the fitted datset, then the value is encoded + as ``float("nan")``. + + Columns must contain either hashable values or lists of hashable values. Also, you + can't have both scalars and lists in the same column. + + Examples: + Use :class:`OrdinalEncoder` to encode categorical features as integers. + + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import OrdinalEncoder + >>> df = pd.DataFrame({ + ... "sex": ["male", "female", "male", "female"], + ... "level": ["L4", "L5", "L3", "L4"], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder = OrdinalEncoder(columns=["sex", "level"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + sex level + 0 1 1 + 1 0 2 + 2 1 0 + 3 0 1 + + If you transform a value not present in the original dataset, then the value + is encoded as ``float("nan")``. + + >>> df = pd.DataFrame({"sex": ["female"], "level": ["L6"]}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder.transform(ds).to_pandas() # doctest: +SKIP + sex level + 0 0 NaN + + :class:`OrdinalEncoder` can also encode categories in a list. + + >>> df = pd.DataFrame({ + ... "name": ["Shaolin Soccer", "Moana", "The Smartest Guys in the Room"], + ... "genre": [ + ... ["comedy", "action", "sports"], + ... ["animation", "comedy", "action"], + ... ["documentary"], + ... ], + ... 
}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder = OrdinalEncoder(columns=["genre"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + name genre + 0 Shaolin Soccer [2, 0, 4] + 1 Moana [1, 2, 0] + 2 The Smartest Guys in the Room [3] + + Args: + columns: The columns to separately encode. + encode_lists: If ``True``, encode list elements. If ``False``, encode + whole lists (i.e., replace each list with an integer). ``True`` + by default. + + .. seealso:: + + :class:`OneHotEncoder` + Another preprocessor that encodes categorical data. + """ + + def __init__(self, columns: List[str], *, encode_lists: bool = True): + # TODO: allow user to specify order of values within each column. + self.columns = columns + self.encode_lists = encode_lists + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices( + dataset, self.columns, encode_lists=self.encode_lists + ) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, *self.columns) + + def encode_list(element: list, *, name: str): + return [self.stats_[f"unique_values({name})"].get(x) for x in element] + + def column_ordinal_encoder(s: pd.Series): + if _is_series_composed_of_lists(s): + if self.encode_lists: + return s.map(partial(encode_list, name=s.name)) + + # cannot simply use map here due to pandas thinking + # tuples are to be used for indices + def list_as_category(element): + element = tuple(element) + return self.stats_[f"unique_values({s.name})"].get(element) + + return s.apply(list_as_category) + + s_values = self.stats_[f"unique_values({s.name})"] + return s.map(s_values) + + df[self.columns] = df[self.columns].apply(column_ordinal_encoder) + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"encode_lists={self.encode_lists!r})" + ) + + +@PublicAPI(stability="alpha") +class OneHotEncoder(Preprocessor): + """`One-hot encode `_ + categorical data. 
+ + This preprocessor transforms each specified column into a one-hot encoded vector. + Each element in the vector corresponds to a unique category in the column, with a + value of 1 if the category matches and 0 otherwise. + + If a category is infrequent (based on ``max_categories``) or not present in the + fitted dataset, it is encoded as all 0s. + + Columns must contain hashable objects or lists of hashable objects. + + .. note:: + Lists are treated as categories. If you want to encode individual list + elements, use :class:`MultiHotEncoder`. + + Example: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import OneHotEncoder + >>> + >>> df = pd.DataFrame({"color": ["red", "green", "red", "red", "blue", "green"]}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder = OneHotEncoder(columns=["color"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + color_blue color_green color_red + 0 0 0 1 + 1 0 1 0 + 2 0 0 1 + 3 0 0 1 + 4 1 0 0 + 5 0 1 0 + + If you one-hot encode a value that isn't in the fitted dataset, then the + value is encoded with zeros. + + >>> df = pd.DataFrame({"color": ["yellow"]}) + >>> batch = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder.transform(batch).to_pandas() # doctest: +SKIP + color_blue color_green color_red + 0 0 0 0 + + Likewise, if you one-hot encode an infrequent value, then the value is encoded + with zeros. + + >>> encoder = OneHotEncoder(columns=["color"], max_categories={"color": 2}) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + color_red color_green + 0 1 0 + 1 0 1 + 2 1 0 + 3 1 0 + 4 0 0 + 5 0 1 + + Args: + columns: The columns to separately encode. + max_categories: The maximum number of features to create for each column. + If a value isn't specified for a column, then a feature is created + for every category in that column. + + .. 
seealso:: + + :class:`MultiHotEncoder` + If you want to encode individual list elements, use + :class:`MultiHotEncoder`. + + :class:`OrdinalEncoder` + If your categories are ordered, you may want to use + :class:`OrdinalEncoder`. + """ # noqa: E501 + + def __init__( + self, columns: List[str], *, max_categories: Optional[Dict[str, int]] = None + ): + # TODO: add `drop` parameter. + self.columns = columns + self.max_categories = max_categories + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices( + dataset, + self.columns, + max_categories=self.max_categories, + encode_lists=False, + ) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, *self.columns) + + # Compute new one-hot encoded columns + for column in self.columns: + column_values = self.stats_[f"unique_values({column})"] + if _is_series_composed_of_lists(df[column]): + df[column] = df[column].map(lambda x: tuple(x)) + for column_value in column_values: + df[f"{column}_{column_value}"] = (df[column] == column_value).astype( + int + ) + # Concatenate the value columns + value_columns = [ + f"{column}_{column_value}" for column_value in column_values + ] + concatenated = df[value_columns].to_numpy() + df = df.drop(columns=value_columns) + # Use a Pandas Series for column assignment to get more consistent + # behavior across Pandas versions. + df.loc[:, column] = pd.Series(list(concatenated)) + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"max_categories={self.max_categories!r})" + ) + + +@PublicAPI(stability="alpha") +class MultiHotEncoder(Preprocessor): + """Multi-hot encode categorical data. + + This preprocessor replaces each list of categories with an :math:`m`-length binary + list, where :math:`m` is the number of unique categories in the column or the value + specified in ``max_categories``. 
The :math:`i\\text{-th}` element of the binary list + is :math:`1` if category :math:`i` is in the input list and :math:`0` otherwise. + + Columns must contain hashable objects or lists of hashable objects. + Also, you can't have both types in the same column. + + .. note:: + The logic is similar to scikit-learn's `MultiLabelBinarizer \ + `_. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import MultiHotEncoder + >>> + >>> df = pd.DataFrame({ + ... "name": ["Shaolin Soccer", "Moana", "The Smartest Guys in the Room"], + ... "genre": [ + ... ["comedy", "action", "sports"], + ... ["animation", "comedy", "action"], + ... ["documentary"], + ... ], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> + >>> encoder = MultiHotEncoder(columns=["genre"]) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + name genre + 0 Shaolin Soccer [1, 0, 1, 0, 1] + 1 Moana [1, 1, 1, 0, 0] + 2 The Smartest Guys in the Room [0, 0, 0, 1, 0] + + If you specify ``max_categories``, then :class:`MultiHotEncoder` + creates features for only the most frequent categories. + + >>> encoder = MultiHotEncoder(columns=["genre"], max_categories={"genre": 3}) + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + name genre + 0 Shaolin Soccer [1, 1, 1] + 1 Moana [1, 1, 0] + 2 The Smartest Guys in the Room [0, 0, 0] + >>> encoder.stats_ # doctest: +SKIP + OrderedDict([('unique_values(genre)', {'comedy': 0, 'action': 1, 'sports': 2})]) + + Args: + columns: The columns to separately encode. + max_categories: The maximum number of features to create for each column. + If a value isn't specified for a column, then a feature is created + for every unique category in that column. + + .. seealso:: + + :class:`OneHotEncoder` + If you're encoding individual categories instead of lists of + categories, use :class:`OneHotEncoder`. + + :class:`OrdinalEncoder` + If your categories are ordered, you may want to use + :class:`OrdinalEncoder`. 
+ """ + + def __init__( + self, columns: List[str], *, max_categories: Optional[Dict[str, int]] = None + ): + # TODO: add `drop` parameter. + self.columns = columns + self.max_categories = max_categories + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices( + dataset, + self.columns, + max_categories=self.max_categories, + encode_lists=True, + ) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, *self.columns) + + def encode_list(element: list, *, name: str): + if isinstance(element, np.ndarray): + element = element.tolist() + elif not isinstance(element, list): + element = [element] + stats = self.stats_[f"unique_values({name})"] + counter = Counter(element) + return [counter.get(x, 0) for x in stats] + + for column in self.columns: + df[column] = df[column].map(partial(encode_list, name=column)) + + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"max_categories={self.max_categories!r})" + ) + + +@PublicAPI(stability="alpha") +class LabelEncoder(Preprocessor): + """Encode labels as integer targets. + + :class:`LabelEncoder` encodes labels as integer targets that range from + :math:`0` to :math:`n - 1`, where :math:`n` is the number of unique labels. + + If you transform a label that isn't in the fitted datset, then the label is encoded + as ``float("nan")``. + + Examples: + >>> import pandas as pd + >>> import ray + >>> df = pd.DataFrame({ + ... "sepal_width": [5.1, 7, 4.9, 6.2], + ... "sepal_height": [3.5, 3.2, 3, 3.4], + ... "species": ["setosa", "versicolor", "setosa", "virginica"] + ... 
}) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> + >>> from ray.data.preprocessors import LabelEncoder + >>> encoder = LabelEncoder(label_column="species") + >>> encoder.fit_transform(ds).to_pandas() # doctest: +SKIP + sepal_width sepal_height species + 0 5.1 3.5 0 + 1 7.0 3.2 1 + 2 4.9 3.0 0 + 3 6.2 3.4 2 + + If you transform a label not present in the original dataset, then the new + label is encoded as ``float("nan")``. + + >>> df = pd.DataFrame({ + ... "sepal_width": [4.2], + ... "sepal_height": [2.7], + ... "species": ["bracteata"] + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> encoder.transform(ds).to_pandas() # doctest: +SKIP + sepal_width sepal_height species + 0 4.2 2.7 NaN + + Args: + label_column: A column containing labels that you want to encode. + + .. seealso:: + + :class:`OrdinalEncoder` + If you're encoding ordered features, use :class:`OrdinalEncoder` instead of + :class:`LabelEncoder`. + """ + + def __init__(self, label_column: str): + self.label_column = label_column + + def _fit(self, dataset: Dataset) -> Preprocessor: + self.stats_ = _get_unique_value_indices(dataset, [self.label_column]) + return self + + def _transform_pandas(self, df: pd.DataFrame): + _validate_df(df, self.label_column) + + def column_label_encoder(s: pd.Series): + s_values = self.stats_[f"unique_values({s.name})"] + return s.map(s_values) + + df[self.label_column] = df[self.label_column].transform(column_label_encoder) + return df + + def inverse_transform(self, ds: "Dataset") -> "Dataset": + """Inverse transform the given dataset. + + Args: + ds: Input Dataset that has been fitted and/or transformed. + + Returns: + ray.data.Dataset: The inverse transformed Dataset. + + Raises: + PreprocessorNotFittedException: if ``fit`` is not called yet. 
+ """ + + fit_status = self.fit_status() + + if fit_status in ( + Preprocessor.FitStatus.PARTIALLY_FITTED, + Preprocessor.FitStatus.NOT_FITTED, + ): + raise PreprocessorNotFittedException( + "`fit` must be called before `inverse_transform`, " + ) + + kwargs = self._get_transform_config() + + return ds.map_batches( + self._inverse_transform_pandas, batch_format=BatchFormat.PANDAS, **kwargs + ) + + def _inverse_transform_pandas(self, df: pd.DataFrame): + def column_label_decoder(s: pd.Series): + inverse_values = { + value: key + for key, value in self.stats_[ + f"unique_values({self.label_column})" + ].items() + } + return s.map(inverse_values) + + df[self.label_column] = df[self.label_column].transform(column_label_decoder) + return df + + def __repr__(self): + return f"{self.__class__.__name__}(label_column={self.label_column!r})" + + +@PublicAPI(stability="alpha") +class Categorizer(Preprocessor): + """Convert columns to ``pd.CategoricalDtype``. + + Use this preprocessor with frameworks that have built-in support for + ``pd.CategoricalDtype`` like LightGBM. + + .. warning:: + + If you don't specify ``dtypes``, fit this preprocessor before splitting + your dataset into train and test splits. This ensures categories are + consistent across splits. + + Examples: + >>> import pandas as pd + >>> import ray + >>> from ray.data.preprocessors import Categorizer + >>> + >>> df = pd.DataFrame( + ... { + ... "sex": ["male", "female", "male", "female"], + ... "level": ["L4", "L5", "L3", "L4"], + ... }) + >>> ds = ray.data.from_pandas(df) # doctest: +SKIP + >>> categorizer = Categorizer(columns=["sex", "level"]) + >>> categorizer.fit_transform(ds).schema().types # doctest: +SKIP + [CategoricalDtype(categories=['female', 'male'], ordered=False), CategoricalDtype(categories=['L3', 'L4', 'L5'], ordered=False)] + + If you know the categories in advance, you can specify the categories with the + ``dtypes`` parameter. + + >>> categorizer = Categorizer( + ... 
columns=["sex", "level"], + ... dtypes={"level": pd.CategoricalDtype(["L3", "L4", "L5", "L6"], ordered=True)}, + ... ) + >>> categorizer.fit_transform(ds).schema().types # doctest: +SKIP + [CategoricalDtype(categories=['female', 'male'], ordered=False), CategoricalDtype(categories=['L3', 'L4', 'L5', 'L6'], ordered=True)] + + Args: + columns: The columns to convert to ``pd.CategoricalDtype``. + dtypes: An optional dictionary that maps columns to ``pd.CategoricalDtype`` + objects. If you don't include a column in ``dtypes``, the categories + are inferred. + """ # noqa: E501 + + def __init__( + self, + columns: List[str], + dtypes: Optional[Dict[str, pd.CategoricalDtype]] = None, + ): + if not dtypes: + dtypes = {} + + self.columns = columns + self.dtypes = dtypes + + def _fit(self, dataset: Dataset) -> Preprocessor: + columns_to_get = [ + column for column in self.columns if column not in set(self.dtypes) + ] + if columns_to_get: + unique_indices = _get_unique_value_indices( + dataset, columns_to_get, drop_na_values=True, key_format="{0}" + ) + unique_indices = { + column: pd.CategoricalDtype(values_indices.keys()) + for column, values_indices in unique_indices.items() + } + else: + unique_indices = {} + unique_indices = {**self.dtypes, **unique_indices} + self.stats_: Dict[str, pd.CategoricalDtype] = unique_indices + return self + + def _transform_pandas(self, df: pd.DataFrame): + df = df.astype(self.stats_) + return df + + def __repr__(self): + return ( + f"{self.__class__.__name__}(columns={self.columns!r}, " + f"dtypes={self.dtypes!r})" + ) + + +def _get_unique_value_indices( + dataset: Dataset, + columns: List[str], + drop_na_values: bool = False, + key_format: str = "unique_values({0})", + max_categories: Optional[Dict[str, int]] = None, + encode_lists: bool = True, +) -> Dict[str, Dict[str, int]]: + """If drop_na_values is True, will silently drop NA values.""" + + if max_categories is None: + max_categories = {} + columns_set = set(columns) + for column in 
def _get_unique_value_indices(
    dataset: "Dataset",
    columns: List[str],
    drop_na_values: bool = False,
    key_format: str = "unique_values({0})",
    max_categories: Optional[Dict[str, int]] = None,
    encode_lists: bool = True,
) -> Dict[str, Dict[str, int]]:
    """If drop_na_values is True, will silently drop NA values."""
    if max_categories is None:
        max_categories = {}

    # Every key of ``max_categories`` must name a requested column.
    requested = set(columns)
    for column in max_categories:
        if column not in requested:
            raise ValueError(
                f"You set `max_categories` for {column}, which is not present in "
                f"{columns}."
            )

    def count_column_values(col: pd.Series) -> Counter:
        # Lists need special treatment: they are unhashable as-is.
        if _is_series_composed_of_lists(col):
            if encode_lists:
                # Count individual tokens inside each list.
                token_counter = Counter()

                def tally(element):
                    token_counter.update(element)
                    return element

                col.map(tally)
                return token_counter
            # Encode whole lists: convert to tuples so they hash.
            col = col.map(lambda x: tuple(x))
        return Counter(col.value_counts(dropna=False).to_dict())

    def get_pd_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]:
        present = df.columns.tolist()
        batch_counts = {}
        for col in columns:
            if col not in present:
                raise ValueError(
                    f"Column '{col}' does not exist in DataFrame, which has columns: {present}"  # noqa: E501
                )
            batch_counts[col] = [count_column_values(df[col])]
        return batch_counts

    value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas")
    totals = {col: Counter() for col in columns}
    for batch in value_counts.iter_batches(batch_size=None):
        for col, counters in batch.items():
            for counter in counters:
                totals[col] += counter

    # Handle NA values: silently drop them, or fail fast.
    for col in columns:
        if drop_na_values:
            totals[col] = Counter(
                {k: v for k, v in totals[col].items() if not pd.isnull(k)}
            )
        elif any(pd.isnull(k) for k in totals[col]):
            raise ValueError(
                f"Unable to fit column '{col}' because it contains null"
                f" values. Consider imputing missing values first."
            )

    unique_values_with_indices = OrderedDict()
    for column in columns:
        key = key_format.format(column)
        if column in max_categories:
            # Keep only the most frequent categories, output sorted by freq.
            most_common = totals[column].most_common(max_categories[column])
            unique_values_with_indices[key] = {
                value: index for index, (value, _) in enumerate(most_common)
            }
        else:
            # Output sorted by value for deterministic ordering.
            unique_values_with_indices[key] = {
                value: index
                for index, value in enumerate(sorted(totals[column].keys()))
            }
    return unique_values_with_indices


def _validate_df(df: pd.DataFrame, *columns: str) -> None:
    """Raise ``ValueError`` if any of ``columns`` contains null values."""
    null_columns = [c for c in columns if df[c].isnull().values.any()]
    if null_columns:
        raise ValueError(
            f"Unable to transform columns {null_columns} because they contain "
            f"null values. Consider imputing missing values first."
        )


def _is_series_composed_of_lists(series: pd.Series) -> bool:
    """Heuristically detect a series whose elements are lists/arrays."""
    # We assume all elements share the first non-None element's type.
    probe = next((element for element in series if element is not None), None)
    return pandas.api.types.is_object_dtype(series.dtype) and isinstance(
        probe, (list, np.ndarray)
    )
@PublicAPI(stability="alpha")
class FeatureHasher(Preprocessor):
    """Apply the hashing trick to a table that describes token frequencies.

    Creates ``num_features`` columns named ``hash_{index}``, where ``index``
    ranges from 0 to ``num_features`` - 1. Column ``hash_{index}`` holds the
    frequency of tokens that hash to ``index``. Distinct tokens can collide on
    the same index; a large enough ``num_features`` makes collisions unlikely.

    This preprocessor is memory efficient and quick to pickle, but you can't
    recover which tokens map to a transformed column.

    .. warning::
        Sparse matrices aren't supported. If you use a large ``num_features``,
        this preprocessor might behave poorly.

    Args:
        columns: The columns to apply the hashing trick to. Each column should
            describe the frequency of a token.
        num_features: The number of features used to represent the vocabulary.
            Choose a value large enough to prevent hash collisions between
            distinct tokens.
    """

    _is_fittable = False

    def __init__(self, columns: List[str], num_features: int):
        self.columns = columns
        # TODO(matt): Set default number of features.
        # This likely requires sparse matrix support to avoid explosion of columns.
        self.num_features = num_features

    def _transform_pandas(self, df: pd.DataFrame):
        # TODO(matt): Use sparse matrix for efficiency.
        def hash_row(row):
            counts = collections.defaultdict(int)
            for column in self.columns:
                counts[simple_hash(column, self.num_features)] += row[column]
            return {f"hash_{i}": counts[i] for i in range(self.num_features)}

        hashed = df.loc[:, self.columns].apply(
            hash_row, axis=1, result_type="expand"
        )
        df = df.join(hashed)
        # Drop original unhashed columns.
        df.drop(columns=self.columns, inplace=True)
        return df

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"num_features={self.num_features!r})"
        )
@PublicAPI(stability="alpha")
class SimpleImputer(Preprocessor):
    """Replace missing values with imputed values.

    If a column is missing from a batch, it is created and filled with the
    imputed value. Supported strategies:

    * ``"mean"``: the mean of non-missing values (numeric columns only).
    * ``"most_frequent"``: the most common value in each column.
    * ``"constant"``: the value passed to ``fill_value``.

    Args:
        columns: The columns to apply imputation to.
        strategy: How imputed values are chosen (see above). Defaults to
            ``"mean"``.
        fill_value: The value to use when ``strategy`` is ``"constant"``.

    Raises:
        ValueError: if ``strategy`` is not supported, or if ``strategy`` is
            ``"constant"`` and ``fill_value`` is not given.
    """

    _valid_strategies = ["mean", "most_frequent", "constant"]

    def __init__(
        self,
        columns: List[str],
        strategy: str = "mean",
        fill_value: Optional[Union[str, Number]] = None,
    ):
        self.columns = columns
        self.strategy = strategy
        self.fill_value = fill_value

        if strategy not in self._valid_strategies:
            raise ValueError(
                f"Strategy {strategy} is not supported."
                f"Supported values are: {self._valid_strategies}"
            )

        if strategy == "constant":
            # There is no information to be fitted.
            self._is_fittable = False
            if fill_value is None:
                raise ValueError(
                    '`fill_value` must be set when using "constant" strategy.'
                )

    def _fit(self, dataset: "Dataset") -> "Preprocessor":
        if self.strategy == "mean":
            self.stats_ = dataset.aggregate(*(Mean(c) for c in self.columns))
        elif self.strategy == "most_frequent":
            self.stats_ = _get_most_frequent_values(dataset, *self.columns)

        return self

    def _transform_pandas(self, df: pd.DataFrame):
        if self.strategy == "mean":
            new_values = {c: self.stats_[f"mean({c})"] for c in self.columns}
        elif self.strategy == "most_frequent":
            new_values = {
                c: self.stats_[f"most_frequent({c})"] for c in self.columns
            }
        elif self.strategy == "constant":
            new_values = {c: self.fill_value for c in self.columns}
            # Categorical columns must register the fill value before fillna.
            for c, value in new_values.items():
                if is_categorical_dtype(df.dtypes[c]):
                    df[c] = df[c].cat.add_categories(value)

        for column_name, value in new_values.items():
            if column_name not in df.columns:
                # Create the column with the fill value if it doesn't exist.
                df[column_name] = value
            else:
                # Fill NaN (empty) values in the existing column.
                df[column_name].fillna(value, inplace=True)

        return df

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"strategy={self.strategy!r}, fill_value={self.fill_value!r})"
        )


def _get_most_frequent_values(
    dataset: "Dataset", *columns: str
) -> Dict[str, Union[str, Number]]:
    """Return each column's modal value, keyed as ``most_frequent({col})``."""
    columns = list(columns)

    def batch_value_counts(df: pd.DataFrame) -> List[Dict[str, Counter]]:
        return {c: [Counter(df[c].value_counts().to_dict())] for c in columns}

    counts = dataset.map_batches(batch_value_counts, batch_format="pandas")
    totals = {c: Counter() for c in columns}
    for batch in counts.iter_batches(batch_size=None):
        for c, counters in batch.items():
            for counter in counters:
                totals[c] += counter

    return {
        f"most_frequent({c})": totals[c].most_common(1)[0][0] for c in columns
    }
@PublicAPI(stability="alpha")
class Normalizer(Preprocessor):
    r"""Scale each sample (row) to have unit norm.

    Each row is divided by its norm computed over ``columns``:

    .. math::

        s' = \frac{s}{\lVert s \rVert_p}

    Supported norms:

    * ``"l1"``: sum of absolute values.
    * ``"l2"``: square root of the sum of squared values (default).
    * ``"max"``: maximum absolute value.

    Args:
        columns: The columns to scale. For each row, these columns are scaled
            to unit norm.
        norm: One of ``"l1"``, ``"l2"``, or ``"max"``. Defaults to ``"l2"``.

    Raises:
        ValueError: if ``norm`` is not ``"l1"``, ``"l2"``, or ``"max"``.
    """

    _norm_fns = {
        "l1": lambda cols: np.abs(cols).sum(axis=1),
        "l2": lambda cols: np.sqrt(np.power(cols, 2).sum(axis=1)),
        "max": lambda cols: np.max(abs(cols), axis=1),
    }

    _is_fittable = False

    def __init__(self, columns: List[str], norm="l2"):
        self.columns = columns
        self.norm = norm

        if norm not in self._norm_fns:
            raise ValueError(
                f"Norm {norm} is not supported."
                f"Supported values are: {self._norm_fns.keys()}"
            )

    def _transform_pandas(self, df: pd.DataFrame):
        subset = df.loc[:, self.columns]
        # One norm per row; divide each selected column by it.
        norms = self._norm_fns[self.norm](subset)
        df.loc[:, self.columns] = subset.div(norms, axis=0)
        return df

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, norm={self.norm!r})"
        )
@PublicAPI(stability="alpha")
class StandardScaler(Preprocessor):
    r"""Translate and scale each column by its mean and standard deviation.

    .. math::

        x' = \frac{x - \bar{x}}{s}

    where :math:`\bar{x}` is the column mean and :math:`s` is the column's
    sample standard deviation (computed with ``ddof=0``). Constant-valued
    columns (:math:`s = 0`) are transformed to all zeros.

    .. warning::
        :class:`StandardScaler` works best when your data is normal. If your
        data isn't approximately normal, then the transformed features won't
        be meaningful.

    Args:
        columns: The columns to separately scale.
    """

    def __init__(self, columns: List[str]):
        self.columns = columns

    def _fit(self, dataset: "Dataset") -> "Preprocessor":
        # Single aggregation pass for all means followed by all stds.
        aggregates = [Mean(c) for c in self.columns] + [
            Std(c, ddof=0) for c in self.columns
        ]
        self.stats_ = dataset.aggregate(*aggregates)
        return self

    def _transform_pandas(self, df: pd.DataFrame):
        def scale(s: pd.Series):
            mean = self.stats_[f"mean({s.name})"]
            std = self.stats_[f"std({s.name})"]
            # Constant columns: avoid division by zero, output zeros.
            # TODO: extend this to handle near-zero values.
            return (s - mean) / (std if std != 0 else 1)

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(scale)
        return df

    def __repr__(self):
        return f"{self.__class__.__name__}(columns={self.columns!r})"
@PublicAPI(stability="alpha")
class MinMaxScaler(Preprocessor):
    r"""Scale each column by its range.

    .. math::

        x' = \frac{x - \min(x)}{\max(x) - \min(x)}

    Transformed values always lie in :math:`[0, 1]`. Constant-valued columns
    (:math:`\max(x) - \min(x) = 0`) are filled with zeros.

    .. tip::
        This can be used as an alternative to :py:class:`StandardScaler`.

    Args:
        columns: The columns to separately scale.
    """

    def __init__(self, columns: List[str]):
        self.columns = columns

    def _fit(self, dataset: "Dataset") -> "Preprocessor":
        # All mins first, then all maxes, in one aggregation pass.
        aggregates = [Agg(c) for Agg in (Min, Max) for c in self.columns]
        self.stats_ = dataset.aggregate(*aggregates)
        return self

    def _transform_pandas(self, df: pd.DataFrame):
        def scale(s: pd.Series):
            lo = self.stats_[f"min({s.name})"]
            hi = self.stats_[f"max({s.name})"]
            span = hi - lo
            # Constant columns: avoid division by zero, output zeros.
            # TODO: extend this to handle near-zero values.
            return (s - lo) / (span if span != 0 else 1)

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(scale)
        return df

    def __repr__(self):
        return f"{self.__class__.__name__}(columns={self.columns!r})"


@PublicAPI(stability="alpha")
class MaxAbsScaler(Preprocessor):
    r"""Scale each column by its absolute max value.

    .. math::

        x' = \frac{x}{\max{\vert x \vert}}

    Columns containing only zeros (:math:`\max{\vert x \vert} = 0`) are left
    unmodified.

    .. tip::
        This is the recommended way to scale sparse data. If your data isn't
        sparse, you can use :class:`MinMaxScaler` or :class:`StandardScaler`
        instead.

    Args:
        columns: The columns to separately scale.
    """

    def __init__(self, columns: List[str]):
        self.columns = columns

    def _fit(self, dataset: "Dataset") -> "Preprocessor":
        self.stats_ = dataset.aggregate(*(AbsMax(c) for c in self.columns))
        return self

    def _transform_pandas(self, df: pd.DataFrame):
        def scale(s: pd.Series):
            abs_max = self.stats_[f"abs_max({s.name})"]
            # All values are 0: dividing by 1 leaves the column unchanged.
            return s / (abs_max if abs_max != 0 else 1)

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(scale)
        return df

    def __repr__(self):
        return f"{self.__class__.__name__}(columns={self.columns!r})"
@PublicAPI(stability="alpha")
class RobustScaler(Preprocessor):
    r"""Scale and translate each column using quantiles.

    .. math::
        x' = \frac{x - \mu_{1/2}}{\mu_h - \mu_l}

    where :math:`\mu_{1/2}` is the column median, and :math:`\mu_h` and
    :math:`\mu_l` are the high and low quantiles (by default the third and
    first quartiles).

    .. tip::
        This scaler works well when your data contains many outliers.

    Args:
        columns: The columns to separately scale.
        quantile_range: A tuple that defines the lower and upper quantiles.
            Values must be between 0 and 1. Defaults to the 1st and 3rd
            quartiles: ``(0.25, 0.75)``.
    """

    def __init__(
        self, columns: List[str], quantile_range: Tuple[float, float] = (0.25, 0.75)
    ):
        self.columns = columns
        self.quantile_range = quantile_range

    def _fit(self, dataset: "Dataset") -> "Preprocessor":
        low, high = self.quantile_range
        med = 0.50

        max_index = dataset.count() - 1
        # TODO(matt): Handle case where quantile lands between 2 numbers.
        # The current implementation will simply choose the closest index.
        # This will affect the results of small datasets more than large datasets.
        split_indices = [int(q * max_index) for q in (low, med, high)]

        self.stats_ = {}

        for col in self.columns:
            # Bind `col` eagerly so the lambda is safe even if evaluated late.
            single_column = dataset.map_batches(
                lambda df, col=col: df[[col]], batch_format="pandas"
            )
            ordered = single_column.sort(col)
            _, low_ds, med_ds, high_ds = ordered.split_at_indices(split_indices)

            def first_value(ds: "Dataset", c: str):
                return ds.take(1)[0][c]

            self.stats_[f"low_quantile({col})"] = first_value(low_ds, col)
            self.stats_[f"median({col})"] = first_value(med_ds, col)
            self.stats_[f"high_quantile({col})"] = first_value(high_ds, col)

        return self

    def _transform_pandas(self, df: pd.DataFrame):
        def scale(s: pd.Series):
            low_q = self.stats_[f"low_quantile({s.name})"]
            median = self.stats_[f"median({s.name})"]
            high_q = self.stats_[f"high_quantile({s.name})"]
            span = high_q - low_q

            # Degenerate quantile range: return all zeros.
            if span == 0:
                return np.zeros_like(s)

            return (s - median) / span

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(scale)
        return df

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"quantile_range={self.quantile_range!r})"
        )
@PublicAPI(stability="alpha")
class Tokenizer(Preprocessor):
    """Replace each string with a list of tokens.

    Args:
        columns: The columns to tokenize.
        tokenization_fn: The function used to generate tokens. This function
            should accept a string as input and return a list of tokens as
            output. If unspecified, the tokenizer uses a function equivalent
            to ``lambda s: s.split(" ")``.
    """

    _is_fittable = False

    def __init__(
        self,
        columns: List[str],
        tokenization_fn: Optional[Callable[[str], List[str]]] = None,
    ):
        self.columns = columns
        # TODO(matt): Add a more robust default tokenizer.
        self.tokenization_fn = tokenization_fn or simple_split_tokenizer

    def _transform_pandas(self, df: pd.DataFrame):
        tokenize = self.tokenization_fn
        df.loc[:, self.columns] = df.loc[:, self.columns].transform(
            lambda s: s.map(tokenize)
        )
        return df

    def __repr__(self):
        name = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"tokenization_fn={name})"
        )
TorchVisionPreprocessor(Preprocessor): + """Apply a `TorchVision transform `_ + to image columns. + + Examples: + + Torch models expect inputs of shape :math:`(B, C, H, W)` in the range + :math:`[0.0, 1.0]`. To convert images to this format, add ``ToTensor`` to your + preprocessing pipeline. + + .. testcode:: + + from torchvision import transforms + + import ray + from ray.data.preprocessors import TorchVisionPreprocessor + + transform = transforms.Compose([ + transforms.ToTensor(), + transforms.Resize((224, 224)), + ]) + preprocessor = TorchVisionPreprocessor(["image"], transform=transform) + + dataset = ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") + dataset = preprocessor.transform(dataset) + + + For better performance, set ``batched`` to ``True`` and replace ``ToTensor`` + with a batch-supporting ``Lambda``. + + .. testcode:: + + import numpy as np + import torch + + def to_tensor(batch: np.ndarray) -> torch.Tensor: + tensor = torch.as_tensor(batch, dtype=torch.float) + # (B, H, W, C) -> (B, C, H, W) + tensor = tensor.permute(0, 3, 1, 2).contiguous() + # [0., 255.] -> [0., 1.] + tensor = tensor.div(255) + return tensor + + transform = transforms.Compose([ + transforms.Lambda(to_tensor), + transforms.Resize((224, 224)) + ]) + preprocessor = TorchVisionPreprocessor(["image"], transform=transform, batched=True) + + dataset = ray.data.read_images("s3://anonymous@air-example-data-2/imagenet-sample-images") + dataset = preprocessor.transform(dataset) + + Args: + columns: The columns to apply the TorchVision transform to. + transform: The TorchVision transform you want to apply. This transform should + accept a ``np.ndarray`` or ``torch.Tensor`` as input and return a + ``torch.Tensor`` as output. + output_columns: The output name for each input column. If not specified, this + defaults to the same set of columns as the columns. + batched: If ``True``, apply ``transform`` to batches of shape + :math:`(B, H, W, C)`. 
Otherwise, apply ``transform`` to individual images. + """ # noqa: E501 + + _is_fittable = False + + def __init__( + self, + columns: List[str], + transform: Callable[[Union["np.ndarray", "torch.Tensor"]], "torch.Tensor"], + output_columns: Optional[List[str]] = None, + batched: bool = False, + ): + if not output_columns: + output_columns = columns + if len(columns) != len(output_columns): + raise ValueError( + "The length of columns should match the " + f"length of output_columns: {columns} vs {output_columns}." + ) + self._columns = columns + self._output_columns = output_columns + self._torchvision_transform = transform + self._batched = batched + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(" + f"columns={self._columns}, " + f"output_columns={self._output_columns}, " + f"transform={self._torchvision_transform!r})" + ) + + def _transform_numpy( + self, data_batch: Dict[str, "np.ndarray"] + ) -> Dict[str, "np.ndarray"]: + import torch + + from ray.air._internal.torch_utils import convert_ndarray_to_torch_tensor + + def apply_torchvision_transform(array: np.ndarray) -> np.ndarray: + try: + tensor = convert_ndarray_to_torch_tensor(array) + output = self._torchvision_transform(tensor) + except TypeError: + # Transforms like `ToTensor` expect a `np.ndarray` as input. + output = self._torchvision_transform(array) + if isinstance(output, torch.Tensor): + output = output.numpy() + if not isinstance(output, np.ndarray): + raise ValueError( + "`TorchVisionPreprocessor` expected your transform to return a " + "`torch.Tensor` or `np.ndarray`, but your transform returned a " + f"`{type(output).__name__}` instead." 
@PublicAPI(stability="alpha")
class PowerTransformer(Preprocessor):
    """Apply a power transform to make your data more normally distributed.

    Some models expect data to be normally distributed. By making your data
    more Gaussian-like, you might be able to improve your model's performance.

    This preprocessor supports the following transformations:

    * `Yeo-Johnson <https://en.wikipedia.org/wiki/Power_transform#Yeo%E2%80%93Johnson_transformation>`_
    * `Box-Cox <https://en.wikipedia.org/wiki/Power_transform#Box%E2%80%93Cox_transformation>`_

    Box-Cox requires all data to be positive.

    .. warning::

        You need to manually specify the transform's power parameter. If you
        choose a bad value, the transformation might not work well.

    Args:
        columns: The columns to separately transform.
        power: A parameter that determines how your data is transformed.
            Practitioners typically set ``power`` between :math:`-2.5` and
            :math:`2.5`, although you may need to try different values to
            find one that works well.
        method: A string representing which transformation to apply. Supports
            ``"yeo-johnson"`` and ``"box-cox"``. If you choose ``"box-cox"``,
            your data needs to be positive. Defaults to ``"yeo-johnson"``.

    Raises:
        ValueError: if ``method`` isn't one of the supported transformations.
    """  # noqa: E501

    _valid_methods = ["yeo-johnson", "box-cox"]
    _is_fittable = False

    def __init__(self, columns: List[str], power: float, method: str = "yeo-johnson"):
        # Validate before assigning any state so a bad ``method`` fails fast
        # and never leaves a partially-initialized preprocessor behind.
        if method not in self._valid_methods:
            raise ValueError(
                f"Method {method} is not supported. "
                f"Supported values are: {self._valid_methods}"
            )

        self.columns = columns
        self.method = method
        self.power = power

    def _transform_pandas(self, df: pd.DataFrame):
        def column_power_transformer(s: pd.Series):
            if self.method == "yeo-johnson":
                result = np.zeros_like(s, dtype=np.float64)
                pos = s >= 0  # boolean mask selecting the non-negative branch

                # Yeo-Johnson, non-negative values:
                # ((x + 1)^power - 1) / power, or log(x + 1) when power == 0.
                if self.power != 0:
                    result[pos] = (np.power(s[pos] + 1, self.power) - 1) / self.power
                else:
                    result[pos] = np.log(s[pos] + 1)

                # Yeo-Johnson, negative values:
                # -((-x + 1)^(2 - power) - 1) / (2 - power),
                # or -log(-x + 1) when power == 2.
                if self.power != 2:
                    result[~pos] = -(np.power(-s[~pos] + 1, 2 - self.power) - 1) / (
                        2 - self.power
                    )
                else:
                    result[~pos] = -np.log(-s[~pos] + 1)
                return result

            else:  # box-cox; requires all values to be positive.
                if self.power != 0:
                    return (np.power(s, self.power) - 1) / self.power
                else:
                    return np.log(s)

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(
            column_power_transformer
        )
        return df

    def __repr__(self):
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"power={self.power!r}, method={self.method!r})"
        )
@DeveloperAPI
def simple_split_tokenizer(value: str) -> List[str]:
    """Tokenize a string using a split on spaces."""
    # Intentionally naive: a single-space split, with no normalization.
    return value.split(" ")


@DeveloperAPI
def simple_hash(value: object, num_features: int) -> int:
    """Deterministically hash a value into the integer space."""
    # SHA-1 of the stringified value gives a digest that is stable across
    # processes and runs (unlike the builtin ``hash``, which is salted),
    # then reduce it into ``[0, num_features)``.
    digest = hashlib.sha1(str(value).encode()).hexdigest()
    return int(digest, 16) % num_features
@PublicAPI(stability="alpha")
class HashingVectorizer(Preprocessor):
    """Count token frequencies using the
    `hashing trick <https://en.wikipedia.org/wiki/Feature_hashing>`_.

    For every input column, this preprocessor emits ``num_features`` output
    columns named ``hash_{column_name}_{index}``. Each token is mapped to one
    of the ``num_features`` buckets with a deterministic hash, and each output
    column holds the number of tokens that landed in that bucket. When
    ``num_features`` is large enough relative to the size of your vocabulary,
    each bucket approximately corresponds to the frequency of one unique token.

    :class:`HashingVectorizer` is memory efficient and quick to pickle.
    However, the mapping from tokens to columns isn't invertible, which can
    make it hard to determine which tokens are important to your model.

    .. warning::
        Sparse matrices aren't currently supported. If you use a large
        ``num_features``, this preprocessor might behave poorly.

    Args:
        columns: The columns to separately tokenize and count.
        num_features: The number of features used to represent the vocabulary.
            You should choose a value large enough to prevent hash collisions
            between distinct tokens.
        tokenization_fn: The function used to generate tokens. This function
            should accept a string as input and return a list of tokens as
            output. If unspecified, the tokenizer uses a function equivalent
            to ``lambda s: s.split(" ")``.

    .. seealso::

        :class:`CountVectorizer`
            Another method for counting token frequencies that creates one
            feature per unique token, enabling the inverse transformation.
    """  # noqa: E501

    _is_fittable = False

    def __init__(
        self,
        columns: List[str],
        num_features: int,
        tokenization_fn: Optional[Callable[[str], List[str]]] = None,
    ):
        self.columns = columns
        # TODO(matt): Set default number of features.
        # This likely requires sparse matrix support to avoid explosion of columns.
        self.num_features = num_features
        # TODO(matt): Add a more robust default tokenizer.
        self.tokenization_fn = tokenization_fn or simple_split_tokenizer

    def _transform_pandas(self, df: pd.DataFrame):
        # TODO(matt): Use sparse matrix for efficiency.
        num_features = self.num_features

        def bucket_counts(document: str) -> Counter:
            # Tokenize, hash each token into a bucket, and count per bucket.
            tokens = self.tokenization_fn(document)
            return Counter(simple_hash(token, num_features) for token in tokens)

        for col in self.columns:
            per_row_counts = df[col].map(bucket_counts)
            for bucket in range(num_features):
                # ``Counter`` returns 0 for empty buckets.
                df[f"hash_{col}_{bucket}"] = per_row_counts.map(
                    lambda counts: counts[bucket]
                )

        # The raw text columns are fully replaced by their hashed counts.
        df.drop(columns=self.columns, inplace=True)
        return df

    def __repr__(self):
        fn_name = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"num_features={self.num_features!r}, tokenization_fn={fn_name})"
        )
@PublicAPI(stability="alpha")
class CountVectorizer(Preprocessor):
    """Count the frequency of tokens in a column of strings.

    :class:`CountVectorizer` operates on columns that contain strings. For
    example:

    .. code-block::

                        corpus
        0    I dislike Python
        1       I like Python

    This preprocessor creates a column named like ``{column}_{token}`` for each
    unique token. These columns represent the frequency of token ``{token}`` in
    column ``{column}``. For example:

    .. code-block::

           corpus_I  corpus_Python  corpus_dislike  corpus_like
        0         1              1               1            0
        1         1              1               0            1

    .. note::

        The transform returns only the generated count columns; any other
        input columns are dropped.

    Examples:
        >>> import pandas as pd
        >>> import ray
        >>> from ray.data.preprocessors import CountVectorizer
        >>>
        >>> df = pd.DataFrame({
        ...     "corpus": [
        ...         "Jimmy likes volleyball",
        ...         "Bob likes volleyball too",
        ...         "Bob also likes fruit jerky"
        ...     ]
        ... })
        >>> ds = ray.data.from_pandas(df)  # doctest: +SKIP
        >>>
        >>> vectorizer = CountVectorizer(["corpus"])
        >>> vectorizer.fit_transform(ds).to_pandas()  # doctest: +SKIP

        You can limit the number of tokens in the vocabulary with
        ``max_features``.

        >>> vectorizer = CountVectorizer(["corpus"], max_features=3)
        >>> vectorizer.fit_transform(ds).to_pandas()  # doctest: +SKIP

    Args:
        columns: The columns to separately tokenize and count.
        tokenization_fn: The function used to generate tokens. This function
            should accept a string as input and return a list of tokens as
            output. If unspecified, the tokenizer uses a function equivalent to
            ``lambda s: s.split(" ")``.
        max_features: The maximum number of tokens to encode in the transformed
            dataset. If specified, only the most frequent tokens are encoded.
    """  # noqa: E501

    def __init__(
        self,
        columns: List[str],
        tokenization_fn: Optional[Callable[[str], List[str]]] = None,
        max_features: Optional[int] = None,
    ):
        # TODO(matt): Add fit_transform to avoid recomputing tokenization step.
        self.columns = columns
        # TODO(matt): Add a more robust default tokenizer.
        self.tokenization_fn = tokenization_fn or simple_split_tokenizer
        self.max_features = max_features

    def _fit(self, dataset: Dataset) -> Preprocessor:
        """Compute per-column token frequencies over the whole dataset."""

        def get_pd_value_counts(df: pd.DataFrame) -> dict:
            # Returns {column: [Counter of tokens in this batch]}.
            def get_token_counts(col: str) -> Counter:
                token_series = df[col].apply(self.tokenization_fn)
                # Flatten with a generator instead of ``token_series.sum()``:
                # summing lists concatenates quadratically in the number of
                # rows and raises TypeError on an empty batch (the sum of no
                # lists is ``0``).
                return Counter(
                    token for tokens in token_series for token in tokens
                )

            return {col: [get_token_counts(col)] for col in self.columns}

        value_counts = dataset.map_batches(get_pd_value_counts, batch_format="pandas")
        # Merge the per-batch counters into one counter per column.
        total_counts = {col: Counter() for col in self.columns}
        for batch in value_counts.iter_batches(batch_size=None):
            for col, counters in batch.items():
                for counter in counters:
                    total_counts[col].update(counter)

        def most_common(counter: Counter, n: Optional[int]) -> Counter:
            # ``Counter.most_common(None)`` returns every entry, so an unset
            # ``max_features`` keeps the full vocabulary.
            return Counter(dict(counter.most_common(n)))

        top_counts = [
            most_common(counter, self.max_features) for counter in total_counts.values()
        ]

        self.stats_ = {
            f"token_counts({col})": counts
            for (col, counts) in zip(self.columns, top_counts)
        }

        return self

    def _transform_pandas(self, df: pd.DataFrame):
        to_concat = []
        for col in self.columns:
            token_counts = self.stats_[f"token_counts({col})"]
            # Emit output columns in descending frequency order.
            sorted_tokens = [token for (token, count) in token_counts.most_common()]
            tokenized = df[col].map(self.tokenization_fn).map(Counter)
            for token in sorted_tokens:
                series = tokenized.map(lambda val: val[token])
                series.name = f"{col}_{token}"
                to_concat.append(series)

        # NOTE: this keeps only the generated count columns; other input
        # columns are intentionally not carried through.
        df = pd.concat(to_concat, axis=1)
        return df

    def __repr__(self):
        fn_name = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
        return (
            f"{self.__class__.__name__}(columns={self.columns!r}, "
            f"tokenization_fn={fn_name}, max_features={self.max_features!r})"
        )
"""Python Character Mapping Codec for cp1251 (Windows Cyrillic).

Originally generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with
gencodec.py; the decoding table below is expressed compactly but produces the
identical 256-character map.
"""

import codecs

### Codec APIs


class Codec(codecs.Codec):
    """Stateless encoder/decoder backed by the cp1251 character maps."""

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)


class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]


class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]


class StreamWriter(Codec, codecs.StreamWriter):
    pass


class StreamReader(Codec, codecs.StreamReader):
    pass


### encodings module API


def getregentry():
    return codecs.CodecInfo(
        name='cp1251',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )


### Decoding Table

# cp1251 structure: 0x00-0x7F are the ASCII identity mappings; 0x80-0xBF mix
# Cyrillic letters with Windows-125x punctuation (0x98 is undefined, marked
# U+FFFE); 0xC0-0xFF are the basic Cyrillic alphabet U+0410..U+044F in order.
decoding_table = (
    ''.join(map(chr, range(0x80)))                          # 0x00-0x7F: ASCII
    + '\u0402\u0403\u201a\u0453\u201e\u2026\u2020\u2021'    # 0x80-0x87
    + '\u20ac\u2030\u0409\u2039\u040a\u040c\u040b\u040f'    # 0x88-0x8F
    + '\u0452\u2018\u2019\u201c\u201d\u2022\u2013\u2014'    # 0x90-0x97
    + '\ufffe\u2122\u0459\u203a\u045a\u045c\u045b\u045f'    # 0x98-0x9F
    + '\xa0\u040e\u045e\u0408\xa4\u0490\xa6\xa7'            # 0xA0-0xA7
    + '\u0401\xa9\u0404\xab\xac\xad\xae\u0407'              # 0xA8-0xAF
    + '\xb0\xb1\u0406\u0456\u0491\xb5\xb6\xb7'              # 0xB0-0xB7
    + '\u0451\u2116\u0454\xbb\u0458\u0405\u0455\u0457'      # 0xB8-0xBF
    + ''.join(map(chr, range(0x0410, 0x0450)))              # 0xC0-0xFF: А..я
)

### Encoding table
encoding_table = codecs.charmap_build(decoding_table)
SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE + 0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE + 0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x00e6, # LATIN SMALL LIGATURE AE + 0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE + 0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE + 0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x00a2, # CENT SIGN + 0x009c: 0x00a3, # POUND SIGN + 0x009d: 0x00a5, # YEN SIGN + 0x009e: 0x20a7, # PESETA SIGN + 0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE + 0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE + 0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR + 0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR + 0x00a8: 0x00bf, # INVERTED QUESTION MARK + 0x00a9: 0x2310, # REVERSED NOT 
SIGN + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF + 0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER + 0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND 
HORIZONTAL + 0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + 0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x258c, # LEFT HALF BLOCK + 0x00de: 0x2590, # RIGHT HALF BLOCK + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA + 0x00e3: 0x03c0, # GREEK SMALL LETTER PI + 0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA + 0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA + 0x00e6: 0x00b5, # MICRO SIGN + 0x00e7: 0x03c4, # GREEK SMALL LETTER TAU + 0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI + 0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA + 0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA + 0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA + 0x00ec: 0x221e, # INFINITY + 0x00ed: 0x03c6, # GREEK SMALL LETTER PHI + 0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON + 0x00ef: 0x2229, # INTERSECTION + 0x00f0: 0x2261, # IDENTICAL TO + 0x00f1: 0x00b1, # PLUS-MINUS SIGN + 0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO + 0x00f3: 0x2264, # LESS-THAN OR EQUAL TO + 0x00f4: 0x2320, # TOP HALF INTEGRAL + 0x00f5: 0x2321, # BOTTOM HALF INTEGRAL + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x2248, # ALMOST EQUAL TO + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x2219, # BULLET OPERATOR 
+ 0x00fa: 0x00b7, # MIDDLE DOT + 0x00fb: 0x221a, # SQUARE ROOT + 0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N + 0x00fd: 0x00b2, # SUPERSCRIPT TWO + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' 
# 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' # 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> 
LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' # 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE + '\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE + '\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\xe6' # 0x0091 -> LATIN SMALL LIGATURE 
AE + '\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE + '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE + '\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX + '\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE + '\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\xa2' # 0x009b -> CENT SIGN + '\xa3' # 0x009c -> POUND SIGN + '\xa5' # 0x009d -> YEN SIGN + '\u20a7' # 0x009e -> PESETA SIGN + '\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE + '\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE + '\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR + '\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR + '\xbf' # 0x00a8 -> INVERTED QUESTION MARK + '\u2310' # 0x00a9 -> REVERSED NOT SIGN + '\xac' # 0x00aa -> NOT SIGN + '\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF + '\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER + '\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + '\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + '\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + '\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + '\u2563' # 0x00b9 -> BOX 
DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + '\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + '\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + '\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + '\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + '\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + '\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE + '\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + '\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + '\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + '\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + '\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + '\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + '\u2518' # 0x00d9 -> 
BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u258c' # 0x00dd -> LEFT HALF BLOCK + '\u2590' # 0x00de -> RIGHT HALF BLOCK + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA + '\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI + '\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA + '\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA + '\xb5' # 0x00e6 -> MICRO SIGN + '\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU + '\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI + '\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA + '\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA + '\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA + '\u221e' # 0x00ec -> INFINITY + '\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI + '\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON + '\u2229' # 0x00ef -> INTERSECTION + '\u2261' # 0x00f0 -> IDENTICAL TO + '\xb1' # 0x00f1 -> PLUS-MINUS SIGN + '\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO + '\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO + '\u2320' # 0x00f4 -> TOP HALF INTEGRAL + '\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL + '\xf7' # 0x00f6 -> DIVISION SIGN + '\u2248' # 0x00f7 -> ALMOST EQUAL TO + '\xb0' # 0x00f8 -> DEGREE SIGN + '\u2219' # 0x00f9 -> BULLET OPERATOR + '\xb7' # 0x00fa -> MIDDLE DOT + '\u221a' # 0x00fb -> SQUARE ROOT + '\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N + '\xb2' # 0x00fd -> SUPERSCRIPT TWO + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL 
TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, # DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL 
LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 
0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK + 0x00a2: 0x009b, # CENT SIGN + 0x00a3: 0x009c, # POUND SIGN + 0x00a5: 0x009d, # YEN SIGN + 0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b1: 0x00f1, # PLUS-MINUS SIGN + 0x00b2: 0x00fd, # SUPERSCRIPT TWO + 0x00b5: 0x00e6, # MICRO SIGN + 0x00b7: 0x00fa, # MIDDLE DOT + 0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER + 0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF + 0x00bf: 0x00a8, # INVERTED QUESTION MARK + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE + 0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE + 0x00e6: 0x0091, # LATIN SMALL LIGATURE AE + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS + 0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE + 0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE + 0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS + 0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK + 0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA + 0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA + 0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA + 0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI + 0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA + 0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA + 0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA + 0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON + 0x03c0: 0x00e3, # GREEK SMALL LETTER PI + 0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA + 0x03c4: 0x00e7, # GREEK SMALL LETTER TAU + 0x03c6: 0x00ed, # GREEK SMALL LETTER PHI + 0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N + 0x20a7: 0x009e, # PESETA SIGN + 0x2219: 0x00f9, # BULLET OPERATOR + 0x221a: 0x00fb, # SQUARE ROOT + 0x221e: 0x00ec, # INFINITY + 0x2229: 0x00ef, # INTERSECTION + 0x2248: 0x00f7, # ALMOST EQUAL TO + 0x2261: 0x00f0, # IDENTICAL TO + 0x2264: 0x00f3, # LESS-THAN OR EQUAL TO + 0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO + 0x2310: 0x00a9, # 
REVERSED NOT SIGN + 0x2320: 0x00f4, # TOP HALF INTEGRAL + 0x2321: 0x00f5, # BOTTOM HALF INTEGRAL + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE + 0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE + 0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE + 0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE + 0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE + 0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE + 0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE + 0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE + 0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND 
HORIZONTAL SINGLE + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE + 0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE + 0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x258c: 0x00dd, # LEFT HALF BLOCK + 0x2590: 0x00de, # RIGHT HALF BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/janus/lib/python3.10/encodings/cp852.py b/janus/lib/python3.10/encodings/cp852.py new file mode 100644 index 0000000000000000000000000000000000000000..34d8a0ea5acc4b7f97cf4f943cfc4a78bb3d4247 --- /dev/null +++ b/janus/lib/python3.10/encodings/cp852.py @@ -0,0 +1,698 @@ +""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py. 
+ +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_map) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_map)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp852', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + +### Decoding Map + +decoding_map = codecs.make_identity_dict(range(256)) +decoding_map.update({ + 0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS + 0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE + 0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS + 0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE + 0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE + 0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA + 0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE + 0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS + 0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + 0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE + 0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE + 0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE + 
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE + 0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE + 0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE + 0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS + 0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON + 0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON + 0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE + 0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE + 0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON + 0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON + 0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE + 0x009e: 0x00d7, # MULTIPLICATION SIGN + 0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON + 0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE + 0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE + 0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE + 0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE + 0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK + 0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK + 0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON + 0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON + 0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK + 0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK + 0x00aa: 0x00ac, # NOT SIGN + 0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE + 0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON + 0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA + 0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00b0: 0x2591, # LIGHT SHADE + 0x00b1: 0x2592, # MEDIUM SHADE + 0x00b2: 0x2593, # DARK SHADE + 0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL + 0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH 
CIRCUMFLEX + 0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON + 0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA + 0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL + 0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE + 0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE + 0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL + 0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE + 0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE + 0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x00cf: 0x00a4, # CURRENCY SIGN + 0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE + 0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE + 0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON + 0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON + 0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON + 0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON + 0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT + 0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x00db: 0x2588, # 
FULL BLOCK + 0x00dc: 0x2584, # LOWER HALF BLOCK + 0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA + 0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE + 0x00df: 0x2580, # UPPER HALF BLOCK + 0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S + 0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE + 0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE + 0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON + 0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON + 0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON + 0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE + 0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE + 0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + 0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE + 0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA + 0x00ef: 0x00b4, # ACUTE ACCENT + 0x00f0: 0x00ad, # SOFT HYPHEN + 0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT + 0x00f2: 0x02db, # OGONEK + 0x00f3: 0x02c7, # CARON + 0x00f4: 0x02d8, # BREVE + 0x00f5: 0x00a7, # SECTION SIGN + 0x00f6: 0x00f7, # DIVISION SIGN + 0x00f7: 0x00b8, # CEDILLA + 0x00f8: 0x00b0, # DEGREE SIGN + 0x00f9: 0x00a8, # DIAERESIS + 0x00fa: 0x02d9, # DOT ABOVE + 0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE + 0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON + 0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON + 0x00fe: 0x25a0, # BLACK SQUARE + 0x00ff: 0x00a0, # NO-BREAK SPACE +}) + +### Decoding Table + +decoding_table = ( + '\x00' # 0x0000 -> NULL + '\x01' # 0x0001 -> START OF HEADING + '\x02' # 0x0002 -> START OF TEXT + '\x03' # 0x0003 -> END OF TEXT + '\x04' # 0x0004 -> END OF TRANSMISSION + '\x05' # 0x0005 -> ENQUIRY + '\x06' # 0x0006 -> ACKNOWLEDGE + '\x07' # 0x0007 -> BELL + '\x08' # 0x0008 -> BACKSPACE + '\t' # 0x0009 -> HORIZONTAL TABULATION + '\n' # 
0x000a -> LINE FEED + '\x0b' # 0x000b -> VERTICAL TABULATION + '\x0c' # 0x000c -> FORM FEED + '\r' # 0x000d -> CARRIAGE RETURN + '\x0e' # 0x000e -> SHIFT OUT + '\x0f' # 0x000f -> SHIFT IN + '\x10' # 0x0010 -> DATA LINK ESCAPE + '\x11' # 0x0011 -> DEVICE CONTROL ONE + '\x12' # 0x0012 -> DEVICE CONTROL TWO + '\x13' # 0x0013 -> DEVICE CONTROL THREE + '\x14' # 0x0014 -> DEVICE CONTROL FOUR + '\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE + '\x16' # 0x0016 -> SYNCHRONOUS IDLE + '\x17' # 0x0017 -> END OF TRANSMISSION BLOCK + '\x18' # 0x0018 -> CANCEL + '\x19' # 0x0019 -> END OF MEDIUM + '\x1a' # 0x001a -> SUBSTITUTE + '\x1b' # 0x001b -> ESCAPE + '\x1c' # 0x001c -> FILE SEPARATOR + '\x1d' # 0x001d -> GROUP SEPARATOR + '\x1e' # 0x001e -> RECORD SEPARATOR + '\x1f' # 0x001f -> UNIT SEPARATOR + ' ' # 0x0020 -> SPACE + '!' # 0x0021 -> EXCLAMATION MARK + '"' # 0x0022 -> QUOTATION MARK + '#' # 0x0023 -> NUMBER SIGN + '$' # 0x0024 -> DOLLAR SIGN + '%' # 0x0025 -> PERCENT SIGN + '&' # 0x0026 -> AMPERSAND + "'" # 0x0027 -> APOSTROPHE + '(' # 0x0028 -> LEFT PARENTHESIS + ')' # 0x0029 -> RIGHT PARENTHESIS + '*' # 0x002a -> ASTERISK + '+' # 0x002b -> PLUS SIGN + ',' # 0x002c -> COMMA + '-' # 0x002d -> HYPHEN-MINUS + '.' # 0x002e -> FULL STOP + '/' # 0x002f -> SOLIDUS + '0' # 0x0030 -> DIGIT ZERO + '1' # 0x0031 -> DIGIT ONE + '2' # 0x0032 -> DIGIT TWO + '3' # 0x0033 -> DIGIT THREE + '4' # 0x0034 -> DIGIT FOUR + '5' # 0x0035 -> DIGIT FIVE + '6' # 0x0036 -> DIGIT SIX + '7' # 0x0037 -> DIGIT SEVEN + '8' # 0x0038 -> DIGIT EIGHT + '9' # 0x0039 -> DIGIT NINE + ':' # 0x003a -> COLON + ';' # 0x003b -> SEMICOLON + '<' # 0x003c -> LESS-THAN SIGN + '=' # 0x003d -> EQUALS SIGN + '>' # 0x003e -> GREATER-THAN SIGN + '?' 
# 0x003f -> QUESTION MARK + '@' # 0x0040 -> COMMERCIAL AT + 'A' # 0x0041 -> LATIN CAPITAL LETTER A + 'B' # 0x0042 -> LATIN CAPITAL LETTER B + 'C' # 0x0043 -> LATIN CAPITAL LETTER C + 'D' # 0x0044 -> LATIN CAPITAL LETTER D + 'E' # 0x0045 -> LATIN CAPITAL LETTER E + 'F' # 0x0046 -> LATIN CAPITAL LETTER F + 'G' # 0x0047 -> LATIN CAPITAL LETTER G + 'H' # 0x0048 -> LATIN CAPITAL LETTER H + 'I' # 0x0049 -> LATIN CAPITAL LETTER I + 'J' # 0x004a -> LATIN CAPITAL LETTER J + 'K' # 0x004b -> LATIN CAPITAL LETTER K + 'L' # 0x004c -> LATIN CAPITAL LETTER L + 'M' # 0x004d -> LATIN CAPITAL LETTER M + 'N' # 0x004e -> LATIN CAPITAL LETTER N + 'O' # 0x004f -> LATIN CAPITAL LETTER O + 'P' # 0x0050 -> LATIN CAPITAL LETTER P + 'Q' # 0x0051 -> LATIN CAPITAL LETTER Q + 'R' # 0x0052 -> LATIN CAPITAL LETTER R + 'S' # 0x0053 -> LATIN CAPITAL LETTER S + 'T' # 0x0054 -> LATIN CAPITAL LETTER T + 'U' # 0x0055 -> LATIN CAPITAL LETTER U + 'V' # 0x0056 -> LATIN CAPITAL LETTER V + 'W' # 0x0057 -> LATIN CAPITAL LETTER W + 'X' # 0x0058 -> LATIN CAPITAL LETTER X + 'Y' # 0x0059 -> LATIN CAPITAL LETTER Y + 'Z' # 0x005a -> LATIN CAPITAL LETTER Z + '[' # 0x005b -> LEFT SQUARE BRACKET + '\\' # 0x005c -> REVERSE SOLIDUS + ']' # 0x005d -> RIGHT SQUARE BRACKET + '^' # 0x005e -> CIRCUMFLEX ACCENT + '_' # 0x005f -> LOW LINE + '`' # 0x0060 -> GRAVE ACCENT + 'a' # 0x0061 -> LATIN SMALL LETTER A + 'b' # 0x0062 -> LATIN SMALL LETTER B + 'c' # 0x0063 -> LATIN SMALL LETTER C + 'd' # 0x0064 -> LATIN SMALL LETTER D + 'e' # 0x0065 -> LATIN SMALL LETTER E + 'f' # 0x0066 -> LATIN SMALL LETTER F + 'g' # 0x0067 -> LATIN SMALL LETTER G + 'h' # 0x0068 -> LATIN SMALL LETTER H + 'i' # 0x0069 -> LATIN SMALL LETTER I + 'j' # 0x006a -> LATIN SMALL LETTER J + 'k' # 0x006b -> LATIN SMALL LETTER K + 'l' # 0x006c -> LATIN SMALL LETTER L + 'm' # 0x006d -> LATIN SMALL LETTER M + 'n' # 0x006e -> LATIN SMALL LETTER N + 'o' # 0x006f -> LATIN SMALL LETTER O + 'p' # 0x0070 -> LATIN SMALL LETTER P + 'q' # 0x0071 -> LATIN SMALL LETTER Q + 'r' 
# 0x0072 -> LATIN SMALL LETTER R + 's' # 0x0073 -> LATIN SMALL LETTER S + 't' # 0x0074 -> LATIN SMALL LETTER T + 'u' # 0x0075 -> LATIN SMALL LETTER U + 'v' # 0x0076 -> LATIN SMALL LETTER V + 'w' # 0x0077 -> LATIN SMALL LETTER W + 'x' # 0x0078 -> LATIN SMALL LETTER X + 'y' # 0x0079 -> LATIN SMALL LETTER Y + 'z' # 0x007a -> LATIN SMALL LETTER Z + '{' # 0x007b -> LEFT CURLY BRACKET + '|' # 0x007c -> VERTICAL LINE + '}' # 0x007d -> RIGHT CURLY BRACKET + '~' # 0x007e -> TILDE + '\x7f' # 0x007f -> DELETE + '\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA + '\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS + '\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE + '\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX + '\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS + '\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE + '\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE + '\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA + '\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE + '\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS + '\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + '\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE + '\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX + '\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE + '\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS + '\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE + '\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE + '\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE + '\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE + '\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX + '\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS + '\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON + '\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON + '\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE + '\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE + '\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS + '\xdc' 
# 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS + '\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON + '\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON + '\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE + '\xd7' # 0x009e -> MULTIPLICATION SIGN + '\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON + '\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE + '\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE + '\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE + '\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE + '\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK + '\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK + '\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON + '\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON + '\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK + '\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK + '\xac' # 0x00aa -> NOT SIGN + '\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE + '\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON + '\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA + '\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\u2591' # 0x00b0 -> LIGHT SHADE + '\u2592' # 0x00b1 -> MEDIUM SHADE + '\u2593' # 0x00b2 -> DARK SHADE + '\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL + '\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT + '\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE + '\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX + '\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON + '\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA + '\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT + '\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL + '\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT + '\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT + '\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE + '\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE + '\u2510' # 
0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT + '\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT + '\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL + '\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + '\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT + '\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL + '\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + '\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE + '\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE + '\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT + '\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT + '\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL + '\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + '\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + '\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL + '\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + '\xa4' # 0x00cf -> CURRENCY SIGN + '\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE + '\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE + '\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON + '\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS + '\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON + '\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON + '\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE + '\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX + '\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON + '\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT + '\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT + '\u2588' # 0x00db -> FULL BLOCK + '\u2584' # 0x00dc -> LOWER HALF BLOCK + '\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA + '\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE + '\u2580' # 0x00df -> UPPER HALF BLOCK + '\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE + '\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S + '\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX + '\u0143' 
# 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE + '\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE + '\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON + '\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON + '\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON + '\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE + '\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE + '\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE + '\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + '\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE + '\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE + '\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA + '\xb4' # 0x00ef -> ACUTE ACCENT + '\xad' # 0x00f0 -> SOFT HYPHEN + '\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT + '\u02db' # 0x00f2 -> OGONEK + '\u02c7' # 0x00f3 -> CARON + '\u02d8' # 0x00f4 -> BREVE + '\xa7' # 0x00f5 -> SECTION SIGN + '\xf7' # 0x00f6 -> DIVISION SIGN + '\xb8' # 0x00f7 -> CEDILLA + '\xb0' # 0x00f8 -> DEGREE SIGN + '\xa8' # 0x00f9 -> DIAERESIS + '\u02d9' # 0x00fa -> DOT ABOVE + '\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE + '\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON + '\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON + '\u25a0' # 0x00fe -> BLACK SQUARE + '\xa0' # 0x00ff -> NO-BREAK SPACE +) + +### Encoding Map + +encoding_map = { + 0x0000: 0x0000, # NULL + 0x0001: 0x0001, # START OF HEADING + 0x0002: 0x0002, # START OF TEXT + 0x0003: 0x0003, # END OF TEXT + 0x0004: 0x0004, # END OF TRANSMISSION + 0x0005: 0x0005, # ENQUIRY + 0x0006: 0x0006, # ACKNOWLEDGE + 0x0007: 0x0007, # BELL + 0x0008: 0x0008, # BACKSPACE + 0x0009: 0x0009, # HORIZONTAL TABULATION + 0x000a: 0x000a, # LINE FEED + 0x000b: 0x000b, # VERTICAL TABULATION + 0x000c: 0x000c, # FORM FEED + 0x000d: 0x000d, # CARRIAGE RETURN + 0x000e: 0x000e, # SHIFT OUT + 0x000f: 0x000f, # SHIFT IN + 0x0010: 0x0010, # DATA LINK ESCAPE + 0x0011: 0x0011, # DEVICE CONTROL ONE + 0x0012: 0x0012, # DEVICE CONTROL TWO + 0x0013: 0x0013, 
# DEVICE CONTROL THREE + 0x0014: 0x0014, # DEVICE CONTROL FOUR + 0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE + 0x0016: 0x0016, # SYNCHRONOUS IDLE + 0x0017: 0x0017, # END OF TRANSMISSION BLOCK + 0x0018: 0x0018, # CANCEL + 0x0019: 0x0019, # END OF MEDIUM + 0x001a: 0x001a, # SUBSTITUTE + 0x001b: 0x001b, # ESCAPE + 0x001c: 0x001c, # FILE SEPARATOR + 0x001d: 0x001d, # GROUP SEPARATOR + 0x001e: 0x001e, # RECORD SEPARATOR + 0x001f: 0x001f, # UNIT SEPARATOR + 0x0020: 0x0020, # SPACE + 0x0021: 0x0021, # EXCLAMATION MARK + 0x0022: 0x0022, # QUOTATION MARK + 0x0023: 0x0023, # NUMBER SIGN + 0x0024: 0x0024, # DOLLAR SIGN + 0x0025: 0x0025, # PERCENT SIGN + 0x0026: 0x0026, # AMPERSAND + 0x0027: 0x0027, # APOSTROPHE + 0x0028: 0x0028, # LEFT PARENTHESIS + 0x0029: 0x0029, # RIGHT PARENTHESIS + 0x002a: 0x002a, # ASTERISK + 0x002b: 0x002b, # PLUS SIGN + 0x002c: 0x002c, # COMMA + 0x002d: 0x002d, # HYPHEN-MINUS + 0x002e: 0x002e, # FULL STOP + 0x002f: 0x002f, # SOLIDUS + 0x0030: 0x0030, # DIGIT ZERO + 0x0031: 0x0031, # DIGIT ONE + 0x0032: 0x0032, # DIGIT TWO + 0x0033: 0x0033, # DIGIT THREE + 0x0034: 0x0034, # DIGIT FOUR + 0x0035: 0x0035, # DIGIT FIVE + 0x0036: 0x0036, # DIGIT SIX + 0x0037: 0x0037, # DIGIT SEVEN + 0x0038: 0x0038, # DIGIT EIGHT + 0x0039: 0x0039, # DIGIT NINE + 0x003a: 0x003a, # COLON + 0x003b: 0x003b, # SEMICOLON + 0x003c: 0x003c, # LESS-THAN SIGN + 0x003d: 0x003d, # EQUALS SIGN + 0x003e: 0x003e, # GREATER-THAN SIGN + 0x003f: 0x003f, # QUESTION MARK + 0x0040: 0x0040, # COMMERCIAL AT + 0x0041: 0x0041, # LATIN CAPITAL LETTER A + 0x0042: 0x0042, # LATIN CAPITAL LETTER B + 0x0043: 0x0043, # LATIN CAPITAL LETTER C + 0x0044: 0x0044, # LATIN CAPITAL LETTER D + 0x0045: 0x0045, # LATIN CAPITAL LETTER E + 0x0046: 0x0046, # LATIN CAPITAL LETTER F + 0x0047: 0x0047, # LATIN CAPITAL LETTER G + 0x0048: 0x0048, # LATIN CAPITAL LETTER H + 0x0049: 0x0049, # LATIN CAPITAL LETTER I + 0x004a: 0x004a, # LATIN CAPITAL LETTER J + 0x004b: 0x004b, # LATIN CAPITAL LETTER K + 0x004c: 0x004c, # LATIN 
CAPITAL LETTER L + 0x004d: 0x004d, # LATIN CAPITAL LETTER M + 0x004e: 0x004e, # LATIN CAPITAL LETTER N + 0x004f: 0x004f, # LATIN CAPITAL LETTER O + 0x0050: 0x0050, # LATIN CAPITAL LETTER P + 0x0051: 0x0051, # LATIN CAPITAL LETTER Q + 0x0052: 0x0052, # LATIN CAPITAL LETTER R + 0x0053: 0x0053, # LATIN CAPITAL LETTER S + 0x0054: 0x0054, # LATIN CAPITAL LETTER T + 0x0055: 0x0055, # LATIN CAPITAL LETTER U + 0x0056: 0x0056, # LATIN CAPITAL LETTER V + 0x0057: 0x0057, # LATIN CAPITAL LETTER W + 0x0058: 0x0058, # LATIN CAPITAL LETTER X + 0x0059: 0x0059, # LATIN CAPITAL LETTER Y + 0x005a: 0x005a, # LATIN CAPITAL LETTER Z + 0x005b: 0x005b, # LEFT SQUARE BRACKET + 0x005c: 0x005c, # REVERSE SOLIDUS + 0x005d: 0x005d, # RIGHT SQUARE BRACKET + 0x005e: 0x005e, # CIRCUMFLEX ACCENT + 0x005f: 0x005f, # LOW LINE + 0x0060: 0x0060, # GRAVE ACCENT + 0x0061: 0x0061, # LATIN SMALL LETTER A + 0x0062: 0x0062, # LATIN SMALL LETTER B + 0x0063: 0x0063, # LATIN SMALL LETTER C + 0x0064: 0x0064, # LATIN SMALL LETTER D + 0x0065: 0x0065, # LATIN SMALL LETTER E + 0x0066: 0x0066, # LATIN SMALL LETTER F + 0x0067: 0x0067, # LATIN SMALL LETTER G + 0x0068: 0x0068, # LATIN SMALL LETTER H + 0x0069: 0x0069, # LATIN SMALL LETTER I + 0x006a: 0x006a, # LATIN SMALL LETTER J + 0x006b: 0x006b, # LATIN SMALL LETTER K + 0x006c: 0x006c, # LATIN SMALL LETTER L + 0x006d: 0x006d, # LATIN SMALL LETTER M + 0x006e: 0x006e, # LATIN SMALL LETTER N + 0x006f: 0x006f, # LATIN SMALL LETTER O + 0x0070: 0x0070, # LATIN SMALL LETTER P + 0x0071: 0x0071, # LATIN SMALL LETTER Q + 0x0072: 0x0072, # LATIN SMALL LETTER R + 0x0073: 0x0073, # LATIN SMALL LETTER S + 0x0074: 0x0074, # LATIN SMALL LETTER T + 0x0075: 0x0075, # LATIN SMALL LETTER U + 0x0076: 0x0076, # LATIN SMALL LETTER V + 0x0077: 0x0077, # LATIN SMALL LETTER W + 0x0078: 0x0078, # LATIN SMALL LETTER X + 0x0079: 0x0079, # LATIN SMALL LETTER Y + 0x007a: 0x007a, # LATIN SMALL LETTER Z + 0x007b: 0x007b, # LEFT CURLY BRACKET + 0x007c: 0x007c, # VERTICAL LINE + 0x007d: 0x007d, # 
RIGHT CURLY BRACKET + 0x007e: 0x007e, # TILDE + 0x007f: 0x007f, # DELETE + 0x00a0: 0x00ff, # NO-BREAK SPACE + 0x00a4: 0x00cf, # CURRENCY SIGN + 0x00a7: 0x00f5, # SECTION SIGN + 0x00a8: 0x00f9, # DIAERESIS + 0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00ac: 0x00aa, # NOT SIGN + 0x00ad: 0x00f0, # SOFT HYPHEN + 0x00b0: 0x00f8, # DEGREE SIGN + 0x00b4: 0x00ef, # ACUTE ACCENT + 0x00b8: 0x00f7, # CEDILLA + 0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + 0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE + 0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX + 0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS + 0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA + 0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE + 0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS + 0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE + 0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX + 0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE + 0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX + 0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS + 0x00d7: 0x009e, # MULTIPLICATION SIGN + 0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE + 0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS + 0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE + 0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S + 0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE + 0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX + 0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS + 0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA + 0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE + 0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS + 0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE + 0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX + 0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE + 0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX + 0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS + 0x00f7: 0x00f6, # DIVISION SIGN + 
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE + 0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS + 0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE + 0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE + 0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE + 0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK + 0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK + 0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE + 0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE + 0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON + 0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON + 0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON + 0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON + 0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE + 0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE + 0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK + 0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK + 0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON + 0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON + 0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE + 0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE + 0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON + 0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON + 0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE + 0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE + 0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE + 0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE + 0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON + 0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON + 0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE + 0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE + 0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE + 0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE + 0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON + 0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON + 0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE + 0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE + 0x015e: 0x00b8, 
# LATIN CAPITAL LETTER S WITH CEDILLA + 0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA + 0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON + 0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON + 0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA + 0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA + 0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON + 0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON + 0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE + 0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE + 0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE + 0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE + 0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE + 0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE + 0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE + 0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE + 0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON + 0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON + 0x02c7: 0x00f3, # CARON + 0x02d8: 0x00f4, # BREVE + 0x02d9: 0x00fa, # DOT ABOVE + 0x02db: 0x00f2, # OGONEK + 0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT + 0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL + 0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL + 0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT + 0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT + 0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT + 0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT + 0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT + 0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT + 0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL + 0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL + 0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL + 0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL + 0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL + 0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT + 0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT + 0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT + 0x255d: 
0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT + 0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT + 0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT + 0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL + 0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL + 0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL + 0x2580: 0x00df, # UPPER HALF BLOCK + 0x2584: 0x00dc, # LOWER HALF BLOCK + 0x2588: 0x00db, # FULL BLOCK + 0x2591: 0x00b0, # LIGHT SHADE + 0x2592: 0x00b1, # MEDIUM SHADE + 0x2593: 0x00b2, # DARK SHADE + 0x25a0: 0x00fe, # BLACK SQUARE +} diff --git a/janus/lib/python3.10/encodings/cp875.py b/janus/lib/python3.10/encodings/cp875.py new file mode 100644 index 0000000000000000000000000000000000000000..c25a5a43bc49e1ce821793979b87d39fbbd05d18 --- /dev/null +++ b/janus/lib/python3.10/encodings/cp875.py @@ -0,0 +1,307 @@ +""" Python Character Mapping Codec cp875 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP875.TXT' with gencodec.py. + +"""#" + +import codecs + +### Codec APIs + +class Codec(codecs.Codec): + + def encode(self,input,errors='strict'): + return codecs.charmap_encode(input,errors,encoding_table) + + def decode(self,input,errors='strict'): + return codecs.charmap_decode(input,errors,decoding_table) + +class IncrementalEncoder(codecs.IncrementalEncoder): + def encode(self, input, final=False): + return codecs.charmap_encode(input,self.errors,encoding_table)[0] + +class IncrementalDecoder(codecs.IncrementalDecoder): + def decode(self, input, final=False): + return codecs.charmap_decode(input,self.errors,decoding_table)[0] + +class StreamWriter(Codec,codecs.StreamWriter): + pass + +class StreamReader(Codec,codecs.StreamReader): + pass + +### encodings module API + +def getregentry(): + return codecs.CodecInfo( + name='cp875', + encode=Codec().encode, + decode=Codec().decode, + incrementalencoder=IncrementalEncoder, + incrementaldecoder=IncrementalDecoder, + streamreader=StreamReader, + streamwriter=StreamWriter, + ) + + +### 
Decoding Table + +decoding_table = ( + '\x00' # 0x00 -> NULL + '\x01' # 0x01 -> START OF HEADING + '\x02' # 0x02 -> START OF TEXT + '\x03' # 0x03 -> END OF TEXT + '\x9c' # 0x04 -> CONTROL + '\t' # 0x05 -> HORIZONTAL TABULATION + '\x86' # 0x06 -> CONTROL + '\x7f' # 0x07 -> DELETE + '\x97' # 0x08 -> CONTROL + '\x8d' # 0x09 -> CONTROL + '\x8e' # 0x0A -> CONTROL + '\x0b' # 0x0B -> VERTICAL TABULATION + '\x0c' # 0x0C -> FORM FEED + '\r' # 0x0D -> CARRIAGE RETURN + '\x0e' # 0x0E -> SHIFT OUT + '\x0f' # 0x0F -> SHIFT IN + '\x10' # 0x10 -> DATA LINK ESCAPE + '\x11' # 0x11 -> DEVICE CONTROL ONE + '\x12' # 0x12 -> DEVICE CONTROL TWO + '\x13' # 0x13 -> DEVICE CONTROL THREE + '\x9d' # 0x14 -> CONTROL + '\x85' # 0x15 -> CONTROL + '\x08' # 0x16 -> BACKSPACE + '\x87' # 0x17 -> CONTROL + '\x18' # 0x18 -> CANCEL + '\x19' # 0x19 -> END OF MEDIUM + '\x92' # 0x1A -> CONTROL + '\x8f' # 0x1B -> CONTROL + '\x1c' # 0x1C -> FILE SEPARATOR + '\x1d' # 0x1D -> GROUP SEPARATOR + '\x1e' # 0x1E -> RECORD SEPARATOR + '\x1f' # 0x1F -> UNIT SEPARATOR + '\x80' # 0x20 -> CONTROL + '\x81' # 0x21 -> CONTROL + '\x82' # 0x22 -> CONTROL + '\x83' # 0x23 -> CONTROL + '\x84' # 0x24 -> CONTROL + '\n' # 0x25 -> LINE FEED + '\x17' # 0x26 -> END OF TRANSMISSION BLOCK + '\x1b' # 0x27 -> ESCAPE + '\x88' # 0x28 -> CONTROL + '\x89' # 0x29 -> CONTROL + '\x8a' # 0x2A -> CONTROL + '\x8b' # 0x2B -> CONTROL + '\x8c' # 0x2C -> CONTROL + '\x05' # 0x2D -> ENQUIRY + '\x06' # 0x2E -> ACKNOWLEDGE + '\x07' # 0x2F -> BELL + '\x90' # 0x30 -> CONTROL + '\x91' # 0x31 -> CONTROL + '\x16' # 0x32 -> SYNCHRONOUS IDLE + '\x93' # 0x33 -> CONTROL + '\x94' # 0x34 -> CONTROL + '\x95' # 0x35 -> CONTROL + '\x96' # 0x36 -> CONTROL + '\x04' # 0x37 -> END OF TRANSMISSION + '\x98' # 0x38 -> CONTROL + '\x99' # 0x39 -> CONTROL + '\x9a' # 0x3A -> CONTROL + '\x9b' # 0x3B -> CONTROL + '\x14' # 0x3C -> DEVICE CONTROL FOUR + '\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE + '\x9e' # 0x3E -> CONTROL + '\x1a' # 0x3F -> SUBSTITUTE + ' ' # 0x40 -> SPACE + '\u0391' # 
0x41 -> GREEK CAPITAL LETTER ALPHA + '\u0392' # 0x42 -> GREEK CAPITAL LETTER BETA + '\u0393' # 0x43 -> GREEK CAPITAL LETTER GAMMA + '\u0394' # 0x44 -> GREEK CAPITAL LETTER DELTA + '\u0395' # 0x45 -> GREEK CAPITAL LETTER EPSILON + '\u0396' # 0x46 -> GREEK CAPITAL LETTER ZETA + '\u0397' # 0x47 -> GREEK CAPITAL LETTER ETA + '\u0398' # 0x48 -> GREEK CAPITAL LETTER THETA + '\u0399' # 0x49 -> GREEK CAPITAL LETTER IOTA + '[' # 0x4A -> LEFT SQUARE BRACKET + '.' # 0x4B -> FULL STOP + '<' # 0x4C -> LESS-THAN SIGN + '(' # 0x4D -> LEFT PARENTHESIS + '+' # 0x4E -> PLUS SIGN + '!' # 0x4F -> EXCLAMATION MARK + '&' # 0x50 -> AMPERSAND + '\u039a' # 0x51 -> GREEK CAPITAL LETTER KAPPA + '\u039b' # 0x52 -> GREEK CAPITAL LETTER LAMDA + '\u039c' # 0x53 -> GREEK CAPITAL LETTER MU + '\u039d' # 0x54 -> GREEK CAPITAL LETTER NU + '\u039e' # 0x55 -> GREEK CAPITAL LETTER XI + '\u039f' # 0x56 -> GREEK CAPITAL LETTER OMICRON + '\u03a0' # 0x57 -> GREEK CAPITAL LETTER PI + '\u03a1' # 0x58 -> GREEK CAPITAL LETTER RHO + '\u03a3' # 0x59 -> GREEK CAPITAL LETTER SIGMA + ']' # 0x5A -> RIGHT SQUARE BRACKET + '$' # 0x5B -> DOLLAR SIGN + '*' # 0x5C -> ASTERISK + ')' # 0x5D -> RIGHT PARENTHESIS + ';' # 0x5E -> SEMICOLON + '^' # 0x5F -> CIRCUMFLEX ACCENT + '-' # 0x60 -> HYPHEN-MINUS + '/' # 0x61 -> SOLIDUS + '\u03a4' # 0x62 -> GREEK CAPITAL LETTER TAU + '\u03a5' # 0x63 -> GREEK CAPITAL LETTER UPSILON + '\u03a6' # 0x64 -> GREEK CAPITAL LETTER PHI + '\u03a7' # 0x65 -> GREEK CAPITAL LETTER CHI + '\u03a8' # 0x66 -> GREEK CAPITAL LETTER PSI + '\u03a9' # 0x67 -> GREEK CAPITAL LETTER OMEGA + '\u03aa' # 0x68 -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA + '\u03ab' # 0x69 -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA + '|' # 0x6A -> VERTICAL LINE + ',' # 0x6B -> COMMA + '%' # 0x6C -> PERCENT SIGN + '_' # 0x6D -> LOW LINE + '>' # 0x6E -> GREATER-THAN SIGN + '?' 
# 0x6F -> QUESTION MARK + '\xa8' # 0x70 -> DIAERESIS + '\u0386' # 0x71 -> GREEK CAPITAL LETTER ALPHA WITH TONOS + '\u0388' # 0x72 -> GREEK CAPITAL LETTER EPSILON WITH TONOS + '\u0389' # 0x73 -> GREEK CAPITAL LETTER ETA WITH TONOS + '\xa0' # 0x74 -> NO-BREAK SPACE + '\u038a' # 0x75 -> GREEK CAPITAL LETTER IOTA WITH TONOS + '\u038c' # 0x76 -> GREEK CAPITAL LETTER OMICRON WITH TONOS + '\u038e' # 0x77 -> GREEK CAPITAL LETTER UPSILON WITH TONOS + '\u038f' # 0x78 -> GREEK CAPITAL LETTER OMEGA WITH TONOS + '`' # 0x79 -> GRAVE ACCENT + ':' # 0x7A -> COLON + '#' # 0x7B -> NUMBER SIGN + '@' # 0x7C -> COMMERCIAL AT + "'" # 0x7D -> APOSTROPHE + '=' # 0x7E -> EQUALS SIGN + '"' # 0x7F -> QUOTATION MARK + '\u0385' # 0x80 -> GREEK DIALYTIKA TONOS + 'a' # 0x81 -> LATIN SMALL LETTER A + 'b' # 0x82 -> LATIN SMALL LETTER B + 'c' # 0x83 -> LATIN SMALL LETTER C + 'd' # 0x84 -> LATIN SMALL LETTER D + 'e' # 0x85 -> LATIN SMALL LETTER E + 'f' # 0x86 -> LATIN SMALL LETTER F + 'g' # 0x87 -> LATIN SMALL LETTER G + 'h' # 0x88 -> LATIN SMALL LETTER H + 'i' # 0x89 -> LATIN SMALL LETTER I + '\u03b1' # 0x8A -> GREEK SMALL LETTER ALPHA + '\u03b2' # 0x8B -> GREEK SMALL LETTER BETA + '\u03b3' # 0x8C -> GREEK SMALL LETTER GAMMA + '\u03b4' # 0x8D -> GREEK SMALL LETTER DELTA + '\u03b5' # 0x8E -> GREEK SMALL LETTER EPSILON + '\u03b6' # 0x8F -> GREEK SMALL LETTER ZETA + '\xb0' # 0x90 -> DEGREE SIGN + 'j' # 0x91 -> LATIN SMALL LETTER J + 'k' # 0x92 -> LATIN SMALL LETTER K + 'l' # 0x93 -> LATIN SMALL LETTER L + 'm' # 0x94 -> LATIN SMALL LETTER M + 'n' # 0x95 -> LATIN SMALL LETTER N + 'o' # 0x96 -> LATIN SMALL LETTER O + 'p' # 0x97 -> LATIN SMALL LETTER P + 'q' # 0x98 -> LATIN SMALL LETTER Q + 'r' # 0x99 -> LATIN SMALL LETTER R + '\u03b7' # 0x9A -> GREEK SMALL LETTER ETA + '\u03b8' # 0x9B -> GREEK SMALL LETTER THETA + '\u03b9' # 0x9C -> GREEK SMALL LETTER IOTA + '\u03ba' # 0x9D -> GREEK SMALL LETTER KAPPA + '\u03bb' # 0x9E -> GREEK SMALL LETTER LAMDA + '\u03bc' # 0x9F -> GREEK SMALL LETTER MU + '\xb4' # 0xA0 
-> ACUTE ACCENT + '~' # 0xA1 -> TILDE + 's' # 0xA2 -> LATIN SMALL LETTER S + 't' # 0xA3 -> LATIN SMALL LETTER T + 'u' # 0xA4 -> LATIN SMALL LETTER U + 'v' # 0xA5 -> LATIN SMALL LETTER V + 'w' # 0xA6 -> LATIN SMALL LETTER W + 'x' # 0xA7 -> LATIN SMALL LETTER X + 'y' # 0xA8 -> LATIN SMALL LETTER Y + 'z' # 0xA9 -> LATIN SMALL LETTER Z + '\u03bd' # 0xAA -> GREEK SMALL LETTER NU + '\u03be' # 0xAB -> GREEK SMALL LETTER XI + '\u03bf' # 0xAC -> GREEK SMALL LETTER OMICRON + '\u03c0' # 0xAD -> GREEK SMALL LETTER PI + '\u03c1' # 0xAE -> GREEK SMALL LETTER RHO + '\u03c3' # 0xAF -> GREEK SMALL LETTER SIGMA + '\xa3' # 0xB0 -> POUND SIGN + '\u03ac' # 0xB1 -> GREEK SMALL LETTER ALPHA WITH TONOS + '\u03ad' # 0xB2 -> GREEK SMALL LETTER EPSILON WITH TONOS + '\u03ae' # 0xB3 -> GREEK SMALL LETTER ETA WITH TONOS + '\u03ca' # 0xB4 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA + '\u03af' # 0xB5 -> GREEK SMALL LETTER IOTA WITH TONOS + '\u03cc' # 0xB6 -> GREEK SMALL LETTER OMICRON WITH TONOS + '\u03cd' # 0xB7 -> GREEK SMALL LETTER UPSILON WITH TONOS + '\u03cb' # 0xB8 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA + '\u03ce' # 0xB9 -> GREEK SMALL LETTER OMEGA WITH TONOS + '\u03c2' # 0xBA -> GREEK SMALL LETTER FINAL SIGMA + '\u03c4' # 0xBB -> GREEK SMALL LETTER TAU + '\u03c5' # 0xBC -> GREEK SMALL LETTER UPSILON + '\u03c6' # 0xBD -> GREEK SMALL LETTER PHI + '\u03c7' # 0xBE -> GREEK SMALL LETTER CHI + '\u03c8' # 0xBF -> GREEK SMALL LETTER PSI + '{' # 0xC0 -> LEFT CURLY BRACKET + 'A' # 0xC1 -> LATIN CAPITAL LETTER A + 'B' # 0xC2 -> LATIN CAPITAL LETTER B + 'C' # 0xC3 -> LATIN CAPITAL LETTER C + 'D' # 0xC4 -> LATIN CAPITAL LETTER D + 'E' # 0xC5 -> LATIN CAPITAL LETTER E + 'F' # 0xC6 -> LATIN CAPITAL LETTER F + 'G' # 0xC7 -> LATIN CAPITAL LETTER G + 'H' # 0xC8 -> LATIN CAPITAL LETTER H + 'I' # 0xC9 -> LATIN CAPITAL LETTER I + '\xad' # 0xCA -> SOFT HYPHEN + '\u03c9' # 0xCB -> GREEK SMALL LETTER OMEGA + '\u0390' # 0xCC -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS + '\u03b0' # 0xCD -> GREEK SMALL 
LETTER UPSILON WITH DIALYTIKA AND TONOS + '\u2018' # 0xCE -> LEFT SINGLE QUOTATION MARK + '\u2015' # 0xCF -> HORIZONTAL BAR + '}' # 0xD0 -> RIGHT CURLY BRACKET + 'J' # 0xD1 -> LATIN CAPITAL LETTER J + 'K' # 0xD2 -> LATIN CAPITAL LETTER K + 'L' # 0xD3 -> LATIN CAPITAL LETTER L + 'M' # 0xD4 -> LATIN CAPITAL LETTER M + 'N' # 0xD5 -> LATIN CAPITAL LETTER N + 'O' # 0xD6 -> LATIN CAPITAL LETTER O + 'P' # 0xD7 -> LATIN CAPITAL LETTER P + 'Q' # 0xD8 -> LATIN CAPITAL LETTER Q + 'R' # 0xD9 -> LATIN CAPITAL LETTER R + '\xb1' # 0xDA -> PLUS-MINUS SIGN + '\xbd' # 0xDB -> VULGAR FRACTION ONE HALF + '\x1a' # 0xDC -> SUBSTITUTE + '\u0387' # 0xDD -> GREEK ANO TELEIA + '\u2019' # 0xDE -> RIGHT SINGLE QUOTATION MARK + '\xa6' # 0xDF -> BROKEN BAR + '\\' # 0xE0 -> REVERSE SOLIDUS + '\x1a' # 0xE1 -> SUBSTITUTE + 'S' # 0xE2 -> LATIN CAPITAL LETTER S + 'T' # 0xE3 -> LATIN CAPITAL LETTER T + 'U' # 0xE4 -> LATIN CAPITAL LETTER U + 'V' # 0xE5 -> LATIN CAPITAL LETTER V + 'W' # 0xE6 -> LATIN CAPITAL LETTER W + 'X' # 0xE7 -> LATIN CAPITAL LETTER X + 'Y' # 0xE8 -> LATIN CAPITAL LETTER Y + 'Z' # 0xE9 -> LATIN CAPITAL LETTER Z + '\xb2' # 0xEA -> SUPERSCRIPT TWO + '\xa7' # 0xEB -> SECTION SIGN + '\x1a' # 0xEC -> SUBSTITUTE + '\x1a' # 0xED -> SUBSTITUTE + '\xab' # 0xEE -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK + '\xac' # 0xEF -> NOT SIGN + '0' # 0xF0 -> DIGIT ZERO + '1' # 0xF1 -> DIGIT ONE + '2' # 0xF2 -> DIGIT TWO + '3' # 0xF3 -> DIGIT THREE + '4' # 0xF4 -> DIGIT FOUR + '5' # 0xF5 -> DIGIT FIVE + '6' # 0xF6 -> DIGIT SIX + '7' # 0xF7 -> DIGIT SEVEN + '8' # 0xF8 -> DIGIT EIGHT + '9' # 0xF9 -> DIGIT NINE + '\xb3' # 0xFA -> SUPERSCRIPT THREE + '\xa9' # 0xFB -> COPYRIGHT SIGN + '\x1a' # 0xFC -> SUBSTITUTE + '\x1a' # 0xFD -> SUBSTITUTE + '\xbb' # 0xFE -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK + '\x9f' # 0xFF -> CONTROL +) + +### Encoding table +encoding_table=codecs.charmap_build(decoding_table)