diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/bigquery_datasource.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/bigquery_datasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..082199ef618af215cf539704e87f67c9930d722a
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/bigquery_datasource.py
@@ -0,0 +1,118 @@
+import logging
+from typing import List, Optional
+
+from ray.data._internal.util import _check_import
+from ray.data.block import Block, BlockMetadata
+from ray.data.datasource.datasource import Datasource, ReadTask
+
+logger = logging.getLogger(__name__)
+
+
+class BigQueryDatasource(Datasource):
+    def __init__(
+        self,
+        project_id: str,
+        dataset: Optional[str] = None,
+        query: Optional[str] = None,
+    ):
+        _check_import(self, module="google.cloud", package="bigquery")
+        _check_import(self, module="google.cloud", package="bigquery_storage")
+        _check_import(self, module="google.api_core", package="exceptions")
+
+        self._project_id = project_id
+        self._dataset = dataset
+        self._query = query
+
+        if query is not None and dataset is not None:
+            raise ValueError(
+                "Query and dataset kwargs cannot both be provided "
+                "(must be mutually exclusive)."
+            )
+
+    def get_read_tasks(self, parallelism: int) -> List[ReadTask]:
+        from google.cloud import bigquery, bigquery_storage
+
+        def _read_single_partition(stream) -> Block:
+            client = bigquery_storage.BigQueryReadClient()
+            reader = client.read_rows(stream.name)
+            return reader.to_arrow()
+
+        if self._query:
+            query_client = bigquery.Client(project=self._project_id)
+            query_job = query_client.query(self._query)
+            query_job.result()
+            destination = str(query_job.destination)
+            dataset_id = destination.split(".")[-2]
+            table_id = destination.split(".")[-1]
+        else:
+            self._validate_dataset_table_exist(self._project_id, self._dataset)
+            dataset_id = self._dataset.split(".")[0]
+            table_id = self._dataset.split(".")[1]
+
+        bqs_client = bigquery_storage.BigQueryReadClient()
+        table = f"projects/{self._project_id}/datasets/{dataset_id}/tables/{table_id}"
+
+        if parallelism == -1:
+            parallelism = None
+        requested_session = bigquery_storage.types.ReadSession(
+            table=table,
+            data_format=bigquery_storage.types.DataFormat.ARROW,
+        )
+        read_session = bqs_client.create_read_session(
+            parent=f"projects/{self._project_id}",
+            read_session=requested_session,
+            max_stream_count=parallelism,
+        )
+
+        read_tasks = []
+        logger.info(f"Created streams: {len(read_session.streams)}")
+        if parallelism is not None and len(read_session.streams) < parallelism:
+            logger.info(
+                "The number of streams created by the "
+                "BigQuery Storage Read API is less than the requested "
+                "parallelism due to the size of the dataset."
+            )
+
+        for stream in read_session.streams:
+            # Create a metadata block object to store schema, etc.
+            metadata = BlockMetadata(
+                num_rows=None,
+                size_bytes=None,
+                schema=None,
+                input_files=None,
+                exec_stats=None,
+            )
+
+            # Create the read task and pass the no-arg wrapper and metadata in.
+            read_task = ReadTask(
+                lambda stream=stream: [_read_single_partition(stream)],
+                metadata,
+            )
+            read_tasks.append(read_task)
+
+        return read_tasks
+
+    def estimate_inmemory_data_size(self) -> Optional[int]:
+        return None
+
+    def _validate_dataset_table_exist(self, project_id: str, dataset: str) -> None:
+        from google.api_core import exceptions
+        from google.cloud import bigquery
+
+        client = bigquery.Client(project=project_id)
+        dataset_id = dataset.split(".")[0]
+        try:
+            client.get_dataset(dataset_id)
+        except exceptions.NotFound:
+            raise ValueError(
+                "Dataset {} is not found. Please ensure that it exists.".format(
+                    dataset_id
+                )
+            )
+
+        try:
+            client.get_table(dataset)
+        except exceptions.NotFound:
+            raise ValueError(
+                "Table {} is not found. Please ensure that it exists.".format(dataset)
+            )
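For orientation, a hedged usage sketch (not part of the diff) of the public wrapper around this datasource; `ray.data.read_bigquery` is available in recent Ray releases, and the project and table names below are placeholders:

import ray

# Requires google-cloud-bigquery and google-cloud-bigquery-storage, plus GCP
# credentials. `dataset` takes "<dataset>.<table>" and is mutually exclusive
# with `query`, matching the ValueError raised in __init__ above.
ds = ray.data.read_bigquery(
    project_id="my-project",        # placeholder project
    dataset="my_dataset.my_table",  # placeholder dataset.table
)
print(ds.schema())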
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/image_datasource.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/image_datasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..bcbf481f863b9020474bc4216b8ba70c771f29f0
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/image_datasource.py
@@ -0,0 +1,175 @@
+import io
+import logging
+import time
+from typing import TYPE_CHECKING, Iterator, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ray.data._internal.delegating_block_builder import DelegatingBlockBuilder
+from ray.data._internal.util import _check_import
+from ray.data.block import Block, BlockMetadata
+from ray.data.datasource.file_based_datasource import FileBasedDatasource
+from ray.data.datasource.file_meta_provider import DefaultFileMetadataProvider
+
+if TYPE_CHECKING:
+    import pyarrow
+
+
+logger = logging.getLogger(__name__)
+
+# The default size multiplier for reading an image data source.
+# This essentially uses the image's on-disk file size to estimate
+# its in-memory data size.
+IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT = 1
+
+# The lower bound value to estimate the image encoding ratio.
+IMAGE_ENCODING_RATIO_ESTIMATE_LOWER_BOUND = 0.5
+
+
+class ImageDatasource(FileBasedDatasource):
+    """A datasource that lets you read images."""
+
+    _WRITE_FILE_PER_ROW = True
+    _FILE_EXTENSIONS = ["png", "jpg", "jpeg", "tif", "tiff", "bmp", "gif"]
+    # Use 8 threads per task to read image files.
+    _NUM_THREADS_PER_TASK = 8
+
+    def __init__(
+        self,
+        paths: Union[str, List[str]],
+        size: Optional[Tuple[int, int]] = None,
+        mode: Optional[str] = None,
+        **file_based_datasource_kwargs,
+    ):
+        super().__init__(paths, **file_based_datasource_kwargs)
+
+        _check_import(self, module="PIL", package="Pillow")
+
+        if size is not None and len(size) != 2:
+            raise ValueError(
+                "Expected `size` to contain two integers for height and width, "
+                f"but got {len(size)} integers instead."
+            )
+
+        if size is not None and (size[0] < 0 or size[1] < 0):
+            raise ValueError(
+                f"Expected `size` to contain non-negative integers, but got {size} instead."
+            )
+
+        self.size = size
+        self.mode = mode
+
+        meta_provider = file_based_datasource_kwargs.get("meta_provider", None)
+        if isinstance(meta_provider, ImageFileMetadataProvider):
+            self._encoding_ratio = self._estimate_files_encoding_ratio()
+            meta_provider._set_encoding_ratio(self._encoding_ratio)
+        else:
+            self._encoding_ratio = IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
+
+    def _read_stream(
+        self,
+        f: "pyarrow.NativeFile",
+        path: str,
+    ) -> Iterator[Block]:
+        from PIL import Image, UnidentifiedImageError
+
+        data = f.readall()
+
+        try:
+            image = Image.open(io.BytesIO(data))
+        except UnidentifiedImageError as e:
+            raise ValueError(f"PIL couldn't load image file at path '{path}'.") from e
+
+        if self.size is not None:
+            height, width = self.size
+            image = image.resize((width, height), resample=Image.BILINEAR)
+        if self.mode is not None:
+            image = image.convert(self.mode)
+
+        builder = DelegatingBlockBuilder()
+        array = np.array(image)
+        item = {"image": array}
+        builder.add(item)
+        block = builder.build()
+
+        yield block
+
+    def _rows_per_file(self):
+        return 1
+
+    def estimate_inmemory_data_size(self) -> Optional[int]:
+        total_size = 0
+        for file_size in self._file_sizes():
+            # NOTE: check that the file size is not None, because some metadata
+            # providers such as FastFileMetadataProvider do not provide file
+            # size information.
+            if file_size is not None:
+                total_size += file_size
+        return total_size * self._encoding_ratio
+
+    def _estimate_files_encoding_ratio(self) -> float:
+        """Return an estimate of the image files' encoding ratio."""
+        start_time = time.perf_counter()
+        # Filter out empty files to avoid noise.
+        non_empty_path_and_size = list(
+            filter(lambda p: p[1] > 0, zip(self._paths(), self._file_sizes()))
+        )
+        num_files = len(non_empty_path_and_size)
+        if num_files == 0:
+            logger.warning(
+                "All input image files are empty. "
+                "Using on-disk file size to estimate images' in-memory size."
+            )
+            return IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
+
+        if self.size is not None and self.mode is not None:
+            # Use image size and mode to calculate the data size for all images,
+            # because all images are homogeneous with the same size after resizing.
+            # Resizing is enforced when reading every image in `ImageDatasource`
+            # when the `size` argument is provided.
+            if self.mode in ["1", "L", "P"]:
+                dimension = 1
+            elif self.mode in ["RGB", "YCbCr", "LAB", "HSV"]:
+                dimension = 3
+            elif self.mode in ["RGBA", "CMYK", "I", "F"]:
+                dimension = 4
+            else:
+                logger.warning(f"Found unknown image mode: {self.mode}.")
+                return IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
+            height, width = self.size
+            single_image_size = height * width * dimension
+            total_estimated_size = single_image_size * num_files
+            total_file_size = sum(p[1] for p in non_empty_path_and_size)
+            ratio = total_estimated_size / total_file_size
+        else:
+            # TODO(chengsu): sample images to estimate data size
+            ratio = IMAGE_ENCODING_RATIO_ESTIMATE_DEFAULT
+
+        sampling_duration = time.perf_counter() - start_time
+        if sampling_duration > 5:
+            logger.warning(
+                "Image input size estimation took "
+                f"{round(sampling_duration, 2)} seconds."
+            )
+        logger.debug(f"Estimated image encoding ratio from sampling is {ratio}.")
+        return max(ratio, IMAGE_ENCODING_RATIO_ESTIMATE_LOWER_BOUND)
+
+
+class ImageFileMetadataProvider(DefaultFileMetadataProvider):
+    def _set_encoding_ratio(self, encoding_ratio: float):
+        """Set the image file encoding ratio, to provide accurate size-in-bytes metadata."""
+        self._encoding_ratio = encoding_ratio
+
+    def _get_block_metadata(
+        self,
+        paths: List[str],
+        schema: Optional[Union[type, "pyarrow.lib.Schema"]],
+        *,
+        rows_per_file: Optional[int],
+        file_sizes: List[Optional[int]],
+    ) -> BlockMetadata:
+        metadata = super()._get_block_metadata(
+            paths, schema, rows_per_file=rows_per_file, file_sizes=file_sizes
+        )
+        if metadata.size_bytes is not None:
+            metadata.size_bytes = int(metadata.size_bytes * self._encoding_ratio)
+        return metadata
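A brief usage sketch (not part of the diff), with a placeholder path: passing both `size` and `mode` is what enables the exact per-image estimate in `_estimate_files_encoding_ratio`, since every image then decodes to height * width * channels bytes.

import ray

ds = ray.data.read_images(
    "s3://anonymous@ray-example-data/batoidea",  # placeholder path
    size=(64, 64),  # (height, width); every image is resized on read
    mode="RGB",     # 3 channels, so each image is 64 * 64 * 3 bytes
)
print(ds.schema())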
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/json_datasource.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/json_datasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..09e685a7e12d221388e630b31f06f398f2c539e5
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/json_datasource.py
@@ -0,0 +1,139 @@
+import logging
+from io import BytesIO
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+
+from ray.air.util.tensor_extensions.arrow import pyarrow_table_from_pydict
+from ray.data.context import DataContext
+from ray.data.datasource.file_based_datasource import FileBasedDatasource
+
+if TYPE_CHECKING:
+    import pyarrow
+
+logger = logging.getLogger(__name__)
+
+
+class JSONDatasource(FileBasedDatasource):
+    """JSON datasource, for reading and writing JSON and JSONL files."""
+
+    _FILE_EXTENSIONS = ["json", "jsonl"]
+
+    def __init__(
+        self,
+        paths: Union[str, List[str]],
+        *,
+        arrow_json_args: Optional[Dict[str, Any]] = None,
+        **file_based_datasource_kwargs,
+    ):
+        from pyarrow import json
+
+        super().__init__(paths, **file_based_datasource_kwargs)
+
+        if arrow_json_args is None:
+            arrow_json_args = {}
+
+        self.read_options = arrow_json_args.pop(
+            "read_options", json.ReadOptions(use_threads=False)
+        )
+        self.arrow_json_args = arrow_json_args
+
+    def _read_with_pyarrow_read_json(self, buffer: "pyarrow.lib.Buffer"):
+        """Read with the PyArrow JSON reader, trying to auto-increase the
+        read block size in the case of a read object straddling block
+        boundaries."""
+        import pyarrow as pa
+
+        # When reading large files, the default block size configured in PyArrow
+        # can be too small, resulting in the following error:
+        # `pyarrow.lib.ArrowInvalid: straddling object straddles two block
+        # boundaries (try to increase block size?)`. More information on this
+        # issue can be found here: https://github.com/apache/arrow/issues/25674
+        # The read will be retried with geometrically increasing block size
+        # until the size reaches `DataContext.get_current().target_max_block_size`.
+        # The initial block size starts at the PyArrow default block size,
+        # or it can be set manually through the `read_options` parameter as follows:
+        # >>> import pyarrow.json as pajson
+        # >>> block_size = 10 << 20  # Set block size to 10 MB
+        # >>> ray.data.read_json(  # doctest: +SKIP
+        # ...     "s3://anonymous@ray-example-data/log.json",
+        # ...     read_options=pajson.ReadOptions(block_size=block_size)
+        # ... )
+
+        init_block_size = self.read_options.block_size
+        max_block_size = DataContext.get_current().target_max_block_size
+        while True:
+            try:
+                yield pa.json.read_json(
+                    BytesIO(buffer),
+                    read_options=self.read_options,
+                    **self.arrow_json_args,
+                )
+                self.read_options.block_size = init_block_size
+                break
+            except pa.ArrowInvalid as e:
+                if "straddling object straddles two block boundaries" in str(e):
+                    if self.read_options.block_size < max_block_size:
+                        # Increase the block size in case it was too small.
+                        logger.debug(
+                            f"JSONDatasource read failed with "
+                            f"block_size={self.read_options.block_size}. Retrying with "
+                            f"block_size={self.read_options.block_size * 2}."
+                        )
+                        self.read_options.block_size *= 2
+                    else:
+                        raise pa.ArrowInvalid(
+                            f"{e} - Auto-increasing block size to "
+                            f"{self.read_options.block_size} bytes failed. "
+                            f"Please try manually increasing the block size through "
+                            f"the `read_options` parameter to a larger size. "
+                            f"For example: `read_json(..., read_options="
+                            f"pyarrow.json.ReadOptions(block_size=10 << 25))`. "
+                            f"More information on this issue can be found here: "
+                            f"https://github.com/apache/arrow/issues/25674"
+                        )
+                else:
+                    # Unrelated error; simply reraise.
+                    raise e
+
+    def _read_with_python_json(self, buffer: "pyarrow.lib.Buffer"):
+        """Fallback method to read JSON files with Python's native json.load(),
+        in case the default PyArrow JSON reader fails."""
+        import json
+
+        import pyarrow as pa
+
+        # Check if the buffer is empty.
+        if buffer.size == 0:
+            return
+
+        parsed_json = json.load(BytesIO(buffer))
+        try:
+            yield pa.Table.from_pylist(parsed_json)
+        except AttributeError as e:
+            # For PyArrow < 7.0.0, `pa.Table.from_pylist()` is not available.
+            # Construct a dict from the list and call
+            # `pa.Table.from_pydict()` instead.
+            assert "no attribute 'from_pylist'" in str(e), str(e)
+            from collections import defaultdict
+
+            dct = defaultdict(list)
+            for row in parsed_json:
+                for k, v in row.items():
+                    dct[k].append(v)
+            yield pyarrow_table_from_pydict(dct)
+
+    # TODO(ekl) The PyArrow JSON reader doesn't support streaming reads.
+    def _read_stream(self, f: "pyarrow.NativeFile", path: str):
+        import pyarrow as pa
+
+        buffer: pa.lib.Buffer = f.read_buffer()
+
+        try:
+            yield from self._read_with_pyarrow_read_json(buffer)
+        except pa.ArrowInvalid as e:
+            # If reading with PyArrow fails, try falling back to native json.load().
+            logger.warning(
+                f"Error reading with pyarrow.json.read_json(). "
+                f"Falling back to native json.load(), which may be slower. "
+                f"PyArrow error was:\n{e}"
+            )
+            yield from self._read_with_python_json(buffer)
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/lance_datasource.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/lance_datasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..2854aa0e62a551d4cab71602dbf64b17c8a356b7
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/lance_datasource.py
@@ -0,0 +1,129 @@
+import logging
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
+
+import numpy as np
+
+from ray.data._internal.util import _check_import, call_with_retry
+from ray.data.block import BlockMetadata
+from ray.data.context import DataContext
+from ray.data.datasource.datasource import Datasource, ReadTask
+
+if TYPE_CHECKING:
+    import pyarrow
+
+
+logger = logging.getLogger(__name__)
+
+
+class LanceDatasource(Datasource):
+    """Lance datasource, for reading Lance datasets."""
+
+    # Errors to retry when reading Lance fragments.
+    READ_FRAGMENTS_ERRORS_TO_RETRY = ["LanceError(IO)"]
+    # Maximum number of attempts to read Lance fragments.
+    READ_FRAGMENTS_MAX_ATTEMPTS = 10
+    # Maximum backoff seconds between attempts to read Lance fragments.
+    READ_FRAGMENTS_RETRY_MAX_BACKOFF_SECONDS = 32
+
+    def __init__(
+        self,
+        uri: str,
+        columns: Optional[List[str]] = None,
+        filter: Optional[str] = None,
+        storage_options: Optional[Dict[str, str]] = None,
+        scanner_options: Optional[Dict[str, Any]] = None,
+    ):
+        _check_import(self, module="lance", package="pylance")
+
+        import lance
+
+        self.uri = uri
+        self.scanner_options = scanner_options or {}
+        if columns is not None:
+            self.scanner_options["columns"] = columns
+        if filter is not None:
+            self.scanner_options["filter"] = filter
+        self.storage_options = storage_options
+        self.lance_ds = lance.dataset(uri=uri, storage_options=storage_options)
+
+        match = []
+        match.extend(self.READ_FRAGMENTS_ERRORS_TO_RETRY)
+        match.extend(DataContext.get_current().retried_io_errors)
+        self._retry_params = {
+            "description": "read lance fragments",
+            "match": match,
+            "max_attempts": self.READ_FRAGMENTS_MAX_ATTEMPTS,
+            "max_backoff_s": self.READ_FRAGMENTS_RETRY_MAX_BACKOFF_SECONDS,
+        }
+
+    def get_read_tasks(self, parallelism: int) -> List[ReadTask]:
+        read_tasks = []
+        for fragments in np.array_split(self.lance_ds.get_fragments(), parallelism):
+            if len(fragments) <= 0:
+                continue
+
+            fragment_ids = [f.metadata.id for f in fragments]
+            num_rows = sum(f.count_rows() for f in fragments)
+            input_files = [
+                data_file.path() for f in fragments for data_file in f.data_files()
+            ]
+
+            # TODO(chengsu): Take column projection into consideration for schema.
+            metadata = BlockMetadata(
+                num_rows=num_rows,
+                schema=fragments[0].schema,
+                input_files=input_files,
+                size_bytes=None,
+                exec_stats=None,
+            )
+            scanner_options = self.scanner_options
+            lance_ds = self.lance_ds
+            retry_params = self._retry_params
+
+            read_task = ReadTask(
+                lambda f=fragment_ids: _read_fragments_with_retry(
+                    f,
+                    lance_ds,
+                    scanner_options,
+                    retry_params,
+                ),
+                metadata,
+            )
+            read_tasks.append(read_task)
+
+        return read_tasks
+
+    def estimate_inmemory_data_size(self) -> Optional[int]:
+        # TODO(chengsu): Add memory size estimation to improve auto-tuning of parallelism.
+        return None
+
+
+def _read_fragments_with_retry(
+    fragment_ids,
+    lance_ds,
+    scanner_options,
+    retry_params,
+) -> Iterator["pyarrow.Table"]:
+    return call_with_retry(
+        lambda: _read_fragments(fragment_ids, lance_ds, scanner_options),
+        **retry_params,
+    )
+
+
+def _read_fragments(
+    fragment_ids,
+    lance_ds,
+    scanner_options,
+) -> Iterator["pyarrow.Table"]:
+    """Read Lance fragments in batches.
+
+    NOTE: Use fragment ids instead of fragments as the parameter, because
+    pickling a LanceFragment is expensive.
+    """
+    import pyarrow
+
+    fragments = [lance_ds.get_fragment(id) for id in fragment_ids]
+    scanner_options["fragments"] = fragments
+    scanner = lance_ds.scanner(**scanner_options)
+    for batch in scanner.to_reader():
+        yield pyarrow.Table.from_batches([batch])
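A hedged usage sketch (not part of the diff): `ray.data.read_lance` wraps this datasource in recent Ray releases; the URI and column names are placeholders and assume a Lance dataset written beforehand with `pylance`.

import ray

ds = ray.data.read_lance(
    "./my_table.lance",        # placeholder URI
    columns=["id", "vector"],  # optional column projection
)
print(ds.count())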
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/webdataset_datasource.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/webdataset_datasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9f5f876d4ec5b300b86d335eca72cc1e6b6d571
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/datasource/webdataset_datasource.py
@@ -0,0 +1,365 @@
+# Copyright NVIDIA Corporation 2023
+# SPDX-License-Identifier: Apache-2.0
+
+import fnmatch
+import io
+import re
+import tarfile
+from functools import partial
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union
+
+import ray
+from ray.data._internal.util import iterate_with_retry
+from ray.data.block import BlockAccessor
+from ray.data.datasource.file_based_datasource import FileBasedDatasource
+
+if TYPE_CHECKING:
+    import pyarrow
+
+
+def _base_plus_ext(path: str):
+    """Split off all file extensions.
+
+    Args:
+        path: path with extensions
+
+    Returns:
+        Tuple of (base path with all extensions removed, all extensions),
+        or (None, None) if the path doesn't match.
+    """
+    match = re.match(r"^((?:.*/|)[^.]+)[.]([^/]*)$", path)
+    if not match:
+        return None, None
+    return match.group(1), match.group(2)
+
+
+def _valid_sample(sample: Dict[str, Any]):
+    """Check whether a sample is valid.
+
+    Args:
+        sample: sample to be checked
+    """
+    return (
+        sample is not None
+        and isinstance(sample, dict)
+        and len(list(sample.keys())) > 0
+        and not sample.get("__bad__", False)
+    )
+
+
+def _apply_list(
+    f: Union[Callable, List[Callable]], sample: Dict[str, Any], default: Callable = None
+):
+    """Apply a list of functions to a sample.
+
+    Args:
+        f: function or list of functions
+        sample: sample to be modified
+        default: default function to be applied to all keys.
+            Defaults to None.
+
+    Returns:
+        modified sample
+    """
+    if f is None:
+        return sample
+    if not isinstance(f, list):
+        f = [f]
+    for g in f:
+        if default is not None and not callable(g):
+            g = partial(default, format=g)
+        sample = g(sample)
+    return sample
+
+
+def _check_suffix(suffix: str, suffixes: Union[list, callable]):
+    """Check whether a suffix is valid.
+
+    Suffixes can be either None (= accept everything), a callable,
+    or a list of patterns. If a pattern contains * or ? it is treated
+    as a glob pattern, otherwise it is treated as a literal.
+
+    Args:
+        suffix: suffix to be checked
+        suffixes: list of valid suffixes
+    """
+    if suffixes is None:
+        return True
+    if callable(suffixes):
+        return suffixes(suffix)
+    for pattern in suffixes:
+        if "*" in pattern or "?" in pattern:
+            if fnmatch.fnmatch("." + suffix, pattern):
+                return True
+        elif suffix == pattern or "." + suffix == pattern:
+            return True
+    return False
+
+
+def _tar_file_iterator(
+    fileobj: Any,
+    fileselect: Optional[Union[bool, callable, list]] = None,
+    filerename: Optional[Union[bool, callable, list]] = None,
+    verbose_open: bool = False,
+    meta: dict = None,
+):
+    """Iterate over a tar stream, yielding filename, content pairs.
+
+    Args:
+        fileobj: file object
+        fileselect: patterns or function selecting the files to keep
+        filerename: patterns or function renaming files as they are read
+        verbose_open: print a message when opening and closing the stream
+        meta: metadata to be added to each sample
+    """
+    meta = meta or {}
+    stream = tarfile.open(fileobj=fileobj, mode="r|*")
+    if verbose_open:
+        print(f"start {meta}")
+    for tarinfo in stream:
+        fname = tarinfo.name
+        if not tarinfo.isreg() or fname is None:
+            continue
+        data = stream.extractfile(tarinfo).read()
+        fname = _apply_list(filerename, fname)
+        assert isinstance(fname, str)
+        if not _check_suffix(fname, fileselect):
+            continue
+        result = dict(fname=fname, data=data)
+        yield result
+    if verbose_open:
+        print(f"done {meta}")
+
+
+def _group_by_keys(
+    data: List[Dict[str, Any]],
+    keys: callable = _base_plus_ext,
+    suffixes: Optional[Union[list, callable]] = None,
+    meta: dict = None,
+):
+    """Group key, value pairs from an iterator into samples.
+
+    Args:
+        data: iterator over key, value pairs
+        keys: function that returns key, suffix for a given key
+        suffixes: list of suffixes to be included in the sample
+        meta: metadata to be added to each sample
+    """
+    meta = meta or {}
+    current_sample = None
+    for filesample in data:
+        assert isinstance(filesample, dict)
+        fname, value = filesample["fname"], filesample["data"]
+        prefix, suffix = keys(fname)
+        if prefix is None:
+            continue
+        if current_sample is None or prefix != current_sample["__key__"]:
+            if _valid_sample(current_sample):
+                current_sample.update(meta)
+                yield current_sample
+            current_sample = dict(__key__=prefix)
+            if "__url__" in filesample:
+                current_sample["__url__"] = filesample["__url__"]
+        if suffix in current_sample:
+            raise ValueError(
+                f"{fname}: duplicate file name in tar file "
+                f"{suffix} {current_sample.keys()}"
+            )
+        if suffixes is None or _check_suffix(suffix, suffixes):
+            current_sample[suffix] = value
+    if _valid_sample(current_sample):
+        current_sample.update(meta)
+        yield current_sample
+
+
+def _default_decoder(sample: Dict[str, Any], format: Optional[Union[bool, str]] = True):
+    """A default decoder for webdataset.
+
+    This handles common file extensions: .txt, .cls, .cls2,
+    .jpg, .png, .json, .npy, .mp, .pt, .pth, .pickle, .pkl.
+    These are the most common extensions used in webdataset.
+    For other extensions, users can provide their own decoder.
+
+    Args:
+        sample: sample, modified in place
+        format: image decoding target; "PIL" yields PIL images,
+            anything else yields NumPy arrays
+    """
+    sample = dict(sample)
+    for key, value in sample.items():
+        extension = key.split(".")[-1]
+        if key.startswith("__"):
+            continue
+        elif extension in ["txt", "text"]:
+            sample[key] = value.decode("utf-8")
+        elif extension in ["cls", "cls2"]:
+            sample[key] = int(value.decode("utf-8"))
+        elif extension in ["jpg", "png", "ppm", "pgm", "pbm", "pnm"]:
+            import numpy as np
+            import PIL.Image
+
+            if format == "PIL":
+                sample[key] = PIL.Image.open(io.BytesIO(value))
+            else:
+                sample[key] = np.asarray(PIL.Image.open(io.BytesIO(value)))
+        elif extension == "json":
+            import json
+
+            sample[key] = json.loads(value)
+        elif extension == "npy":
+            import numpy as np
+
+            sample[key] = np.load(io.BytesIO(value))
+        elif extension == "mp":
+            import msgpack
+
+            sample[key] = msgpack.unpackb(value, raw=False)
+        elif extension in ["pt", "pth"]:
+            import torch
+
+            sample[key] = torch.load(io.BytesIO(value))
+        elif extension in ["pickle", "pkl"]:
+            import pickle
+
+            sample[key] = pickle.loads(value)
+    return sample
+
+
+extension_to_format = {"jpg": "jpeg"}
+
+
+def _default_encoder(sample: Dict[str, Any], format: Optional[Union[str, bool]] = True):
+    """A default encoder for webdataset.
+
+    This handles common file extensions: .txt, .cls, .cls2, .jpg,
+    .png, .json, .npy, .mp, .pt, .pth, .pickle, .pkl.
+    These are the most common extensions used in webdataset.
+    For other extensions, users can provide their own encoder.
+
+    Args:
+        sample (Dict[str, Any]): sample
+    """
+    sample = dict(sample)
+    for key, value in sample.items():
+        extension = key.split(".")[-1]
+        if key.startswith("__"):
+            continue
+        elif extension in ["txt"]:
+            sample[key] = value.encode("utf-8")
+        elif extension in ["cls", "cls2"]:
+            sample[key] = str(value).encode("utf-8")
+        elif extension in ["jpg", "jpeg", "png", "ppm", "pgm", "pbm", "pnm"]:
+            import numpy as np
+            import PIL.Image
+
+            if isinstance(value, np.ndarray):
+                value = PIL.Image.fromarray(value)
+            assert isinstance(value, PIL.Image.Image)
+            stream = io.BytesIO()
+            value.save(
+                stream, format=extension_to_format.get(extension.lower(), extension)
+            )
+            sample[key] = stream.getvalue()
+        elif extension == "json":
+            import json
+
+            sample[key] = json.dumps(value).encode("utf-8")
+        elif extension == "npy":
+            import numpy as np
+
+            stream = io.BytesIO()
+            np.save(stream, value)
+            sample[key] = stream.getvalue()
+        elif extension == "mp":
+            import msgpack
+
+            sample[key] = msgpack.dumps(value)
+        elif extension in ["pt", "pth"]:
+            import torch
+
+            stream = io.BytesIO()
+            torch.save(value, stream)
+            sample[key] = stream.getvalue()
+        elif extension in ["pickle", "pkl"]:
+            import pickle
+
+            stream = io.BytesIO()
+            pickle.dump(value, stream)
+            sample[key] = stream.getvalue()
+    return sample
+
+
+def _make_iterable(block: BlockAccessor):
+    """Make a block iterable.
+
+    This is a placeholder for dealing with more complex blocks.
+
+    Args:
+        block: Ray Dataset block
+
+    Returns:
+        Iterable[Dict[str, Any]]: iterable of samples
+    """
+    return block.iter_rows(public_row_format=False)
+
+
+class WebDatasetDatasource(FileBasedDatasource):
+    """A Datasource for WebDataset datasets (tar format with naming conventions)."""
+
+    _FILE_EXTENSIONS = ["tar"]
+
+    def __init__(
+        self,
+        paths: Union[str, List[str]],
+        decoder: Optional[Union[bool, str, callable, list]] = True,
+        fileselect: Optional[Union[bool, callable, list]] = None,
+        filerename: Optional[Union[bool, callable, list]] = None,
+        suffixes: Optional[Union[bool, callable, list]] = None,
+        verbose_open: bool = False,
+        **file_based_datasource_kwargs,
+    ):
+        super().__init__(paths, **file_based_datasource_kwargs)
+
+        self.decoder = decoder
+        self.fileselect = fileselect
+        self.filerename = filerename
+        self.suffixes = suffixes
+        self.verbose_open = verbose_open
+
+    def _read_stream(self, stream: "pyarrow.NativeFile", path: str):
+        """Read and decode samples from a stream.
+
+        Note that `fileselect` selects files during reading, while `suffixes`
+        selects files during the grouping step. Decoding, file selection,
+        renaming, and verbosity are controlled by the corresponding instance
+        attributes set in `__init__`.
+
+        Args:
+            stream: File descriptor to read from.
+            path: Path to the data.
+
+        Yields:
+            pd.DataFrame: one single-row DataFrame per sample.
+        """
+        import pandas as pd
+
+        def get_tar_file_iterator():
+            return _tar_file_iterator(
+                stream,
+                fileselect=self.fileselect,
+                filerename=self.filerename,
+                verbose_open=self.verbose_open,
+            )
+
+        # S3 can raise transient errors during iteration.
+        ctx = ray.data.DataContext.get_current()
+        files = iterate_with_retry(
+            get_tar_file_iterator, "iterate tar file", match=ctx.retried_io_errors
+        )
+
+        samples = _group_by_keys(files, meta=dict(__url__=path), suffixes=self.suffixes)
+        for sample in samples:
+            if self.decoder is not None:
+                sample = _apply_list(self.decoder, sample, default=_default_decoder)
+            yield pd.DataFrame({k: [v] for k, v in sample.items()})
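To make the grouping convention concrete, this standalone sketch (not part of the diff) applies the same regex as `_base_plus_ext` to a toy file listing; files sharing a basename collapse into one sample keyed by extension, as `_group_by_keys` does over a tar stream:

import re


def base_plus_ext(path):
    # Same regex as _base_plus_ext: the basename up to its first dot is the
    # sample key; the remainder is the (possibly compound) extension.
    m = re.match(r"^((?:.*/|)[^.]+)[.]([^/]*)$", path)
    return (m.group(1), m.group(2)) if m else (None, None)


files = ["train/000.jpg", "train/000.cls", "train/001.seg.png", "train/001.cls"]
samples = {}
for fname in files:
    base, ext = base_plus_ext(fname)
    samples.setdefault(base, {})[ext] = f"<bytes of {fname}>"
print(samples)
# {'train/000': {'jpg': ..., 'cls': ...}, 'train/001': {'seg.png': ..., 'cls': ...}}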
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..92822490b22d6e5685260b7de96a38e11a694181
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/__init__.py
@@ -0,0 +1,16 @@
+from .logical_operator import LogicalOperator
+from .logical_plan import LogicalPlan
+from .operator import Operator
+from .optimizer import Optimizer, Rule
+from .physical_plan import PhysicalPlan
+from .plan import Plan
+
+__all__ = [
+    "LogicalOperator",
+    "LogicalPlan",
+    "Operator",
+    "Optimizer",
+    "PhysicalPlan",
+    "Plan",
+    "Rule",
+]
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..84535706cd5080061b88aa675944dc80dcdc32bb
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_operator.py
@@ -0,0 +1,79 @@
+from typing import TYPE_CHECKING, Iterator, List, Optional
+
+from .operator import Operator
+from ray.data.block import BlockMetadata
+
+if TYPE_CHECKING:
+    from ray.data._internal.execution.interfaces import RefBundle
+
+
+class LogicalOperator(Operator):
+    """Abstract class for logical operators.
+
+    A logical operator describes a transformation, and is later converted into a
+    physical operator.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        input_dependencies: List["LogicalOperator"],
+        num_outputs: Optional[int] = None,
+    ):
+        super().__init__(
+            name,
+            input_dependencies,
+        )
+        for x in input_dependencies:
+            assert isinstance(x, LogicalOperator), x
+        self._num_outputs = num_outputs
+
+    def estimated_num_outputs(self) -> Optional[int]:
+        """Returns the estimated number of blocks that
+        would be outputted by this logical operator.
+
+        This method does not execute the plan, so it does not take into consideration
+        block splitting. This method only considers high-level block constraints like
+        `Dataset.repartition(num_blocks=X)`. A more accurate estimation can be given by
+        `PhysicalOperator.num_outputs_total()` during execution.
+        """
+        if self._num_outputs is not None:
+            return self._num_outputs
+        elif len(self._input_dependencies) == 1:
+            return self._input_dependencies[0].estimated_num_outputs()
+        return None
+
+    # Override the following 3 methods to correct type hints.
+
+    @property
+    def input_dependencies(self) -> List["LogicalOperator"]:
+        return super().input_dependencies  # type: ignore
+
+    @property
+    def output_dependencies(self) -> List["LogicalOperator"]:
+        return super().output_dependencies  # type: ignore
+
+    def post_order_iter(self) -> Iterator["LogicalOperator"]:
+        return super().post_order_iter()  # type: ignore
+
+    def output_data(self) -> Optional[List["RefBundle"]]:
+        """The output data of this operator, or ``None`` if not known."""
+        return None
+
+    def aggregate_output_metadata(self) -> BlockMetadata:
+        """A ``BlockMetadata`` that represents the aggregate metadata of the outputs.
+
+        This method is used by methods like :meth:`~ray.data.Dataset.schema` to
+        efficiently return metadata.
+        """
+        return BlockMetadata(None, None, None, None, None)
+
+    def is_lineage_serializable(self) -> bool:
+        """Returns whether the lineage of this operator can be serialized.
+
+        An operator is lineage serializable if you can serialize it on one machine and
+        deserialize it on another without losing information. Operators that store
+        object references (e.g., ``InputData``) aren't lineage serializable because the
+        objects aren't available on the deserialized machine.
+        """
+        return True
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e0196bb440bbdb02ff2b112e2b68d8d2be58088
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/logical_plan.py
@@ -0,0 +1,31 @@
+from typing import TYPE_CHECKING, List
+
+from .logical_operator import LogicalOperator
+from .plan import Plan
+
+if TYPE_CHECKING:
+    from ray.data import DataContext
+
+
+class LogicalPlan(Plan):
+    """The plan with a DAG of logical operators."""
+
+    def __init__(self, dag: LogicalOperator, context: "DataContext"):
+        super().__init__(context)
+        self._dag = dag
+
+    @property
+    def dag(self) -> LogicalOperator:
+        """Get the DAG of logical operators."""
+        return self._dag
+
+    def sources(self) -> List[LogicalOperator]:
+        """List of operators that are sources for this plan's DAG."""
+        # If an operator has no input dependencies, it's a source.
+        if not any(self._dag.input_dependencies):
+            return [self._dag]
+
+        sources = []
+        for op in self._dag.input_dependencies:
+            sources.extend(LogicalPlan(op, self._context).sources())
+        return sources
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py
new file mode 100644
index 0000000000000000000000000000000000000000..76a320ef815a23cb319146221d32c7be10e5be52
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/operator.py
@@ -0,0 +1,58 @@
+from typing import Iterator, List
+
+
+class Operator:
+    """Abstract class for operators.
+
+    Operators live on the driver side of the Dataset only.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        input_dependencies: List["Operator"],
+    ):
+        self._name = name
+        self._input_dependencies = input_dependencies
+        self._output_dependencies = []
+        for x in input_dependencies:
+            assert isinstance(x, Operator), x
+            x._output_dependencies.append(self)
+
+    @property
+    def name(self) -> str:
+        return self._name
+
+    @property
+    def input_dependencies(self) -> List["Operator"]:
+        """List of operators that provide inputs for this operator."""
+        assert hasattr(
+            self, "_input_dependencies"
+        ), "Operator.__init__() was not called."
+        return self._input_dependencies
+
+    @property
+    def output_dependencies(self) -> List["Operator"]:
+        """List of operators that consume outputs from this operator."""
+        assert hasattr(
+            self, "_output_dependencies"
+        ), "Operator.__init__() was not called."
+        return self._output_dependencies
+
+    def post_order_iter(self) -> Iterator["Operator"]:
+        """Depth-first traversal of this operator and its input dependencies."""
+        for op in self.input_dependencies:
+            yield from op.post_order_iter()
+        yield self
+
+    def __repr__(self) -> str:
+        if self.input_dependencies:
+            out_str = ", ".join([str(x) for x in self.input_dependencies])
+            out_str += " -> "
+        else:
+            out_str = ""
+        out_str += f"{self.__class__.__name__}[{self._name}]"
+        return out_str
+
+    def __str__(self) -> str:
+        return repr(self)
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a25a44afe624b2e3ad6182739f68ed59b5cf720
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/interfaces/optimizer.py
@@ -0,0 +1,29 @@
+from typing import List
+
+from .plan import Plan
+
+
+class Rule:
+    """Abstract class for an optimization rule."""
+
+    def apply(self, plan: Plan) -> Plan:
+        """Apply the optimization rule to the execution plan."""
+        raise NotImplementedError
+
+
+class Optimizer:
+    """Abstract class for optimizers.
+
+    An optimizer transforms a DAG of operators with a list of predefined rules.
+    """
+
+    @property
+    def rules(self) -> List[Rule]:
+        """List of predefined rules for this optimizer."""
+        raise NotImplementedError
+
+    def optimize(self, plan: Plan) -> Plan:
+        """Optimize operators with a list of rules."""
+        for rule in self.rules:
+            plan = rule.apply(plan)
+        return plan
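A minimal sketch (not part of the diff) of the `Rule`/`Optimizer` contract above: a rule is a pure plan-to-plan rewrite, and an optimizer folds the plan through its rule list. `ToyPlan` and `NoopRule` are assumptions for illustration.

class ToyPlan:
    def __init__(self, dag):
        self.dag = dag


class NoopRule:
    def apply(self, plan):
        return plan  # a real rule would rewrite plan.dag here


class ToyOptimizer:
    rules = [NoopRule()]

    def optimize(self, plan):
        for rule in self.rules:
            plan = rule.apply(plan)
        return plan


plan = ToyOptimizer().optimize(ToyPlan(dag="Read -> Map"))
print(plan.dag)  # unchanged: "Read -> Map"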
+ """ + return self._op_map diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5d2550ed836487380e262cb987653c6a8885b10 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/count_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f09abdd9b405905bb886c13629e016e0c395751 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/input_data_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c6aa621c32b631245d25a61aef40754605c2afd Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/n_ary_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d86e4134ff72922ff2581c5ec88796d16e8b10c Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/one_to_one_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38ebd0e2ef507f64323851bbe56eb92e10faac30 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/__pycache__/read_operator.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..745103f0036fee1de56fcd0ef645aac438af33d6 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/all_to_all_operator.py @@ -0,0 +1,163 @@ +from typing import Any, Dict, List, Optional + +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.planner.exchange.interfaces 
import ExchangeTaskSpec +from ray.data._internal.planner.exchange.shuffle_task_spec import ShuffleTaskSpec +from ray.data._internal.planner.exchange.sort_task_spec import SortKey, SortTaskSpec +from ray.data.aggregate import AggregateFn +from ray.data.block import BlockMetadata + + +class AbstractAllToAll(LogicalOperator): + """Abstract class for logical operators that should be converted to physical + AllToAllOperator. + """ + + def __init__( + self, + name: str, + input_op: LogicalOperator, + num_outputs: Optional[int] = None, + sub_progress_bar_names: Optional[List[str]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. + num_outputs: The expected number of output bundles produced by this + operator. + ray_remote_args: Args to provide to ray.remote. + """ + super().__init__(name, [input_op], num_outputs) + self._num_outputs = num_outputs + self._ray_remote_args = ray_remote_args or {} + self._sub_progress_bar_names = sub_progress_bar_names + + +class RandomizeBlocks(AbstractAllToAll): + """Logical operator for randomize_block_order.""" + + def __init__( + self, + input_op: LogicalOperator, + seed: Optional[int] = None, + ): + super().__init__( + "RandomizeBlockOrder", + input_op, + ) + self._seed = seed + + def aggregate_output_metadata(self) -> BlockMetadata: + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class RandomShuffle(AbstractAllToAll): + """Logical operator for random_shuffle.""" + + def __init__( + self, + input_op: LogicalOperator, + name: str = "RandomShuffle", + seed: Optional[int] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + name, + input_op, + sub_progress_bar_names=[ + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ], + ray_remote_args=ray_remote_args, + ) + self._seed = seed + + def aggregate_output_metadata(self) -> BlockMetadata: + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class Repartition(AbstractAllToAll): + """Logical operator for repartition.""" + + def __init__( + self, + input_op: LogicalOperator, + num_outputs: int, + shuffle: bool, + ): + if shuffle: + sub_progress_bar_names = [ + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ] + else: + sub_progress_bar_names = [ + ShuffleTaskSpec.SPLIT_REPARTITION_SUB_PROGRESS_BAR_NAME, + ] + super().__init__( + "Repartition", + input_op, + num_outputs=num_outputs, + sub_progress_bar_names=sub_progress_bar_names, + ) + self._shuffle = shuffle + + def aggregate_output_metadata(self) -> BlockMetadata: + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class Sort(AbstractAllToAll): + """Logical operator for sort.""" + + def __init__( + self, + input_op: LogicalOperator, + sort_key: SortKey, + batch_format: Optional[str] = "default", + ): + super().__init__( + "Sort", + input_op, + sub_progress_bar_names=[ + SortTaskSpec.SORT_SAMPLE_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, +
ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ], + ) + self._sort_key = sort_key + self._batch_format = batch_format + + def aggregate_output_metadata(self) -> BlockMetadata: + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata() + + +class Aggregate(AbstractAllToAll): + """Logical operator for aggregate.""" + + def __init__( + self, + input_op: LogicalOperator, + key: Optional[str], + aggs: List[AggregateFn], + batch_format: Optional[str] = "default", + ): + super().__init__( + "Aggregate", + input_op, + sub_progress_bar_names=[ + SortTaskSpec.SORT_SAMPLE_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.MAP_SUB_PROGRESS_BAR_NAME, + ExchangeTaskSpec.REDUCE_SUB_PROGRESS_BAR_NAME, + ], + ) + self._key = key + self._aggs = aggs + self._batch_format = batch_format diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..409c99e3c000698622be3f6294d419d1c59ff2d8 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/count_operator.py @@ -0,0 +1,20 @@ +from typing import List + +from ray.data._internal.logical.interfaces import LogicalOperator + + +class Count(LogicalOperator): + """Logical operator that represents counting the number of rows in inputs. + + Physical operators that implement this logical operator should produce one or more + rows with a single column named `Count.COLUMN_NAME`. When you sum the values in + this column, you should get the total number of rows in the dataset. + """ + + COLUMN_NAME = "__num_rows" + + def __init__( + self, + input_dependencies: List["LogicalOperator"], + ): + super().__init__("Count", input_dependencies) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py new file mode 100644 index 0000000000000000000000000000000000000000..afe5e8200bb14040e712e4b2faef12e79516f8df --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/from_operators.py @@ -0,0 +1,105 @@ +import abc +import functools +from typing import TYPE_CHECKING, List, Optional, Union + +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.util import unify_block_metadata_schema +from ray.data.block import Block, BlockMetadata +from ray.types import ObjectRef + +if TYPE_CHECKING: + import pyarrow as pa + + ArrowTable = Union["pa.Table", bytes] + + +class AbstractFrom(LogicalOperator, metaclass=abc.ABCMeta): + """Abstract logical operator for `from_*`.""" + + def __init__( + self, + input_blocks: List[ObjectRef[Block]], + input_metadata: List[BlockMetadata], + ): + super().__init__(self.__class__.__name__, [], len(input_blocks)) + assert len(input_blocks) == len(input_metadata), ( + len(input_blocks), + len(input_metadata), + ) + # `owns_blocks` is False because this op may be shared by multiple Datasets. 
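+ # Illustrative note: each input block below is wrapped in its own + # single-block RefBundle, so that downstream operators can consume + # blocks independently.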
+ self._input_data = [ + RefBundle([(input_blocks[i], input_metadata[i])], owns_blocks=False) + for i in range(len(input_blocks)) + ] + + @property + def input_data(self) -> List[RefBundle]: + return self._input_data + + def output_data(self) -> Optional[List[RefBundle]]: + return self._input_data + + def aggregate_output_metadata(self) -> BlockMetadata: + return self._cached_output_metadata + + @functools.cached_property + def _cached_output_metadata(self) -> BlockMetadata: + return BlockMetadata( + num_rows=self._num_rows(), + size_bytes=self._size_bytes(), + schema=self._schema(), + input_files=None, + exec_stats=None, + ) + + def _num_rows(self): + if all(bundle.num_rows() is not None for bundle in self._input_data): + return sum(bundle.num_rows() for bundle in self._input_data) + else: + return None + + def _size_bytes(self): + metadata = [m for bundle in self._input_data for m in bundle.metadata] + if all(m.size_bytes is not None for m in metadata): + return sum(m.size_bytes for m in metadata) + else: + return None + + def _schema(self): + metadata = [m for bundle in self._input_data for m in bundle.metadata] + return unify_block_metadata_schema(metadata) + + def is_lineage_serializable(self) -> bool: + # This operator isn't serializable because it contains ObjectRefs. + return False + + +class FromItems(AbstractFrom): + """Logical operator for `from_items`.""" + + pass + + +class FromBlocks(AbstractFrom): + """Logical operator for `from_blocks`.""" + + pass + + +class FromNumpy(AbstractFrom): + """Logical operator for `from_numpy`.""" + + pass + + +class FromArrow(AbstractFrom): + """Logical operator for `from_arrow`.""" + + pass + + +class FromPandas(AbstractFrom): + """Logical operator for `from_pandas`.""" + + pass diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..2296b0ee315441bcdb0a3acd49791576aea3e191 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/input_data_operator.py @@ -0,0 +1,74 @@ +import functools +from typing import Callable, List, Optional + +from ray.data._internal.execution.interfaces import RefBundle +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.util import unify_block_metadata_schema +from ray.data.block import BlockMetadata + + +class InputData(LogicalOperator): + """Logical operator for input data. + + This may hold cached blocks from a previous Dataset execution, or + the arguments for read tasks. + """ + + def __init__( + self, + input_data: Optional[List[RefBundle]] = None, + input_data_factory: Optional[Callable[[int], List[RefBundle]]] = None, + ): + assert (input_data is None) != ( + input_data_factory is None + ), "Only one of input_data and input_data_factory should be set." 
+ super().__init__( + "InputData", [], len(input_data) if input_data is not None else None + ) + self.input_data = input_data + self.input_data_factory = input_data_factory + + def output_data(self) -> Optional[List[RefBundle]]: + if self.input_data is None: + return None + return self.input_data + + def aggregate_output_metadata(self) -> BlockMetadata: + return self._cached_output_metadata + + @functools.cached_property + def _cached_output_metadata(self) -> BlockMetadata: + if self.input_data is None: + return BlockMetadata(None, None, None, None, None) + + return BlockMetadata( + num_rows=self._num_rows(), + size_bytes=self._size_bytes(), + schema=self._schema(), + input_files=None, + exec_stats=None, + ) + + def _num_rows(self): + assert self.input_data is not None + if all(bundle.num_rows() is not None for bundle in self.input_data): + return sum(bundle.num_rows() for bundle in self.input_data) + else: + return None + + def _size_bytes(self): + assert self.input_data is not None + metadata = [m for bundle in self.input_data for m in bundle.metadata] + if all(m.size_bytes is not None for m in metadata): + return sum(m.size_bytes for m in metadata) + else: + return None + + def _schema(self): + assert self.input_data is not None + metadata = [m for bundle in self.input_data for m in bundle.metadata] + return unify_block_metadata_schema(metadata) + + def is_lineage_serializable(self) -> bool: + # This operator isn't serializable because it contains ObjectRefs. + return False diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..fd7ad71e5a885bf512b08a247de4c51346ca9de5 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/map_operator.py @@ -0,0 +1,293 @@ +import inspect +import logging +from typing import Any, Callable, Dict, Iterable, List, Optional, Union + +from ray.data._internal.compute import ComputeStrategy, TaskPoolStrategy +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.logical.operators.one_to_one_operator import AbstractOneToOne +from ray.data.block import UserDefinedFunction +from ray.data.context import DEFAULT_BATCH_SIZE +from ray.data.preprocessor import Preprocessor + +logger = logging.getLogger(__name__) + + +class AbstractMap(AbstractOneToOne): + """Abstract class for logical operators that should be converted to physical + MapOperator. + """ + + def __init__( + self, + name: str, + input_op: Optional[LogicalOperator] = None, + num_outputs: Optional[int] = None, + *, + min_rows_per_bundled_input: Optional[int] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. + min_rows_per_bundled_input: The target number of rows to pass to + ``MapOperator._add_bundled_input()``. + ray_remote_args: Args to provide to ray.remote. + ray_remote_args_fn: A function that returns a dictionary of remote args + passed to each map worker. 
The purpose of this argument is to generate + dynamic arguments for each actor/task, and it will be called each time + prior to initializing the worker. Args returned from this function will + always override the args in ``ray_remote_args``. Note: this is an + advanced, experimental feature. + """ + super().__init__(name, input_op, num_outputs) + self._min_rows_per_bundled_input = min_rows_per_bundled_input + self._ray_remote_args = ray_remote_args or {} + self._ray_remote_args_fn = ray_remote_args_fn + + +class AbstractUDFMap(AbstractMap): + """Abstract class for logical operators performing a UDF that should be converted + to physical MapOperator. + """ + + def __init__( + self, + name: str, + input_op: LogicalOperator, + fn: UserDefinedFunction, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + min_rows_per_bundled_input: Optional[int] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. + fn: User-defined function to be called. + fn_args: Arguments to `fn`. + fn_kwargs: Keyword arguments to `fn`. + fn_constructor_args: Arguments to provide to the initializer of `fn` if + `fn` is a callable class. + fn_constructor_kwargs: Keyword arguments to provide to the initializer of + `fn` if `fn` is a callable class. + min_rows_per_bundled_input: The target number of rows to pass to + ``MapOperator._add_bundled_input()``. + compute: The compute strategy, either ``"tasks"`` (default) to use Ray + tasks, or ``"actors"`` to use an autoscaling actor pool. + ray_remote_args_fn: A function that returns a dictionary of remote args + passed to each map worker. The purpose of this argument is to generate + dynamic arguments for each actor/task, and it will be called each time + prior to initializing the worker. Args returned from this function will + always override the args in ``ray_remote_args``. Note: this is an + advanced, experimental feature. + ray_remote_args: Args to provide to ray.remote. + """ + name = self._get_operator_name(name, fn) + super().__init__( + name, + input_op, + min_rows_per_bundled_input=min_rows_per_bundled_input, + ray_remote_args=ray_remote_args, + ) + self._fn = fn + self._fn_args = fn_args + self._fn_kwargs = fn_kwargs + self._fn_constructor_args = fn_constructor_args + self._fn_constructor_kwargs = fn_constructor_kwargs + self._compute = compute or TaskPoolStrategy() + self._ray_remote_args_fn = ray_remote_args_fn + + def _get_operator_name(self, op_name: str, fn: UserDefinedFunction): + """Gets the Operator name including the map `fn` UDF name.""" + # If the input `fn` is a Preprocessor, the + # name is simply the name of the Preprocessor class. + if inspect.ismethod(fn) and isinstance(fn.__self__, Preprocessor): + return fn.__self__.__class__.__name__ + + # Otherwise, it takes the form of `<operator name>(<UDF name>)`, + # e.g. `MapBatches(my_udf)`.
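+ # Illustrative examples of the resulting names, assuming op_name="MapBatches" + # and hypothetical UDFs: + #   def my_udf(batch): ...  -> "MapBatches(my_udf)" + #   class MyUDF: ...        -> "MapBatches(MyUDF)" + #   lambda batch: batch     -> "MapBatches(<lambda>)" + #   MyUDF()                 -> "MapBatches(MyUDF)"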
+ try: + if inspect.isclass(fn): + # callable class + return f"{op_name}({fn.__name__})" + elif inspect.ismethod(fn): + # class method + return f"{op_name}({fn.__self__.__class__.__name__}.{fn.__name__})" + elif inspect.isfunction(fn): + # normal function or lambda function. + return f"{op_name}({fn.__name__})" + else: + # callable object. + return f"{op_name}({fn.__class__.__name__})" + except AttributeError as e: + logger.error("Failed to get name of UDF %s: %s", fn, e) + return "" + + +class MapBatches(AbstractUDFMap): + """Logical operator for map_batches.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + batch_size: Optional[int] = DEFAULT_BATCH_SIZE, + batch_format: str = "default", + zero_copy_batch: bool = False, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + min_rows_per_bundled_input: Optional[int] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + "MapBatches", + input_op, + fn, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + min_rows_per_bundled_input=min_rows_per_bundled_input, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + self._batch_size = batch_size + self._batch_format = batch_format + self._zero_copy_batch = zero_copy_batch + + @property + def can_modify_num_rows(self) -> bool: + return False + + +class MapRows(AbstractUDFMap): + """Logical operator for map.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + "Map", + input_op, + fn, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + + @property + def can_modify_num_rows(self) -> bool: + return False + + +class Filter(AbstractUDFMap): + """Logical operator for filter.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + "Filter", + input_op, + fn, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + + @property + def can_modify_num_rows(self) -> bool: + return True + + +class Project(AbstractMap): + """Logical operator for select_columns.""" + + def __init__( + self, + input_op: LogicalOperator, + cols: List[str], + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__("Project", input_op=input_op, ray_remote_args=ray_remote_args) + self._compute = compute + 
self._batch_size = DEFAULT_BATCH_SIZE + self._cols = cols + self._batch_format = "pyarrow" + self._zero_copy_batch = True + + @property + def cols(self) -> List[str]: + return self._cols + + @property + def can_modify_num_rows(self) -> bool: + return False + + +class FlatMap(AbstractUDFMap): + """Logical operator for flat_map.""" + + def __init__( + self, + input_op: LogicalOperator, + fn: UserDefinedFunction, + fn_args: Optional[Iterable[Any]] = None, + fn_kwargs: Optional[Dict[str, Any]] = None, + fn_constructor_args: Optional[Iterable[Any]] = None, + fn_constructor_kwargs: Optional[Dict[str, Any]] = None, + compute: Optional[Union[str, ComputeStrategy]] = None, + ray_remote_args_fn: Optional[Callable[[], Dict[str, Any]]] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + ): + super().__init__( + "FlatMap", + input_op, + fn, + fn_args=fn_args, + fn_kwargs=fn_kwargs, + fn_constructor_args=fn_constructor_args, + fn_constructor_kwargs=fn_constructor_kwargs, + compute=compute, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + + @property + def can_modify_num_rows(self) -> bool: + return True diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..f2f062578e2c1d1f31b42fe5f96dfbae6cd13654 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/n_ary_operator.py @@ -0,0 +1,60 @@ +from typing import Optional + +from ray.data._internal.logical.interfaces import LogicalOperator + + +class NAry(LogicalOperator): + """Base class for n-ary operators, which take multiple input operators.""" + + def __init__( + self, + *input_ops: LogicalOperator, + num_outputs: Optional[int] = None, + ): + """ + Args: + input_ops: The input operators. + """ + super().__init__(self.__class__.__name__, list(input_ops), num_outputs) + + +class Zip(NAry): + """Logical operator for zip.""" + + def __init__( + self, + left_input_op: LogicalOperator, + right_input_op: LogicalOperator, + ): + """ + Args: + left_input_op: The input operator on the left-hand side. + right_input_op: The input operator on the right-hand side.
+ """ + super().__init__(left_input_op, right_input_op) + + def estimated_num_outputs(self): + left_num_outputs = self._input_dependencies[0].estimated_num_outputs() + right_num_outputs = self._input_dependencies[1].estimated_num_outputs() + if left_num_outputs is None or right_num_outputs is None: + return None + return max(left_num_outputs, right_num_outputs) + + +class Union(NAry): + """Logical operator for union.""" + + def __init__( + self, + *input_ops: LogicalOperator, + ): + super().__init__(*input_ops) + + def estimated_num_outputs(self): + total_num_outputs = 0 + for input in self._input_dependencies: + num_outputs = input.estimated_num_outputs() + if num_outputs is None: + return None + total_num_outputs += num_outputs + return total_num_outputs diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..052d0b23ecda047b1876c8ca6adfa9bf6fd8dfa5 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/one_to_one_operator.py @@ -0,0 +1,80 @@ +import abc +from typing import Optional + +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data.block import BlockMetadata + + +class AbstractOneToOne(LogicalOperator): + """Abstract class for one-to-one logical operators, which + have one input and one output dependency. + """ + + def __init__( + self, + name: str, + input_op: Optional[LogicalOperator], + num_outputs: Optional[int] = None, + ): + """ + Args: + name: Name for this operator. This is the name that will appear when + inspecting the logical plan of a Dataset. + input_op: The operator preceding this operator in the plan DAG. The outputs + of `input_op` will be the inputs to this operator. + """ + super().__init__(name, [input_op] if input_op else [], num_outputs) + + @property + def input_dependency(self) -> LogicalOperator: + return self._input_dependencies[0] + + @property + @abc.abstractmethod + def can_modify_num_rows(self) -> bool: + """Whether this operator can modify the number of rows, + i.e. 
number of input rows != number of output rows.""" + + +class Limit(AbstractOneToOne): + """Logical operator for limit.""" + + def __init__( + self, + input_op: LogicalOperator, + limit: int, + ): + super().__init__( + f"limit={limit}", + input_op, + ) + self._limit = limit + + @property + def can_modify_num_rows(self) -> bool: + return True + + def aggregate_output_metadata(self) -> BlockMetadata: + return BlockMetadata( + num_rows=self._num_rows(), + size_bytes=None, + schema=self._schema(), + input_files=self._input_files(), + exec_stats=None, + ) + + def _schema(self): + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata().schema + + def _num_rows(self): + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + input_rows = self._input_dependencies[0].aggregate_output_metadata().num_rows + if input_rows is not None: + return min(input_rows, self._limit) + else: + return None + + def _input_files(self): + assert len(self._input_dependencies) == 1, len(self._input_dependencies) + return self._input_dependencies[0].aggregate_output_metadata().input_files diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..5d958dbc59fbd5c0c09312d2c50a59de9f689d41 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/read_operator.py @@ -0,0 +1,95 @@ +import functools +from typing import Any, Dict, Optional, Union + +from ray.data._internal.logical.operators.map_operator import AbstractMap +from ray.data._internal.util import unify_block_metadata_schema +from ray.data.block import BlockMetadata +from ray.data.datasource.datasource import Datasource, Reader + + +class Read(AbstractMap): + """Logical operator for read.""" + + def __init__( + self, + datasource: Datasource, + datasource_or_legacy_reader: Union[Datasource, Reader], + parallelism: int, + mem_size: Optional[int], + num_outputs: Optional[int] = None, + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + ): + super().__init__( + f"Read{datasource.get_name()}", + None, + num_outputs, + ray_remote_args=ray_remote_args, + ) + self._datasource = datasource + self._datasource_or_legacy_reader = datasource_or_legacy_reader + self._parallelism = parallelism + self._mem_size = mem_size + self._concurrency = concurrency + self._detected_parallelism = None + + def set_detected_parallelism(self, parallelism: int): + """ + Set the true parallelism that should be used during execution. This + should be specified by the user or detected by the optimizer. + """ + self._detected_parallelism = parallelism + + def get_detected_parallelism(self) -> int: + """ + Get the true parallelism that should be used during execution. + """ + return self._detected_parallelism + + def aggregate_output_metadata(self) -> BlockMetadata: + """A ``BlockMetadata`` that represents the aggregate metadata of the outputs. + + This method gets metadata from the read tasks. It doesn't trigger any actual + execution. + """ + return self._cached_output_metadata + + @functools.cached_property + def _cached_output_metadata(self) -> BlockMetadata: + # Legacy datasources might not implement `get_read_tasks`. 
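+ # In that case, no metadata can be derived ahead of execution, so an + # empty BlockMetadata is returned below (illustrative note).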
+ if self._datasource.should_create_reader: + return BlockMetadata(None, None, None, None, None) + + # HACK: Try to get a single read task to get the metadata. + read_tasks = self._datasource.get_read_tasks(1) + if len(read_tasks) == 0: + # If there are no read tasks, the dataset is probably empty. + return BlockMetadata(None, None, None, None, None) + + # `get_read_tasks` isn't guaranteed to return exactly one read task. + metadata = [read_task.metadata for read_task in read_tasks] + + if all(meta.num_rows is not None for meta in metadata): + num_rows = sum(meta.num_rows for meta in metadata) + else: + num_rows = None + + if all(meta.size_bytes is not None for meta in metadata): + size_bytes = sum(meta.size_bytes for meta in metadata) + else: + size_bytes = None + + schema = unify_block_metadata_schema(metadata) + + input_files = [] + for meta in metadata: + if meta.input_files is not None: + input_files.extend(meta.input_files) + + return BlockMetadata( + num_rows=num_rows, + size_bytes=size_bytes, + schema=schema, + input_files=input_files, + exec_stats=None, + ) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py new file mode 100644 index 0000000000000000000000000000000000000000..cee1930b788f9ddc8bcead1150168f635a374658 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/operators/write_operator.py @@ -0,0 +1,35 @@ +from typing import Any, Dict, Optional, Union + +from ray.data._internal.logical.interfaces import LogicalOperator +from ray.data._internal.logical.operators.map_operator import AbstractMap +from ray.data.datasource.datasink import Datasink +from ray.data.datasource.datasource import Datasource + + +class Write(AbstractMap): + """Logical operator for write.""" + + def __init__( + self, + input_op: LogicalOperator, + datasink_or_legacy_datasource: Union[Datasink, Datasource], + ray_remote_args: Optional[Dict[str, Any]] = None, + concurrency: Optional[int] = None, + **write_args, + ): + if isinstance(datasink_or_legacy_datasource, Datasink): + min_rows_per_bundled_input = ( + datasink_or_legacy_datasource.num_rows_per_write + ) + else: + min_rows_per_bundled_input = None + + super().__init__( + "Write", + input_op, + min_rows_per_bundled_input=min_rows_per_bundled_input, + ray_remote_args=ray_remote_args, + ) + self._datasink_or_legacy_datasource = datasink_or_legacy_datasource + self._write_args = write_args + self._concurrency = concurrency diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__init__.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..92a769261403be41df443ab0a740505ecc934ffd --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__init__.py @@ -0,0 +1,4 @@ +from ray.data._internal.logical.rules.operator_fusion import OperatorFusionRule +from ray.data._internal.logical.rules.randomize_blocks import ReorderRandomizeBlocksRule + +__all__ = ["ReorderRandomizeBlocksRule", "OperatorFusionRule"] diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9aed432dc782e0b92bac37694fef57d5b125ed4a Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/__init__.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c6ecb0495c6bb73b9e2b44150d4e03c5edebc53 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_batch_format.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33a9def4435dbcb4a8c7dd9df17d7e565a3a4c33 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/inherit_target_max_block_size.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..888e08819f0dba30a4db33134f4c71bbbfb68c8e Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/limit_pushdown.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..354a388e1615fa38ae44a5e0d972fcc3b3a17687 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/operator_fusion.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0deef03a32b4a68993404086afb8f5b675b4ba7e Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/randomize_blocks.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc464c03079fe93fe3aaa6571f0b7f2eb3907257 Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/set_read_parallelism.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc 
b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65807fbda221adbb76dbf05a98af4ba7aa6e2d2a Binary files /dev/null and b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/__pycache__/zero_copy_map_fusion.cpython-310.pyc differ diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_batch_format.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_batch_format.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd265cd08b119250d08dcf71691e1b3701fb08a --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_batch_format.py @@ -0,0 +1,42 @@ +from collections import deque +from typing import Iterable + +from ray.data._internal.logical.interfaces import LogicalOperator, LogicalPlan, Rule +from ray.data._internal.logical.operators.all_to_all_operator import AbstractAllToAll +from ray.data._internal.logical.operators.map_operator import MapBatches + + +class InheritBatchFormatRule(Rule): + """For each AbstractAllToAll-based operator, apply this rule + to inherit batch_format from an upstream operator by traversing + the entire DAG.""" + + def apply(self, plan: LogicalPlan) -> LogicalPlan: + optimized_dag: LogicalOperator = self._apply(plan.dag) + new_plan = LogicalPlan(dag=optimized_dag, context=plan.context) + return new_plan + + def _apply(self, op: LogicalOperator): + # Post-order traversal. + nodes: Iterable[LogicalOperator] = deque() + for node in op.post_order_iter(): + nodes.appendleft(node) + + while len(nodes) > 0: + current_op = nodes.pop() + + if isinstance(current_op, AbstractAllToAll): + # Traverse up the DAG until we find a MapBatches with batch_format, + # or until we reach the source op, in which case do nothing. + upstream_op = current_op.input_dependencies[0] + while upstream_op.input_dependencies: + if ( + isinstance(upstream_op, MapBatches) + and upstream_op._batch_format + ): + current_op._batch_format = upstream_op._batch_format + break + upstream_op = upstream_op.input_dependencies[0] + + # Return the root op; any changes were made in place. + return op diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py new file mode 100644 index 0000000000000000000000000000000000000000..298ff6c4edbff9cfab6ea14418dc61b81c93e1e8 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/inherit_target_max_block_size.py @@ -0,0 +1,30 @@ +from typing import Optional + +from ray.data._internal.execution.interfaces import PhysicalOperator +from ray.data._internal.logical.interfaces import PhysicalPlan, Rule + + +class InheritTargetMaxBlockSizeRule(Rule): + """For each op that has overridden the default target max block size, + propagate to upstream ops until we reach an op that has also overridden the + target max block size.""" + + def apply(self, plan: PhysicalPlan) -> PhysicalPlan: + self._propagate_target_max_block_size_to_upstream_ops(plan.dag) + return plan + + def _propagate_target_max_block_size_to_upstream_ops( + self, dag: PhysicalOperator, target_max_block_size: Optional[int] = None + ): + if dag.target_max_block_size is not None: + # Set the target block size to inherit for + # upstream ops.
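+ # Illustrative example: in Read -> Map -> Sort where only Sort overrides + # target_max_block_size, that value is inherited by Map and Read; an op + # with its own override starts a new inherited value from that point upstream.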
+ target_max_block_size = dag.target_max_block_size + elif target_max_block_size is not None: + # Inherit from downstream op. + dag.set_target_max_block_size(target_max_block_size) + + for upstream_op in dag.input_dependencies: + self._propagate_target_max_block_size_to_upstream_ops( + upstream_op, target_max_block_size + ) diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py new file mode 100644 index 0000000000000000000000000000000000000000..a13378eb991eb6bdc7e389cf6842ae3df91e66a2 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/limit_pushdown.py @@ -0,0 +1,133 @@ +import copy +from collections import deque +from typing import Iterable, List + +from ray.data._internal.logical.interfaces import LogicalOperator, LogicalPlan, Rule +from ray.data._internal.logical.operators.one_to_one_operator import ( + AbstractOneToOne, + Limit, +) +from ray.data._internal.logical.operators.read_operator import Read + + +class LimitPushdownRule(Rule): + """Rule for pushing down the limit operator. + + When a limit operator is present, we apply the limit on the + most upstream operator that supports it. Notably, we place the + Limit operator immediately downstream of any Read op, any other + non-OneToOne operator, and any operator that could potentially + change the number of output rows. + + In addition, we fuse consecutive Limit operators into a single + Limit operator, i.e. `Limit[n] -> Limit[m]` becomes `Limit[min(n, m)]`. + """ + + def apply(self, plan: LogicalPlan) -> LogicalPlan: + optimized_dag = self._apply_limit_pushdown(plan.dag) + optimized_dag = self._apply_limit_fusion(optimized_dag) + return LogicalPlan(dag=optimized_dag, context=plan.context) + + def _apply_limit_pushdown(self, op: LogicalOperator) -> LogicalOperator: + """Given a DAG of LogicalOperators, traverse the DAG and push down + Limit operators, i.e. move Limit operators as far upstream as possible. + + Returns a new LogicalOperator with the Limit operators pushed down.""" + # Post-order traversal. + nodes: Iterable[LogicalOperator] = deque() + for node in op.post_order_iter(): + nodes.appendleft(node) + + while len(nodes) > 0: + current_op = nodes.pop() + + # If we encounter a Limit op, move it upstream until it reaches: + # - A Read operator + # - A non-AbstractOneToOne operator (e.g. AbstractAllToAll) + # - An AbstractOneToOne operator that could change the number of output rows + + # TODO(scottjlee): in our current abstraction, we have Read extend + # AbstractMap (with no input dependency), which extends AbstractOneToOne. + # So we have to explicitly separate the Read op in its own check. + # We should remove this case once we refactor Read op to no longer + # be an AbstractOneToOne op. + if isinstance(current_op, Limit): + limit_op_copy = copy.copy(current_op) + + # Traverse up the DAG until we reach the first operator that meets + # one of the conditions above, which will serve as the new input + # into the Limit operator.
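+ # Illustrative example: Read -> Map -> Limit[10] is rewritten to + # Read -> Limit[10] -> Map, since Map cannot change the number of rows, + # while the Read op stops the upstream traversal.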
+ new_input_into_limit = current_op.input_dependency + ops_between_new_input_and_limit: List[LogicalOperator] = [] + while ( + isinstance(new_input_into_limit, AbstractOneToOne) + and not isinstance(new_input_into_limit, Read) + and not getattr(new_input_into_limit, "can_modify_num_rows", False) + ): + new_input_into_limit_copy = copy.copy(new_input_into_limit) + ops_between_new_input_and_limit.append(new_input_into_limit_copy) + new_input_into_limit = new_input_into_limit.input_dependency + + # Link the Limit operator and its newly designated input op from above. + limit_op_copy._input_dependencies = [new_input_into_limit] + new_input_into_limit._output_dependencies = [limit_op_copy] + + # Build the chain of operator dependencies between the new + # input and the Limit operator, using copies of traversed operators. + ops_between_new_input_and_limit.append(limit_op_copy) + for idx in range(len(ops_between_new_input_and_limit) - 1): + curr_op, up_op = ( + ops_between_new_input_and_limit[idx], + ops_between_new_input_and_limit[idx + 1], + ) + curr_op._input_dependencies = [up_op] + up_op._output_dependencies = [curr_op] + # Add the copied operator to the list of nodes to be traversed. + nodes.append(curr_op) + + # Link the Limit operator to its new input operator. + for limit_output_op in current_op.output_dependencies: + limit_output_op._input_dependencies = [ + ops_between_new_input_and_limit[0] + ] + last_op = ops_between_new_input_and_limit[0] + last_op._output_dependencies = current_op.output_dependencies + + return current_op + + def _apply_limit_fusion(self, op: LogicalOperator) -> LogicalOperator: + """Given a DAG of LogicalOperators, traverse the DAG and fuse all + back-to-back Limit operators, i.e. + Limit[n] -> Limit[m] becomes Limit[min(n, m)]. + + Returns a new LogicalOperator with the Limit operators fusion applied.""" + + # Post-order traversal. + nodes: Iterable[LogicalOperator] = deque() + for node in op.post_order_iter(): + nodes.appendleft(node) + + while len(nodes) > 0: + current_op = nodes.pop() + + # If we encounter two back-to-back Limit operators, fuse them. + if isinstance(current_op, Limit): + upstream_op = current_op.input_dependency + if isinstance(upstream_op, Limit): + new_limit = min(current_op._limit, upstream_op._limit) + fused_limit_op = Limit(upstream_op.input_dependency, new_limit) + + # Link the fused Limit operator to its input and output ops, i.e.: + # `upstream_input -> limit_upstream -> limit_downstream -> downstream_output` # noqa: E501 + # becomes `upstream_input -> fused_limit -> downstream_output` + fused_limit_op._input_dependencies = upstream_op.input_dependencies + fused_limit_op._output_dependencies = current_op.output_dependencies + + # Replace occurrences of the upstream Limit operator in + # output_dependencies with the newly fused Limit operator. 
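+ # e.g. Read -> Limit[10] -> Limit[5] -> Write collapses to + # Read -> Limit[5] -> Write (illustrative).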
+ upstream_input = upstream_op.input_dependency + upstream_input._output_dependencies = [fused_limit_op] + + for current_output in current_op.output_dependencies: + current_output._input_dependencies = [fused_limit_op] + nodes.append(fused_limit_op) + return current_op diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py new file mode 100644 index 0000000000000000000000000000000000000000..fef82a3b1f935a2b989c199c9888cc69abe12ec5 --- /dev/null +++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/operator_fusion.py @@ -0,0 +1,464 @@ +from typing import List, Optional, Tuple + +# TODO(Clark): Remove compute dependency once we delete the legacy compute. +from ray.data._internal.compute import get_compute, is_task_compute +from ray.data._internal.execution.interfaces import ( + PhysicalOperator, + RefBundle, + TaskContext, +) +from ray.data._internal.execution.operators.actor_pool_map_operator import ( + ActorPoolMapOperator, +) +from ray.data._internal.execution.operators.base_physical_operator import ( + AllToAllOperator, +) +from ray.data._internal.execution.operators.map_operator import MapOperator +from ray.data._internal.execution.operators.task_pool_map_operator import ( + TaskPoolMapOperator, +) +from ray.data._internal.logical.interfaces import PhysicalPlan, Rule +from ray.data._internal.logical.operators.all_to_all_operator import ( + AbstractAllToAll, + RandomShuffle, + Repartition, +) +from ray.data._internal.logical.operators.map_operator import AbstractUDFMap +from ray.data._internal.stats import StatsDict +from ray.data.context import DataContext + +# Scheduling strategy can be inherited from upstream operator if not specified. +INHERITABLE_REMOTE_ARGS = ["scheduling_strategy"] + + +class OperatorFusionRule(Rule): + """Fuses linear chains of compatible physical operators.""" + + def apply(self, plan: PhysicalPlan) -> PhysicalPlan: + self._op_map = plan.op_map.copy() + # Do DFS fusion on compatible pairwise operators in two passes. + # In the first pass, only fuse back-to-back map operators together. + fused_dag = self._fuse_map_operators_in_dag(plan.dag) + + # Now that we have fused together all back-to-back map operators, + # we fuse together MapOperator -> AllToAllOperator pairs. + fused_dag = self._fuse_all_to_all_operators_in_dag(fused_dag) + + # Update output dependencies after fusion. + # TODO(hchen): Instead of updating the dependencies manually, + # we need a better abstraction for manipulating the DAG. + self._remove_output_deps(fused_dag) + self._update_output_deps(fused_dag) + + new_plan = PhysicalPlan(fused_dag, self._op_map, plan.context) + return new_plan + + def _remove_output_deps(self, op: PhysicalOperator) -> None: + for input in op._input_dependencies: + input._output_dependencies = [] + self._remove_output_deps(input) + + def _update_output_deps(self, op: PhysicalOperator) -> None: + for input in op._input_dependencies: + input._output_dependencies.append(op) + self._update_output_deps(input) + + def _fuse_map_operators_in_dag(self, dag: PhysicalOperator) -> MapOperator: + """Starting at the given operator, traverses up the DAG of operators + and recursively fuses compatible MapOperator -> MapOperator pairs. + Returns the current (root) operator after completing upstream operator fusions.
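+ For example (illustrative): a Read -> Map -> Map chain may collapse into a + single fused "Read->Map->Map" operator when each adjacent pair is compatible.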
+ """ + upstream_ops = dag.input_dependencies + while ( + len(upstream_ops) == 1 + and isinstance(dag, MapOperator) + and isinstance(upstream_ops[0], MapOperator) + and self._can_fuse(dag, upstream_ops[0]) + ): + # Fuse operator with its upstream op. + dag = self._get_fused_map_operator(dag, upstream_ops[0]) + upstream_ops = dag.input_dependencies + + # Done fusing back-to-back map operators together here, + # move up the DAG to find the next map operators to fuse. + dag._input_dependencies = [ + self._fuse_map_operators_in_dag(upstream_op) for upstream_op in upstream_ops + ] + return dag + + def _fuse_all_to_all_operators_in_dag( + self, dag: AllToAllOperator + ) -> AllToAllOperator: + """Starting at the given operator, traverses up the DAG of operators + and recursively fuses compatible MapOperator -> AllToAllOperator pairs. + + Also, sets the target block size of the immediately upstream map op to + match the shuffle block size. We use a larger block size for shuffles + because tiny blocks are bad for I/O performance. + + Returns the current (root) operator after completing upstream operator fusions. + """ + upstream_ops = dag.input_dependencies + while ( + len(upstream_ops) == 1 + and isinstance(dag, AllToAllOperator) + and isinstance(upstream_ops[0], MapOperator) + and self._can_fuse(dag, upstream_ops[0]) + ): + # Fuse operator with its upstream op. + dag = self._get_fused_all_to_all_operator(dag, upstream_ops[0]) + upstream_ops = dag.input_dependencies + + # Done fusing MapOperator -> AllToAllOperator together here, + # move up the DAG to find the next pair of operators to fuse. + dag._input_dependencies = [ + self._fuse_all_to_all_operators_in_dag(upstream_op) + for upstream_op in upstream_ops + ] + return dag + + def _can_fuse(self, down_op: PhysicalOperator, up_op: PhysicalOperator) -> bool: + """Returns whether the provided downstream operator can be fused with the given + upstream operator. + + We currently support fusing two operators if the following are all true: + * We are fusing either MapOperator -> MapOperator or + MapOperator -> AllToAllOperator. + * They either use the same compute configuration, or the upstream operator + uses a task pool while the downstream operator uses an actor pool. + * If both operators involve callable classes, the callable classes are + the same class AND constructor args are the same for both. + * They have compatible remote arguments. + """ + from ray.data._internal.logical.operators.map_operator import ( + AbstractMap, + AbstractUDFMap, + ) + + if not up_op.supports_fusion() or not down_op.supports_fusion(): + return False + + # We currently only support fusing for the following cases: + # - TaskPoolMapOperator -> TaskPoolMapOperator/ActorPoolMapOperator + # - TaskPoolMapOperator -> AllToAllOperator + # (only RandomShuffle and Repartition LogicalOperators are currently supported) + if not ( + ( + isinstance(up_op, TaskPoolMapOperator) + and isinstance(down_op, (TaskPoolMapOperator, ActorPoolMapOperator)) + ) + or ( + isinstance(up_op, TaskPoolMapOperator) + and isinstance(down_op, AllToAllOperator) + ) + ): + return False + + down_logical_op = self._op_map[down_op] + up_logical_op = self._op_map[up_op] + + if up_op.get_additional_split_factor() > 1: + return False + + # If the downstream operator takes no input, it cannot be fused with + # the upstream operator. 
+ if not down_logical_op._input_dependencies: + return False + + # We currently only support fusing for the following cases: + # - AbstractMap -> AbstractMap + # - AbstractMap -> RandomShuffle + # - AbstractMap -> Repartition (shuffle=True) + if not ( + ( + isinstance(up_logical_op, AbstractMap) + and isinstance(down_logical_op, AbstractMap) + ) + or ( + isinstance(up_logical_op, AbstractMap) + and isinstance(down_logical_op, RandomShuffle) + ) + or ( + isinstance(up_logical_op, AbstractMap) + and isinstance(down_logical_op, Repartition) + ) + ): + return False + + # Do not fuse Repartition operator if shuffle is disabled + # (i.e. using split shuffle). + if isinstance(down_logical_op, Repartition) and not down_logical_op._shuffle: + return False + + if isinstance(down_logical_op, AbstractUDFMap) and isinstance( + up_logical_op, AbstractUDFMap + ): + # Allow fusing tasks->actors if the resources are compatible (read->map), + # but not the other way around. The latter (downstream op) will be used as + # the compute if fused. + if is_task_compute(down_logical_op._compute) and get_compute( + up_logical_op._compute + ) != get_compute(down_logical_op._compute): + return False + + # Only fuse if the ops' remote arguments are compatible. + if not _are_remote_args_compatible( + getattr(up_logical_op, "_ray_remote_args", {}), + getattr(down_logical_op, "_ray_remote_args", {}), + ): + return False + + # Do not fuse if either op specifies a `_ray_remote_args_fn`, + # since it is not known whether the generated args will be compatible. + if getattr(up_logical_op, "_ray_remote_args_fn", None) or getattr( + down_logical_op, "_ray_remote_args_fn", None + ): + return False + + if not self._can_merge_target_max_block_size( + up_op.target_max_block_size, down_op.target_max_block_size + ): + return False + + # Otherwise, ops are compatible for fusion. + return True + + def _can_merge_target_max_block_size( + self, + up_target_max_block_size: Optional[int], + down_target_max_block_size: Optional[int], + ): + # If the upstream op overrode the target max block size, only fuse if + # they are equal. + if up_target_max_block_size is not None: + if down_target_max_block_size is None: + down_target_max_block_size = ( + DataContext.get_current().target_max_block_size + ) + if up_target_max_block_size != down_target_max_block_size: + return False + return True + + def _get_merged_target_max_block_size( + self, + up_target_max_block_size: Optional[int], + down_target_max_block_size: Optional[int], + ): + if up_target_max_block_size is not None: + # If the upstream op overrode the target max block size, we can + # only merge if the downstream op matches or uses the default. + assert ( + down_target_max_block_size is None + or down_target_max_block_size == up_target_max_block_size + ) + return up_target_max_block_size + else: + # Upstream op inherits the downstream op's target max block size, + # because the downstream op is the one that outputs the final + # blocks. + return down_target_max_block_size + + def _get_fused_map_operator( + self, down_op: MapOperator, up_op: MapOperator + ) -> MapOperator: + from ray.data._internal.logical.operators.map_operator import AbstractMap + + assert self._can_fuse(down_op, up_op), ( + "Current rule supports fusing MapOperator->MapOperator, but received: " + f"{type(up_op).__name__} -> {type(down_op).__name__}" + ) + + # Fuse operator names. 
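+ # e.g. "ReadParquet" and "MapBatches(my_udf)" fuse into + # "ReadParquet->MapBatches(my_udf)" (illustrative names).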
+ name = up_op.name + "->" + down_op.name + + down_logical_op = self._op_map.pop(down_op) + up_logical_op = self._op_map.pop(up_op) + + # Merge minimum block sizes. + down_min_rows_per_bundled_input = ( + down_logical_op._min_rows_per_bundled_input + if isinstance(down_logical_op, AbstractMap) + else None + ) + up_min_rows_per_bundled_input = ( + up_logical_op._min_rows_per_bundled_input + if isinstance(up_logical_op, AbstractMap) + else None + ) + if ( + down_min_rows_per_bundled_input is not None + and up_min_rows_per_bundled_input is not None + ): + min_rows_per_bundled_input = max( + down_min_rows_per_bundled_input, up_min_rows_per_bundled_input + ) + elif up_min_rows_per_bundled_input is not None: + min_rows_per_bundled_input = up_min_rows_per_bundled_input + else: + min_rows_per_bundled_input = down_min_rows_per_bundled_input + + target_max_block_size = self._get_merged_target_max_block_size( + up_op.target_max_block_size, down_op.target_max_block_size + ) + + # We take the downstream op's compute in case we're fusing upstream tasks with a + # downstream actor pool (e.g. read->map). + compute = None + if isinstance(down_logical_op, AbstractUDFMap): + compute = get_compute(down_logical_op._compute) + ray_remote_args = up_logical_op._ray_remote_args + ray_remote_args_fn = ( + up_logical_op._ray_remote_args_fn or down_logical_op._ray_remote_args_fn + ) + # Make the upstream operator's inputs the new, fused operator's inputs. + input_deps = up_op.input_dependencies + assert len(input_deps) == 1 + input_op = input_deps[0] + + # Fused physical map operator. + op = MapOperator.create( + up_op.get_map_transformer().fuse(down_op.get_map_transformer()), + input_op, + target_max_block_size=target_max_block_size, + name=name, + compute_strategy=compute, + min_rows_per_bundle=min_rows_per_bundled_input, + ray_remote_args=ray_remote_args, + ray_remote_args_fn=ray_remote_args_fn, + ) + op.set_logical_operators(*up_op._logical_operators, *down_op._logical_operators) + + # Build a map logical operator to be used as a reference for further fusion. + # TODO(Scott): This is hacky, remove this once we push fusion to be purely based + # on a lower-level operator spec. + if isinstance(up_logical_op, AbstractUDFMap): + input_op = up_logical_op.input_dependency + else: + # Bottom out at the source logical op (e.g. Read()). + input_op = up_logical_op + if isinstance(down_logical_op, AbstractUDFMap): + logical_op = AbstractUDFMap( + name, + input_op, + down_logical_op._fn, + down_logical_op._fn_args, + down_logical_op._fn_kwargs, + down_logical_op._fn_constructor_args, + down_logical_op._fn_constructor_kwargs, + min_rows_per_bundled_input, + compute, + ray_remote_args_fn, + ray_remote_args, + ) + else: + from ray.data._internal.logical.operators.map_operator import AbstractMap + + # The downstream op is AbstractMap instead of AbstractUDFMap. + logical_op = AbstractMap( + name, + input_op, + min_rows_per_bundled_input=min_rows_per_bundled_input, + ray_remote_args_fn=ray_remote_args_fn, + ray_remote_args=ray_remote_args, + ) + self._op_map[op] = logical_op + # Return the fused physical operator. + return op + + def _get_fused_all_to_all_operator( + self, down_op: AllToAllOperator, up_op: MapOperator + ) -> AllToAllOperator: + assert self._can_fuse(down_op, up_op), ( + "Current rule supports fusing MapOperator -> AllToAllOperator" + f", but received: {type(up_op).__name__} -> {type(down_op).__name__}" + ) + + # Fuse operator names. 
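+ # e.g. "Map(fn)" and "RandomShuffle" fuse into + # "Map(fn)->RandomShuffle" (illustrative names).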
+        name = up_op.name + "->" + down_op.name
+
+        down_logical_op: AbstractAllToAll = self._op_map.pop(down_op)
+        up_logical_op: AbstractUDFMap = self._op_map.pop(up_op)
+
+        # Fuse transformation functions.
+        ray_remote_args = up_logical_op._ray_remote_args
+        down_transform_fn = down_op.get_transformation_fn()
+        up_map_transformer = up_op.get_map_transformer()
+
+        def fused_all_to_all_transform_fn(
+            blocks: List[RefBundle], ctx: TaskContext
+        ) -> Tuple[List[RefBundle], StatsDict]:
+            """To fuse MapOperator->AllToAllOperator, we store the map function
+            in the TaskContext so that it may be used by the downstream
+            AllToAllOperator's transform function."""
+            ctx.upstream_map_transformer = up_map_transformer
+            ctx.upstream_map_ray_remote_args = ray_remote_args
+            return down_transform_fn(blocks, ctx)
+
+        # Make the upstream operator's inputs the new, fused operator's inputs.
+        input_deps = up_op.input_dependencies
+        assert len(input_deps) == 1
+        input_op = input_deps[0]
+
+        target_max_block_size = self._get_merged_target_max_block_size(
+            up_op.target_max_block_size, down_op.target_max_block_size
+        )
+
+        op = AllToAllOperator(
+            fused_all_to_all_transform_fn,
+            input_op,
+            target_max_block_size=target_max_block_size,
+            num_outputs=down_op._num_outputs,
+            # Transfer over the existing sub-progress bars from
+            # the AllToAllOperator (if any) into the fused operator.
+            sub_progress_bar_names=down_op._sub_progress_bar_names,
+            name=name,
+        )
+        # Bottom out at the source logical op (e.g. Read()).
+        input_op = up_logical_op
+
+        if isinstance(down_logical_op, RandomShuffle):
+            logical_op = RandomShuffle(
+                input_op,
+                name=name,
+                ray_remote_args=ray_remote_args,
+            )
+        elif isinstance(down_logical_op, Repartition):
+            logical_op = Repartition(
+                input_op,
+                num_outputs=down_logical_op._num_outputs,
+                shuffle=down_logical_op._shuffle,
+            )
+        self._op_map[op] = logical_op
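+        # (Note: _can_fuse guarantees down_logical_op is a RandomShuffle or a
+        # shuffle Repartition, so logical_op above is always bound.)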
+        # Return the fused physical operator.
+        return op
+
+
+def _are_remote_args_compatible(prev_args, next_args):
+    """Check if Ray remote arguments are compatible for merging."""
+    prev_args = _canonicalize(prev_args)
+    next_args = _canonicalize(next_args)
+    remote_args = next_args.copy()
+    for key in INHERITABLE_REMOTE_ARGS:
+        # NOTE: Only carry over an inheritable value when it isn't already
+        # provided in the downstream remote args.
+        if key in prev_args and key not in remote_args:
+            remote_args[key] = prev_args[key]
+
+    return prev_args == remote_args
+
+
+def _canonicalize(remote_args: dict) -> dict:
+    """Returns the canonical form of the given remote args."""
+    remote_args = remote_args.copy()
+    if "num_cpus" not in remote_args or remote_args["num_cpus"] is None:
+        remote_args["num_cpus"] = 1
+    if "num_gpus" not in remote_args or remote_args["num_gpus"] is None:
+        remote_args["num_gpus"] = 0
+    resources = remote_args.get("resources", {})
+    for k, v in list(resources.items()):
+        if v is None or v == 0.0:
+            del resources[k]
+    remote_args["resources"] = resources
+    return remote_args
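To make the compatibility semantics concrete, here is a standalone sketch of the canonicalization rules above (canonicalize is a re-implementation for illustration, not the patched private helper):

    def canonicalize(args: dict) -> dict:
        # Standalone restatement of _canonicalize: fill CPU/GPU defaults and
        # drop empty custom resources.
        out = dict(args)
        if out.get("num_cpus") is None:
            out["num_cpus"] = 1
        if out.get("num_gpus") is None:
            out["num_gpus"] = 0
        out["resources"] = {
            k: v for k, v in out.get("resources", {}).items() if v not in (None, 0.0)
        }
        return out

    # An empty spec and an explicit {"num_cpus": 1} canonicalize identically,
    # so the compatibility check treats them as fusable:
    assert canonicalize({}) == canonicalize({"num_cpus": 1})
    # A GPU mismatch survives canonicalization and blocks fusion:
    assert canonicalize({"num_gpus": 1}) != canonicalize({"num_gpus": 0})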
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/randomize_blocks.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/randomize_blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..8810217258ab5b55e3d83eecd41c8e6c2fe4f99f
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/randomize_blocks.py
@@ -0,0 +1,77 @@
+import copy
+from collections import deque
+
+from ray.data._internal.logical.interfaces import LogicalOperator, LogicalPlan, Rule
+from ray.data._internal.logical.operators.all_to_all_operator import (
+    AbstractAllToAll,
+    RandomizeBlocks,
+)
+
+
+class ReorderRandomizeBlocksRule(Rule):
+    """Rule for reordering RandomizeBlocks logical operators.
+
+    Reordering RandomizeBlocks operators helps fuse multiple AbstractUDFMap
+    operators together for better performance.
+
+    1. Dedupes multiple RandomizeBlocks operators if they are not seeded.
+    2. Moves RandomizeBlocks operators to the end of a sequence of AbstractUDFMap
+       operators. RandomizeBlocks operators are not moved across AbstractAllToAll
+       operator boundaries.
+    """
+
+    def apply(self, plan: LogicalPlan) -> LogicalPlan:
+        optimized_dag: LogicalOperator = self._apply(plan.dag)
+        new_plan = LogicalPlan(dag=optimized_dag, context=plan.context)
+        return new_plan
+
+    def _apply(self, op: LogicalOperator) -> LogicalOperator:
+        operators = []
+
+        # Post-order traversal.
+        nodes = deque()
+        for node in op.post_order_iter():
+            nodes.appendleft(node)
+
+        while len(nodes) > 0:
+            current_op = nodes.pop()
+            upstream_ops = current_op.input_dependencies
+
+            # Iterate through all upstream ops, and remove all RandomizeBlocks
+            # operators.
+            for i in range(len(upstream_ops)):
+                if isinstance(upstream_ops[i], RandomizeBlocks):
+                    # If no seeds are provided, collapse into a single
+                    # RandomizeBlocks operator.
+                    current_seed = upstream_ops[i]._seed
+                    if not operators or current_seed or operators[-1]._seed:
+                        # Make a copy of the operator: the instance may be
+                        # shared by multiple Datasets, so it shouldn't be
+                        # modified in place.
+                        operators.append(copy.copy(upstream_ops[i]))
+
+                    # Remove the RandomizeBlocks operator from the DAG and
+                    # wire in new input dependencies.
+                    assert len(upstream_ops[i].input_dependencies) == 1
+                    upstream_ops[i] = upstream_ops[i].input_dependencies[0]
+
+            if isinstance(current_op, AbstractAllToAll) and not isinstance(
+                current_op, RandomizeBlocks
+            ):
+                # If this operator is an AllToAll operator, insert the collected
+                # RandomizeBlocks operators right before it rather than at the
+                # end of the DAG.
+                # All-to-all operators can have only 1 input operator.
+                assert len(upstream_ops) == 1
+                input_op = upstream_ops[0]
+                for random_op in operators:
+                    random_op._input_dependencies = [input_op]
+                    input_op = random_op
+                upstream_ops[0] = input_op
+                operators = []
+
+        # Add any remaining RandomizeBlocks operators as the last operators in
+        # the DAG.
+        for random_op in operators:
+            random_op._input_dependencies = [op]
+            op = random_op
+
+        return op
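At the API level, the effect of this rule can be sketched as follows (a hedged example using the public ray.data API; the exact optimized plan may differ by version):

    import ray

    ds = (
        ray.data.range(100)
        .randomize_block_order()  # unseeded; moved past the map by this rule
        .map(lambda row: {"id": row["id"] + 1})
        .randomize_block_order()  # unseeded; deduped with the first
    )
    # After ReorderRandomizeBlocksRule, both unseeded RandomizeBlocks collapse
    # into a single operator placed after the map, so Read and Map can fuse.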
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/set_read_parallelism.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/set_read_parallelism.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f9bb1b56ada4375eec9061dc47d30cd1862bd98
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/set_read_parallelism.py
@@ -0,0 +1,132 @@
+import logging
+import math
+from typing import Optional, Tuple, Union
+
+from ray import available_resources as ray_available_resources
+from ray.data._internal.execution.interfaces import PhysicalOperator
+from ray.data._internal.execution.operators.input_data_buffer import InputDataBuffer
+from ray.data._internal.logical.interfaces import PhysicalPlan, Rule
+from ray.data._internal.logical.operators.read_operator import Read
+from ray.data._internal.util import _autodetect_parallelism
+from ray.data.context import WARN_PREFIX, DataContext
+from ray.data.datasource.datasource import Datasource, Reader
+
+logger = logging.getLogger(__name__)
+
+
+def compute_additional_split_factor(
+    datasource_or_legacy_reader: Union[Datasource, Reader],
+    parallelism: int,
+    mem_size: int,
+    target_max_block_size: int,
+    cur_additional_split_factor: Optional[int] = None,
+) -> Tuple[int, str, int, Optional[int]]:
+    ctx = DataContext.get_current()
+    detected_parallelism, reason, _ = _autodetect_parallelism(
+        parallelism, target_max_block_size, ctx, datasource_or_legacy_reader, mem_size
+    )
+    num_read_tasks = len(
+        datasource_or_legacy_reader.get_read_tasks(detected_parallelism)
+    )
+    expected_block_size = None
+    if mem_size:
+        expected_block_size = mem_size / num_read_tasks
+        logger.debug(
+            f"Expected in-memory size {mem_size}, block size {expected_block_size}"
+        )
+        size_based_splits = round(
+            max(1, expected_block_size / target_max_block_size)
+        )
+    else:
+        size_based_splits = 1
+    if cur_additional_split_factor:
+        size_based_splits *= cur_additional_split_factor
+    logger.debug(f"Size based split factor {size_based_splits}")
+    estimated_num_blocks = num_read_tasks * size_based_splits
+    logger.debug(f"Blocks after size splits {estimated_num_blocks}")
+
+    available_cpu_slots = ray_available_resources().get("CPU", 1)
+    if (
+        parallelism != -1
+        and num_read_tasks >= available_cpu_slots * 4
+        and num_read_tasks >= 5000
+    ):
+        logger.warning(
+            f"{WARN_PREFIX} The requested number of read blocks of {parallelism} "
+            "is more than 4x the number of available CPU slots in the cluster of "
+            f"{available_cpu_slots}. This can "
+            "lead to slowdowns during the data reading phase due to excessive "
+            "task creation. Reduce the value to match the available "
+            "CPU slots in the cluster, or set override_num_blocks to -1 for Ray Data "
+            "to automatically determine the number of read blocks. "
+            "You can ignore this message if the cluster is expected to autoscale."
+        )
+
+    # Add more output splitting for each read task if needed.
+    # TODO(swang): For parallelism=-1 (user did not explicitly set
+    # parallelism), and if the following operator produces much larger blocks,
+    # we should scale down the target max block size here instead of using
+    # splitting, which can have higher memory usage.
+    if estimated_num_blocks < detected_parallelism and estimated_num_blocks > 0:
+        k = math.ceil(detected_parallelism / estimated_num_blocks)
+        estimated_num_blocks = estimated_num_blocks * k
+        return detected_parallelism, reason, estimated_num_blocks, k
+
+    return detected_parallelism, reason, estimated_num_blocks, None
+
+
+class SetReadParallelismRule(Rule):
+    """
+    This rule sets the read op's task parallelism based on the target block
+    size, the requested parallelism, the number of read files, and the
+    available resources in the cluster.
+
+    If the read tasks produce fewer blocks than the requested parallelism,
+    this rule also sets a split factor to split each read task's output
+    blocks, so that the following operator will have the desired parallelism.
+    """
+
+    def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
+        ops = [plan.dag]
+
+        while len(ops) > 0:
+            op = ops.pop(0)
+            if isinstance(op, InputDataBuffer):
+                continue
+            logical_op = plan.op_map[op]
+            if isinstance(logical_op, Read):
+                self._apply(op, logical_op)
+            ops += op.input_dependencies
+
+        return plan
+
+    def _apply(self, op: PhysicalOperator, logical_op: Read):
+        (
+            detected_parallelism,
+            reason,
+            estimated_num_blocks,
+            k,
+        ) = compute_additional_split_factor(
+            logical_op._datasource_or_legacy_reader,
+            logical_op._parallelism,
+            logical_op._mem_size,
+            op.actual_target_max_block_size,
+            op._additional_split_factor,
+        )
+
+        if logical_op._parallelism == -1:
+            assert reason != ""
+            logger.debug(
+                f"Using autodetected parallelism={detected_parallelism} "
+                f"for operator {logical_op.name} to satisfy {reason}."
+            )
+        logical_op.set_detected_parallelism(detected_parallelism)
+
+        if k is not None:
+            logger.debug(
+                f"To satisfy the requested parallelism of {detected_parallelism}, "
+                f"each read task output is split into {k} smaller blocks."
+            )
+            op.set_additional_split_factor(k)
+
+        logger.debug(f"Estimated num output blocks {estimated_num_blocks}")
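The split-factor arithmetic above can be condensed into a pure function for illustration (split_factor and the sample numbers below are assumptions for the sketch, not part of the patch):

    import math

    def split_factor(num_read_tasks, mem_size, target_max_block_size, parallelism):
        # Size-based splitting: keep blocks near the target max block size.
        expected_block_size = mem_size / num_read_tasks
        size_based_splits = round(max(1, expected_block_size / target_max_block_size))
        estimated_num_blocks = num_read_tasks * size_based_splits
        # Extra splitting if the read still yields fewer blocks than requested.
        k = None
        if 0 < estimated_num_blocks < parallelism:
            k = math.ceil(parallelism / estimated_num_blocks)
            estimated_num_blocks *= k
        return estimated_num_blocks, k

    # 10 GiB across 20 read tasks with a 128 MiB target and parallelism 200:
    # 512 MiB blocks -> 4 size-based splits -> 80 blocks, then k=3 -> 240.
    assert split_factor(20, 10 * 1024**3, 128 * 1024**2, 200) == (240, 3)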
diff --git a/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/zero_copy_map_fusion.py b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/zero_copy_map_fusion.py
new file mode 100644
index 0000000000000000000000000000000000000000..6495f64f10a49e781c75b1b5061ba78e66715ad2
--- /dev/null
+++ b/minigpt2/lib/python3.10/site-packages/ray/data/_internal/logical/rules/zero_copy_map_fusion.py
@@ -0,0 +1,88 @@
+from abc import abstractmethod
+from typing import List
+
+from ray.data._internal.execution.operators.map_operator import MapOperator
+from ray.data._internal.execution.operators.map_transformer import (
+    BuildOutputBlocksMapTransformFn,
+    MapTransformFn,
+    MapTransformFnDataType,
+)
+from ray.data._internal.logical.interfaces.optimizer import Rule
+from ray.data._internal.logical.interfaces.physical_plan import PhysicalPlan
+
+
+class ZeroCopyMapFusionRule(Rule):
+    """Base abstract class for all zero-copy map fusion rules.
+
+    A zero-copy map fusion rule optimizes the transform_fn chain of a fused
+    MapOperator, usually by removing unnecessary data conversions.
+
+    This base class defines the common utility functions; subclasses should
+    implement the `_optimize` method for their concrete optimization
+    strategy.
+    """
+
+    def apply(self, plan: PhysicalPlan) -> PhysicalPlan:
+        self._traverse(plan.dag)
+        return plan
+
+    def _traverse(self, op):
+        """Traverse the DAG and apply the optimization to each MapOperator."""
+        if isinstance(op, MapOperator):
+            map_transformer = op.get_map_transformer()
+            transform_fns = map_transformer.get_transform_fns()
+            new_transform_fns = self._optimize(transform_fns)
+            # Physical operators aren't shared, so it's safe to modify the
+            # transform_fns in place.
+            map_transformer.set_transform_fns(new_transform_fns)
+
+        for input_op in op.input_dependencies:
+            self._traverse(input_op)
+
+    @abstractmethod
+    def _optimize(self, transform_fns: List[MapTransformFn]) -> List[MapTransformFn]:
+        """Optimize the transform_fns chain of a MapOperator.
+
+        Args:
+            transform_fns: The old transform_fns chain.
+        Returns:
+            The optimized transform_fns chain.
+        """
+        ...
+
+
+class EliminateBuildOutputBlocks(ZeroCopyMapFusionRule):
+    """This rule eliminates an unnecessary BuildOutputBlocksMapTransformFn
+    if the previous fn already outputs blocks.
+
+    This happens for the "Read -> Map/Write" fusion.
+    """
+
+    def _optimize(self, transform_fns: List[MapTransformFn]) -> List[MapTransformFn]:
+        # For the following subsequence:
+        # 1. Any MapTransformFn with block output.
+        # 2. BuildOutputBlocksMapTransformFn.
+        # 3. Any MapTransformFn with block input.
+        # We drop the BuildOutputBlocksMapTransformFn in the middle.
+        new_transform_fns = []
+
+        for i in range(len(transform_fns)):
+            cur_fn = transform_fns[i]
+            drop = False
+            if (
+                i > 0
+                and i < len(transform_fns) - 1
+                and isinstance(cur_fn, BuildOutputBlocksMapTransformFn)
+            ):
+                prev_fn = transform_fns[i - 1]
+                next_fn = transform_fns[i + 1]
+                if (
+                    prev_fn.output_type == MapTransformFnDataType.Block
+                    and next_fn.input_type == MapTransformFnDataType.Block
+                ):
+                    drop = True
+            if not drop:
+                new_transform_fns.append(cur_fn)
+
+        return new_transform_fns
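To see the drop condition in isolation, here is a minimal self-contained sketch with stand-in transform fns (the Fn and DType names are hypothetical; only the drop logic mirrors EliminateBuildOutputBlocks._optimize):

    from dataclasses import dataclass
    from enum import Enum

    class DType(Enum):
        Batch = "batch"
        Block = "block"

    @dataclass
    class Fn:
        name: str
        input_type: DType
        output_type: DType

    def optimize(fns):
        # Drop a build step whose neighbors already produce and consume blocks.
        out = []
        for i, fn in enumerate(fns):
            drop = (
                0 < i < len(fns) - 1
                and fn.name == "build_output_blocks"  # stands in for isinstance check
                and fns[i - 1].output_type is DType.Block
                and fns[i + 1].input_type is DType.Block
            )
            if not drop:
                out.append(fn)
        return out

    chain = [
        Fn("read", DType.Batch, DType.Block),
        Fn("build_output_blocks", DType.Block, DType.Block),
        Fn("write", DType.Block, DType.Block),
    ]
    # The middle build step is redundant: its neighbors already speak blocks.
    assert [f.name for f in optimize(chain)] == ["read", "write"]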