Add files using upload-large-folder tool
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff for the full changeset.
- infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/filelock.py +46 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/mlflow.py +342 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/usage.py +257 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/__pycache__/custom_trainer.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/custom_trainer.py +61 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__init__.py +5 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/actor_manager.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/barrier.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/event_manager.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/tracked_actor.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/tracked_actor_task.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/actor_manager.py +894 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/barrier.py +93 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/event_manager.py +148 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/tracked_actor.py +54 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/tracked_actor_task.py +42 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/resources/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/resources/__pycache__/placement_group.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/resources/resource_manager.py +155 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/comet.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/keras.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/mlflow.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/wandb.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/comet.py +260 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/keras.py +185 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/mlflow.py +325 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/wandb.py +750 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__init__.py +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/data_batch_conversion.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/node.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/torch_dist.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/transform_pyarrow.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/check_ingest.py +201 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/data_batch_conversion.py +353 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/node.py +69 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__pycache__/__init__.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__pycache__/arrow.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__pycache__/pandas.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/arrow.py +119 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/pandas.py +118 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/utils.cpython-310.pyc +0 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/torch_dist.py +191 -0
- infer_4_37_2/lib/python3.10/site-packages/ray/air/util/transform_pyarrow.py +39 -0
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/filelock.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import hashlib
|
| 2 |
+
import os
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
|
| 5 |
+
from filelock import FileLock
|
| 6 |
+
|
| 7 |
+
import ray
|
| 8 |
+
|
| 9 |
+
RAY_LOCKFILE_DIR = "_ray_lockfiles"
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TempFileLock:
    """FileLock wrapper that keeps its lockfiles in a temporary directory.

    The temporary directory that these locks are saved to can be configured
    via the `RAY_TMPDIR` environment variable.

    Args:
        path: The file path that this temporary file lock is used for.
            This will be used to generate the lockfile filename.
            Ex: For concurrent writes to a file, this is the common filepath
            that multiple processes are writing to.
        **kwargs: Additional keyword arguments to pass to the underlying `FileLock`.
    """

    def __init__(self, path: str, **kwargs):
        self.path = path
        # All lockfiles live under <user temp dir>/_ray_lockfiles.
        base_tmp = Path(ray._private.utils.get_user_temp_dir()).resolve()
        self._lock_dir = base_tmp / RAY_LOCKFILE_DIR
        # Hash the resolved target path so that every process locking the
        # same file derives the same lockfile name.
        resolved_target = str(Path(self.path).resolve()).encode("utf-8")
        self._path_hash = hashlib.sha1(resolved_target).hexdigest()
        self._lock_path = self._lock_dir / f"{self._path_hash}.lock"

        os.makedirs(str(self._lock_dir), exist_ok=True)
        self._lock = FileLock(self._lock_path, **kwargs)

    def __enter__(self):
        self._lock.acquire()
        return self

    def __exit__(self, type, value, traceback):
        self._lock.release()

    def __getattr__(self, name):
        # Delegate any other attribute access to the wrapped FileLock.
        return getattr(self._lock, name)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/mlflow.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import os
|
| 3 |
+
from copy import deepcopy
|
| 4 |
+
from typing import TYPE_CHECKING, Dict, Optional
|
| 5 |
+
|
| 6 |
+
from packaging import version
|
| 7 |
+
|
| 8 |
+
from ray._private.dict import flatten_dict
|
| 9 |
+
|
| 10 |
+
if TYPE_CHECKING:
|
| 11 |
+
from mlflow.entities import Run
|
| 12 |
+
from mlflow.tracking import MlflowClient
|
| 13 |
+
|
| 14 |
+
logger = logging.getLogger(__name__)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class _MLflowLoggerUtil:
    """Util class for setting up and logging to MLflow.

    Use this util for any library that needs MLflow logging/tracking logic
    such as Ray Tune or Ray Train.
    """

    def __init__(self):
        import mlflow

        # Hold the mlflow module itself; see __deepcopy__ for why this
        # attribute needs special handling.
        self._mlflow = mlflow
        # Set by setup_mlflow() once an experiment has been resolved/created.
        self.experiment_id = None

    def __deepcopy__(self, memo=None):
        # mlflow is a module, and thus cannot be copied.
        # Temporarily remove it from __dict__, deep-copy the rest, then
        # restore the module reference on both the original and the copy.
        _mlflow = self._mlflow
        self.__dict__.pop("_mlflow")
        dict_copy = deepcopy(self.__dict__, memo)
        copied_object = _MLflowLoggerUtil()
        copied_object.__dict__.update(dict_copy)
        self._mlflow = _mlflow
        copied_object._mlflow = _mlflow
        return copied_object

    def setup_mlflow(
        self,
        tracking_uri: Optional[str] = None,
        registry_uri: Optional[str] = None,
        experiment_id: Optional[str] = None,
        experiment_name: Optional[str] = None,
        tracking_token: Optional[str] = None,
        artifact_location: Optional[str] = None,
        create_experiment_if_not_exists: bool = True,
    ):
        """
        Sets up MLflow.

        Sets the Mlflow tracking uri & token, and registry URI. Also sets
        the MLflow experiment that the logger should use, and possibly
        creates new experiment if it does not exist.

        Args:
            tracking_uri: The tracking URI for the MLflow tracking
                server.
            registry_uri: The registry URI for the MLflow model registry.
            experiment_id: The id of an already existing MLflow
                experiment to use for logging. If None is passed in
                here and the MFLOW_EXPERIMENT_ID is not set, or the
                experiment with this id does not exist,
                ``experiment_name`` will be used instead. This argument takes
                precedence over ``experiment_name`` if both are passed in.
            experiment_name: The experiment name to use for logging.
                If None is passed in here, the MLFLOW_EXPERIMENT_NAME environment
                variable is used to determine the experiment name.
                If the experiment with the name already exists with MLflow,
                it will be reused. If not, a new experiment will be created
                with the provided name if
                ``create_experiment_if_not_exists`` is set to True.
            artifact_location: The location to store run artifacts.
                If not provided, MLFlow picks an appropriate default.
                Ignored if experiment already exists.
            tracking_token: Tracking token used to authenticate with MLflow.
            create_experiment_if_not_exists: Whether to create an
                experiment with the provided name if it does not already
                exist. Defaults to True.

        Returns:
            Whether setup is successful.
        """
        if tracking_token:
            # MLflow reads the token from this environment variable.
            os.environ["MLFLOW_TRACKING_TOKEN"] = tracking_token

        self._mlflow.set_tracking_uri(tracking_uri)
        self._mlflow.set_registry_uri(registry_uri)

        # First check experiment_id.
        experiment_id = (
            experiment_id
            if experiment_id is not None
            else os.environ.get("MLFLOW_EXPERIMENT_ID")
        )
        if experiment_id is not None:
            from mlflow.exceptions import MlflowException

            try:
                self._mlflow.get_experiment(experiment_id=experiment_id)
                logger.debug(
                    f"Experiment with provided id {experiment_id} "
                    "exists. Setting that as the experiment."
                )
                self.experiment_id = experiment_id
                return
            except MlflowException:
                # Id lookup failed; fall through to name-based resolution.
                pass

        # Then check experiment_name.
        experiment_name = (
            experiment_name
            if experiment_name is not None
            else os.environ.get("MLFLOW_EXPERIMENT_NAME")
        )
        if experiment_name is not None and self._mlflow.get_experiment_by_name(
            name=experiment_name
        ):
            logger.debug(
                f"Experiment with provided name {experiment_name} "
                "exists. Setting that as the experiment."
            )
            self.experiment_id = self._mlflow.get_experiment_by_name(
                experiment_name
            ).experiment_id
            return

        # An experiment with the provided id or name does not exist.
        # Create a new experiment if applicable.
        if experiment_name and create_experiment_if_not_exists:
            logger.debug(
                "Existing experiment not found. Creating new "
                f"experiment with name: {experiment_name}"
            )
            self.experiment_id = self._mlflow.create_experiment(
                name=experiment_name, artifact_location=artifact_location
            )
            return

        if create_experiment_if_not_exists:
            # We would have created one, but no name was available.
            raise ValueError(
                f"Experiment with the provided experiment_id: "
                f"{experiment_id} does not exist and no "
                f"experiment_name provided. At least one of "
                f"these has to be provided."
            )
        else:
            raise ValueError(
                f"Experiment with the provided experiment_id: "
                f"{experiment_id} or experiment_name: "
                f"{experiment_name} does not exist. Please "
                f"create an MLflow experiment and provide "
                f"either its id or name."
            )

    def _parse_dict(self, dict_to_log: Dict) -> Dict:
        """Parses provided dict to convert all values to float.

        MLflow can only log metrics that are floats. This does not apply to
        logging parameters or artifacts.

        Args:
            dict_to_log: The dictionary containing the metrics to log.

        Returns:
            A dictionary containing the metrics to log with all values being
            converted to floats, or skipped if not able to be converted.
        """
        new_dict = {}
        for key, value in dict_to_log.items():
            try:
                value = float(value)
                new_dict[key] = value
            except (ValueError, TypeError):
                # Non-numeric values are silently skipped (debug-logged).
                logger.debug(
                    "Cannot log key {} with value {} since the "
                    "value cannot be converted to float.".format(key, value)
                )
                continue

        return new_dict

    def start_run(
        self,
        run_name: Optional[str] = None,
        tags: Optional[Dict] = None,
        set_active: bool = False,
    ) -> "Run":
        """Starts a new run and possibly sets it as the active run.

        Args:
            tags: Tags to set for the new run.
            set_active: Whether to set the new run as the active run.
                If an active run already exists, then that run is returned.

        Returns:
            The newly created MLflow run.
        """
        import mlflow
        from mlflow.utils.mlflow_tags import MLFLOW_RUN_NAME

        if tags is None:
            tags = {}

        if set_active:
            return self._start_active_run(run_name=run_name, tags=tags)

        client = self._get_client()
        # If `mlflow==1.30.0` and we don't use `run_name`, then MLflow might error. For
        # more information, see #29749.
        if version.parse(mlflow.__version__) >= version.parse("1.30.0"):
            run = client.create_run(
                run_name=run_name, experiment_id=self.experiment_id, tags=tags
            )
        else:
            # Older clients take the run name as a reserved tag instead.
            tags[MLFLOW_RUN_NAME] = run_name
            run = client.create_run(experiment_id=self.experiment_id, tags=tags)

        return run

    def _start_active_run(
        self, run_name: Optional[str] = None, tags: Optional[Dict] = None
    ) -> "Run":
        """Starts a run and sets it as the active run if one does not exist.

        If an active run already exists, then returns it.
        """
        active_run = self._mlflow.active_run()
        if active_run:
            return active_run

        return self._mlflow.start_run(
            run_name=run_name, experiment_id=self.experiment_id, tags=tags
        )

    def _run_exists(self, run_id: str) -> bool:
        """Check if run with the provided id exists."""
        from mlflow.exceptions import MlflowException

        try:
            self._mlflow.get_run(run_id=run_id)
            return True
        except MlflowException:
            return False

    def _get_client(self) -> "MlflowClient":
        """Returns an ml.tracking.MlflowClient instance to use for logging."""
        tracking_uri = self._mlflow.get_tracking_uri()
        registry_uri = self._mlflow.get_registry_uri()

        from mlflow.tracking import MlflowClient

        return MlflowClient(tracking_uri=tracking_uri, registry_uri=registry_uri)

    def log_params(self, params_to_log: Dict, run_id: Optional[str] = None):
        """Logs the provided parameters to the run specified by run_id.

        If no ``run_id`` is passed in, then logs to the current active run.
        If there is not active run, then creates a new run and sets it as
        the active run.

        Args:
            params_to_log: Dictionary of parameters to log.
            run_id (Optional[str]): The ID of the run to log to.
        """
        # Nested dicts are flattened so each leaf becomes its own param.
        params_to_log = flatten_dict(params_to_log)

        if run_id and self._run_exists(run_id):
            client = self._get_client()
            for key, value in params_to_log.items():
                client.log_param(run_id=run_id, key=key, value=value)

        else:
            for key, value in params_to_log.items():
                self._mlflow.log_param(key=key, value=value)

    def log_metrics(self, step, metrics_to_log: Dict, run_id: Optional[str] = None):
        """Logs the provided metrics to the run specified by run_id.


        If no ``run_id`` is passed in, then logs to the current active run.
        If there is not active run, then creates a new run and sets it as
        the active run.

        Args:
            metrics_to_log: Dictionary of metrics to log.
            run_id (Optional[str]): The ID of the run to log to.
        """
        # Flatten, then drop values that cannot be coerced to float
        # (MLflow metrics must be numeric).
        metrics_to_log = flatten_dict(metrics_to_log)
        metrics_to_log = self._parse_dict(metrics_to_log)

        if run_id and self._run_exists(run_id):
            client = self._get_client()
            for key, value in metrics_to_log.items():
                client.log_metric(run_id=run_id, key=key, value=value, step=step)

        else:
            for key, value in metrics_to_log.items():
                self._mlflow.log_metric(key=key, value=value, step=step)

    def save_artifacts(self, dir: str, run_id: Optional[str] = None):
        """Saves directory as artifact to the run specified by run_id.

        If no ``run_id`` is passed in, then saves to the current active run.
        If there is not active run, then creates a new run and sets it as
        the active run.

        Args:
            dir: Path to directory containing the files to save.
            run_id (Optional[str]): The ID of the run to log to.
        """
        if run_id and self._run_exists(run_id):
            client = self._get_client()
            client.log_artifacts(run_id=run_id, local_dir=dir)
        else:
            self._mlflow.log_artifacts(local_dir=dir)

    def end_run(self, status: Optional[str] = None, run_id=None):
        """Terminates the run specified by run_id.

        If no ``run_id`` is passed in, then terminates the
        active run if one exists.

        Args:
            status (Optional[str]): The status to set when terminating the run.
            run_id (Optional[str]): The ID of the run to terminate.

        """
        # Use the client API only for a run that exists and is NOT the
        # currently active run; the active run must be ended via
        # `mlflow.end_run()` so mlflow's active-run state is cleared.
        if (
            run_id
            and self._run_exists(run_id)
            and not (
                self._mlflow.active_run()
                and self._mlflow.active_run().info.run_id == run_id
            )
        ):
            client = self._get_client()
            client.set_terminated(run_id=run_id, status=status)
        else:
            self._mlflow.end_run(status=status)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/_internal/usage.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections
|
| 2 |
+
import json
|
| 3 |
+
import os
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from typing import TYPE_CHECKING, Dict, List, Optional, Set, Union
|
| 6 |
+
|
| 7 |
+
from ray._private.usage.usage_lib import TagKey, record_extra_usage_tag
|
| 8 |
+
|
| 9 |
+
if TYPE_CHECKING:
|
| 10 |
+
from ray.train._internal.storage import StorageContext
|
| 11 |
+
from ray.train.trainer import BaseTrainer
|
| 12 |
+
from ray.tune import Callback
|
| 13 |
+
from ray.tune.schedulers import TrialScheduler
|
| 14 |
+
from ray.tune.search import BasicVariantGenerator, Searcher
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Built-in AIR trainer class names; anything else is tagged as "Custom".
AIR_TRAINERS = {
    "HorovodTrainer",
    "LightGBMTrainer",
    "TensorflowTrainer",
    "TorchTrainer",
    "XGBoostTrainer",
}

# searchers implemented by Ray Tune.
TUNE_SEARCHERS = {
    "AxSearch",
    "BayesOptSearch",
    "TuneBOHB",
    "HEBOSearch",
    "HyperOptSearch",
    "NevergradSearch",
    "OptunaSearch",
    "ZOOptSearch",
}

# These are just wrappers around real searchers.
# We don't want to double tag in this case, otherwise, the real tag
# will be overwritten.
TUNE_SEARCHER_WRAPPERS = {
    "ConcurrencyLimiter",
    "Repeater",
}

# Built-in Ray Tune trial scheduler class names.
TUNE_SCHEDULERS = {
    "FIFOScheduler",
    "AsyncHyperBandScheduler",
    "MedianStoppingRule",
    "HyperBandScheduler",
    "HyperBandForBOHB",
    "PopulationBasedTraining",
    "PopulationBasedTrainingReplay",
    "PB2",
    "ResourceChangingScheduler",
}
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class AirEntrypoint(Enum):
    """The user-facing API through which an AIR/Tune run was launched."""

    TUNER = "Tuner.fit"
    TRAINER = "Trainer.fit"
    TUNE_RUN = "tune.run"
    TUNE_RUN_EXPERIMENTS = "tune.run_experiments"
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def _find_class_name(obj, allowed_module_path_prefix: str, whitelist: Set[str]):
|
| 66 |
+
"""Find the class name of the object. If the object is not
|
| 67 |
+
under `allowed_module_path_prefix` or if its class is not in the whitelist,
|
| 68 |
+
return "Custom".
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
obj: The object under inspection.
|
| 72 |
+
allowed_module_path_prefix: If the `obj`'s class is not under
|
| 73 |
+
the `allowed_module_path_prefix`, its class name will be anonymized.
|
| 74 |
+
whitelist: If the `obj`'s class is not in the `whitelist`,
|
| 75 |
+
it will be anonymized.
|
| 76 |
+
Returns:
|
| 77 |
+
The class name to be tagged with telemetry.
|
| 78 |
+
"""
|
| 79 |
+
module_path = obj.__module__
|
| 80 |
+
cls_name = obj.__class__.__name__
|
| 81 |
+
if module_path.startswith(allowed_module_path_prefix) and cls_name in whitelist:
|
| 82 |
+
return cls_name
|
| 83 |
+
else:
|
| 84 |
+
return "Custom"
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def tag_air_trainer(trainer: "BaseTrainer"):
    """Record which built-in (or custom) AIR trainer class is being used."""
    from ray.train.trainer import BaseTrainer

    assert isinstance(trainer, BaseTrainer)
    name_to_record = _find_class_name(trainer, "ray.train", AIR_TRAINERS)
    record_extra_usage_tag(TagKey.AIR_TRAINER, name_to_record)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def tag_searcher(searcher: Union["BasicVariantGenerator", "Searcher"]):
    """Record which searcher implementation is in use for telemetry."""
    from ray.tune.search import BasicVariantGenerator, Searcher

    if isinstance(searcher, BasicVariantGenerator):
        # Note this could be highly inflated as all train flows are treated
        # as using BasicVariantGenerator.
        record_extra_usage_tag(TagKey.TUNE_SEARCHER, "BasicVariantGenerator")
        return

    if isinstance(searcher, Searcher):
        recognized = TUNE_SEARCHERS.union(TUNE_SEARCHER_WRAPPERS)
        name = _find_class_name(searcher, "ray.tune.search", recognized)
        # Wrappers delegate to a real searcher; skip them so we don't
        # double tag and overwrite the real searcher's tag.
        if name not in TUNE_SEARCHER_WRAPPERS:
            record_extra_usage_tag(TagKey.TUNE_SEARCHER, name)
        return

    assert False, (
        "Not expecting a non-BasicVariantGenerator, "
        "non-Searcher type passed in for `tag_searcher`."
    )
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def tag_scheduler(scheduler: "TrialScheduler"):
    """Record which trial scheduler class is being used."""
    from ray.tune.schedulers import TrialScheduler

    assert isinstance(scheduler, TrialScheduler)
    recorded_name = _find_class_name(scheduler, "ray.tune.schedulers", TUNE_SCHEDULERS)
    record_extra_usage_tag(TagKey.TUNE_SCHEDULER, recorded_name)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def tag_setup_wandb():
    # Record that the `setup_wandb` integration entrypoint was used.
    record_extra_usage_tag(TagKey.AIR_SETUP_WANDB_INTEGRATION_USED, "1")
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def tag_setup_mlflow():
    # Record that the `setup_mlflow` integration entrypoint was used.
    record_extra_usage_tag(TagKey.AIR_SETUP_MLFLOW_INTEGRATION_USED, "1")
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def _count_callbacks(callbacks: Optional[List["Callback"]]) -> Dict[str, int]:
    """Creates a map of callback class name -> count given a list of callbacks."""
    from ray.air.integrations.comet import CometLoggerCallback
    from ray.air.integrations.mlflow import MLflowLoggerCallback
    from ray.air.integrations.wandb import WandbLoggerCallback
    from ray.tune import Callback
    from ray.tune.logger import LoggerCallback
    from ray.tune.logger.aim import AimLoggerCallback
    from ray.tune.utils.callback import DEFAULT_CALLBACK_CLASSES

    built_in_callbacks = (
        WandbLoggerCallback,
        MLflowLoggerCallback,
        CometLoggerCallback,
        AimLoggerCallback,
    ) + DEFAULT_CALLBACK_CLASSES

    known_names = {cls.__name__ for cls in built_in_callbacks}
    counts = collections.defaultdict(int)

    for cb in callbacks or []:
        if not isinstance(cb, Callback):
            # This will error later, but don't include this as custom usage.
            continue

        name = cb.__class__.__name__

        if name in known_names:
            counts[name] += 1
        elif isinstance(cb, LoggerCallback):
            # User-defined subclass of the LoggerCallback interface.
            counts["CustomLoggerCallback"] += 1
        else:
            counts["CustomCallback"] += 1

    return counts
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
def tag_callbacks(callbacks: Optional[List["Callback"]]) -> bool:
    """Records built-in callback usage via a JSON str representing a
    dictionary mapping callback class name -> counts.

    User-defined callbacks will increment the count under the `CustomLoggerCallback`
    or `CustomCallback` key depending on which of the provided interfaces they subclass.
    NOTE: This will NOT track the name of the user-defined callback,
    nor its implementation.

    This will NOT report telemetry if no callbacks are provided by the user.

    Returns:
        bool: True if usage was recorded, False otherwise.
    """
    if not callbacks:
        # User didn't pass in any callbacks -> no usage recorded.
        return False

    callback_counts = _count_callbacks(callbacks)

    if not callback_counts:
        # Only invalid (non-Callback) entries were provided -> nothing recorded.
        # BUGFIX: previously this path (and the success path) implicitly
        # returned None, violating the documented bool return contract.
        return False

    callback_counts_str = json.dumps(callback_counts)
    record_extra_usage_tag(TagKey.AIR_CALLBACKS, callback_counts_str)
    return True
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def tag_storage_type(storage: "StorageContext"):
    """Records the storage configuration of an experiment.

    The storage configuration is set by `RunConfig(storage_path, storage_filesystem)`.

    The possible storage types (defined by `pyarrow.fs.FileSystem.type_name`) are:
    - 'local' = pyarrow.fs.LocalFileSystem. This includes NFS usage.
    - 'mock' = pyarrow.fs._MockFileSystem. This is used for testing.
    - ('s3', 'gcs', 'abfs', 'hdfs'): Various remote storage schemes
        with default implementations in pyarrow.
    - 'custom' = All other storage schemes, which includes ALL cases where a
        custom `storage_filesystem` is provided.
    - 'other' = catches any other cases not explicitly handled above.
    """
    known_types = {"local", "mock", "s3", "gcs", "abfs", "hdfs"}

    if storage.custom_fs_provided:
        # Any user-provided filesystem is reported as "custom", regardless
        # of its underlying type.
        storage_config_tag = "custom"
    else:
        fs_type = storage.storage_filesystem.type_name
        storage_config_tag = fs_type if fs_type in known_types else "other"

    record_extra_usage_tag(TagKey.AIR_STORAGE_CONFIGURATION, storage_config_tag)
|
| 220 |
+
|
| 221 |
+
|
| 222 |
+
def tag_ray_air_env_vars() -> bool:
    """Records usage of environment variables exposed by the Ray AIR libraries.

    NOTE: This does not track the values of the environment variables, nor
    does this track environment variables not explicitly included in the
    AIR/Tune/Train allow-lists.

    Returns:
        bool: True if at least one environment var is supplied by the user.
    """
    from ray.air.constants import AIR_ENV_VARS
    from ray.train.constants import TRAIN_ENV_VARS
    from ray.tune.constants import TUNE_ENV_VARS

    # Union of all allow-listed env vars, sorted for a stable JSON payload.
    tracked_env_vars = sorted(set().union(AIR_ENV_VARS, TUNE_ENV_VARS, TRAIN_ENV_VARS))

    user_supplied = [name for name in tracked_env_vars if name in os.environ]

    if not user_supplied:
        return False

    record_extra_usage_tag(TagKey.AIR_ENV_VARS, json.dumps(user_supplied))
    return True
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def tag_air_entrypoint(entrypoint: AirEntrypoint) -> None:
    """Records the entrypoint to an AIR training run."""
    # Guard against values that are not members of the AirEntrypoint enum.
    assert entrypoint in AirEntrypoint

    record_extra_usage_tag(TagKey.AIR_ENTRYPOINT, entrypoint.value)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (174 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/__pycache__/custom_trainer.cpython-310.pyc
ADDED
|
Binary file (1.56 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/examples/custom_trainer.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# flake8: noqa
|
| 2 |
+
# isort: skip_file
|
| 3 |
+
# TODO(rliaw): Include this in the docs.
|
| 4 |
+
|
| 5 |
+
# fmt: off
|
| 6 |
+
# __custom_trainer_begin__
|
| 7 |
+
import torch
|
| 8 |
+
|
| 9 |
+
from ray import train
|
| 10 |
+
from ray.train.trainer import BaseTrainer
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class MyPytorchTrainer(BaseTrainer):
    """Minimal custom trainer example: fits a 1-D linear model with SGD."""

    def setup(self):
        """Create the model and optimizer before training starts."""
        self.model = torch.nn.Linear(1, 1)
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.1)

    def training_loop(self):
        """Run 10 epochs of MSE training and report the loss each epoch."""
        # You can access any Trainer attributes directly in this method.
        # self.datasets["train"] has already been
        # preprocessed by self.preprocessor
        dataset = self.datasets["train"]

        loss_fn = torch.nn.MSELoss()

        for epoch_idx in range(10):
            loss = 0
            num_batches = 0
            for batch in dataset.iter_torch_batches(dtypes=torch.float):
                # Compute prediction error
                X, y = torch.unsqueeze(batch["x"], 1), batch["y"]
                pred = self.model(X)
                batch_loss = loss_fn(pred, y)

                # Backpropagation
                self.optimizer.zero_grad()
                batch_loss.backward()
                self.optimizer.step()

                loss += batch_loss.item()
                num_batches += 1
            # Average batch loss over the epoch.
            loss /= num_batches

            # Use Tune functions to report intermediate
            # results.
            train.report({"loss": loss, "epoch": epoch_idx})
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# __custom_trainer_end__
|
| 50 |
+
# fmt: on
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# fmt: off
|
| 54 |
+
# __custom_trainer_usage_begin__
|
| 55 |
+
import ray

# Toy dataset of three rows where the label equals the feature (y = x).
train_dataset = ray.data.from_items([{"x": i, "y": i} for i in range(3)])
my_trainer = MyPytorchTrainer(datasets={"train": train_dataset})
result = my_trainer.fit()
|
| 60 |
+
# __custom_trainer_usage_end__
|
| 61 |
+
# fmt: on
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (583 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ray.air.execution._internal.actor_manager import RayActorManager
|
| 2 |
+
from ray.air.execution._internal.barrier import Barrier
|
| 3 |
+
from ray.air.execution._internal.tracked_actor import TrackedActor
|
| 4 |
+
|
| 5 |
+
__all__ = ["Barrier", "RayActorManager", "TrackedActor"]
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (447 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/actor_manager.cpython-310.pyc
ADDED
|
Binary file (24.4 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/barrier.cpython-310.pyc
ADDED
|
Binary file (3.39 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/event_manager.cpython-310.pyc
ADDED
|
Binary file (5.28 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/tracked_actor.cpython-310.pyc
ADDED
|
Binary file (2.32 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/__pycache__/tracked_actor_task.cpython-310.pyc
ADDED
|
Binary file (1.64 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/actor_manager.py
ADDED
|
@@ -0,0 +1,894 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
import random
|
| 3 |
+
import time
|
| 4 |
+
import uuid
|
| 5 |
+
from collections import Counter, defaultdict
|
| 6 |
+
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
|
| 7 |
+
|
| 8 |
+
import ray
|
| 9 |
+
from ray.air.execution._internal.event_manager import RayEventManager
|
| 10 |
+
from ray.air.execution._internal.tracked_actor import TrackedActor
|
| 11 |
+
from ray.air.execution._internal.tracked_actor_task import TrackedActorTask
|
| 12 |
+
from ray.air.execution.resources import (
|
| 13 |
+
AcquiredResources,
|
| 14 |
+
ResourceManager,
|
| 15 |
+
ResourceRequest,
|
| 16 |
+
)
|
| 17 |
+
from ray.exceptions import RayActorError, RayTaskError
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class RayActorManager:
|
| 23 |
+
"""Management class for Ray actors and actor tasks.
|
| 24 |
+
|
| 25 |
+
This class provides an event-based management interface for actors, and
|
| 26 |
+
actor tasks.
|
| 27 |
+
|
| 28 |
+
The manager can be used to start actors, stop actors, and schedule and
|
| 29 |
+
track task futures on these actors.
|
| 30 |
+
The manager will then invoke callbacks related to the tracked entities.
|
| 31 |
+
|
| 32 |
+
For instance, when an actor is added with
|
| 33 |
+
:meth:`add_actor() <RayActorManager.add_actor>`,
|
| 34 |
+
a :ref:`TrackedActor <ray.air.execution._internal.tracked_actor.TrackedActor`
|
| 35 |
+
object is returned. An ``on_start`` callback can be specified that is invoked
|
| 36 |
+
once the actor successfully started. Similarly, ``on_stop`` and ``on_error``
|
| 37 |
+
can be used to specify callbacks relating to the graceful or ungraceful
|
| 38 |
+
end of an actor's lifetime.
|
| 39 |
+
|
| 40 |
+
When scheduling an actor task using
|
| 41 |
+
:meth:`schedule_actor_task()
|
| 42 |
+
<ray.air.execution._internal.actor_manager.RayActorManager.schedule_actor_task>`,
|
| 43 |
+
an ``on_result`` callback can be specified that is invoked when the task
|
| 44 |
+
successfully resolves, and an ``on_error`` callback will resolve when the
|
| 45 |
+
task fails.
|
| 46 |
+
|
| 47 |
+
The RayActorManager does not implement any true asynchronous processing. Control
|
| 48 |
+
has to be explicitly yielded to the event manager via :meth:`RayActorManager.next`.
|
| 49 |
+
Callbacks will only be invoked when control is with the RayActorManager, and
|
| 50 |
+
callbacks will always be executed sequentially in order of arriving events.
|
| 51 |
+
|
| 52 |
+
Args:
|
| 53 |
+
resource_manager: Resource manager used to request resources for the actors.
|
| 54 |
+
|
| 55 |
+
Example:
|
| 56 |
+
|
| 57 |
+
.. code-block:: python
|
| 58 |
+
|
| 59 |
+
from ray.air.execution import ResourceRequest
|
| 60 |
+
from ray.air.execution._internal import RayActorManager
|
| 61 |
+
|
| 62 |
+
actor_manager = RayActorManager()
|
| 63 |
+
|
| 64 |
+
# Request an actor
|
| 65 |
+
tracked_actor = actor_manager.add_actor(
|
| 66 |
+
ActorClass,
|
| 67 |
+
kwargs={},
|
| 68 |
+
resource_request=ResourceRequest([{"CPU": 1}]),
|
| 69 |
+
on_start=actor_start_callback,
|
| 70 |
+
on_stop=actor_stop_callback,
|
| 71 |
+
on_error=actor_error_callback
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
# Yield control to event manager to start actor
|
| 75 |
+
actor_manager.next()
|
| 76 |
+
|
| 77 |
+
# Start task on the actor (ActorClass.foo.remote())
|
| 78 |
+
tracked_actor_task = actor_manager.schedule_actor_task(
|
| 79 |
+
tracked_actor,
|
| 80 |
+
method_name="foo",
|
| 81 |
+
on_result=task_result_callback,
|
| 82 |
+
on_error=task_error_callback
|
| 83 |
+
)
|
| 84 |
+
|
| 85 |
+
# Again yield control to event manager to process task futures
|
| 86 |
+
actor_manager.wait()
|
| 87 |
+
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
    def __init__(self, resource_manager: ResourceManager):
        """Set up bookkeeping for pending/live actors and their futures.

        Args:
            resource_manager: Resource manager used to request resources
                for the actors.
        """
        self._resource_manager: ResourceManager = resource_manager

        # Separate event managers for actor lifecycle futures vs. task futures.
        self._actor_state_events = RayEventManager()
        self._actor_task_events = RayEventManager()

        # ---
        # Tracked actor futures.

        # This maps TrackedActor objects to their futures. We use this to see if an
        # actor has any futures scheduled and to remove them when we terminate an actor.

        # Actors to actor task futures
        self._tracked_actors_to_task_futures: Dict[
            TrackedActor, Set[ray.ObjectRef]
        ] = defaultdict(set)

        # Actors to actor state futures (start/terminate)
        self._tracked_actors_to_state_futures: Dict[
            TrackedActor, Set[ray.ObjectRef]
        ] = defaultdict(set)

        # ---
        # Pending actors.
        # We use three dicts for actors that are requested but not yet started.

        # This dict keeps a list of actors associated with each resource request.
        # We use this to start actors in the correct order when their resources
        # become available.
        self._resource_request_to_pending_actors: Dict[
            ResourceRequest, List[TrackedActor]
        ] = defaultdict(list)

        # This dict stores the actor class, kwargs, and resource request of
        # pending actors. Once the resources are available, we start the remote
        # actor class with its args. We need the resource request to cancel it
        # if needed.
        self._pending_actors_to_attrs: Dict[
            TrackedActor, Tuple[Type, Dict[str, Any], ResourceRequest]
        ] = {}

        # This dict keeps track of cached actor tasks. We can't schedule actor
        # tasks before the actor is actually scheduled/live. So when the caller
        # tries to schedule a task, we cache it here, and schedule it once the
        # actor is started.
        self._pending_actors_to_enqueued_actor_tasks: Dict[
            TrackedActor, List[Tuple[TrackedActorTask, str, Tuple[Any], Dict[str, Any]]]
        ] = defaultdict(list)

        # ---
        # Live actors.
        # We keep one dict for actors that are currently running and a set of
        # actors that we should forcefully kill.

        # This dict associates the TrackedActor object with the Ray actor handle
        # and the resources associated to the actor. We use it to schedule the
        # actual ray tasks, and to return the resources when the actor stopped.
        self._live_actors_to_ray_actors_resources: Dict[
            TrackedActor, Tuple[ray.actor.ActorHandle, AcquiredResources]
        ] = {}
        # Invalidated (set to None) whenever the live-actor dict changes.
        self._live_resource_cache: Optional[Dict[str, Any]] = None

        # This dict contains all actors that should be killed (after calling
        # `remove_actor()`). Kill requests will be handled in wait().
        self._live_actors_to_kill: Set[TrackedActor] = set()

        # Track failed actors
        self._failed_actor_ids: Set[int] = set()
|
| 158 |
+
|
| 159 |
+
    def next(self, timeout: Optional[Union[int, float]] = None) -> bool:
        """Yield control to event manager to await the next event and invoke callbacks.

        Calling this method will wait for up to ``timeout`` seconds for the next
        event to arrive.

        When events arrive, callbacks relating to the events will be
        invoked. A timeout of ``None`` will block until the next event arrives.

        Note:
            If an actor task fails with a ``RayActorError``, this is one event,
            but it may trigger _two_ `on_error` callbacks: One for the actor,
            and one for the task.

        Note:
            The ``timeout`` argument is used for pure waiting time for events. It does
            not include time spent on processing callbacks. Depending on the processing
            time of the callbacks, it can take much longer for this function to
            return than the specified timeout.

        Args:
            timeout: Timeout in seconds to wait for next event.

        Returns:
            True if at least one event was processed.

        """
        # First issue any pending forceful actor kills
        actor_killed = self._try_kill_actor()

        # We always try to start actors as this won't trigger an event callback
        self._try_start_actors()

        # If an actor was killed, this was our event, and we return.
        if actor_killed:
            return True

        # Otherwise, collect all futures and await the next.
        resource_futures = self._resource_manager.get_resource_futures()
        actor_state_futures = self._actor_state_events.get_futures()
        actor_task_futures = self._actor_task_events.get_futures()

        # Shuffle state futures
        shuffled_state_futures = list(actor_state_futures)
        random.shuffle(shuffled_state_futures)

        # Shuffle task futures
        shuffled_task_futures = list(actor_task_futures)
        random.shuffle(shuffled_task_futures)

        # Prioritize resource futures over actor state over task futures
        all_futures = resource_futures + shuffled_state_futures + shuffled_task_futures

        start_wait = time.monotonic()
        ready, _ = ray.wait(all_futures, num_returns=1, timeout=timeout)

        if not ready:
            # Timed out with no event.
            return False

        [future] = ready

        if future in actor_state_futures:
            self._actor_state_events.resolve_future(future)
        elif future in actor_task_futures:
            self._actor_task_events.resolve_future(future)
        else:
            self._handle_ready_resource_future()
            # Ready resource futures don't count as one event as they don't trigger
            # any callbacks. So we repeat until we hit anything that is not a resource
            # future.
            # NOTE(review): recursion depth grows with consecutive ready resource
            # futures — presumably rare enough in practice; confirm.
            time_taken = time.monotonic() - start_wait
            return self.next(
                timeout=max(1e-9, timeout - time_taken) if timeout is not None else None
            )

        self._try_start_actors()
        return True
|
| 236 |
+
|
| 237 |
+
def _actor_start_resolved(self, tracked_actor: TrackedActor, future: ray.ObjectRef):
|
| 238 |
+
"""Callback to be invoked when actor started"""
|
| 239 |
+
self._tracked_actors_to_state_futures[tracked_actor].remove(future)
|
| 240 |
+
|
| 241 |
+
if tracked_actor._on_start:
|
| 242 |
+
tracked_actor._on_start(tracked_actor)
|
| 243 |
+
|
| 244 |
+
def _actor_stop_resolved(self, tracked_actor: TrackedActor):
|
| 245 |
+
"""Callback to be invoked when actor stopped"""
|
| 246 |
+
self._cleanup_actor(tracked_actor=tracked_actor)
|
| 247 |
+
|
| 248 |
+
if tracked_actor._on_stop:
|
| 249 |
+
tracked_actor._on_stop(tracked_actor)
|
| 250 |
+
|
| 251 |
+
def _actor_start_failed(self, tracked_actor: TrackedActor, exception: Exception):
|
| 252 |
+
"""Callback to be invoked when actor start/stop failed"""
|
| 253 |
+
self._failed_actor_ids.add(tracked_actor.actor_id)
|
| 254 |
+
|
| 255 |
+
self._cleanup_actor(tracked_actor=tracked_actor)
|
| 256 |
+
|
| 257 |
+
if tracked_actor._on_error:
|
| 258 |
+
tracked_actor._on_error(tracked_actor, exception)
|
| 259 |
+
|
| 260 |
+
def _actor_task_failed(
|
| 261 |
+
self, tracked_actor_task: TrackedActorTask, exception: Exception
|
| 262 |
+
):
|
| 263 |
+
"""Handle an actor task future that became ready.
|
| 264 |
+
|
| 265 |
+
- On actor error, trigger actor error callback AND error task error callback
|
| 266 |
+
- On task error, trigger actor task error callback
|
| 267 |
+
- On success, trigger actor task result callback
|
| 268 |
+
"""
|
| 269 |
+
tracked_actor = tracked_actor_task._tracked_actor
|
| 270 |
+
|
| 271 |
+
if isinstance(exception, RayActorError):
|
| 272 |
+
self._failed_actor_ids.add(tracked_actor.actor_id)
|
| 273 |
+
|
| 274 |
+
# Clean up any references to the actor and its futures
|
| 275 |
+
self._cleanup_actor(tracked_actor=tracked_actor)
|
| 276 |
+
|
| 277 |
+
# Handle actor state callbacks
|
| 278 |
+
if tracked_actor._on_error:
|
| 279 |
+
tracked_actor._on_error(tracked_actor, exception)
|
| 280 |
+
|
| 281 |
+
# Then trigger actor task error callback
|
| 282 |
+
if tracked_actor_task._on_error:
|
| 283 |
+
tracked_actor_task._on_error(tracked_actor, exception)
|
| 284 |
+
|
| 285 |
+
elif isinstance(exception, RayTaskError):
|
| 286 |
+
# Otherwise only the task failed. Invoke callback
|
| 287 |
+
if tracked_actor_task._on_error:
|
| 288 |
+
tracked_actor_task._on_error(tracked_actor, exception)
|
| 289 |
+
else:
|
| 290 |
+
raise RuntimeError(
|
| 291 |
+
f"Caught unexpected exception: {exception}"
|
| 292 |
+
) from exception
|
| 293 |
+
|
| 294 |
+
def _actor_task_resolved(self, tracked_actor_task: TrackedActorTask, result: Any):
|
| 295 |
+
tracked_actor = tracked_actor_task._tracked_actor
|
| 296 |
+
|
| 297 |
+
# Trigger actor task result callback
|
| 298 |
+
if tracked_actor_task._on_result:
|
| 299 |
+
tracked_actor_task._on_result(tracked_actor, result)
|
| 300 |
+
|
| 301 |
+
def _handle_ready_resource_future(self):
|
| 302 |
+
"""Handle a resource future that became ready.
|
| 303 |
+
|
| 304 |
+
- Update state of the resource manager
|
| 305 |
+
- Try to start one actor
|
| 306 |
+
"""
|
| 307 |
+
# Force resource manager to update internal state
|
| 308 |
+
self._resource_manager.update_state()
|
| 309 |
+
# We handle resource futures one by one, so only try to start 1 actor at a time
|
| 310 |
+
self._try_start_actors(max_actors=1)
|
| 311 |
+
|
| 312 |
+
    def _try_start_actors(self, max_actors: Optional[int] = None) -> int:
        """Try to start up to ``max_actors`` actors.

        This function will iterate through all resource requests we collected for
        pending actors. As long as a resource request can be fulfilled (resources
        are available), we try to start as many actors as possible.

        This will schedule a `Actor.__ray_ready__()` future which, once resolved,
        will trigger the `TrackedActor.on_start` callback.

        Args:
            max_actors: Maximum number of actors to start in this call.
                ``None`` means no limit.

        Returns:
            The number of actors that were started.
        """
        started_actors = 0

        # Iterate through all resource requests
        for resource_request in self._resource_request_to_pending_actors:
            if max_actors is not None and started_actors >= max_actors:
                break

            # While we have resources ready and there are actors left to schedule
            while (
                self._resource_manager.has_resources_ready(resource_request)
                and self._resource_request_to_pending_actors[resource_request]
            ):
                # Acquire resources for actor
                acquired_resources = self._resource_manager.acquire_resources(
                    resource_request
                )
                assert acquired_resources

                # Get tracked actor to start
                candidate_actors = self._resource_request_to_pending_actors[
                    resource_request
                ]
                assert candidate_actors

                # FIFO: actors start in the order they were requested.
                tracked_actor = candidate_actors.pop(0)

                # Get actor class and arguments
                actor_cls, kwargs, _ = self._pending_actors_to_attrs.pop(tracked_actor)

                # Wrap plain classes so they can be started as Ray actors.
                if not isinstance(actor_cls, ray.actor.ActorClass):
                    actor_cls = ray.remote(actor_cls)

                # Associate to acquired resources
                [remote_actor_cls] = acquired_resources.annotate_remote_entities(
                    [actor_cls]
                )

                # Start Ray actor
                actor = remote_actor_cls.remote(**kwargs)

                # Track
                self._live_actors_to_ray_actors_resources[tracked_actor] = (
                    actor,
                    acquired_resources,
                )
                self._live_resource_cache = None

                # Schedule ready future
                future = actor.__ray_ready__.remote()

                self._tracked_actors_to_state_futures[tracked_actor].add(future)

                # We need to create the callbacks in a function so tracked_actors
                # are captured correctly (avoids Python's late-binding closure
                # pitfall inside the loop).
                def create_callbacks(
                    tracked_actor: TrackedActor, future: ray.ObjectRef
                ):
                    def on_actor_start(result: Any):
                        self._actor_start_resolved(
                            tracked_actor=tracked_actor, future=future
                        )

                    def on_error(exception: Exception):
                        self._actor_start_failed(
                            tracked_actor=tracked_actor, exception=exception
                        )

                    return on_actor_start, on_error

                on_actor_start, on_error = create_callbacks(
                    tracked_actor=tracked_actor, future=future
                )

                self._actor_state_events.track_future(
                    future=future,
                    on_result=on_actor_start,
                    on_error=on_error,
                )

                # Flush any tasks that were scheduled while the actor was pending.
                self._enqueue_cached_actor_tasks(tracked_actor=tracked_actor)

                started_actors += 1

        return started_actors
|
| 406 |
+
|
| 407 |
+
def _enqueue_cached_actor_tasks(self, tracked_actor: TrackedActor):
|
| 408 |
+
assert tracked_actor in self._live_actors_to_ray_actors_resources
|
| 409 |
+
|
| 410 |
+
# Enqueue cached futures
|
| 411 |
+
cached_tasks = self._pending_actors_to_enqueued_actor_tasks.pop(
|
| 412 |
+
tracked_actor, []
|
| 413 |
+
)
|
| 414 |
+
for tracked_actor_task, method_name, args, kwargs in cached_tasks:
|
| 415 |
+
self._schedule_tracked_actor_task(
|
| 416 |
+
tracked_actor_task=tracked_actor_task,
|
| 417 |
+
method_name=method_name,
|
| 418 |
+
args=args,
|
| 419 |
+
kwargs=kwargs,
|
| 420 |
+
)
|
| 421 |
+
|
| 422 |
+
def _try_kill_actor(self) -> bool:
    """Forcefully terminate one actor that was scheduled for termination.

    Returns:
        True if an actor was killed, False if none was waiting to be killed.
    """
    if not self._live_actors_to_kill:
        return False

    tracked_actor = self._live_actors_to_kill.pop()

    # Look up the Ray actor handle; resources are released in
    # the stop-resolution path, not here.
    ray_actor, _ = self._live_actors_to_ray_actors_resources[tracked_actor]

    # Hard kill: no graceful __ray_terminate__ round-trip.
    ray.kill(ray_actor)

    self._cleanup_actor_futures(tracked_actor)
    self._actor_stop_resolved(tracked_actor)

    return True
|
| 443 |
+
|
| 444 |
+
def _cleanup_actor(self, tracked_actor: TrackedActor):
    """Stop tracking an actor and return its resources to the manager."""
    self._cleanup_actor_futures(tracked_actor)

    # Drop from the live-actor map; only the acquired resources are needed.
    _, acquired = self._live_actors_to_ray_actors_resources.pop(tracked_actor)
    # Invalidate the aggregated live-resource cache.
    self._live_resource_cache = None

    # Hand the resources back to the resource manager.
    self._resource_manager.free_resources(acquired_resource=acquired)
|
| 456 |
+
|
| 457 |
+
@property
def all_actors(self) -> List[TrackedActor]:
    """All ``TrackedActor`` objects managed by this manager instance."""
    return [*self.live_actors, *self.pending_actors]
|
| 461 |
+
|
| 462 |
+
@property
def live_actors(self) -> List[TrackedActor]:
    """All ``TrackedActor`` objects that are currently alive."""
    return [actor for actor in self._live_actors_to_ray_actors_resources]
|
| 466 |
+
|
| 467 |
+
@property
def pending_actors(self) -> List[TrackedActor]:
    """All ``TrackedActor`` objects that are currently pending."""
    return [actor for actor in self._pending_actors_to_attrs]
|
| 471 |
+
|
| 472 |
+
@property
def num_live_actors(self):
    """Number of started (live) actors."""
    # Equivalent to len(self.live_actors) without building the list.
    return len(self._live_actors_to_ray_actors_resources)
|
| 476 |
+
|
| 477 |
+
@property
def num_pending_actors(self) -> int:
    """Number of pending (not yet started) actors."""
    # Equivalent to len(self.pending_actors) without building the list.
    return len(self._pending_actors_to_attrs)
|
| 481 |
+
|
| 482 |
+
@property
def num_total_actors(self):
    """Total number of tracked actors (live plus pending)."""
    return self.num_live_actors + self.num_pending_actors
|
| 486 |
+
|
| 487 |
+
@property
def num_actor_tasks(self):
    """Number of currently tracked (unresolved) actor task futures."""
    # Delegates to the task event manager, which owns the future registry.
    return self._actor_task_events.num_futures
|
| 491 |
+
|
| 492 |
+
def get_live_actors_resources(self):
    """Aggregate the resource bundles held by all live actors.

    The result is cached; the cache is invalidated (set to ``None``)
    whenever an actor starts or is cleaned up.
    """
    if self._live_resource_cache:
        return self._live_resource_cache

    totals = Counter()
    for _, acquired in self._live_actors_to_ray_actors_resources.values():
        for bundle in acquired.resource_request.bundles:
            totals.update(bundle)

    self._live_resource_cache = dict(totals)
    return self._live_resource_cache
|
| 502 |
+
|
| 503 |
+
def add_actor(
    self,
    cls: Union[Type, ray.actor.ActorClass],
    kwargs: Dict[str, Any],
    resource_request: ResourceRequest,
    *,
    on_start: Optional[Callable[[TrackedActor], None]] = None,
    on_stop: Optional[Callable[[TrackedActor], None]] = None,
    on_error: Optional[Callable[[TrackedActor, Exception], None]] = None,
) -> TrackedActor:
    """Add an actor to be tracked.

    Requests resources for the actor. Once they become available the actor
    is started and the :meth:`TrackedActor.on_start
    <ray.air.execution._internal.tracked_actor.TrackedActor.on_start>`
    callback is invoked.

    Args:
        cls: Actor class to schedule.
        kwargs: Keyword arguments to pass to actor class on construction.
        resource_request: Resources required to start the actor.
        on_start: Callback to invoke when the actor started.
        on_stop: Callback to invoke when the actor stopped.
        on_error: Callback to invoke when the actor failed.

    Returns:
        Tracked actor object to reference the actor in subsequent API calls.
    """
    # A random 128-bit ID keeps tracked actors unique across manager resets.
    tracked_actor = TrackedActor(
        uuid.uuid4().int, on_start=on_start, on_stop=on_stop, on_error=on_error
    )

    # Register as pending until resources arrive and the actor is started.
    self._pending_actors_to_attrs[tracked_actor] = (cls, kwargs, resource_request)
    self._resource_request_to_pending_actors[resource_request].append(tracked_actor)

    self._resource_manager.request_resources(resource_request=resource_request)

    return tracked_actor
|
| 543 |
+
|
| 544 |
+
def remove_actor(
    self,
    tracked_actor: TrackedActor,
    kill: bool = False,
    stop_future: Optional[ray.ObjectRef] = None,
) -> bool:
    """Remove a tracked actor.

    If the actor has already been started, this will stop the actor. This will
    trigger the :meth:`TrackedActor.on_stop
    <ray.air.execution._internal.tracked_actor.TrackedActor.on_stop>`
    callback once the actor stopped.

    If the actor has only been requested, but not started, yet, this will cancel
    the actor request. This will not trigger any callback.

    If ``kill=True``, this will use ``ray.kill()`` to forcefully terminate the
    actor. Otherwise, graceful actor deconstruction will be scheduled after
    all currently tracked futures are resolved.

    This method returns a boolean, indicating if a stop future is tracked and
    the ``on_stop`` callback will be invoked. If the actor has been alive,
    this will be ``True``. If the actor hasn't been scheduled, yet, or failed
    (and triggered the ``on_error`` callback), this will be ``False``.

    Args:
        tracked_actor: Tracked actor to be removed.
        kill: If set, will forcefully terminate the actor instead of gracefully
            scheduling termination.
        stop_future: If set, use this future to track actor termination.
            Otherwise, schedule a ``__ray_terminate__`` future.

    Returns:
        Boolean indicating if the actor was previously alive, and thus whether
        a callback will be invoked once it is terminated.

    """
    if tracked_actor.actor_id in self._failed_actor_ids:
        # Already failed: on_error already fired; nothing to stop.
        logger.debug(
            f"Tracked actor already failed, no need to remove: {tracked_actor}"
        )
        return False
    elif tracked_actor in self._live_actors_to_ray_actors_resources:
        # Ray actor is running.

        if not kill:
            # Schedule __ray_terminate__ future
            ray_actor, _ = self._live_actors_to_ray_actors_resources[tracked_actor]

            # Clear state futures here to avoid resolving __ray_ready__ futures
            for future in list(
                self._tracked_actors_to_state_futures[tracked_actor]
            ):
                self._actor_state_events.discard_future(future)
                self._tracked_actors_to_state_futures[tracked_actor].remove(future)

            # If the __ray_ready__ future hasn't resolved yet, but we already
            # scheduled the actor via Actor.remote(), we just want to stop
            # it but not trigger any callbacks. This is in accordance with
            # the contract defined in the docstring.
            tracked_actor._on_start = None
            tracked_actor._on_stop = None
            tracked_actor._on_error = None

            def on_actor_stop(*args, **kwargs):
                # Invoked on both success and error of the stop future.
                self._actor_stop_resolved(tracked_actor=tracked_actor)

            if stop_future:
                # If the stop future was scheduled via the actor manager,
                # discard it (track it as a state future instead).
                self._actor_task_events.discard_future(stop_future)
            else:
                stop_future = ray_actor.__ray_terminate__.remote()

            self._actor_state_events.track_future(
                future=stop_future,
                on_result=on_actor_stop,
                on_error=on_actor_stop,
            )

            self._tracked_actors_to_state_futures[tracked_actor].add(stop_future)
        else:
            # kill = True: defer hard termination to _try_kill_actor().
            self._live_actors_to_kill.add(tracked_actor)

        return True

    elif tracked_actor in self._pending_actors_to_attrs:
        # Actor is pending, stop
        _, _, resource_request = self._pending_actors_to_attrs.pop(tracked_actor)
        self._resource_request_to_pending_actors[resource_request].remove(
            tracked_actor
        )
        self._resource_manager.cancel_resource_request(
            resource_request=resource_request
        )
        return False
    else:
        raise ValueError(f"Unknown tracked actor: {tracked_actor}")
|
| 643 |
+
|
| 644 |
+
def is_actor_started(self, tracked_actor: TrackedActor) -> bool:
    """Returns True if the actor has been started and has not failed.

    Args:
        tracked_actor: Tracked actor object.
    """
    if tracked_actor.actor_id in self._failed_actor_ids:
        return False
    return tracked_actor in self._live_actors_to_ray_actors_resources
|
| 654 |
+
|
| 655 |
+
def is_actor_failed(self, tracked_actor: TrackedActor) -> bool:
    """Returns True if this actor previously failed (tracked by actor ID)."""
    return tracked_actor.actor_id in self._failed_actor_ids
|
| 657 |
+
|
| 658 |
+
def get_actor_resources(
    self, tracked_actor: TrackedActor
) -> Optional[AcquiredResources]:
    """Returns the acquired resources of a started actor.

    Returns ``None`` if the actor has not been started yet.

    Args:
        tracked_actor: Tracked actor object.
    """
    if self.is_actor_started(tracked_actor):
        # Live-actor map stores (ray_actor, acquired_resources) pairs.
        return self._live_actors_to_ray_actors_resources[tracked_actor][1]
    return None
|
| 672 |
+
|
| 673 |
+
def schedule_actor_task(
    self,
    tracked_actor: TrackedActor,
    method_name: str,
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    on_result: Optional[Callable[[TrackedActor, Any], None]] = None,
    on_error: Optional[Callable[[TrackedActor, Exception], None]] = None,
    _return_future: bool = False,
) -> Optional[ray.ObjectRef]:
    """Schedule and track a task on an actor.

    Invokes ``actor.<method_name>.remote(*args, **kwargs)`` on the
    ``tracked_actor``. If the actor has not started yet, the task is cached
    and scheduled once the actor comes up.

    ``on_result(actor, result)`` is invoked when the task resolves
    successfully; ``on_error(actor, exception)`` when it fails.

    Args:
        tracked_actor: Actor to schedule task on.
        method_name: Remote method name to invoke on the actor.
        args: Arguments to pass to the task.
        kwargs: Keyword arguments to pass to the task.
        on_result: Callback to invoke when the task resolves.
        on_error: Callback to invoke when the task fails.
        _return_future: If True, return the Ray future of the scheduled task
            (``None`` when the task was only cached).

    Raises:
        ValueError: If the ``tracked_actor`` is not managed by this event
            manager.
    """
    call_args = args or tuple()
    call_kwargs = kwargs or {}

    # Tasks for actors that already failed are silently dropped.
    if tracked_actor.actor_id in self._failed_actor_ids:
        return

    tracked_actor_task = TrackedActorTask(
        tracked_actor=tracked_actor, on_result=on_result, on_error=on_error
    )

    if tracked_actor in self._live_actors_to_ray_actors_resources:
        # Actor is live: schedule right away.
        res = self._schedule_tracked_actor_task(
            tracked_actor_task=tracked_actor_task,
            method_name=method_name,
            args=call_args,
            kwargs=call_kwargs,
            _return_future=_return_future,
        )
        if _return_future:
            return res[1]
        return None

    if tracked_actor not in self._pending_actors_to_attrs:
        raise ValueError(
            f"Tracked actor is not managed by this event manager: "
            f"{tracked_actor}"
        )

    # Actor not started yet: cache the task for execution on start.
    self._pending_actors_to_enqueued_actor_tasks[tracked_actor].append(
        (tracked_actor_task, method_name, call_args, call_kwargs)
    )
|
| 745 |
+
|
| 746 |
+
def _schedule_tracked_actor_task(
    self,
    tracked_actor_task: TrackedActorTask,
    method_name: str,
    *,
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    _return_future: bool = False,
) -> Union[TrackedActorTask, Tuple[TrackedActorTask, ray.ObjectRef]]:
    """Invoke ``method_name`` on a live actor and track the resulting future.

    Raises:
        AttributeError: If the actor has no remote method ``method_name``.
    """
    tracked_actor = tracked_actor_task._tracked_actor
    ray_actor, _ = self._live_actors_to_ray_actors_resources[tracked_actor]

    try:
        remote_fn = getattr(ray_actor, method_name)
    except AttributeError as e:
        raise AttributeError(
            f"Remote function `{method_name}()` does not exist for this actor."
        ) from e

    # Bind the resolution callbacks to this specific task object.
    def on_result(result: Any):
        self._actor_task_resolved(
            tracked_actor_task=tracked_actor_task, result=result
        )

    def on_error(exception: Exception):
        self._actor_task_failed(
            tracked_actor_task=tracked_actor_task, exception=exception
        )

    future = remote_fn.remote(*args, **kwargs)

    self._actor_task_events.track_future(
        future=future, on_result=on_result, on_error=on_error
    )
    self._tracked_actors_to_task_futures[tracked_actor].add(future)

    return (tracked_actor_task, future) if _return_future else tracked_actor_task
|
| 787 |
+
|
| 788 |
+
def schedule_actor_tasks(
    self,
    tracked_actors: List[TrackedActor],
    method_name: str,
    *,
    args: Optional[Union[Tuple, List[Tuple]]] = None,
    kwargs: Optional[Union[Dict, List[Dict]]] = None,
    on_result: Optional[Callable[[TrackedActor, Any], None]] = None,
    on_error: Optional[Callable[[TrackedActor, Exception], None]] = None,
) -> None:
    """Schedule and track tasks on a list of actors.

    This method will schedule a remote task ``method_name`` on all
    ``tracked_actors``.

    ``args`` and ``kwargs`` can be a single tuple/dict, in which case the same
    (keyword) arguments are passed to all actors. If a list is passed instead,
    they are mapped to the respective actors. In that case, the list of
    (keyword) arguments must be the same length as the list of actors.

    The ``on_result`` callback is triggered when a task resolves successfully,
    with the actor and the task result as arguments. The ``on_error`` callback
    is triggered when a task fails, with the actor and the exception.

    Args:
        tracked_actors: List of actors to schedule tasks on.
        method_name: Remote actor method to invoke on the actors. If this is
            e.g. ``foo``, then ``actor.foo.remote(*args, **kwargs)`` will be
            scheduled on all actors.
        args: Arguments to pass to the task.
        kwargs: Keyword arguments to pass to the task.
        on_result: Callback to invoke when the task resolves.
        on_error: Callback to invoke when the task fails.

    Raises:
        ValueError: If ``args`` or ``kwargs`` is a list whose length differs
            from ``tracked_actors``.
    """
    # BUG FIX: the two error messages below previously reported the *other*
    # argument's length (len(kwargs) in the args branch and vice versa),
    # which also raised TypeError when that other argument was None.
    if not isinstance(args, List):
        # Single tuple (or None): broadcast to every actor.
        args_list = [args] * len(tracked_actors)
    else:
        if len(tracked_actors) != len(args):
            raise ValueError(
                f"Length of args must be the same as tracked_actors "
                f"list. Got `len(args)={len(args)}` and "
                f"`len(tracked_actors)={len(tracked_actors)}`"
            )
        args_list = args

    if not isinstance(kwargs, List):
        # Single dict (or None): broadcast to every actor.
        kwargs_list = [kwargs] * len(tracked_actors)
    else:
        if len(tracked_actors) != len(kwargs):
            raise ValueError(
                f"Length of kwargs must be the same as tracked_actors "
                f"list. Got `len(kwargs)={len(kwargs)}` and "
                f"`len(tracked_actors)={len(tracked_actors)}`"
            )
        kwargs_list = kwargs

    # Use distinct loop names so the parameters are not shadowed.
    for tracked_actor, task_args, task_kwargs in zip(
        tracked_actors, args_list, kwargs_list
    ):
        self.schedule_actor_task(
            tracked_actor=tracked_actor,
            method_name=method_name,
            args=task_args,
            kwargs=task_kwargs,
            on_result=on_result,
            on_error=on_error,
        )
|
| 861 |
+
|
| 862 |
+
def clear_actor_task_futures(self, tracked_actor: TrackedActor):
    """Discard all actor task futures from a tracked actor."""
    # pop() with default handles actors that never scheduled a task.
    for future in self._tracked_actors_to_task_futures.pop(tracked_actor, []):
        self._actor_task_events.discard_future(future)
|
| 867 |
+
|
| 868 |
+
def _cleanup_actor_futures(self, tracked_actor: TrackedActor):
    """Discard every task and state future tracked for this actor."""
    # Task futures first...
    self.clear_actor_task_futures(tracked_actor=tracked_actor)

    # ...then state futures (__ray_ready__ / __ray_terminate__).
    for future in self._tracked_actors_to_state_futures.pop(tracked_actor, []):
        self._actor_state_events.discard_future(future)
|
| 876 |
+
|
| 877 |
+
def cleanup(self):
    """Tear down all managed actors and reset the manager.

    Hard-kills every live actor and frees its resources, cancels one
    resource request per still-pending actor, clears the resource manager,
    and finally re-initializes this manager with the same resource manager.
    """
    for actor, acquired_resources in self._live_actors_to_ray_actors_resources.values():
        ray.kill(actor)
        self._resource_manager.free_resources(acquired_resources)

    for (
        resource_request,
        pending_actors,
    ) in self._resource_request_to_pending_actors.items():
        # One cancellation per pending actor that requested these resources.
        # (Idiom fix: the index of the old `range(len(...))` loop was unused.)
        for _ in pending_actors:
            self._resource_manager.cancel_resource_request(resource_request)

    self._resource_manager.clear()

    # Reset all internal bookkeeping by re-running __init__.
    self.__init__(resource_manager=self._resource_manager)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/barrier.py
ADDED
|
@@ -0,0 +1,93 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable, List, Optional, Tuple
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class Barrier:
    """Collects results and processes them in bulk once a count is reached.

    Call :meth:`arrive` to register results. When ``max_results`` results
    have arrived, the ``on_completion`` callback (if any) is invoked exactly
    once, receiving this barrier as its only argument. Results arriving
    after completion are still collected but do not re-trigger the callback.
    :meth:`reset` clears collected results and the completion flag so the
    callback may fire again.

    Args:
        max_results: Number of results that completes the barrier.
        on_completion: Callback invoked once ``max_results`` results arrived.
    """

    def __init__(
        self,
        max_results: int,
        *,
        on_completion: Optional[Callable[["Barrier"], None]] = None,
    ):
        self._max_results = max_results

        # Whether the completion callback has fired for the current round.
        self._completed = False
        self._on_completion = on_completion

        # Results received so far.
        self._results: List[Tuple[Any]] = []

    def arrive(self, *data):
        """Record one arriving result.

        Counts against the ``max_results`` limit; the data is retrievable
        via :meth:`get_results`.

        Args:
            *data: Result data to cache.
        """
        # A single positional argument is stored bare; several are stored
        # as a tuple.
        self._results.append(data[0] if len(data) == 1 else data)
        self._check_completion()

    def _check_completion(self):
        # Fire the completion callback at most once per round.
        if self._completed:
            return

        if self.num_results >= self._max_results:
            self._completed = True
            if self._on_completion:
                self._on_completion(self)

    @property
    def completed(self) -> bool:
        """Returns True if the barrier is completed."""
        return self._completed

    @property
    def num_results(self) -> int:
        """Number of received (successful) results."""
        return len(self._results)

    def get_results(self) -> List[Tuple[Any]]:
        """Return list of received results."""
        return self._results

    def reset(self) -> None:
        """Reset barrier, removing all received results.

        Also resets the completion status, so with enough new arrivals the
        ``on_completion`` callback will be invoked again.
        """
        self._completed = False
        self._results = []
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/event_manager.py
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
from typing import Any, Callable, Dict, Iterable, Optional, Set, Tuple, Union
|
| 3 |
+
|
| 4 |
+
import ray
|
| 5 |
+
|
| 6 |
+
_ResultCallback = Callable[[Any], None]
|
| 7 |
+
_ErrorCallback = Callable[[Exception], None]
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class RayEventManager:
    """Tracks Ray futures and dispatches callbacks when they resolve.

    Futures are registered via :meth:`track_future` together with optional
    ``on_result`` / ``on_error`` callbacks. Callbacks fire only when control
    is yielded to the manager, via :meth:`wait` or :meth:`resolve_future`.

    Args:
        shuffle_futures: If True, futures are shuffled before being awaited,
            avoiding implicit prioritization of futures within Ray.
    """

    def __init__(self, shuffle_futures: bool = True):
        self._shuffle_futures = shuffle_futures

        # future -> (on_result, on_error) callback pair
        self._tracked_futures: Dict[
            ray.ObjectRef, Tuple[Optional[_ResultCallback], Optional[_ErrorCallback]]
        ] = {}

    def track_future(
        self,
        future: ray.ObjectRef,
        on_result: Optional[_ResultCallback] = None,
        on_error: Optional[_ErrorCallback] = None,
    ):
        """Register a single future and its resolution callbacks.

        Args:
            future: Ray future to await.
            on_result: Callback invoked when the future resolves successfully.
            on_error: Callback invoked when the future fails.
        """
        self._tracked_futures[future] = (on_result, on_error)

    def track_futures(
        self,
        futures: Iterable[ray.ObjectRef],
        on_result: Optional[_ResultCallback] = None,
        on_error: Optional[_ErrorCallback] = None,
    ):
        """Register several futures sharing the same callbacks.

        Args:
            futures: Ray futures to await.
            on_result: Callback invoked when a future resolves successfully.
            on_error: Callback invoked when a future fails.
        """
        for fut in futures:
            self.track_future(fut, on_result=on_result, on_error=on_error)

    def discard_future(self, future: ray.ObjectRef):
        """Stop tracking a future; it will trigger no callbacks.

        Args:
            future: Ray future to discard.
        """
        self._tracked_futures.pop(future, None)

    def get_futures(self) -> Set[ray.ObjectRef]:
        """Return the set of futures tracked by the event manager."""
        return set(self._tracked_futures)

    @property
    def num_futures(self) -> int:
        # Number of currently tracked futures.
        return len(self._tracked_futures)

    def resolve_future(self, future: ray.ObjectRef):
        """Block until ``future`` resolves, then fire its callback.

        On success the ``on_result`` callback fires; on failure ``on_error``.
        If the future failed and no ``on_error`` was registered, the
        exception is re-raised.

        Args:
            future: Ray future to resolve.

        Raises:
            ValueError: If the future is not tracked by this manager.
        """
        try:
            on_result, on_error = self._tracked_futures.pop(future)
        except KeyError as e:
            raise ValueError(
                f"Future {future} is not tracked by this RayEventManager"
            ) from e

        try:
            result = ray.get(future)
        except Exception as e:
            if not on_error:
                raise e
            on_error(e)
        else:
            if on_result:
                on_result(result)

    def wait(
        self,
        timeout: Optional[Union[float, int]] = None,
        num_results: Optional[int] = 1,
    ):
        """Wait up to ``timeout`` seconds for ``num_results`` futures.

        With ``timeout=None`` this blocks until ``num_results`` futures
        resolve; with ``num_results=None`` all tracked futures are awaited.
        Callbacks fire for every future that resolved.

        Args:
            timeout: Timeout in seconds to wait for futures to resolve.
            num_results: Number of futures to await; ``None`` means all.
        """
        pending = list(self.get_futures())

        if self._shuffle_futures:
            random.shuffle(pending)

        ready, _ = ray.wait(
            pending, timeout=timeout, num_returns=num_results or len(pending)
        )
        for fut in ready:
            self.resolve_future(fut)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/tracked_actor.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Callable, Optional
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class TrackedActor:
    """Handle referencing a Ray actor tracked by an actor manager.

    Existence of this object does not imply the Ray actor has been started;
    actor state must be queried from the managing actor manager.

    Note:
        Instances are produced by the :class:`RayActorManager`; do not
        construct them manually.

    Attributes:
        actor_id: ID identifying the actor within the actor manager. This is
            unrelated to the Ray-core actor ID.
    """

    def __init__(
        self,
        actor_id: int,
        on_start: Optional[Callable[["TrackedActor"], None]] = None,
        on_stop: Optional[Callable[["TrackedActor"], None]] = None,
        on_error: Optional[Callable[["TrackedActor", Exception], None]] = None,
    ):
        self.actor_id = actor_id
        self._on_start = on_start
        self._on_stop = on_stop
        self._on_error = on_error

    def set_on_start(self, on_start: Optional[Callable[["TrackedActor"], None]]):
        # Replace the start callback.
        self._on_start = on_start

    def set_on_stop(self, on_stop: Optional[Callable[["TrackedActor"], None]]):
        # Replace the stop callback.
        self._on_stop = on_stop

    def set_on_error(
        self, on_error: Optional[Callable[["TrackedActor", Exception], None]]
    ):
        # Replace the error callback.
        self._on_error = on_error

    def __repr__(self):
        return f"<TrackedActor {self.actor_id}>"

    def __eq__(self, other):
        # Identity is defined purely by the manager-assigned actor_id.
        return isinstance(other, self.__class__) and self.actor_id == other.actor_id

    def __hash__(self):
        return hash(self.actor_id)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/_internal/tracked_actor_task.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, Callable, Optional
|
| 2 |
+
|
| 3 |
+
from ray.air.execution._internal.tracked_actor import TrackedActor
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class TrackedActorTask:
|
| 7 |
+
"""Actor task tracked by a Ray event manager.
|
| 8 |
+
|
| 9 |
+
This container class is used to define callbacks to be invoked when
|
| 10 |
+
the task resolves, errors, or times out.
|
| 11 |
+
|
| 12 |
+
Note:
|
| 13 |
+
Objects of this class are returned by the :class:`RayActorManager`.
|
| 14 |
+
This class should not be instantiated manually.
|
| 15 |
+
|
| 16 |
+
Args:
|
| 17 |
+
tracked_actor: Tracked actor object this task is scheduled on.
|
| 18 |
+
on_result: Callback to invoke when the task resolves.
|
| 19 |
+
on_error: Callback to invoke when the task fails.
|
| 20 |
+
|
| 21 |
+
Example:
|
| 22 |
+
|
| 23 |
+
.. code-block:: python
|
| 24 |
+
|
| 25 |
+
tracked_futures = actor_manager.schedule_actor_tasks(
|
| 26 |
+
actor_manager.live_actors,
|
| 27 |
+
"foo",
|
| 28 |
+
on_result=lambda actor, result: print(result)
|
| 29 |
+
)
|
| 30 |
+
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
def __init__(
|
| 34 |
+
self,
|
| 35 |
+
tracked_actor: TrackedActor,
|
| 36 |
+
on_result: Optional[Callable[[TrackedActor, Any], None]] = None,
|
| 37 |
+
on_error: Optional[Callable[[TrackedActor, Exception], None]] = None,
|
| 38 |
+
):
|
| 39 |
+
self._tracked_actor = tracked_actor
|
| 40 |
+
|
| 41 |
+
self._on_result = on_result
|
| 42 |
+
self._on_error = on_error
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/resources/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (593 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/resources/__pycache__/placement_group.cpython-310.pyc
ADDED
|
Binary file (6.53 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/execution/resources/resource_manager.py
ADDED
|
@@ -0,0 +1,155 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import abc
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
|
| 4 |
+
import ray
|
| 5 |
+
from ray.air.execution.resources.request import AcquiredResources, ResourceRequest
|
| 6 |
+
from ray.util.annotations import DeveloperAPI
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
@DeveloperAPI
|
| 10 |
+
class ResourceManager(abc.ABC):
|
| 11 |
+
"""Resource manager interface.
|
| 12 |
+
|
| 13 |
+
A resource manager can be used to request resources from a Ray cluster and
|
| 14 |
+
allocate them to remote Ray tasks or actors.
|
| 15 |
+
|
| 16 |
+
Resources have to be requested before they can be acquired.
|
| 17 |
+
|
| 18 |
+
Resources managed by the resource manager can be in three states:
|
| 19 |
+
|
| 20 |
+
1. "Requested": The resources have been requested but are not yet available to
|
| 21 |
+
schedule remote Ray objects. The resource request may trigger autoscaling,
|
| 22 |
+
and can be cancelled if no longer needed.
|
| 23 |
+
2. "Ready": The requested resources are now available to schedule remote Ray
|
| 24 |
+
objects. They can be acquired and subsequently used remote Ray objects.
|
| 25 |
+
The resource request can still be cancelled if no longer needed.
|
| 26 |
+
3. "Acquired": The resources have been acquired by a caller to use for scheduling
|
| 27 |
+
remote Ray objects. Note that it is the responsibility of the caller to
|
| 28 |
+
schedule the Ray objects with these resources.
|
| 29 |
+
The associated resource request has been completed and can no longer be
|
| 30 |
+
cancelled. The acquired resources can be freed by the resource manager when
|
| 31 |
+
they are no longer used.
|
| 32 |
+
|
| 33 |
+
The flow is as follows:
|
| 34 |
+
|
| 35 |
+
.. code-block:: python
|
| 36 |
+
|
| 37 |
+
# Create resource manager
|
| 38 |
+
resource_manager = ResourceManager()
|
| 39 |
+
|
| 40 |
+
# Create resource request
|
| 41 |
+
resource_request = ResourceRequest([{"CPU": 4}])
|
| 42 |
+
|
| 43 |
+
# Pass to resource manager
|
| 44 |
+
resource_manager.request_resources(resource_request)
|
| 45 |
+
|
| 46 |
+
# Wait until ready
|
| 47 |
+
while not resource_manager.has_resources_ready(resource_request):
|
| 48 |
+
time.sleep(1)
|
| 49 |
+
|
| 50 |
+
# Once ready, acquire resources
|
| 51 |
+
acquired_resource = resource_manager.acquire_resources(resource_request)
|
| 52 |
+
|
| 53 |
+
# Bind to remote task or actor
|
| 54 |
+
annotated_remote_fn = acquired_resource.annotate_remote_entities(
|
| 55 |
+
[remote_fn])
|
| 56 |
+
|
| 57 |
+
# Run remote function. This will use the acquired resources
|
| 58 |
+
ray.get(annotated_remote_fn.remote())
|
| 59 |
+
|
| 60 |
+
# After using the resources, free
|
| 61 |
+
resource_manager.free_resources(annotated_resources)
|
| 62 |
+
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
def request_resources(self, resource_request: ResourceRequest):
|
| 66 |
+
"""Request resources.
|
| 67 |
+
|
| 68 |
+
Depending on the backend, resources can trigger autoscaling. Requested
|
| 69 |
+
resources can be ready or not ready. Once they are "ready", they can
|
| 70 |
+
be acquired and used by remote Ray objects.
|
| 71 |
+
|
| 72 |
+
Resource requests can be cancelled anytime using ``cancel_resource_request()``.
|
| 73 |
+
Once acquired, the resource request is removed. Acquired resources can be
|
| 74 |
+
freed with ``free_resources()``.
|
| 75 |
+
"""
|
| 76 |
+
raise NotImplementedError
|
| 77 |
+
|
| 78 |
+
def cancel_resource_request(self, resource_request: ResourceRequest):
|
| 79 |
+
"""Cancel resource request.
|
| 80 |
+
|
| 81 |
+
Resource requests can be cancelled anytime before a resource is acquired.
|
| 82 |
+
Acquiring a resource will remove the associated resource request.
|
| 83 |
+
Acquired resources can be freed with ``free_resources()``.
|
| 84 |
+
"""
|
| 85 |
+
raise NotImplementedError
|
| 86 |
+
|
| 87 |
+
def has_resources_ready(self, resource_request: ResourceRequest) -> bool:
|
| 88 |
+
"""Returns True if resources for the given request are ready to be acquired."""
|
| 89 |
+
raise NotImplementedError
|
| 90 |
+
|
| 91 |
+
def acquire_resources(
|
| 92 |
+
self, resource_request: ResourceRequest
|
| 93 |
+
) -> Optional[AcquiredResources]:
|
| 94 |
+
"""Acquire resources. Returns None if resources are not ready to be acquired.
|
| 95 |
+
|
| 96 |
+
Acquiring resources will remove the associated resource request.
|
| 97 |
+
Acquired resources can be returned with ``free_resources()``.
|
| 98 |
+
"""
|
| 99 |
+
raise NotImplementedError
|
| 100 |
+
|
| 101 |
+
def free_resources(self, acquired_resource: AcquiredResources):
|
| 102 |
+
"""Free acquired resources from usage and return them to the resource manager.
|
| 103 |
+
|
| 104 |
+
Freeing resources will return the resources to the manager, but there are
|
| 105 |
+
no guarantees about the tasks and actors scheduled on the resources. The caller
|
| 106 |
+
should make sure that any references to tasks or actors scheduled on the
|
| 107 |
+
resources have been removed before calling ``free_resources()``.
|
| 108 |
+
"""
|
| 109 |
+
raise NotImplementedError
|
| 110 |
+
|
| 111 |
+
def get_resource_futures(self) -> List[ray.ObjectRef]:
|
| 112 |
+
"""Return futures for resources to await.
|
| 113 |
+
|
| 114 |
+
Depending on the backend, we use resource futures to determine availability
|
| 115 |
+
of resources (e.g. placement groups) or resolution of requests.
|
| 116 |
+
In this case, the futures can be awaited externally by the caller.
|
| 117 |
+
|
| 118 |
+
When a resource future resolved, the caller may call ``update_state()``
|
| 119 |
+
to force the resource manager to update its internal state immediately.
|
| 120 |
+
"""
|
| 121 |
+
return []
|
| 122 |
+
|
| 123 |
+
def update_state(self):
|
| 124 |
+
"""Update internal state of the resource manager.
|
| 125 |
+
|
| 126 |
+
The resource manager may have internal state that needs periodic updating.
|
| 127 |
+
For instance, depending on the backend, resource futures can be awaited
|
| 128 |
+
externally (with ``get_resource_futures()``).
|
| 129 |
+
|
| 130 |
+
If such a future resolved, the caller can instruct the resource
|
| 131 |
+
manager to update its internal state immediately.
|
| 132 |
+
"""
|
| 133 |
+
pass
|
| 134 |
+
|
| 135 |
+
def clear(self):
|
| 136 |
+
"""Reset internal state and clear all resources.
|
| 137 |
+
|
| 138 |
+
Calling this method will reset the resource manager to its initialization state.
|
| 139 |
+
All resources will be removed.
|
| 140 |
+
|
| 141 |
+
Clearing the state will remove tracked resources from the manager, but there are
|
| 142 |
+
no guarantees about the tasks and actors scheduled on the resources. The caller
|
| 143 |
+
should make sure that any references to tasks or actors scheduled on the
|
| 144 |
+
resources have been removed before calling ``clear()``.
|
| 145 |
+
"""
|
| 146 |
+
raise NotImplementedError
|
| 147 |
+
|
| 148 |
+
def __reduce__(self):
|
| 149 |
+
"""We disallow serialization.
|
| 150 |
+
|
| 151 |
+
Shared resource managers should live on an actor.
|
| 152 |
+
"""
|
| 153 |
+
raise ValueError(
|
| 154 |
+
f"Resource managers cannot be serialized. Resource manager: {str(self)}"
|
| 155 |
+
)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (178 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/comet.cpython-310.pyc
ADDED
|
Binary file (7.91 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/keras.cpython-310.pyc
ADDED
|
Binary file (6.96 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/mlflow.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/__pycache__/wandb.cpython-310.pyc
ADDED
|
Binary file (21.4 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/comet.py
ADDED
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from typing import Dict, List
|
| 4 |
+
|
| 5 |
+
import pyarrow.fs
|
| 6 |
+
|
| 7 |
+
from ray.tune.experiment import Trial
|
| 8 |
+
from ray.tune.logger import LoggerCallback
|
| 9 |
+
from ray.tune.utils import flatten_dict
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def _import_comet():
|
| 13 |
+
"""Try importing comet_ml.
|
| 14 |
+
|
| 15 |
+
Used to check if comet_ml is installed and, otherwise, pass an informative
|
| 16 |
+
error message.
|
| 17 |
+
"""
|
| 18 |
+
if "COMET_DISABLE_AUTO_LOGGING" not in os.environ:
|
| 19 |
+
os.environ["COMET_DISABLE_AUTO_LOGGING"] = "1"
|
| 20 |
+
|
| 21 |
+
try:
|
| 22 |
+
import comet_ml # noqa: F401
|
| 23 |
+
except ImportError:
|
| 24 |
+
raise RuntimeError("pip install 'comet-ml' to use CometLoggerCallback")
|
| 25 |
+
|
| 26 |
+
return comet_ml
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
class CometLoggerCallback(LoggerCallback):
|
| 30 |
+
"""CometLoggerCallback for logging Tune results to Comet.
|
| 31 |
+
|
| 32 |
+
Comet (https://comet.ml/site/) is a tool to manage and optimize the
|
| 33 |
+
entire ML lifecycle, from experiment tracking, model optimization
|
| 34 |
+
and dataset versioning to model production monitoring.
|
| 35 |
+
|
| 36 |
+
This Ray Tune ``LoggerCallback`` sends metrics and parameters to
|
| 37 |
+
Comet for tracking.
|
| 38 |
+
|
| 39 |
+
In order to use the CometLoggerCallback you must first install Comet
|
| 40 |
+
via ``pip install comet_ml``
|
| 41 |
+
|
| 42 |
+
Then set the following environment variables
|
| 43 |
+
``export COMET_API_KEY=<Your API Key>``
|
| 44 |
+
|
| 45 |
+
Alternatively, you can also pass in your API Key as an argument to the
|
| 46 |
+
CometLoggerCallback constructor.
|
| 47 |
+
|
| 48 |
+
``CometLoggerCallback(api_key=<Your API Key>)``
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
online: Whether to make use of an Online or
|
| 52 |
+
Offline Experiment. Defaults to True.
|
| 53 |
+
tags: Tags to add to the logged Experiment.
|
| 54 |
+
Defaults to None.
|
| 55 |
+
save_checkpoints: If ``True``, model checkpoints will be saved to
|
| 56 |
+
Comet ML as artifacts. Defaults to ``False``.
|
| 57 |
+
**experiment_kwargs: Other keyword arguments will be passed to the
|
| 58 |
+
constructor for comet_ml.Experiment (or OfflineExperiment if
|
| 59 |
+
online=False).
|
| 60 |
+
|
| 61 |
+
Please consult the Comet ML documentation for more information on the
|
| 62 |
+
Experiment and OfflineExperiment classes: https://comet.ml/site/
|
| 63 |
+
|
| 64 |
+
Example:
|
| 65 |
+
|
| 66 |
+
.. code-block:: python
|
| 67 |
+
|
| 68 |
+
from ray.air.integrations.comet import CometLoggerCallback
|
| 69 |
+
tune.run(
|
| 70 |
+
train,
|
| 71 |
+
config=config
|
| 72 |
+
callbacks=[CometLoggerCallback(
|
| 73 |
+
True,
|
| 74 |
+
['tag1', 'tag2'],
|
| 75 |
+
workspace='my_workspace',
|
| 76 |
+
project_name='my_project_name'
|
| 77 |
+
)]
|
| 78 |
+
)
|
| 79 |
+
|
| 80 |
+
"""
|
| 81 |
+
|
| 82 |
+
# Do not enable these auto log options unless overridden
|
| 83 |
+
_exclude_autolog = [
|
| 84 |
+
"auto_output_logging",
|
| 85 |
+
"log_git_metadata",
|
| 86 |
+
"log_git_patch",
|
| 87 |
+
"log_env_cpu",
|
| 88 |
+
"log_env_gpu",
|
| 89 |
+
]
|
| 90 |
+
|
| 91 |
+
# Do not log these metrics.
|
| 92 |
+
_exclude_results = ["done", "should_checkpoint"]
|
| 93 |
+
|
| 94 |
+
# These values should be logged as system info instead of metrics.
|
| 95 |
+
_system_results = ["node_ip", "hostname", "pid", "date"]
|
| 96 |
+
|
| 97 |
+
# These values should be logged as "Other" instead of as metrics.
|
| 98 |
+
_other_results = ["trial_id", "experiment_id", "experiment_tag"]
|
| 99 |
+
|
| 100 |
+
_episode_results = ["hist_stats/episode_reward", "hist_stats/episode_lengths"]
|
| 101 |
+
|
| 102 |
+
def __init__(
|
| 103 |
+
self,
|
| 104 |
+
online: bool = True,
|
| 105 |
+
tags: List[str] = None,
|
| 106 |
+
save_checkpoints: bool = False,
|
| 107 |
+
**experiment_kwargs,
|
| 108 |
+
):
|
| 109 |
+
_import_comet()
|
| 110 |
+
self.online = online
|
| 111 |
+
self.tags = tags
|
| 112 |
+
self.save_checkpoints = save_checkpoints
|
| 113 |
+
self.experiment_kwargs = experiment_kwargs
|
| 114 |
+
|
| 115 |
+
# Disable the specific autologging features that cause throttling.
|
| 116 |
+
self._configure_experiment_defaults()
|
| 117 |
+
|
| 118 |
+
# Mapping from trial to experiment object.
|
| 119 |
+
self._trial_experiments = {}
|
| 120 |
+
|
| 121 |
+
self._to_exclude = self._exclude_results.copy()
|
| 122 |
+
self._to_system = self._system_results.copy()
|
| 123 |
+
self._to_other = self._other_results.copy()
|
| 124 |
+
self._to_episodes = self._episode_results.copy()
|
| 125 |
+
|
| 126 |
+
def _configure_experiment_defaults(self):
|
| 127 |
+
"""Disable the specific autologging features that cause throttling."""
|
| 128 |
+
for option in self._exclude_autolog:
|
| 129 |
+
if not self.experiment_kwargs.get(option):
|
| 130 |
+
self.experiment_kwargs[option] = False
|
| 131 |
+
|
| 132 |
+
def _check_key_name(self, key: str, item: str) -> bool:
|
| 133 |
+
"""
|
| 134 |
+
Check if key argument is equal to item argument or starts with item and
|
| 135 |
+
a forward slash. Used for parsing trial result dictionary into ignored
|
| 136 |
+
keys, system metrics, episode logs, etc.
|
| 137 |
+
"""
|
| 138 |
+
return key.startswith(item + "/") or key == item
|
| 139 |
+
|
| 140 |
+
def log_trial_start(self, trial: "Trial"):
|
| 141 |
+
"""
|
| 142 |
+
Initialize an Experiment (or OfflineExperiment if self.online=False)
|
| 143 |
+
and start logging to Comet.
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
trial: Trial object.
|
| 147 |
+
|
| 148 |
+
"""
|
| 149 |
+
_import_comet() # is this necessary?
|
| 150 |
+
from comet_ml import Experiment, OfflineExperiment
|
| 151 |
+
from comet_ml.config import set_global_experiment
|
| 152 |
+
|
| 153 |
+
if trial not in self._trial_experiments:
|
| 154 |
+
experiment_cls = Experiment if self.online else OfflineExperiment
|
| 155 |
+
experiment = experiment_cls(**self.experiment_kwargs)
|
| 156 |
+
self._trial_experiments[trial] = experiment
|
| 157 |
+
# Set global experiment to None to allow for multiple experiments.
|
| 158 |
+
set_global_experiment(None)
|
| 159 |
+
else:
|
| 160 |
+
experiment = self._trial_experiments[trial]
|
| 161 |
+
|
| 162 |
+
experiment.set_name(str(trial))
|
| 163 |
+
experiment.add_tags(self.tags)
|
| 164 |
+
experiment.log_other("Created from", "Ray")
|
| 165 |
+
|
| 166 |
+
config = trial.config.copy()
|
| 167 |
+
config.pop("callbacks", None)
|
| 168 |
+
experiment.log_parameters(config)
|
| 169 |
+
|
| 170 |
+
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
|
| 171 |
+
"""
|
| 172 |
+
Log the current result of a Trial upon each iteration.
|
| 173 |
+
"""
|
| 174 |
+
if trial not in self._trial_experiments:
|
| 175 |
+
self.log_trial_start(trial)
|
| 176 |
+
experiment = self._trial_experiments[trial]
|
| 177 |
+
step = result["training_iteration"]
|
| 178 |
+
|
| 179 |
+
config_update = result.pop("config", {}).copy()
|
| 180 |
+
config_update.pop("callbacks", None) # Remove callbacks
|
| 181 |
+
for k, v in config_update.items():
|
| 182 |
+
if isinstance(v, dict):
|
| 183 |
+
experiment.log_parameters(flatten_dict({k: v}, "/"), step=step)
|
| 184 |
+
|
| 185 |
+
else:
|
| 186 |
+
experiment.log_parameter(k, v, step=step)
|
| 187 |
+
|
| 188 |
+
other_logs = {}
|
| 189 |
+
metric_logs = {}
|
| 190 |
+
system_logs = {}
|
| 191 |
+
episode_logs = {}
|
| 192 |
+
|
| 193 |
+
flat_result = flatten_dict(result, delimiter="/")
|
| 194 |
+
for k, v in flat_result.items():
|
| 195 |
+
if any(self._check_key_name(k, item) for item in self._to_exclude):
|
| 196 |
+
continue
|
| 197 |
+
|
| 198 |
+
if any(self._check_key_name(k, item) for item in self._to_other):
|
| 199 |
+
other_logs[k] = v
|
| 200 |
+
|
| 201 |
+
elif any(self._check_key_name(k, item) for item in self._to_system):
|
| 202 |
+
system_logs[k] = v
|
| 203 |
+
|
| 204 |
+
elif any(self._check_key_name(k, item) for item in self._to_episodes):
|
| 205 |
+
episode_logs[k] = v
|
| 206 |
+
|
| 207 |
+
else:
|
| 208 |
+
metric_logs[k] = v
|
| 209 |
+
|
| 210 |
+
experiment.log_others(other_logs)
|
| 211 |
+
experiment.log_metrics(metric_logs, step=step)
|
| 212 |
+
|
| 213 |
+
for k, v in system_logs.items():
|
| 214 |
+
experiment.log_system_info(k, v)
|
| 215 |
+
|
| 216 |
+
for k, v in episode_logs.items():
|
| 217 |
+
experiment.log_curve(k, x=range(len(v)), y=v, step=step)
|
| 218 |
+
|
| 219 |
+
def log_trial_save(self, trial: "Trial"):
|
| 220 |
+
comet_ml = _import_comet()
|
| 221 |
+
|
| 222 |
+
if self.save_checkpoints and trial.checkpoint:
|
| 223 |
+
experiment = self._trial_experiments[trial]
|
| 224 |
+
|
| 225 |
+
artifact = comet_ml.Artifact(
|
| 226 |
+
name=f"checkpoint_{(str(trial))}", artifact_type="model"
|
| 227 |
+
)
|
| 228 |
+
|
| 229 |
+
checkpoint_root = None
|
| 230 |
+
|
| 231 |
+
if isinstance(trial.checkpoint.filesystem, pyarrow.fs.LocalFileSystem):
|
| 232 |
+
checkpoint_root = trial.checkpoint.path
|
| 233 |
+
# Todo: For other filesystems, we may want to use
|
| 234 |
+
# artifact.add_remote() instead. However, this requires a full
|
| 235 |
+
# URI. We can add this once we have a way to retrieve it.
|
| 236 |
+
|
| 237 |
+
# Walk through checkpoint directory and add all files to artifact
|
| 238 |
+
if checkpoint_root:
|
| 239 |
+
for root, dirs, files in os.walk(checkpoint_root):
|
| 240 |
+
rel_root = os.path.relpath(root, checkpoint_root)
|
| 241 |
+
for file in files:
|
| 242 |
+
local_file = Path(checkpoint_root, rel_root, file).as_posix()
|
| 243 |
+
logical_path = Path(rel_root, file).as_posix()
|
| 244 |
+
|
| 245 |
+
# Strip leading `./`
|
| 246 |
+
if logical_path.startswith("./"):
|
| 247 |
+
logical_path = logical_path[2:]
|
| 248 |
+
|
| 249 |
+
artifact.add(local_file, logical_path=logical_path)
|
| 250 |
+
|
| 251 |
+
experiment.log_artifact(artifact)
|
| 252 |
+
|
| 253 |
+
def log_trial_end(self, trial: "Trial", failed: bool = False):
|
| 254 |
+
self._trial_experiments[trial].end()
|
| 255 |
+
del self._trial_experiments[trial]
|
| 256 |
+
|
| 257 |
+
def __del__(self):
|
| 258 |
+
for trial, experiment in self._trial_experiments.items():
|
| 259 |
+
experiment.end()
|
| 260 |
+
self._trial_experiments = {}
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/keras.py
ADDED
|
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import shutil
|
| 2 |
+
from typing import Dict, List, Optional, Union
|
| 3 |
+
|
| 4 |
+
from tensorflow.keras.callbacks import Callback as KerasCallback
|
| 5 |
+
|
| 6 |
+
import ray
|
| 7 |
+
from ray.train.tensorflow import TensorflowCheckpoint
|
| 8 |
+
from ray.util.annotations import PublicAPI
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class _Callback(KerasCallback):
|
| 12 |
+
"""Base class for Air's Keras callbacks."""
|
| 13 |
+
|
| 14 |
+
_allowed = [
|
| 15 |
+
"epoch_begin",
|
| 16 |
+
"epoch_end",
|
| 17 |
+
"train_batch_begin",
|
| 18 |
+
"train_batch_end",
|
| 19 |
+
"test_batch_begin",
|
| 20 |
+
"test_batch_end",
|
| 21 |
+
"predict_batch_begin",
|
| 22 |
+
"predict_batch_end",
|
| 23 |
+
"train_begin",
|
| 24 |
+
"train_end",
|
| 25 |
+
"test_begin",
|
| 26 |
+
"test_end",
|
| 27 |
+
"predict_begin",
|
| 28 |
+
"predict_end",
|
| 29 |
+
]
|
| 30 |
+
|
| 31 |
+
def __init__(self, on: Union[str, List[str]] = "validation_end"):
|
| 32 |
+
super(_Callback, self).__init__()
|
| 33 |
+
|
| 34 |
+
if not isinstance(on, list):
|
| 35 |
+
on = [on]
|
| 36 |
+
if any(w not in self._allowed for w in on):
|
| 37 |
+
raise ValueError(
|
| 38 |
+
"Invalid trigger time selected: {}. Must be one of {}".format(
|
| 39 |
+
on, self._allowed
|
| 40 |
+
)
|
| 41 |
+
)
|
| 42 |
+
self._on = on
|
| 43 |
+
|
| 44 |
+
def _handle(self, logs: Dict, when: str):
|
| 45 |
+
raise NotImplementedError
|
| 46 |
+
|
| 47 |
+
def on_epoch_begin(self, epoch, logs=None):
|
| 48 |
+
if "epoch_begin" in self._on:
|
| 49 |
+
self._handle(logs, "epoch_begin")
|
| 50 |
+
|
| 51 |
+
def on_epoch_end(self, epoch, logs=None):
|
| 52 |
+
if "epoch_end" in self._on:
|
| 53 |
+
self._handle(logs, "epoch_end")
|
| 54 |
+
|
| 55 |
+
def on_train_batch_begin(self, batch, logs=None):
|
| 56 |
+
if "train_batch_begin" in self._on:
|
| 57 |
+
self._handle(logs, "train_batch_begin")
|
| 58 |
+
|
| 59 |
+
def on_train_batch_end(self, batch, logs=None):
|
| 60 |
+
if "train_batch_end" in self._on:
|
| 61 |
+
self._handle(logs, "train_batch_end")
|
| 62 |
+
|
| 63 |
+
def on_test_batch_begin(self, batch, logs=None):
|
| 64 |
+
if "test_batch_begin" in self._on:
|
| 65 |
+
self._handle(logs, "test_batch_begin")
|
| 66 |
+
|
| 67 |
+
def on_test_batch_end(self, batch, logs=None):
|
| 68 |
+
if "test_batch_end" in self._on:
|
| 69 |
+
self._handle(logs, "test_batch_end")
|
| 70 |
+
|
| 71 |
+
def on_predict_batch_begin(self, batch, logs=None):
|
| 72 |
+
if "predict_batch_begin" in self._on:
|
| 73 |
+
self._handle(logs, "predict_batch_begin")
|
| 74 |
+
|
| 75 |
+
def on_predict_batch_end(self, batch, logs=None):
|
| 76 |
+
if "predict_batch_end" in self._on:
|
| 77 |
+
self._handle(logs, "predict_batch_end")
|
| 78 |
+
|
| 79 |
+
def on_train_begin(self, logs=None):
|
| 80 |
+
if "train_begin" in self._on:
|
| 81 |
+
self._handle(logs, "train_begin")
|
| 82 |
+
|
| 83 |
+
def on_train_end(self, logs=None):
|
| 84 |
+
if "train_end" in self._on:
|
| 85 |
+
self._handle(logs, "train_end")
|
| 86 |
+
|
| 87 |
+
def on_test_begin(self, logs=None):
|
| 88 |
+
if "test_begin" in self._on:
|
| 89 |
+
self._handle(logs, "test_begin")
|
| 90 |
+
|
| 91 |
+
def on_test_end(self, logs=None):
|
| 92 |
+
if "test_end" in self._on:
|
| 93 |
+
self._handle(logs, "test_end")
|
| 94 |
+
|
| 95 |
+
def on_predict_begin(self, logs=None):
|
| 96 |
+
if "predict_begin" in self._on:
|
| 97 |
+
self._handle(logs, "predict_begin")
|
| 98 |
+
|
| 99 |
+
def on_predict_end(self, logs=None):
|
| 100 |
+
if "predict_end" in self._on:
|
| 101 |
+
self._handle(logs, "predict_end")
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@PublicAPI(stability="alpha")
|
| 105 |
+
class ReportCheckpointCallback(_Callback):
|
| 106 |
+
"""Keras callback for Ray Train reporting and checkpointing.
|
| 107 |
+
|
| 108 |
+
.. note::
|
| 109 |
+
Metrics are always reported with checkpoints, even if the event isn't specified
|
| 110 |
+
in ``report_metrics_on``.
|
| 111 |
+
|
| 112 |
+
Example:
|
| 113 |
+
.. code-block:: python
|
| 114 |
+
|
| 115 |
+
############# Using it in TrainSession ###############
|
| 116 |
+
from ray.air.integrations.keras import ReportCheckpointCallback
|
| 117 |
+
def train_loop_per_worker():
|
| 118 |
+
strategy = tf.distribute.MultiWorkerMirroredStrategy()
|
| 119 |
+
with strategy.scope():
|
| 120 |
+
model = build_model()
|
| 121 |
+
|
| 122 |
+
model.fit(dataset_shard, callbacks=[ReportCheckpointCallback()])
|
| 123 |
+
|
| 124 |
+
Args:
|
| 125 |
+
metrics: Metrics to report. If this is a list, each item describes
|
| 126 |
+
the metric key reported to Keras, and it's reported under the
|
| 127 |
+
same name. If this is a dict, each key is the name reported
|
| 128 |
+
and the respective value is the metric key reported to Keras.
|
| 129 |
+
If this is None, all Keras logs are reported.
|
| 130 |
+
report_metrics_on: When to report metrics. Must be one of
|
| 131 |
+
the Keras event hooks (less the ``on_``), e.g.
|
| 132 |
+
"train_start" or "predict_end". Defaults to "epoch_end".
|
| 133 |
+
checkpoint_on: When to save checkpoints. Must be one of the Keras event hooks
|
| 134 |
+
(less the ``on_``), e.g. "train_start" or "predict_end". Defaults to
|
| 135 |
+
"epoch_end".
|
| 136 |
+
"""
|
| 137 |
+
|
| 138 |
+
def __init__(
|
| 139 |
+
self,
|
| 140 |
+
checkpoint_on: Union[str, List[str]] = "epoch_end",
|
| 141 |
+
report_metrics_on: Union[str, List[str]] = "epoch_end",
|
| 142 |
+
metrics: Optional[Union[str, List[str], Dict[str, str]]] = None,
|
| 143 |
+
):
|
| 144 |
+
if isinstance(checkpoint_on, str):
|
| 145 |
+
checkpoint_on = [checkpoint_on]
|
| 146 |
+
if isinstance(report_metrics_on, str):
|
| 147 |
+
report_metrics_on = [report_metrics_on]
|
| 148 |
+
|
| 149 |
+
on = list(set(checkpoint_on + report_metrics_on))
|
| 150 |
+
super().__init__(on=on)
|
| 151 |
+
|
| 152 |
+
self._checkpoint_on: List[str] = checkpoint_on
|
| 153 |
+
self._report_metrics_on: List[str] = report_metrics_on
|
| 154 |
+
self._metrics = metrics
|
| 155 |
+
|
| 156 |
+
def _handle(self, logs: Dict, when: str):
|
| 157 |
+
assert when in self._checkpoint_on or when in self._report_metrics_on
|
| 158 |
+
|
| 159 |
+
metrics = self._get_reported_metrics(logs)
|
| 160 |
+
|
| 161 |
+
should_checkpoint = when in self._checkpoint_on
|
| 162 |
+
if should_checkpoint:
|
| 163 |
+
checkpoint = TensorflowCheckpoint.from_model(self.model)
|
| 164 |
+
ray.train.report(metrics, checkpoint=checkpoint)
|
| 165 |
+
# Clean up temporary checkpoint
|
| 166 |
+
shutil.rmtree(checkpoint.path, ignore_errors=True)
|
| 167 |
+
else:
|
| 168 |
+
ray.train.report(metrics, checkpoint=None)
|
| 169 |
+
|
| 170 |
+
def _get_reported_metrics(self, logs: Dict) -> Dict:
|
| 171 |
+
assert isinstance(self._metrics, (type(None), str, list, dict))
|
| 172 |
+
|
| 173 |
+
if self._metrics is None:
|
| 174 |
+
reported_metrics = logs
|
| 175 |
+
elif isinstance(self._metrics, str):
|
| 176 |
+
reported_metrics = {self._metrics: logs[self._metrics]}
|
| 177 |
+
elif isinstance(self._metrics, list):
|
| 178 |
+
reported_metrics = {metric: logs[metric] for metric in self._metrics}
|
| 179 |
+
elif isinstance(self._metrics, dict):
|
| 180 |
+
reported_metrics = {
|
| 181 |
+
key: logs[metric] for key, metric in self._metrics.items()
|
| 182 |
+
}
|
| 183 |
+
|
| 184 |
+
assert isinstance(reported_metrics, dict)
|
| 185 |
+
return reported_metrics
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/mlflow.py
ADDED
|
@@ -0,0 +1,325 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import logging
|
| 2 |
+
from types import ModuleType
|
| 3 |
+
from typing import Dict, Optional, Union
|
| 4 |
+
|
| 5 |
+
import ray
|
| 6 |
+
from ray.air._internal import usage as air_usage
|
| 7 |
+
from ray.air._internal.mlflow import _MLflowLoggerUtil
|
| 8 |
+
from ray.air.constants import TRAINING_ITERATION
|
| 9 |
+
from ray.tune.experiment import Trial
|
| 10 |
+
from ray.tune.logger import LoggerCallback
|
| 11 |
+
from ray.tune.result import TIMESTEPS_TOTAL
|
| 12 |
+
from ray.util.annotations import PublicAPI
|
| 13 |
+
|
| 14 |
+
try:
|
| 15 |
+
import mlflow
|
| 16 |
+
except ImportError:
|
| 17 |
+
mlflow = None
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
logger = logging.getLogger(__name__)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class _NoopModule:
|
| 24 |
+
def __getattr__(self, item):
|
| 25 |
+
return _NoopModule()
|
| 26 |
+
|
| 27 |
+
def __call__(self, *args, **kwargs):
|
| 28 |
+
return None
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@PublicAPI(stability="alpha")
def setup_mlflow(
    config: Optional[Dict] = None,
    tracking_uri: Optional[str] = None,
    registry_uri: Optional[str] = None,
    experiment_id: Optional[str] = None,
    experiment_name: Optional[str] = None,
    tracking_token: Optional[str] = None,
    artifact_location: Optional[str] = None,
    run_name: Optional[str] = None,
    create_experiment_if_not_exists: bool = False,
    tags: Optional[Dict] = None,
    rank_zero_only: bool = True,
) -> Union[ModuleType, _NoopModule]:
    """Set up an MLflow session in a (distributed) training or tuning run.

    The session is created on the trainable. By default, the MLflow
    experiment ID is the Ray trial ID and the MLflow experiment name is the
    Ray trial name; both can be overridden via the respective keyword
    arguments. The ``config`` dict is automatically logged as the run
    parameters (excluding the mlflow settings themselves).

    In distributed training with Ray Train, only the zero-rank worker
    initializes mlflow; all other workers get a noop module so logging is
    not duplicated. Pass ``rank_zero_only=False`` to initialize mlflow in
    every training worker instead.

    By using ``mlflow = setup_mlflow(config)`` you ensure that only the
    rank-zero worker calls the mlflow API.

    Args:
        config: Configuration dict to be logged to mlflow as parameters.
        tracking_uri: The tracking URI for MLflow tracking. If using
            Tune in a multi-node setting, make sure to use a remote server
            for tracking.
        registry_uri: The registry URI for the MLflow model registry.
        experiment_id: The id of an already created MLflow experiment.
            All logs from all trials in ``tune.Tuner()`` will be reported
            to this experiment. If this is not provided or the experiment
            with this id does not exist, you must provide an
            ``experiment_name``. This parameter takes precedence over
            ``experiment_name``.
        experiment_name: The name of an already existing MLflow experiment.
            All logs from all trials in ``tune.Tuner()`` will be reported
            to this experiment. If this is not provided, you must provide
            a valid ``experiment_id``.
        tracking_token: A token to use for HTTP authentication when logging
            to a remote tracking server (e.g. a Databricks server). Used to
            set the MLFLOW_TRACKING_TOKEN environment variable on all the
            remote training processes.
        artifact_location: The location to store run artifacts. If not
            provided, MLFlow picks an appropriate default. Ignored if the
            experiment already exists.
        run_name: Name of the new MLflow run that will be created. If not
            set, will default to the ``experiment_name``.
        create_experiment_if_not_exists: Whether to create an experiment
            with the provided name if it does not already exist. Defaults
            to False.
        tags: Tags to set for the new run.
        rank_zero_only: If True, will return an initialized session only
            for the rank 0 worker in distributed training. If False, will
            initialize a session for all workers. Defaults to True.

    Example:

        .. code-block:: python

            from ray.air.integrations.mlflow import setup_mlflow

            def training_loop(config):
                mlflow = setup_mlflow(config)
                # ...
                mlflow.log_metric(key="loss", val=0.123, step=0)

        You can also use MlFlow's autologging feature if using a training
        framework like Pytorch Lightning, XGBoost, etc. More information
        can be found here
        (https://mlflow.org/docs/latest/tracking.html#automatic-logging).

        .. code-block:: python

            from ray.air.integrations.mlflow import setup_mlflow

            def train_fn(config):
                mlflow = setup_mlflow(config)
                mlflow.autolog()
                xgboost_results = xgb.train(config, ...)

    """
    if not mlflow:
        raise RuntimeError(
            "mlflow was not found - please install with `pip install mlflow`"
        )

    try:
        # Outside of a train session `get_context()` raises RuntimeError,
        # in which case no trial-derived defaults are available.
        train_context = ray.train.get_context()

        if rank_zero_only and train_context.get_world_rank() != 0:
            # Avoid duplicated logging from non-rank-zero workers.
            return _NoopModule()

        default_trial_id = train_context.get_trial_id()
        default_trial_name = train_context.get_trial_name()

    except RuntimeError:
        default_trial_id = None
        default_trial_name = None

    params_to_log = dict(config) if config else {}

    # Explicit arguments take precedence over trial-derived defaults.
    experiment_id = experiment_id or default_trial_id
    experiment_name = experiment_name or default_trial_name

    # Configure the MLflow client and resolve/create the experiment.
    mlflow_util = _MLflowLoggerUtil()
    mlflow_util.setup_mlflow(
        tracking_uri=tracking_uri,
        registry_uri=registry_uri,
        experiment_id=experiment_id,
        experiment_name=experiment_name,
        tracking_token=tracking_token,
        artifact_location=artifact_location,
        create_experiment_if_not_exists=create_experiment_if_not_exists,
    )

    mlflow_util.start_run(
        run_name=run_name or experiment_name,
        tags=tags,
        set_active=True,
    )
    mlflow_util.log_params(params_to_log)

    # Record `setup_mlflow` usage only once everything has set up successfully.
    air_usage.tag_setup_mlflow()

    return mlflow_util._mlflow
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
class MLflowLoggerCallback(LoggerCallback):
    """MLflow Logger to automatically log Tune results and config to MLflow.

    MLflow (https://mlflow.org) Tracking is an open source library for
    recording and querying experiments. This Ray Tune ``LoggerCallback``
    sends information (config parameters, training results & metrics,
    and artifacts) to MLflow for automatic experiment tracking.

    Keep in mind that the callback will open an MLflow session on the driver
    and not on the trainable. Therefore, it is not possible to call MLflow
    functions like ``mlflow.log_figure()`` inside the trainable as there is
    no MLflow session on the trainable. For more fine grained control, use
    :func:`ray.air.integrations.mlflow.setup_mlflow`.

    Args:
        tracking_uri: The tracking URI for where to manage experiments
            and runs. This can either be a local file path or a remote server.
            This arg gets passed directly to mlflow
            initialization. When using Tune in a multi-node setting, make sure
            to set this to a remote server and not a local file path.
        registry_uri: The registry URI that gets passed directly to
            mlflow initialization.
        experiment_name: The experiment name to use for this Tune run.
            If the experiment with the name already exists with MLflow,
            it will be reused. If not, a new experiment will be created with
            that name.
        tags: An optional dictionary of string keys and values to set
            as tags on the run
        tracking_token: Tracking token used to authenticate with MLflow.
        save_artifact: If set to True, automatically save the entire
            contents of the Tune local_dir as an artifact to the
            corresponding run in MlFlow.

    Example:

        .. code-block:: python

            from ray.air.integrations.mlflow import MLflowLoggerCallback

            tags = { "user_name" : "John",
                     "git_commit_hash" : "abc123"}

            tune.run(
                train_fn,
                config={
                    # define search space here
                    "parameter_1": tune.choice([1, 2, 3]),
                    "parameter_2": tune.choice([4, 5, 6]),
                },
                callbacks=[MLflowLoggerCallback(
                    experiment_name="experiment1",
                    tags=tags,
                    save_artifact=True)])

    """

    def __init__(
        self,
        tracking_uri: Optional[str] = None,
        *,
        registry_uri: Optional[str] = None,
        experiment_name: Optional[str] = None,
        tags: Optional[Dict] = None,
        tracking_token: Optional[str] = None,
        save_artifact: bool = False,
    ):

        self.tracking_uri = tracking_uri
        self.registry_uri = registry_uri
        self.experiment_name = experiment_name
        self.tags = tags
        self.tracking_token = tracking_token
        self.should_save_artifact = save_artifact

        self.mlflow_util = _MLflowLoggerUtil()

        # Map from Trial -> MLflow run ID. FIX: previously this was only
        # created in `setup()`, so any callback hook invoked before (or
        # without) a successful `setup()` raised an AttributeError on the
        # missing attribute. It is initialized here and reset in `setup()`.
        self._trial_runs = {}

        if ray.util.client.ray.is_connected():
            logger.warning(
                "When using MLflowLoggerCallback with Ray Client, "
                "it is recommended to use a remote tracking "
                "server. If you are using a MLflow tracking server "
                "backed by the local filesystem, then it must be "
                "setup on the server side and not on the client "
                "side."
            )

    def setup(self, *args, **kwargs):
        """Initialize the MLflow client and reset per-trial run tracking."""
        # Setup the mlflow logging util.
        self.mlflow_util.setup_mlflow(
            tracking_uri=self.tracking_uri,
            registry_uri=self.registry_uri,
            experiment_name=self.experiment_name,
            tracking_token=self.tracking_token,
        )

        if self.tags is None:
            # Create empty dictionary for tags if not given explicitly
            self.tags = {}

        self._trial_runs = {}

    def log_trial_start(self, trial: "Trial"):
        """Create the MLflow run for ``trial`` (once) and log its config."""
        # Create run if not already exists.
        if trial not in self._trial_runs:

            # Set trial name in tags
            tags = self.tags.copy()
            tags["trial_name"] = str(trial)

            run = self.mlflow_util.start_run(tags=tags, run_name=str(trial))
            self._trial_runs[trial] = run.info.run_id

        run_id = self._trial_runs[trial]

        # Log the config parameters.
        self.mlflow_util.log_params(run_id=run_id, params_to_log=trial.config)

    def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
        """Log one result dict as MLflow metrics for the trial's run."""
        # Prefer total timesteps as the step axis; a missing (or falsy)
        # value falls back to the training iteration.
        step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
        run_id = self._trial_runs[trial]
        self.mlflow_util.log_metrics(run_id=run_id, metrics_to_log=result, step=step)

    def log_trial_end(self, trial: "Trial", failed: bool = False):
        """Optionally upload trial artifacts and terminate the MLflow run."""
        run_id = self._trial_runs[trial]

        # Log the artifact if set_artifact is set to True.
        if self.should_save_artifact:
            self.mlflow_util.save_artifacts(run_id=run_id, dir=trial.local_path)

        # Stop the run once trial finishes.
        status = "FINISHED" if not failed else "FAILED"
        self.mlflow_util.end_run(run_id=run_id, status=status)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/integrations/wandb.py
ADDED
|
@@ -0,0 +1,750 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import enum
|
| 2 |
+
import os
|
| 3 |
+
import pickle
|
| 4 |
+
import urllib
|
| 5 |
+
import warnings
|
| 6 |
+
from numbers import Number
|
| 7 |
+
from types import ModuleType
|
| 8 |
+
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import pyarrow.fs
|
| 12 |
+
|
| 13 |
+
import ray
|
| 14 |
+
from ray import logger
|
| 15 |
+
from ray._private.storage import _load_class
|
| 16 |
+
from ray.air import session
|
| 17 |
+
from ray.air._internal import usage as air_usage
|
| 18 |
+
from ray.air.constants import TRAINING_ITERATION
|
| 19 |
+
from ray.air.util.node import _force_on_current_node
|
| 20 |
+
from ray.train._internal.syncer import DEFAULT_SYNC_TIMEOUT
|
| 21 |
+
from ray.tune.experiment import Trial
|
| 22 |
+
from ray.tune.logger import LoggerCallback
|
| 23 |
+
from ray.tune.utils import flatten_dict
|
| 24 |
+
from ray.util import PublicAPI
|
| 25 |
+
from ray.util.queue import Queue
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import wandb
|
| 29 |
+
from wandb.sdk.data_types.base_types.wb_value import WBValue
|
| 30 |
+
from wandb.sdk.data_types.image import Image
|
| 31 |
+
from wandb.sdk.data_types.video import Video
|
| 32 |
+
from wandb.sdk.lib.disabled import RunDisabled
|
| 33 |
+
from wandb.util import json_dumps_safer
|
| 34 |
+
from wandb.wandb_run import Run
|
| 35 |
+
except ImportError:
|
| 36 |
+
wandb = json_dumps_safer = Run = RunDisabled = WBValue = None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
# Environment variables recognized by the wandb integration.
WANDB_ENV_VAR = "WANDB_API_KEY"
WANDB_PROJECT_ENV_VAR = "WANDB_PROJECT_NAME"
WANDB_GROUP_ENV_VAR = "WANDB_GROUP_NAME"
WANDB_MODE_ENV_VAR = "WANDB_MODE"

# Names of environment variables holding dotted paths to user-provided hooks,
# e.g. "your.module.hook_function".
#
# WANDB_SETUP_API_KEY_HOOK: invoked before wandb.init in the setup method of
# WandbLoggerCallback to populate the API key if it isn't already set when
# initializing the callback. Takes no arguments; returns the W&B API key.
WANDB_SETUP_API_KEY_HOOK = "WANDB_SETUP_API_KEY_HOOK"
# WANDB_POPULATE_RUN_LOCATION_HOOK: invoked before wandb.init in the setup
# method of WandbLoggerCallback to populate environment variables specifying
# the location (project and group) of the W&B run. Takes no arguments and
# returns nothing, but populates WANDB_PROJECT_NAME and WANDB_GROUP_NAME.
WANDB_POPULATE_RUN_LOCATION_HOOK = "WANDB_POPULATE_RUN_LOCATION_HOOK"
# WANDB_PROCESS_RUN_INFO_HOOK: invoked after wandb.init in WandbLoggerCallback
# to process information about the W&B run. Takes a W&B run object; returns
# nothing.
WANDB_PROCESS_RUN_INFO_HOOK = "WANDB_PROCESS_RUN_INFO_HOOK"
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@PublicAPI(stability="alpha")
def setup_wandb(
    config: Optional[Dict] = None,
    api_key: Optional[str] = None,
    api_key_file: Optional[str] = None,
    rank_zero_only: bool = True,
    **kwargs,
) -> Union[Run, RunDisabled]:
    """Set up a Weights & Biases session in a (distributed) training/tuning run.

    By default, the run ID is the trial ID, the run name is the trial name,
    and the run group is the experiment name. These settings can be
    overwritten by passing the respective arguments as ``kwargs``, which are
    forwarded to ``wandb.init()`` and take precedence over the defaults.

    In distributed training with Ray Train, only the zero-rank worker
    initializes wandb; all other workers return a disabled run object so
    logging is not duplicated. Pass ``rank_zero_only=False`` to initialize
    wandb in every training worker instead.

    The ``config`` argument is passed to Weights and Biases and logged as
    the run configuration. If no API key or key file are passed, wandb will
    try to authenticate using locally stored credentials, created for
    instance by running ``wandb login``.

    Args:
        config: Configuration dict to be logged to Weights and Biases. Can
            contain arguments for ``wandb.init()`` as well as authentication
            information.
        api_key: API key to use for authentication with Weights and Biases.
        api_key_file: File pointing to API key for with Weights and Biases.
        rank_zero_only: If True, will return an initialized session only for
            the rank 0 worker in distributed training. If False, will
            initialize a session for all workers.
        kwargs: Passed to ``wandb.init()``.

    Example:

        .. code-block:: python

            from ray.air.integrations.wandb import setup_wandb

            def training_loop(config):
                wandb = setup_wandb(config)
                # ...
                wandb.log({"loss": 0.123})

    """
    if not wandb:
        raise RuntimeError(
            "Wandb was not found - please install with `pip install wandb`"
        )

    try:
        # Outside of a session the getters raise RuntimeError; fall back to
        # no defaults in that case.
        _session = session._get_session(warn=False)
        if _session and rank_zero_only and session.get_world_rank() != 0:
            # Only rank zero logs; everyone else gets a disabled run.
            return RunDisabled()

        default_trial_id = session.get_trial_id()
        default_trial_name = session.get_trial_name()
        default_experiment_name = session.get_experiment_name()

    except RuntimeError:
        default_trial_id = None
        default_trial_name = None
        default_experiment_name = None

    # Session-derived defaults; explicitly passed kwargs take precedence.
    init_kwargs = {
        "trial_id": kwargs.get("trial_id") or default_trial_id,
        "trial_name": kwargs.get("trial_name") or default_trial_name,
        "group": kwargs.get("group") or default_experiment_name,
    }
    init_kwargs.update(kwargs)

    return _setup_wandb(
        config=config, api_key=api_key, api_key_file=api_key_file, **init_kwargs
    )
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def _setup_wandb(
    trial_id: str,
    trial_name: str,
    config: Optional[Dict] = None,
    api_key: Optional[str] = None,
    api_key_file: Optional[str] = None,
    _wandb: Optional[ModuleType] = None,
    **kwargs,
) -> Union[Run, RunDisabled]:
    """Authenticate, assemble ``wandb.init`` kwargs, and start the run."""
    run_config = config.copy() if config else {}

    # If a key file was given, expand ~ before reading it.
    if api_key_file:
        api_key_file = os.path.expanduser(api_key_file)

    _set_api_key(api_key_file, api_key)
    project = _get_wandb_project(kwargs.pop("project", None))
    group = kwargs.pop("group", os.environ.get(WANDB_GROUP_ENV_VAR))

    # Remove unpickleable items from the logged config.
    run_config = _clean_log(run_config)

    init_kwargs = dict(
        id=trial_id,
        name=trial_name,
        resume=True,
        reinit=True,
        allow_val_change=True,
        config=run_config,
        project=project,
        group=group,
    )
    # Any remaining caller kwargs override the defaults above.
    init_kwargs.update(**kwargs)

    # On windows, we can't fork.
    os.environ["WANDB_START_METHOD"] = "thread" if os.name == "nt" else "fork"

    run = (_wandb or wandb).init(**init_kwargs)
    _run_wandb_process_run_info_hook(run)

    # Record `setup_wandb` usage only once everything has set up successfully.
    air_usage.tag_setup_wandb()

    return run
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def _is_allowed_type(obj):
|
| 205 |
+
"""Return True if type is allowed for logging to wandb"""
|
| 206 |
+
if isinstance(obj, np.ndarray) and obj.size == 1:
|
| 207 |
+
return isinstance(obj.item(), Number)
|
| 208 |
+
if isinstance(obj, Sequence) and len(obj) > 0:
|
| 209 |
+
return isinstance(obj[0], (Image, Video, WBValue))
|
| 210 |
+
return isinstance(obj, (Number, WBValue))
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def _clean_log(obj: Any):
    """Recursively convert ``obj`` into something wandb can log.

    Containers are cleaned element-wise, image/video-shaped numpy arrays are
    wrapped in wandb media types, and anything that cannot be serialized is
    replaced by a best-effort numeric or string representation.

    Fixes https://github.com/ray-project/ray/issues/10631
    """
    if isinstance(obj, dict):
        return {key: _clean_log(value) for key, value in obj.items()}
    if isinstance(obj, (list, set)):
        return [_clean_log(value) for value in obj]
    if isinstance(obj, tuple):
        return tuple(_clean_log(value) for value in obj)
    if isinstance(obj, np.ndarray) and obj.ndim == 3:
        # Must be single image (H, W, C).
        return Image(obj)
    if isinstance(obj, np.ndarray) and obj.ndim == 4:
        # Must be batch of images (N >= 1, H, W, C).
        if obj.shape[0] > 1:
            return _clean_log([Image(frame) for frame in obj])
        return Image(obj[0])
    if isinstance(obj, np.ndarray) and obj.ndim == 5:
        # Must be batch of videos (N >= 1, T, C, W, H).
        if obj.shape[0] > 1:
            return _clean_log([Video(clip) for clip in obj])
        return Video(obj[0])
    if _is_allowed_type(obj):
        return obj

    # Anything else: verify serializability, else fall back to a scalar/string.
    try:
        # This is what wandb uses internally. If we cannot dump
        # an object using this method, wandb will raise an exception.
        json_dumps_safer(obj)

        # This is probably unnecessary, but left here to be extra sure.
        pickle.dumps(obj)

        return obj
    except Exception:
        # give up, similar to _SafeFallBackEncoder
        fallback = str(obj)

        # Prefer a numeric interpretation of the string, if possible.
        for convert in (int, float):
            try:
                return convert(fallback)
            except ValueError:
                continue

        # Else, return string
        return fallback
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def _get_wandb_project(project: Optional[str] = None) -> Optional[str]:
    """Resolve the W&B project name.

    Resolution order: the explicit ``project`` argument first, then the
    ``WANDB_PROJECT_ENV_VAR`` environment variable. If neither is set and an
    external ``WANDB_POPULATE_RUN_LOCATION_HOOK`` is configured, that hook is
    invoked first so it can populate the project/group environment variables.

    Args:
        project: Explicit project name, if any.

    Returns:
        The resolved project name, or ``None`` if none could be determined.
    """
    hook_path = os.environ.get(WANDB_POPULATE_RUN_LOCATION_HOOK)
    if not project and not os.environ.get(WANDB_PROJECT_ENV_VAR) and hook_path:
        # Give the external hook a chance to populate WANDB_PROJECT_ENV_VAR
        # and WANDB_GROUP_ENV_VAR before we read them. Hook failures are
        # logged but never fatal.
        try:
            _load_class(hook_path)()
        except Exception as e:
            logger.exception(
                f"Error executing {WANDB_POPULATE_RUN_LOCATION_HOOK} to "
                f"populate {WANDB_PROJECT_ENV_VAR} and {WANDB_GROUP_ENV_VAR}: {e}",
                exc_info=e,
            )

    env_project = os.environ.get(WANDB_PROJECT_ENV_VAR)
    if not project and env_project:
        # Fall back to the environment variable when no explicit project
        # was passed through WandbLoggerCallback.
        project = env_project
    return project
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
def _set_api_key(api_key_file: Optional[str] = None, api_key: Optional[str] = None):
    """Resolve the W&B API key and export it via the ``WANDB_ENV_VAR``
    environment variable.

    The order of fetching the API key is:
    1) From `api_key` or `api_key_file` arguments
    2) From WANDB_API_KEY environment variables
    3) User already logged in to W&B (wandb.api.api_key set)
    4) From external hook WANDB_SETUP_API_KEY_HOOK

    Raises:
        ValueError: If both `api_key_file` and `api_key` are given, or if no
            API key could be found through any of the sources above.
    """
    # No API key is needed when wandb runs offline or is disabled entirely.
    if os.environ.get(WANDB_MODE_ENV_VAR) in {"offline", "disabled"}:
        return

    if api_key_file:
        if api_key:
            raise ValueError("Both WandB `api_key_file` and `api_key` set.")
        # Only the first line of the file is used as the key.
        with open(api_key_file, "rt") as fp:
            api_key = fp.readline().strip()

    if not api_key and not os.environ.get(WANDB_ENV_VAR):
        # Check if user is already logged into wandb.
        try:
            wandb.ensure_configured()
            if wandb.api.api_key:
                logger.info("Already logged into W&B.")
                return
        except AttributeError:
            # Older/newer wandb versions may not expose ensure_configured /
            # api.api_key; fall through to the external hook.
            pass
        # Try to get API key from external hook
        if WANDB_SETUP_API_KEY_HOOK in os.environ:
            try:
                api_key = _load_class(os.environ[WANDB_SETUP_API_KEY_HOOK])()
            except Exception as e:
                logger.exception(
                    f"Error executing {WANDB_SETUP_API_KEY_HOOK} to setup API key: {e}",
                    exc_info=e,
                )
    if api_key:
        # Export so child processes (logging actors) inherit the key.
        os.environ[WANDB_ENV_VAR] = api_key
    elif not os.environ.get(WANDB_ENV_VAR):
        raise ValueError(
            "No WandB API key found. Either set the {} environment "
            "variable, pass `api_key` or `api_key_file` to the"
            "`WandbLoggerCallback` class as arguments, "
            "or run `wandb login` from the command line".format(WANDB_ENV_VAR)
        )
|
| 341 |
+
|
| 342 |
+
|
| 343 |
+
def _run_wandb_process_run_info_hook(run: Any) -> None:
    """Invoke the external run-info hook, if configured, on a wandb run.

    The hook is looked up via the ``WANDB_PROCESS_RUN_INFO_HOOK`` environment
    variable; failures are logged but never propagated.
    """
    hook_path = os.environ.get(WANDB_PROCESS_RUN_INFO_HOOK)
    if hook_path is None:
        return
    try:
        _load_class(hook_path)(run)
    except Exception as e:
        # Best-effort: a broken hook must not break logging.
        logger.exception(f"Error calling {WANDB_PROCESS_RUN_INFO_HOOK}: {e}", exc_info=e)
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
class _QueueItem(enum.Enum):
    # Message tags for the driver -> logging-actor queue. Each queue entry is
    # a `(tag, payload)` tuple; the tag selects how the payload is handled.
    END = enum.auto()  # sentinel: shut down the logging loop
    RESULT = enum.auto()  # payload is a result dict to pass to wandb.log()
    CHECKPOINT = enum.auto()  # payload is a checkpoint dir for wandb.log_artifact()
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
class _WandbLoggingActor:
    """
    Wandb assumes that each trial's information should be logged from a
    separate process. We use Ray actors as forking multiprocessing
    processes is not supported by Ray and spawn processes run into pickling
    problems.

    We use a queue for the driver to communicate with the logging process.
    The queue accepts the following items:

    - If it's a dict, it is assumed to be a result and will be logged using
      ``wandb.log()``
    - If it's a checkpoint object, it will be saved using ``wandb.log_artifact()``.
    """

    def __init__(
        self,
        logdir: str,
        queue: Queue,
        exclude: List[str],
        to_config: List[str],
        *args,
        **kwargs,
    ):
        """Initialize the logging actor.

        Args:
            logdir: Trial log directory; the actor chdirs into it so wandb
                files are written next to the trial's other outputs.
            queue: Driver -> actor queue carrying `(_QueueItem, payload)` tuples.
            exclude: Result keys (flattened, "/"-delimited) never logged.
            to_config: Result keys routed into ``wandb.config`` instead of
                ``wandb.log``.
            *args: Forwarded to ``wandb.init()``.
            **kwargs: Forwarded to ``wandb.init()``; ``name`` doubles as the
                trial name used to label checkpoint artifacts.
        """
        import wandb

        self._wandb = wandb

        os.chdir(logdir)
        self.queue = queue
        # Sets for O(1) membership checks in _handle_result.
        self._exclude = set(exclude)
        self._to_config = set(to_config)
        self.args = args
        self.kwargs = kwargs

        self._trial_name = self.kwargs.get("name", "unknown")
        self._logdir = logdir

    def run(self):
        """Consume the queue until an END sentinel arrives, then finish the run."""
        # Since we're running in a separate process already, use threads.
        os.environ["WANDB_START_METHOD"] = "thread"
        run = self._wandb.init(*self.args, **self.kwargs)
        run.config.trial_log_path = self._logdir

        _run_wandb_process_run_info_hook(run)

        while True:
            item_type, item_content = self.queue.get()
            if item_type == _QueueItem.END:
                break

            if item_type == _QueueItem.CHECKPOINT:
                self._handle_checkpoint(item_content)
                continue

            assert item_type == _QueueItem.RESULT
            log, config_update = self._handle_result(item_content)
            try:
                self._wandb.config.update(config_update, allow_val_change=True)
                self._wandb.log(log, step=log.get(TRAINING_ITERATION))
            except urllib.error.HTTPError as e:
                # Ignore HTTPError. Missing a few data points is not a
                # big issue, as long as things eventually recover.
                # Fix: `Logger.warn` is a deprecated alias of `Logger.warning`.
                logger.warning("Failed to log result to w&b: {}".format(str(e)))
        self._wandb.finish()

    def _handle_checkpoint(self, checkpoint_path: str):
        """Upload a checkpoint directory to wandb as a model artifact."""
        artifact = self._wandb.Artifact(
            name=f"checkpoint_{self._trial_name}", type="model"
        )
        artifact.add_dir(checkpoint_path)
        self._wandb.log_artifact(artifact)

    def _handle_result(self, result: Dict) -> Tuple[Dict, Dict]:
        """Split a result dict into (metrics to log, config updates).

        Keys are matched against `self._exclude` / `self._to_config` either
        exactly or as a "<key>/" prefix of the flattened result key. Values
        that are not an allowed loggable type are dropped.
        """
        config_update = result.get("config", {}).copy()
        log = {}
        flat_result = flatten_dict(result, delimiter="/")

        for k, v in flat_result.items():
            if any(k.startswith(item + "/") or k == item for item in self._exclude):
                continue
            elif any(k.startswith(item + "/") or k == item for item in self._to_config):
                config_update[k] = v
            elif not _is_allowed_type(v):
                continue
            else:
                log[k] = v

        config_update.pop("callbacks", None)  # Remove callbacks
        return log, config_update
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
@PublicAPI(stability="alpha")
class WandbLoggerCallback(LoggerCallback):
    """WandbLoggerCallback

    Weights and biases (https://www.wandb.ai/) is a tool for experiment
    tracking, model optimization, and dataset versioning. This Ray Tune
    ``LoggerCallback`` sends metrics to Wandb for automatic tracking and
    visualization.

    Example:

        .. testcode::

            import random

            from ray import train, tune
            from ray.train import RunConfig
            from ray.air.integrations.wandb import WandbLoggerCallback


            def train_func(config):
                offset = random.random() / 5
                for epoch in range(2, config["epochs"]):
                    acc = 1 - (2 + config["lr"]) ** -epoch - random.random() / epoch - offset
                    loss = (2 + config["lr"]) ** -epoch + random.random() / epoch + offset
                    train.report({"acc": acc, "loss": loss})


            tuner = tune.Tuner(
                train_func,
                param_space={
                    "lr": tune.grid_search([0.001, 0.01, 0.1, 1.0]),
                    "epochs": 10,
                },
                run_config=RunConfig(
                    callbacks=[WandbLoggerCallback(project="Optimization_Project")]
                ),
            )
            results = tuner.fit()

        .. testoutput::
            :hide:

            ...

    Args:
        project: Name of the Wandb project. Mandatory.
        group: Name of the Wandb group. Defaults to the trainable
            name.
        api_key_file: Path to file containing the Wandb API KEY. This
            file only needs to be present on the node running the Tune script
            if using the WandbLogger.
        api_key: Wandb API Key. Alternative to setting ``api_key_file``.
        excludes: List of metrics and config that should be excluded from
            the log.
        log_config: Boolean indicating if the ``config`` parameter of
            the ``results`` dict should be logged. This makes sense if
            parameters will change during training, e.g. with
            PopulationBasedTraining. Defaults to False.
        upload_checkpoints: If ``True``, model checkpoints will be uploaded to
            Wandb as artifacts. Defaults to ``False``.
        **kwargs: The keyword arguments will be passed to ``wandb.init()``.

    Wandb's ``group``, ``run_id`` and ``run_name`` are automatically selected
    by Tune, but can be overwritten by filling out the respective configuration
    values.

    Please see here for all other valid configuration settings:
    https://docs.wandb.ai/library/init
    """  # noqa: E501

    # Do not log these result keys
    _exclude_results = ["done", "should_checkpoint"]

    AUTO_CONFIG_KEYS = [
        "trial_id",
        "experiment_tag",
        "node_ip",
        "experiment_id",
        "hostname",
        "pid",
        "date",
    ]
    """Results that are saved with `wandb.config` instead of `wandb.log`."""

    # Actor class used for per-trial logging; overridable for testing.
    _logger_actor_cls = _WandbLoggingActor

    def __init__(
        self,
        project: Optional[str] = None,
        group: Optional[str] = None,
        api_key_file: Optional[str] = None,
        api_key: Optional[str] = None,
        excludes: Optional[List[str]] = None,
        log_config: bool = False,
        upload_checkpoints: bool = False,
        save_checkpoints: bool = False,
        upload_timeout: int = DEFAULT_SYNC_TIMEOUT,
        **kwargs,
    ):
        if not wandb:
            raise RuntimeError(
                "Wandb was not found - please install with `pip install wandb`"
            )

        # Legacy alias: `save_checkpoints` maps onto `upload_checkpoints`.
        if save_checkpoints:
            warnings.warn(
                "`save_checkpoints` is deprecated. Use `upload_checkpoints` instead.",
                DeprecationWarning,
            )
            upload_checkpoints = save_checkpoints

        self.project = project
        self.group = group
        self.api_key_path = api_key_file
        self.api_key = api_key
        self.excludes = excludes or []
        self.log_config = log_config
        self.upload_checkpoints = upload_checkpoints
        self._upload_timeout = upload_timeout
        self.kwargs = kwargs

        # Created lazily in _start_logging_actor (needs a live Ray cluster).
        self._remote_logger_class = None

        # Per-trial bookkeeping: one logging actor + queue + run() future each.
        self._trial_logging_actors: Dict[
            "Trial", ray.actor.ActorHandle[_WandbLoggingActor]
        ] = {}
        self._trial_logging_futures: Dict["Trial", ray.ObjectRef] = {}
        self._logging_future_to_trial: Dict[ray.ObjectRef, "Trial"] = {}
        self._trial_queues: Dict["Trial", Queue] = {}

    def setup(self, *args, **kwargs):
        # Resolve credentials and project/group once per experiment.
        self.api_key_file = (
            os.path.expanduser(self.api_key_path) if self.api_key_path else None
        )
        _set_api_key(self.api_key_file, self.api_key)

        self.project = _get_wandb_project(self.project)
        if not self.project:
            raise ValueError(
                "Please pass the project name as argument or through "
                f"the {WANDB_PROJECT_ENV_VAR} environment variable."
            )
        if not self.group and os.environ.get(WANDB_GROUP_ENV_VAR):
            self.group = os.environ.get(WANDB_GROUP_ENV_VAR)

    def log_trial_start(self, trial: "Trial"):
        # Build the wandb.init() arguments for this trial and spawn its
        # logging actor.
        config = trial.config.copy()

        config.pop("callbacks", None)  # Remove callbacks

        exclude_results = self._exclude_results.copy()

        # Additional excludes
        exclude_results += self.excludes

        # Log config keys on each result?
        if not self.log_config:
            exclude_results += ["config"]

        # Fill trial ID and name
        trial_id = trial.trial_id if trial else None
        trial_name = str(trial) if trial else None

        # Project name for Wandb
        wandb_project = self.project

        # Grouping
        wandb_group = self.group or trial.experiment_dir_name if trial else None

        # remove unpickleable items!
        config = _clean_log(config)
        config = {
            key: value for key, value in config.items() if key not in self.excludes
        }

        wandb_init_kwargs = dict(
            id=trial_id,
            name=trial_name,
            resume=False,
            reinit=True,
            allow_val_change=True,
            group=wandb_group,
            project=wandb_project,
            config=config,
        )
        # User-supplied kwargs take precedence over the defaults above.
        wandb_init_kwargs.update(self.kwargs)

        self._start_logging_actor(trial, exclude_results, **wandb_init_kwargs)

    def _start_logging_actor(
        self, trial: "Trial", exclude_results: List[str], **wandb_init_kwargs
    ):
        # Reuse actor if one already exists.
        # This can happen if the trial is restarted.
        if trial in self._trial_logging_futures:
            return

        if not self._remote_logger_class:
            env_vars = {}
            # API key env variable is not set if authenticating through `wandb login`
            if WANDB_ENV_VAR in os.environ:
                env_vars[WANDB_ENV_VAR] = os.environ[WANDB_ENV_VAR]
            # Pin the actor to the driver node and make it survive failures
            # (infinite restarts/retries) so logging outlives transient errors.
            self._remote_logger_class = ray.remote(
                num_cpus=0,
                **_force_on_current_node(),
                runtime_env={"env_vars": env_vars},
                max_restarts=-1,
                max_task_retries=-1,
            )(self._logger_actor_cls)

        self._trial_queues[trial] = Queue(
            actor_options={
                "num_cpus": 0,
                **_force_on_current_node(),
                "max_restarts": -1,
                "max_task_retries": -1,
            }
        )
        self._trial_logging_actors[trial] = self._remote_logger_class.remote(
            logdir=trial.local_path,
            queue=self._trial_queues[trial],
            exclude=exclude_results,
            to_config=self.AUTO_CONFIG_KEYS,
            **wandb_init_kwargs,
        )
        # The run() future resolves when the actor's logging loop exits.
        logging_future = self._trial_logging_actors[trial].run.remote()
        self._trial_logging_futures[trial] = logging_future
        self._logging_future_to_trial[logging_future] = trial

    def _signal_logging_actor_stop(self, trial: "Trial"):
        # END sentinel makes the actor drain, call wandb.finish(), and return.
        self._trial_queues[trial].put((_QueueItem.END, None))

    def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
        if trial not in self._trial_logging_actors:
            self.log_trial_start(trial)

        result = _clean_log(result)
        self._trial_queues[trial].put((_QueueItem.RESULT, result))

    def log_trial_save(self, trial: "Trial"):
        if self.upload_checkpoints and trial.checkpoint:
            checkpoint_root = None
            # Only local checkpoints can be uploaded as artifacts; remote
            # (e.g. cloud) filesystems are skipped.
            if isinstance(trial.checkpoint.filesystem, pyarrow.fs.LocalFileSystem):
                checkpoint_root = trial.checkpoint.path

            if checkpoint_root:
                self._trial_queues[trial].put((_QueueItem.CHECKPOINT, checkpoint_root))

    def log_trial_end(self, trial: "Trial", failed: bool = False):
        self._signal_logging_actor_stop(trial=trial)
        self._cleanup_logging_actors()

    def _cleanup_logging_actor(self, trial: "Trial"):
        # Drop all references and force-kill the actor for this trial.
        del self._trial_queues[trial]
        del self._trial_logging_futures[trial]
        ray.kill(self._trial_logging_actors[trial])
        del self._trial_logging_actors[trial]

    def _cleanup_logging_actors(self, timeout: int = 0, kill_on_timeout: bool = False):
        """Clean up logging actors that have finished uploading to wandb.
        Waits for `timeout` seconds to collect finished logging actors.

        Args:
            timeout: The number of seconds to wait. Defaults to 0 to clean up
                any immediate logging actors during the run.
                This is set to a timeout threshold to wait for pending uploads
                on experiment end.
            kill_on_timeout: Whether or not to kill and cleanup the logging actor if
                it hasn't finished within the timeout.
        """

        futures = list(self._trial_logging_futures.values())
        done, remaining = ray.wait(futures, num_returns=len(futures), timeout=timeout)
        for ready_future in done:
            finished_trial = self._logging_future_to_trial.pop(ready_future)
            self._cleanup_logging_actor(finished_trial)

        if kill_on_timeout:
            for remaining_future in remaining:
                trial = self._logging_future_to_trial.pop(remaining_future)
                self._cleanup_logging_actor(trial)

    def on_experiment_end(self, trials: List["Trial"], **info):
        """Wait for the actors to finish their call to `wandb.finish`.
        This includes uploading all logs + artifacts to wandb."""
        self._cleanup_logging_actors(timeout=self._upload_timeout, kill_on_timeout=True)

    def __del__(self):
        # Best-effort shutdown; Ray calls are only valid while the cluster
        # connection is still alive.
        if ray.is_initialized():
            for trial in list(self._trial_logging_actors):
                self._signal_logging_actor_stop(trial=trial)

            self._cleanup_logging_actors(timeout=2, kill_on_timeout=True)

        self._trial_logging_actors = {}
        self._trial_logging_futures = {}
        self._logging_future_to_trial = {}
        self._trial_queues = {}
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__init__.py
ADDED
|
File without changes
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/data_batch_conversion.cpython-310.pyc
ADDED
|
Binary file (8.25 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/node.cpython-310.pyc
ADDED
|
Binary file (2.71 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/torch_dist.cpython-310.pyc
ADDED
|
Binary file (5.64 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/__pycache__/transform_pyarrow.cpython-310.pyc
ADDED
|
Binary file (1.62 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/check_ingest.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
import sys
|
| 4 |
+
import time
|
| 5 |
+
from typing import Optional
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
|
| 9 |
+
import ray
|
| 10 |
+
from ray import train
|
| 11 |
+
from ray.air.config import DatasetConfig, ScalingConfig
|
| 12 |
+
from ray.data import DataIterator, Dataset, Preprocessor
|
| 13 |
+
from ray.train import DataConfig
|
| 14 |
+
from ray.train.data_parallel_trainer import DataParallelTrainer
|
| 15 |
+
from ray.util.annotations import Deprecated, DeveloperAPI
|
| 16 |
+
|
| 17 |
+
MAKE_LOCAL_DATA_ITERATOR_DEPRECATION_MSG = """
|
| 18 |
+
make_local_dataset_iterator is deprecated. Call ``iterator()`` directly on your dataset instead to create a local DataIterator.
|
| 19 |
+
""" # noqa: E501
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@DeveloperAPI
class DummyTrainer(DataParallelTrainer):
    """A Trainer that does nothing except read the data for a given number of epochs.

    It prints out as much debugging statistics as possible.

    This is useful for debugging data ingest problems. This trainer supports normal
    scaling options same as any other Trainer (e.g., num_workers, use_gpu).

    Args:
        scaling_config: Configuration for how to scale training. This is the same
            as for :class:`~ray.train.base_trainer.BaseTrainer`.
        num_epochs: How many times to iterate through the datasets.
        prefetch_batches: The number of batches to prefetch ahead of the
            current block during the scan. This is the same as
            :meth:`~ray.data.Dataset.iter_batches`
    """

    def __init__(
        self,
        *args,
        scaling_config: Optional[ScalingConfig] = None,
        num_epochs: int = 1,
        prefetch_batches: int = 1,
        batch_size: Optional[int] = 4096,
        **kwargs,
    ):
        # Default to a single worker if no scaling config is given.
        if not scaling_config:
            scaling_config = ScalingConfig(num_workers=1)
        super().__init__(
            train_loop_per_worker=DummyTrainer.make_train_loop(
                num_epochs, prefetch_batches, batch_size
            ),
            *args,
            scaling_config=scaling_config,
            **kwargs,
        )

    @staticmethod
    def make_train_loop(
        num_epochs: int,
        prefetch_batches: int,
        batch_size: Optional[int],
    ):
        """Make a debug train loop that runs for the given amount of epochs."""

        def train_loop_per_worker():
            import pandas as pd

            rank = train.get_context().get_world_rank()
            data_shard = train.get_dataset_shard("train")
            start = time.perf_counter()
            epochs_read, batches_read, bytes_read = 0, 0, 0
            # Per-batch wait times; summarized as P50/P95/max at the end.
            batch_delays = []

            print("Starting train loop on worker", rank)
            for epoch in range(num_epochs):
                epochs_read += 1
                batch_start = time.perf_counter()
                for batch in data_shard.iter_batches(
                    prefetch_batches=prefetch_batches,
                    batch_size=batch_size,
                ):
                    # Time spent waiting for this batch to arrive.
                    batch_delay = time.perf_counter() - batch_start
                    batch_delays.append(batch_delay)
                    batches_read += 1
                    # Estimate batch size in bytes depending on batch format.
                    if isinstance(batch, pd.DataFrame):
                        bytes_read += int(
                            batch.memory_usage(index=True, deep=True).sum()
                        )
                    elif isinstance(batch, np.ndarray):
                        bytes_read += batch.nbytes
                    elif isinstance(batch, dict):
                        for arr in batch.values():
                            bytes_read += arr.nbytes
                    else:
                        # NOTE: This isn't recursive and will just return the size of
                        # the object pointers if list of non-primitive types.
                        bytes_read += sys.getsizeof(batch)
                    train.report(
                        dict(
                            bytes_read=bytes_read,
                            batches_read=batches_read,
                            epochs_read=epochs_read,
                            batch_delay=batch_delay,
                        )
                    )
                    batch_start = time.perf_counter()
            delta = time.perf_counter() - start

            print("Time to read all data", delta, "seconds")
            print(
                "P50/P95/Max batch delay (s)",
                np.quantile(batch_delays, 0.5),
                np.quantile(batch_delays, 0.95),
                np.max(batch_delays),
            )
            print("Num epochs read", epochs_read)
            print("Num batches read", batches_read)
            print("Num bytes read", round(bytes_read / (1024 * 1024), 2), "MiB")
            print(
                "Mean throughput", round(bytes_read / (1024 * 1024) / delta, 2), "MiB/s"
            )

            # Only rank 0 prints the full ingest stats to avoid duplication.
            if rank == 0:
                print("Ingest stats from rank=0:\n\n{}".format(data_shard.stats()))

        return train_loop_per_worker
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
@Deprecated(MAKE_LOCAL_DATA_ITERATOR_DEPRECATION_MSG)
def make_local_dataset_iterator(
    dataset: Dataset,
    preprocessor: Preprocessor,
    dataset_config: DatasetConfig,
) -> DataIterator:
    """A helper function to create a local
    :py:class:`DataIterator <ray.data.DataIterator>`,
    like the one returned by :meth:`~ray.train.get_dataset_shard`.

    This function should only be used for development and debugging. It will
    raise an exception if called by a worker instead of the driver.

    Args:
        dataset: The input Dataset.
        preprocessor: The preprocessor that will be applied to the input dataset.
        dataset_config: The dataset config normally passed to the trainer.
    """
    # Fully deprecated: always raise, pointing users at Dataset.iterator().
    raise DeprecationWarning(MAKE_LOCAL_DATA_ITERATOR_DEPRECATION_MSG)
|
| 151 |
+
|
| 152 |
+
|
| 153 |
+
if __name__ == "__main__":
    # Standalone ingest benchmark: read a synthetic dataset through
    # DummyTrainer and print throughput / memory statistics.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--num-epochs", "-e", type=int, default=1, help="Number of epochs to read."
    )
    parser.add_argument(
        "--prefetch-batches",
        "-b",
        type=int,
        default=1,
        help="Number of batches to prefetch when reading data.",
    )

    args = parser.parse_args()

    # Generate a synthetic dataset of ~10GiB of float64 data. The dataset is sharded
    # into 100 blocks (override_num_blocks=100).
    ds = ray.data.range_tensor(50000, shape=(80, 80, 4), override_num_blocks=100)

    # An example preprocessing chain that just scales all values by 4.0 in two stages.
    ds = ds.map_batches(lambda df: df * 2, batch_format="pandas")
    ds = ds.map_batches(lambda df: df * 2, batch_format="pandas")

    # Setup the dummy trainer that prints ingest stats.
    # Run and print ingest stats.
    trainer = DummyTrainer(
        scaling_config=ScalingConfig(num_workers=1, use_gpu=False),
        datasets={"train": ds},
        num_epochs=args.num_epochs,
        prefetch_batches=args.prefetch_batches,
        dataset_config=DataConfig(),
        batch_size=None,
    )
    print("Dataset config", trainer.get_dataset_config())
    trainer.fit()

    # Print memory stats (you can also use "ray memory --stats-only" to monitor
    # this during the middle of the run).
    try:
        print(
            "Memory stats at end of ingest:\n\n{}".format(
                ray._private.internal_api.memory_summary(stats_only=True)
            )
        )
    except Exception:
        # Best-effort: stats retrieval uses a private API that may fail.
        print("Error getting Ray memory stats")
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/data_batch_conversion.py
ADDED
|
@@ -0,0 +1,353 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
from enum import Enum
|
| 3 |
+
from typing import TYPE_CHECKING, Dict, List, Union
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from ray.air.constants import TENSOR_COLUMN_NAME
|
| 8 |
+
from ray.air.data_batch_type import DataBatchType
|
| 9 |
+
from ray.util.annotations import Deprecated, DeveloperAPI
|
| 10 |
+
|
| 11 |
+
if TYPE_CHECKING:
|
| 12 |
+
import pandas as pd
|
| 13 |
+
|
| 14 |
+
# TODO: Consolidate data conversion edges for arrow bug workaround.
|
| 15 |
+
try:
|
| 16 |
+
import pyarrow
|
| 17 |
+
except ImportError:
|
| 18 |
+
pyarrow = None
|
| 19 |
+
|
| 20 |
+
# Lazy import to avoid ray init failures without pandas installed and allow
|
| 21 |
+
# dataset to import modules in this file.
|
| 22 |
+
_pandas = None
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _lazy_import_pandas():
    """Import pandas on first use and memoize the module object.

    Keeps ``import pandas`` out of module import time so Ray can initialize
    without pandas installed (see the module-level comment on ``_pandas``).
    """
    global _pandas
    if _pandas is not None:
        return _pandas
    import pandas as _pd

    _pandas = _pd
    return _pandas
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
@DeveloperAPI
class BatchFormat(str, Enum):
    """User-facing batch formats that data batches can be converted to/from.

    Subclasses ``str`` so members compare equal to their string values
    (e.g. ``BatchFormat.PANDAS == "pandas"``).
    """

    PANDAS = "pandas"
    # TODO: Remove once Arrow is deprecated as user facing batch format
    ARROW = "arrow"
    NUMPY = "numpy"  # Either a single numpy array or a Dict of numpy arrays.
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@DeveloperAPI
class BlockFormat(str, Enum):
    """Internal Dataset block format enum.

    Subclasses ``str`` so members compare equal to their string values.
    """

    PANDAS = "pandas"
    ARROW = "arrow"
    SIMPLE = "simple"
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def _convert_batch_type_to_pandas(
    data: DataBatchType,
    cast_tensor_columns: bool = False,
) -> "pd.DataFrame":
    """Convert the provided data to a Pandas DataFrame.

    Args:
        data: Data of type DataBatchType (ndarray, dict of ndarrays,
            pyarrow Table, or pandas DataFrame).
        cast_tensor_columns: Whether tensor columns should be cast to NumPy ndarrays.

    Returns:
        A pandas Dataframe representation of the input data.

    Raises:
        ValueError: If ``data`` is not a supported batch type, or a dict input
            contains a non-ndarray value.
    """
    pd = _lazy_import_pandas()

    if isinstance(data, np.ndarray):
        # A bare ndarray is stored under the designated tensor column name.
        frame = pd.DataFrame({TENSOR_COLUMN_NAME: _ndarray_to_column(data)})
    elif isinstance(data, dict):
        columns = {}
        for name, values in data.items():
            if not isinstance(values, np.ndarray):
                raise ValueError(
                    "All values in the provided dict must be of type "
                    f"np.ndarray. Found type {type(values)} for key {name} "
                    f"instead."
                )
            columns[name] = _ndarray_to_column(values)
        frame = pd.DataFrame(columns)
    elif pyarrow is not None and isinstance(data, pyarrow.Table):
        frame = data.to_pandas()
    elif isinstance(data, pd.DataFrame):
        frame = data
    else:
        raise ValueError(
            f"Received data of type: {type(data)}, but expected it to be one "
            f"of {DataBatchType}"
        )
    return _cast_tensor_columns_to_ndarrays(frame) if cast_tensor_columns else frame
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def _convert_pandas_to_batch_type(
    data: "pd.DataFrame",
    type: BatchFormat,
    cast_tensor_columns: bool = False,
) -> DataBatchType:
    """Convert the provided Pandas dataframe to the provided ``type``.

    Args:
        data: A Pandas DataFrame
        type: The specific ``BatchFormat`` to convert to.
        cast_tensor_columns: Whether tensor columns should be cast to our tensor
            extension type.

    Returns:
        The input data represented with the provided type.

    Raises:
        ValueError: If ``type`` is not a known ``BatchFormat``, or if ``type``
            is ``BatchFormat.ARROW`` but pyarrow is not installed.
    """
    if cast_tensor_columns:
        data = _cast_ndarray_columns_to_tensor_extension(data)
    if type == BatchFormat.PANDAS:
        return data

    elif type == BatchFormat.NUMPY:
        if len(data.columns) == 1:
            # If just a single column, return as a single numpy array.
            return data.iloc[:, 0].to_numpy()
        else:
            # Else return as a dict of numpy arrays, one entry per column.
            return {column: data[column].to_numpy() for column in data}

    elif type == BatchFormat.ARROW:
        if not pyarrow:
            raise ValueError(
                "Attempted to convert data to Pyarrow Table but Pyarrow "
                "is not installed. Please do `pip install pyarrow` to "
                "install Pyarrow."
            )
        return pyarrow.Table.from_pandas(data)

    else:
        # BUGFIX: the previous message said "expected it to be one of
        # {DataBatchType}", but ``type`` selects an *output* BatchFormat,
        # not an input data batch type. Report the valid BatchFormat values.
        raise ValueError(
            f"Received type {type}, but expected it to be one of "
            f"{[fmt.value for fmt in BatchFormat]}"
        )
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@Deprecated
def convert_batch_type_to_pandas(
    data: DataBatchType,
    cast_tensor_columns: bool = False,
):
    """Convert the provided data to a Pandas DataFrame.

    This API is deprecated from Ray 2.4; it now only emits a
    ``PendingDeprecationWarning`` and delegates to the private helper.

    Args:
        data: Data of type DataBatchType
        cast_tensor_columns: Whether tensor columns should be cast to NumPy ndarrays.

    Returns:
        A pandas Dataframe representation of the input data.
    """
    deprecation_message = (
        "`convert_batch_type_to_pandas` is deprecated as a developer API "
        "starting from Ray 2.4. All batch format conversions should be "
        "done manually instead of relying on this API."
    )
    warnings.warn(deprecation_message, PendingDeprecationWarning)
    return _convert_batch_type_to_pandas(data, cast_tensor_columns)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
@Deprecated
def convert_pandas_to_batch_type(
    data: "pd.DataFrame",
    type: BatchFormat,
    cast_tensor_columns: bool = False,
):
    """Convert the provided Pandas dataframe to the provided ``type``.

    Deprecated developer API; emits a ``PendingDeprecationWarning`` and
    delegates to the private helper.

    Args:
        data: A Pandas DataFrame
        type: The specific ``BatchFormat`` to convert to.
        cast_tensor_columns: Whether tensor columns should be cast to our tensor
            extension type.

    Returns:
        The input data represented with the provided type.
    """
    deprecation_message = (
        "`convert_pandas_to_batch_type` is deprecated as a developer API "
        "starting from Ray 2.4. All batch format conversions should be "
        "done manually instead of relying on this API."
    )
    warnings.warn(deprecation_message, PendingDeprecationWarning)
    return _convert_pandas_to_batch_type(data, type, cast_tensor_columns)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _convert_batch_type_to_numpy(
    data: DataBatchType,
) -> Union[np.ndarray, Dict[str, np.ndarray]]:
    """Convert the provided data to a NumPy ndarray or dict of ndarrays.

    Args:
        data: Data of type DataBatchType (ndarray, dict of ndarrays,
            pyarrow Table, or pandas DataFrame).

    Returns:
        A numpy representation of the input data: a single ndarray (passed
        through, or for the legacy single-tensor-column Arrow case), otherwise
        a dict mapping column name to ndarray.

    Raises:
        ValueError: If ``data`` is not a supported batch type, or a dict input
            contains a non-ndarray value.
    """
    pd = _lazy_import_pandas()

    if isinstance(data, np.ndarray):
        return data
    elif isinstance(data, dict):
        # Validate only; the dict is already in the output format.
        for col_name, col in data.items():
            if not isinstance(col, np.ndarray):
                raise ValueError(
                    "All values in the provided dict must be of type "
                    f"np.ndarray. Found type {type(col)} for key {col_name} "
                    f"instead."
                )
        return data
    elif pyarrow is not None and isinstance(data, pyarrow.Table):
        # Local imports: these pull in ray.data internals, which must not be
        # imported at module load time.
        from ray.air.util.tensor_extensions.arrow import (
            get_arrow_extension_fixed_shape_tensor_types,
        )
        from ray.data._internal.arrow_ops import transform_pyarrow

        column_values_ndarrays = []

        for col in data.columns:
            # Combine columnar values arrays to make these contiguous
            # (making them compatible with numpy format)
            combined_array = transform_pyarrow.combine_chunked_array(col)

            column_values_ndarrays.append(
                transform_pyarrow.to_numpy(combined_array, zero_copy_only=False)
            )

        arrow_fixed_shape_tensor_types = get_arrow_extension_fixed_shape_tensor_types()

        # NOTE: This branch is here for backwards-compatibility
        # A table with only the designated tensor column (of fixed-shape
        # tensor extension type) is unwrapped to a bare ndarray.
        if data.column_names == [TENSOR_COLUMN_NAME] and (
            isinstance(data.schema.types[0], arrow_fixed_shape_tensor_types)
        ):
            return column_values_ndarrays[0]

        return dict(zip(data.column_names, column_values_ndarrays))
    elif isinstance(data, pd.DataFrame):
        return _convert_pandas_to_batch_type(data, BatchFormat.NUMPY)
    else:
        raise ValueError(
            f"Received data of type: {type(data)}, but expected it to be one "
            f"of {DataBatchType}"
        )
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def _ndarray_to_column(arr: np.ndarray) -> Union["pd.Series", List[np.ndarray]]:
    """Convert a NumPy ndarray into an appropriate column format for insertion into a
    pandas DataFrame.

    If conversion to a pandas Series fails (e.g. if the ndarray is multi-dimensional),
    fall back to a list of NumPy ndarrays.
    """
    pd = _lazy_import_pandas()
    try:
        # Try to convert to Series, falling back to a list conversion if this fails
        # (e.g. if the ndarray is multi-dimensional).
        return pd.Series(arr)
    except ValueError:
        # A list of per-row sub-arrays can always be stored (as an
        # object-dtyped column), preserving the data.
        return list(arr)
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def _unwrap_ndarray_object_type_if_needed(arr: np.ndarray) -> np.ndarray:
|
| 271 |
+
"""Unwrap an object-dtyped NumPy ndarray containing ndarray pointers into a single
|
| 272 |
+
contiguous ndarray, if needed/possible.
|
| 273 |
+
"""
|
| 274 |
+
if arr.dtype.type is np.object_:
|
| 275 |
+
try:
|
| 276 |
+
# Try to convert the NumPy ndarray to a non-object dtype.
|
| 277 |
+
arr = np.array([np.asarray(v) for v in arr])
|
| 278 |
+
except Exception:
|
| 279 |
+
# This may fail if the subndarrays are of heterogeneous shape
|
| 280 |
+
pass
|
| 281 |
+
return arr
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def _cast_ndarray_columns_to_tensor_extension(df: "pd.DataFrame") -> "pd.DataFrame":
    """
    Cast all NumPy ndarray columns in df to our tensor extension type, TensorArray.

    The cast is done in place on ``df``, which is also returned.

    Raises:
        ValueError: If a column that needs the tensor extension cannot be
            converted to ``TensorArray``.
    """
    pd = _lazy_import_pandas()
    try:
        SettingWithCopyWarning = pd.core.common.SettingWithCopyWarning
    except AttributeError:
        # SettingWithCopyWarning was moved to pd.errors in Pandas 1.5.0.
        SettingWithCopyWarning = pd.errors.SettingWithCopyWarning

    from ray.air.util.tensor_extensions.pandas import (
        TensorArray,
        column_needs_tensor_extension,
    )

    # Try to convert any ndarray columns to TensorArray columns.
    # TODO(Clark): Once Pandas supports registering extension types for type
    # inference on construction, implement as much for NumPy ndarrays and remove
    # this. See https://github.com/pandas-dev/pandas/issues/41848
    # TODO(Clark): Optimize this with propagated DataFrame metadata containing a list of
    # column names containing tensor columns, to make this an O(# of tensor columns)
    # check rather than the current O(# of columns) check.
    for col_name, col in df.items():
        if column_needs_tensor_extension(col):
            try:
                # Suppress Pandas warnings:
                # https://github.com/ray-project/ray/issues/29270
                # We actually want in-place operations so we suppress this warning.
                # https://stackoverflow.com/a/74193599
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=FutureWarning)
                    warnings.simplefilter("ignore", category=SettingWithCopyWarning)
                    df[col_name] = TensorArray(col)
            except Exception as e:
                raise ValueError(
                    f"Tried to cast column {col_name} to the TensorArray tensor "
                    "extension type but the conversion failed. To disable "
                    "automatic casting to this tensor extension, set "
                    "ctx = DataContext.get_current(); "
                    "ctx.enable_tensor_extension_casting = False."
                ) from e
    return df
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
def _cast_tensor_columns_to_ndarrays(df: "pd.DataFrame") -> "pd.DataFrame":
    """Cast all tensor extension columns in df to NumPy ndarrays.

    The inverse of ``_cast_ndarray_columns_to_tensor_extension``. The cast is
    done in place on ``df``, which is also returned.
    """
    pd = _lazy_import_pandas()
    try:
        SettingWithCopyWarning = pd.core.common.SettingWithCopyWarning
    except AttributeError:
        # SettingWithCopyWarning was moved to pd.errors in Pandas 1.5.0.
        SettingWithCopyWarning = pd.errors.SettingWithCopyWarning
    from ray.air.util.tensor_extensions.pandas import TensorDtype

    # Try to convert any tensor extension columns to ndarray columns.
    # TODO(Clark): Optimize this with propagated DataFrame metadata containing a list of
    # column names containing tensor columns, to make this an O(# of tensor columns)
    # check rather than the current O(# of columns) check.
    for col_name, col in df.items():
        if isinstance(col.dtype, TensorDtype):
            # Suppress Pandas warnings:
            # https://github.com/ray-project/ray/issues/29270
            # We actually want in-place operations so we suppress this warning.
            # https://stackoverflow.com/a/74193599
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", category=FutureWarning)
                warnings.simplefilter("ignore", category=SettingWithCopyWarning)
                # Store each row's tensor as a list entry (object column).
                df[col_name] = list(col.to_numpy())
    return df
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/node.py
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Dict, Optional, Union
|
| 2 |
+
|
| 3 |
+
import ray
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def _get_node_id_from_node_ip(node_ip: str) -> Optional[str]:
    """Returns the node ID for the first alive node with the input IP.

    Returns ``None`` when no alive node reports ``node_ip`` as its
    ``NodeManagerAddress``.
    """
    return next(
        (
            node["NodeID"]
            for node in ray.nodes()
            if node["Alive"] and node["NodeManagerAddress"] == node_ip
        ),
        None,
    )
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def _force_on_node(
    node_id: str,
    remote_func_or_actor_class: Optional[
        Union[ray.remote_function.RemoteFunction, ray.actor.ActorClass]
    ] = None,
) -> Union[Union[ray.remote_function.RemoteFunction, ray.actor.ActorClass], Dict]:
    """Pin a remote function or actor class to the node with ``node_id``.

    Args:
        node_id: The node to schedule on.
        remote_func_or_actor_class: A Ray remote function or actor class to
            schedule on the input node. If None, this function directly returns
            the options dict to pass to another remote function or actor class
            as remote options.

    Returns:
        The provided remote function or actor class with a hard (non-soft) node
        affinity applied, or — when ``remote_func_or_actor_class`` is None —
        the options dict to pass as remote options kwargs.
    """
    affinity = ray.util.scheduling_strategies.NodeAffinitySchedulingStrategy(
        node_id=node_id, soft=False
    )
    node_options = {"scheduling_strategy": affinity}
    return (
        node_options
        if remote_func_or_actor_class is None
        else remote_func_or_actor_class.options(**node_options)
    )
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
def _force_on_current_node(
    remote_func_or_actor_class: Optional[
        Union[ray.remote_function.RemoteFunction, ray.actor.ActorClass]
    ] = None
) -> Union[Union[ray.remote_function.RemoteFunction, ray.actor.ActorClass], Dict]:
    """Schedule a remote function or actor class on the current node.

    If using Ray Client, the current node is the client server node.

    Args:
        remote_func_or_actor_class: A Ray remote function or actor class to
            schedule on the current node. If None, this function directly
            returns the options dict to pass to another remote function or
            actor class as remote options.

    Returns:
        The provided remote function or actor class pinned to the current node,
        or — when ``remote_func_or_actor_class`` is None — the options dict to
        pass as remote options kwargs.
    """
    return _force_on_node(
        ray.get_runtime_context().get_node_id(), remote_func_or_actor_class
    )
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (188 Bytes). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__pycache__/arrow.cpython-310.pyc
ADDED
|
Binary file (4.9 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/__pycache__/pandas.cpython-310.pyc
ADDED
|
Binary file (4.93 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/arrow.py
ADDED
|
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pickle
|
| 2 |
+
import typing
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pyarrow as pa
|
| 6 |
+
from packaging.version import parse as parse_version
|
| 7 |
+
|
| 8 |
+
import ray.air.util.object_extensions.pandas
|
| 9 |
+
from ray._private.serialization import pickle_dumps
|
| 10 |
+
from ray._private.utils import _get_pyarrow_version
|
| 11 |
+
from ray.util.annotations import PublicAPI
|
| 12 |
+
|
| 13 |
+
MIN_PYARROW_VERSION_SCALAR_SUBCLASS = parse_version("9.0.0")
|
| 14 |
+
|
| 15 |
+
_VER = _get_pyarrow_version()
|
| 16 |
+
PYARROW_VERSION = None if _VER is None else parse_version(_VER)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def _object_extension_type_allowed() -> bool:
    """Whether the installed PyArrow supports the object extension type.

    Requires a detected PyArrow version that is at least
    ``MIN_PYARROW_VERSION_SCALAR_SUBCLASS``.
    """
    if PYARROW_VERSION is None:
        return False
    return PYARROW_VERSION >= MIN_PYARROW_VERSION_SCALAR_SUBCLASS
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Please see https://arrow.apache.org/docs/python/extending_types.html for more info
|
| 27 |
+
# Please see https://arrow.apache.org/docs/python/extending_types.html for more info
@PublicAPI(stability="alpha")
class ArrowPythonObjectType(pa.ExtensionType):
    """Defines a new Arrow extension type for Python objects.

    Objects are stored as pickled bytes in a LargeBinary storage array.
    We do not require a parametrized type, so the constructor does not
    take any arguments.
    """

    def __init__(self) -> None:
        # Defines the underlying storage type as the PyArrow LargeBinary type
        super().__init__(pa.large_binary(), "ray.data.arrow_pickled_object")

    def __arrow_ext_serialize__(self) -> bytes:
        # Since there are no type parameters, we are free to return empty
        return b""

    @classmethod
    def __arrow_ext_deserialize__(
        cls, storage_type: pa.DataType, serialized: bytes
    ) -> "ArrowPythonObjectType":
        # No parameters to restore; every instance is equivalent.
        return ArrowPythonObjectType()

    def __arrow_ext_scalar_class__(self) -> type:
        """Returns the scalar class of the extension type. Indexing out of the
        PyArrow extension array will return instances of this type.
        """
        return ArrowPythonObjectScalar

    def __arrow_ext_class__(self) -> type:
        """Returns the array type of the extension type. Selecting one array
        out of the ChunkedArray that makes up a column in a Table with
        this custom type will return an instance of this type.
        """
        return ArrowPythonObjectArray

    def to_pandas_dtype(self):
        """Pandas interoperability type. This describes the Pandas counterpart
        to the Arrow type. See https://pandas.pydata.org/docs/development/extending.html
        for more information.
        """
        return ray.air.util.object_extensions.pandas.PythonObjectDtype()

    def __reduce__(self):
        # Earlier PyArrow versions require custom pickling behavior.
        # Reconstruct via __arrow_ext_deserialize__ with our (empty) payload.
        return self.__arrow_ext_deserialize__, (
            self.storage_type,
            self.__arrow_ext_serialize__(),
        )
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@PublicAPI(stability="alpha")
class ArrowPythonObjectScalar(pa.ExtensionScalar):
    """Scalar class for ArrowPythonObjectType: unpickles the stored bytes."""

    def as_py(self) -> typing.Any:
        storage = self.value
        if not isinstance(storage, pa.LargeBinaryScalar):
            raise RuntimeError(
                f"{type(storage)} is not the expected LargeBinaryScalar"
            )
        # Unpickle straight out of the Arrow buffer without copying to bytes.
        return pickle.load(pa.BufferReader(storage.as_buffer()))
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
@PublicAPI(stability="alpha")
class ArrowPythonObjectArray(pa.ExtensionArray):
    """Array class for ArrowPythonObjectType.

    Stores arbitrary Python objects as pickled bytes backed by a LargeBinary
    storage array.
    """

    # BUGFIX: declared as @staticmethod. The original definition had no
    # ``self``/``cls`` parameter, so calling it through an *instance*
    # (``arr.from_objects(...)``) would mis-bind the instance to ``objects``.
    # Calls through the class (``ArrowPythonObjectArray.from_objects(...)``)
    # behave exactly as before.
    @staticmethod
    def from_objects(
        objects: typing.Union[np.ndarray, typing.Iterable[typing.Any]]
    ) -> "ArrowPythonObjectArray":
        """Pickle each object and wrap the results as an ArrowPythonObjectArray.

        Args:
            objects: An iterable (or ndarray) of arbitrary picklable objects.

        Returns:
            An ArrowPythonObjectArray holding the pickled objects.
        """
        if isinstance(objects, np.ndarray):
            objects = objects.tolist()
        type_ = ArrowPythonObjectType()
        all_dumped_bytes = []
        for obj in objects:
            # pickle_dumps raises a descriptive error when obj is unpicklable.
            dumped_bytes = pickle_dumps(
                obj, "Error pickling object to convert to Arrow"
            )
            all_dumped_bytes.append(dumped_bytes)
        arr = pa.array(all_dumped_bytes, type=type_.storage_type)
        return ArrowPythonObjectArray.from_storage(type_, arr)

    def to_numpy(
        self, zero_copy_only: bool = False, writable: bool = False
    ) -> np.ndarray:
        """Materialize as a 1-D object-dtype ndarray.

        Always copies (the elements must be unpickled); ``zero_copy_only`` and
        ``writable`` are accepted for interface compatibility.
        """
        arr = np.empty(len(self), dtype=object)
        arr[:] = self.to_pylist()
        return arr
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# Register the extension type with PyArrow at import time so that data
# containing it can be reconstructed by name.
try:
    pa.register_extension_type(ArrowPythonObjectType())
except pa.ArrowKeyError:
    # Already registered — presumably a second registration attempt for the
    # same type name in this process; safe to ignore.
    pass
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/object_extensions/pandas.py
ADDED
|
@@ -0,0 +1,118 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections.abc
|
| 2 |
+
import typing
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import pyarrow as pa
|
| 7 |
+
from pandas._libs import lib
|
| 8 |
+
from pandas._typing import ArrayLike, Dtype, PositionalIndexer, npt
|
| 9 |
+
|
| 10 |
+
import ray.air.util.object_extensions.arrow
|
| 11 |
+
from ray.util.annotations import PublicAPI
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# See https://pandas.pydata.org/docs/development/extending.html for more information.
|
| 15 |
+
# See https://pandas.pydata.org/docs/development/extending.html for more information.
@PublicAPI(stability="alpha")
class PythonObjectArray(pd.api.extensions.ExtensionArray):
    """Implements the Pandas extension array interface for the Arrow object array.

    Elements are held in a 1-D object-dtype NumPy ndarray (``self.values``).
    """

    def __init__(self, values: collections.abc.Iterable[typing.Any]):
        # Fill an object array via slice assignment so that sequence-like
        # elements are stored as single objects rather than expanded.
        vals = list(values)
        self.values = np.empty(len(vals), dtype=object)
        self.values[:] = vals

    @classmethod
    def _from_sequence(
        cls,
        scalars: collections.abc.Sequence[typing.Any],
        *,
        dtype: typing.Union[Dtype, None] = None,
        copy: bool = False,
    ) -> "PythonObjectArray":
        # NOTE: `dtype` and `copy` are accepted for interface compatibility
        # but ignored; the constructor always copies into a fresh array.
        return PythonObjectArray(scalars)

    @classmethod
    def _from_factorized(
        cls, values: collections.abc.Sequence[typing.Any], original: "PythonObjectArray"
    ) -> "PythonObjectArray":
        return PythonObjectArray(values)

    def __getitem__(self, item: PositionalIndexer) -> typing.Any:
        return self.values[item]

    def __setitem__(self, key, value) -> None:
        self.values[key] = value

    def __len__(self) -> int:
        return len(self.values)

    def __eq__(self, other: object) -> ArrayLike:
        # Element-wise comparison (ndarray of bools), mirroring ndarray
        # semantics rather than a single scalar equality.
        if isinstance(other, PythonObjectArray):
            return self.values == other.values
        elif isinstance(other, np.ndarray):
            return self.values == other
        else:
            return NotImplemented

    def to_numpy(
        self,
        dtype: typing.Union["npt.DTypeLike", None] = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        # NOTE: `dtype` is accepted for interface compatibility but ignored;
        # the underlying object array is returned (copied when requested or
        # when na_value substitution requires mutation).
        # NOTE(review): `isna()` is not defined on this class — confirm the
        # inherited ExtensionArray implementation is available before passing
        # a custom `na_value`.
        result = self.values
        if copy or na_value is not lib.no_default:
            result = result.copy()
        if na_value is not lib.no_default:
            result[self.isna()] = na_value
        return result

    @property
    def dtype(self) -> pd.api.extensions.ExtensionDtype:
        return PythonObjectDtype()

    @property
    def nbytes(self) -> int:
        # Size of the object-pointer array only; does not include the
        # referenced Python objects themselves.
        return self.values.nbytes

    def __arrow_array__(self, type=None):
        # Arrow interop hook: pandas/pyarrow call this when converting the
        # column to Arrow format.
        return ray.air.util.object_extensions.arrow.ArrowPythonObjectArray.from_objects(
            self.values
        )
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
@PublicAPI(stability="alpha")
@pd.api.extensions.register_extension_dtype
class PythonObjectDtype(pd.api.extensions.ExtensionDtype):
    """Pandas extension dtype backing :class:`PythonObjectArray`.

    Registered with pandas so the string name ``"python_object()"``
    round-trips through ``construct_from_string``.
    """

    @classmethod
    def construct_from_string(cls, string: str):
        # Only the exact canonical name is accepted.
        if string != "python_object()":
            raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
        return cls()

    @property
    def type(self):
        """
        The scalar type for the array, e.g. ``int``
        It's expected ``ExtensionArray[item]`` returns an instance
        of ``ExtensionDtype.type`` for scalar ``item``, assuming
        that value is valid (not NA). NA values do not need to be
        instances of `type`.
        """
        return object

    @property
    def name(self) -> str:
        # Must match the string accepted by construct_from_string above.
        return "python_object()"

    @classmethod
    def construct_array_type(cls: type) -> type:
        """
        Return the array type associated with this dtype.
        """
        return PythonObjectArray

    def __from_arrow__(
        self, array: typing.Union[pa.Array, pa.ChunkedArray]
    ) -> PythonObjectArray:
        # Arrow -> pandas hook: materialize the elements as Python objects
        # via to_pylist() and wrap them in a PythonObjectArray.
        return PythonObjectArray(array.to_pylist())
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/tensor_extensions/__pycache__/utils.cpython-310.pyc
ADDED
|
Binary file (3.98 kB). View file
|
|
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/torch_dist.py
ADDED
|
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""This file is modeled after ray/python/ray/train/torch/config.py
|
| 2 |
+
|
| 3 |
+
The logics are duplicated right now to allow maximum flexibility for
|
| 4 |
+
setting up PyTorch DDP process groups outside the context of Ray Train.
|
| 5 |
+
Eventually, these use cases should be consolidated.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
from abc import ABC
|
| 10 |
+
from collections import defaultdict
|
| 11 |
+
from datetime import timedelta
|
| 12 |
+
from typing import Callable, List, T
|
| 13 |
+
|
| 14 |
+
import torch
|
| 15 |
+
import torch.distributed as dist
|
| 16 |
+
|
| 17 |
+
import ray
|
| 18 |
+
from ray.actor import ActorHandle
|
| 19 |
+
from ray.air._internal.torch_utils import get_devices
|
| 20 |
+
from ray.train._internal.utils import get_address_and_port
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class TorchDistributedWorker(ABC):
    """Defines the interfaces required by the init_torch_dist_process_group().

    Modeled after RayTrainerWorker: a worker that can run arbitrary
    functions on a remote DDP worker via :meth:`execute`.
    """

    def execute(self, func: Callable[..., T], *args, **kwargs) -> T:
        """Run ``func`` with the given arguments and return its result.

        Args:
            func: The function to execute.
            args, kwargs: The arguments to pass into func.

        Returns:
            Whatever ``func`` returns.
        """
        result = func(*args, **kwargs)
        return result
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def _init_torch_distributed(
|
| 41 |
+
init_method: str,
|
| 42 |
+
backend: str,
|
| 43 |
+
rank: int,
|
| 44 |
+
world_size: int,
|
| 45 |
+
local_rank: int,
|
| 46 |
+
local_world_size: int,
|
| 47 |
+
master_addr: str,
|
| 48 |
+
master_port: str,
|
| 49 |
+
gpu_ids: List[int],
|
| 50 |
+
**init_process_group_kwargs,
|
| 51 |
+
):
|
| 52 |
+
"""Initialize torch distributed backend"""
|
| 53 |
+
if init_method == "env":
|
| 54 |
+
os.environ["MASTER_ADDR"] = str(master_addr)
|
| 55 |
+
os.environ["MASTER_PORT"] = str(master_port)
|
| 56 |
+
url = "env://"
|
| 57 |
+
elif init_method == "tcp":
|
| 58 |
+
url = f"tcp://{master_addr}:{master_port}"
|
| 59 |
+
else:
|
| 60 |
+
raise ValueError(
|
| 61 |
+
f"The provided init_method ("
|
| 62 |
+
f"{init_method}) is not supported. Must "
|
| 63 |
+
f"be either 'env' or 'tcp'."
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
if backend == "nccl":
|
| 67 |
+
# Same as in Ray Train
|
| 68 |
+
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "1"
|
| 69 |
+
# All workers on a same node should share the same set of
|
| 70 |
+
# visible GPUs. Otherwise they can't talk among themselves.
|
| 71 |
+
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gid) for gid in gpu_ids)
|
| 72 |
+
|
| 73 |
+
init_process_group_kwargs.update(
|
| 74 |
+
dict(
|
| 75 |
+
backend=backend,
|
| 76 |
+
init_method=url,
|
| 77 |
+
rank=rank,
|
| 78 |
+
world_size=world_size,
|
| 79 |
+
)
|
| 80 |
+
)
|
| 81 |
+
init_process_group_kwargs.setdefault("timeout", timedelta(seconds=1800))
|
| 82 |
+
|
| 83 |
+
dist.init_process_group(**init_process_group_kwargs)
|
| 84 |
+
|
| 85 |
+
os.environ["RANK"] = str(rank)
|
| 86 |
+
os.environ["LOCAL_RANK"] = str(local_rank)
|
| 87 |
+
os.environ["WORLD_SIZE"] = str(world_size)
|
| 88 |
+
os.environ["LOCAL_WORLD_SIZE"] = str(local_world_size)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def _get_node_and_gpu_ids():
    """Return this worker's Ray node id together with its visible GPU ids."""
    return ray.get_runtime_context().get_node_id(), ray.get_gpu_ids()
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def init_torch_dist_process_group(
    workers: List[ActorHandle],
    backend: str = "gloo",
    init_method: str = "env",
    **init_process_group_kwargs,
) -> List[int]:
    """Initialize a torch distributed process group.

    Note: this util assumes that the order of the workers passed in
    are their global ranks.

    Args:
        workers: A list of TorchDistributedWorker actors.
        backend: The torch distributed backend to use,
            possible choices are "gloo" or "nccl".
        init_method: The initialization method to use,
            possible choices are "env" or "tcp".
        init_process_group_kwargs: Additional kwargs to pass to the call to
            :meth:`torch.distributed.init_process_group`.

    Returns:
        Local ranks on their respective nodes for the list of workers.

    Raises:
        RuntimeError: If distributed torch is not available.
    """
    if not dist.is_available():
        raise RuntimeError("Distributed torch is not available.")

    # Build a map from node_id to workers on that node.
    node_and_gpu_ids = ray.get(
        [w.execute.remote(_get_node_and_gpu_ids) for w in workers]
    )
    # All the workers on a specific node.
    node_to_workers = defaultdict(list)
    # All the gpu ids visible to all the workers on a specific node.
    node_to_gpu_ids = defaultdict(set)
    for i, (node_id, gpu_ids) in enumerate(node_and_gpu_ids):
        node_to_workers[node_id].append(i)
        # Force list.
        if not isinstance(gpu_ids, list):
            gpu_ids = [gpu_ids]
        # It is possible for a worker to have access to multiple GPUs.
        for gpu_id in gpu_ids:
            node_to_gpu_ids[node_id].add(gpu_id)

    # Assume the first worker is the master.
    master_addr, master_port = ray.get(workers[0].execute.remote(get_address_and_port))

    setup_futures = []
    world_size = len(workers)
    local_ranks = []
    for rank, worker in enumerate(workers):
        node_id = node_and_gpu_ids[rank][0]
        local_rank = node_to_workers[node_id].index(rank)
        local_world_size = len(node_to_workers[node_id])
        setup_futures.append(
            worker.execute.remote(
                _init_torch_distributed,
                init_method=init_method,
                backend=backend,
                rank=rank,
                world_size=world_size,
                local_rank=local_rank,
                local_world_size=local_world_size,
                master_addr=master_addr,
                master_port=master_port,
                # Bug fix: `list(set(...))` does NOT guarantee ordering, so
                # CUDA_VISIBLE_DEVICES could differ between workers on the
                # same node. Sort explicitly so every worker on a node sees
                # the GPU ids in the same order.
                gpu_ids=sorted(node_to_gpu_ids[node_id]),
                **init_process_group_kwargs,
            )
        )
        local_ranks.append(local_rank)

    # Wait for all workers to join the process group.
    ray.get(setup_futures)

    return local_ranks
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _shutdown_torch_distributed():
    """Tear down the torch distributed process group on this worker.

    Also releases cached CUDA memory when GPUs are present.
    """
    dist.destroy_process_group()

    if not torch.cuda.is_available():
        return

    # Empty the CUDA cache on every device this worker was using.
    for device in get_devices():
        with torch.cuda.device(device):
            torch.cuda.empty_cache()
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def shutdown_torch_dist_process_group(workers: List[ActorHandle]):
    """Ask every worker to leave the process group, blocking until all have."""
    shutdown_refs = [
        worker.execute.remote(_shutdown_torch_distributed) for worker in workers
    ]
    ray.get(shutdown_refs)
|
infer_4_37_2/lib/python3.10/site-packages/ray/air/util/transform_pyarrow.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
|
| 2 |
+
import pyarrow
|
| 3 |
+
except ImportError:
|
| 4 |
+
pyarrow = None
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def _is_column_extension_type(ca: "pyarrow.ChunkedArray") -> bool:
    """Return True if the given Arrow Table column is backed by an
    extension array, i.e. its type is an Arrow extension type.
    """
    column_type = ca.type
    return isinstance(column_type, pyarrow.ExtensionType)
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def _concatenate_extension_column(ca: "pyarrow.ChunkedArray") -> "pyarrow.Array":
    """Concatenate chunks of an extension column into a contiguous array.

    This concatenation is required for creating copies and for .take() to work on
    extension arrays.
    See https://issues.apache.org/jira/browse/ARROW-16503.

    Args:
        ca: The chunked extension-typed column to concatenate.

    Returns:
        A single contiguous extension array containing all chunks' data.

    Raises:
        ValueError: If ``ca`` is not an extension-typed column.
    """
    from ray.air.util.tensor_extensions.arrow import (
        ArrowTensorArray,
        get_arrow_extension_tensor_types,
    )

    if not _is_column_extension_type(ca):
        # Bug fix: this message was a plain string literal, so "{ca}" was
        # never interpolated; make it an f-string.
        raise ValueError(f"Chunked array isn't an extension array: {ca}")

    tensor_extension_types = get_arrow_extension_tensor_types()

    if ca.num_chunks == 0:
        # Create empty storage array.
        storage = pyarrow.array([], type=ca.type.storage_type)
    elif isinstance(ca.type, tensor_extension_types):
        # Tensor extension arrays know how to concatenate themselves.
        return ArrowTensorArray._concat_same_type(ca.chunks)
    else:
        storage = pyarrow.concat_arrays([c.storage for c in ca.chunks])

    return ca.type.__arrow_ext_class__().from_storage(ca.type, storage)
|