file_path
stringlengths 3
280
| file_language
stringclasses 66
values | content
stringlengths 1
1.04M
| repo_name
stringlengths 5
92
| repo_stars
int64 0
154k
| repo_description
stringlengths 0
402
| repo_primary_language
stringclasses 108
values | developer_username
stringlengths 1
25
| developer_name
stringlengths 0
30
| developer_company
stringlengths 0
82
|
|---|---|---|---|---|---|---|---|---|---|
python/ray/tune/suggest/nevergrad.py
|
Python
|
import logging
import pickle
try:
import nevergrad as ng
except ImportError:
ng = None
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class NevergradSearch(SuggestionAlgorithm):
    """A wrapper around Nevergrad to provide trial suggestions.

    Requires Nevergrad to be installed.
    Nevergrad is an open source tool from Facebook for derivative free
    optimization of parameters and/or hyperparameters. It features a wide
    range of optimizers in a standard ask and tell interface. More information
    can be found at https://github.com/facebookresearch/nevergrad.

    Parameters:
        optimizer (nevergrad.optimization.Optimizer): Optimizer provided
            from Nevergrad.
        parameter_names (list): List of parameter names. Should match
            the dimension of the optimizer output. Alternatively, set to None
            if the optimizer is already instrumented with kwargs
            (see nevergrad v0.2.0+).
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        use_early_stopped_trials (bool): Whether to use early terminated
            trial results in the optimization process.

    Example:
        >>> from nevergrad.optimization import optimizerlib
        >>> instrumentation = 1
        >>> optimizer = optimizerlib.OnePlusOne(instrumentation, budget=100)
        >>> algo = NevergradSearch(optimizer, ["lr"], max_concurrent=4,
        >>>     metric="mean_loss", mode="min")

    Note:
        In nevergrad v0.2.0+, optimizers can be instrumented.
        For instance, the following specifies searching
        for "lr" from 1 to 2.

        >>> from nevergrad.optimization import optimizerlib
        >>> from nevergrad import instrumentation as inst
        >>> lr = inst.var.Array(1).bounded(1, 2).asfloat()
        >>> instrumentation = inst.Instrumentation(lr=lr)
        >>> optimizer = optimizerlib.OnePlusOne(instrumentation, budget=100)
        >>> algo = NevergradSearch(optimizer, None, max_concurrent=4,
        >>>     metric="mean_loss", mode="min")
    """

    def __init__(self,
                 optimizer,
                 parameter_names,
                 max_concurrent=10,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 **kwargs):
        assert ng is not None, "Nevergrad must be installed!"
        assert type(max_concurrent) is int and max_concurrent > 0
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Backwards compatibility: `reward_attr` implies maximization.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        self._max_concurrent = max_concurrent
        self._parameters = parameter_names
        self._metric = metric
        # nevergrad.tell internally minimizes, so "max" => -1
        if mode == "max":
            self._metric_op = -1.
        elif mode == "min":
            self._metric_op = 1.
        self._nevergrad_opt = optimizer
        # trial_id -> suggestion handed out by the optimizer's ask().
        self._live_trial_mapping = {}
        super(NevergradSearch, self).__init__(**kwargs)
        # validate parameters
        if hasattr(optimizer, "instrumentation"):  # added in v0.2.0
            if optimizer.instrumentation.kwargs:
                if optimizer.instrumentation.args:
                    raise ValueError(
                        "Instrumented optimizers should use kwargs only")
                if parameter_names is not None:
                    raise ValueError("Instrumented optimizers should provide "
                                     "None as parameter_names")
            else:
                if parameter_names is None:
                    raise ValueError("Non-instrumented optimizers should have "
                                     "a list of parameter_names")
                if len(optimizer.instrumentation.args) != 1:
                    raise ValueError(
                        "Instrumented optimizers should use kwargs only")
        if parameter_names is not None and optimizer.dimension != len(
                parameter_names):
            raise ValueError("len(parameters_names) must match optimizer "
                             "dimension for non-instrumented optimizers")

    def _suggest(self, trial_id):
        """Returns a config for `trial_id`, or None if at max concurrency."""
        if self._num_live_trials() >= self._max_concurrent:
            return None
        suggested_config = self._nevergrad_opt.ask()
        # Remember the raw suggestion so tell() can be called on completion.
        self._live_trial_mapping[trial_id] = suggested_config
        # in v0.2.0+, output of ask() is a Candidate,
        # with fields args and kwargs
        if hasattr(self._nevergrad_opt, "instrumentation"):
            if not suggested_config.kwargs:
                return dict(zip(self._parameters, suggested_config.args[0]))
            else:
                return suggested_config.kwargs
        # legacy: output of ask() is a np.ndarray
        return dict(zip(self._parameters, suggested_config))

    def on_trial_result(self, trial_id, result):
        # Intermediate results are ignored; only final results are told.
        pass

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial.

        The result is internally negated when interacting with Nevergrad
        so that Nevergrad Optimizers can "maximize" this value,
        as it minimizes on default.
        """
        if result:
            self._process_result(trial_id, result, early_terminated)
        self._live_trial_mapping.pop(trial_id)

    def _process_result(self, trial_id, result, early_terminated=False):
        """Reports a trial's final metric to the optimizer via tell()."""
        if early_terminated and self._use_early_stopped is False:
            return
        ng_trial_info = self._live_trial_mapping[trial_id]
        self._nevergrad_opt.tell(ng_trial_info,
                                 self._metric_op * result[self._metric])

    def _num_live_trials(self):
        # Count of suggestions handed out but not yet completed.
        return len(self._live_trial_mapping)

    def save(self, checkpoint_dir):
        """Pickles optimizer state and parameter names to `checkpoint_dir`.

        NOTE(review): despite the name, this is treated as a file path.
        """
        trials_object = (self._nevergrad_opt, self._parameters)
        with open(checkpoint_dir, "wb") as outputFile:
            pickle.dump(trials_object, outputFile)

    def restore(self, checkpoint_dir):
        """Restores optimizer state previously written by `save`."""
        with open(checkpoint_dir, "rb") as inputFile:
            trials_object = pickle.load(inputFile)
        self._nevergrad_opt = trials_object[0]
        self._parameters = trials_object[1]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/search.py
|
Python
|
class SearchAlgorithm:
    """Interface of an event handler API for hyperparameter search.

    Unlike TrialSchedulers, SearchAlgorithms will not have the ability
    to modify the execution (i.e., stop and pause trials).

    Trials added manually (i.e., via the Client API) will also notify
    this class upon new events, so custom search algorithms should
    maintain a list of trials ID generated from this class.

    See also: `ray.tune.suggest.BasicVariantGenerator`.
    """

    def add_configurations(self, experiments):
        """Tracks given experiment specifications.

        Arguments:
            experiments (Experiment | list | dict): Experiments to run.
        """
        raise NotImplementedError

    def next_trials(self):
        """Provides Trial objects to be queued into the TrialRunner.

        Returns:
            trials (list): Returns a list of trials.
        """
        raise NotImplementedError

    def on_trial_result(self, trial_id, result):
        """Called on each intermediate result returned by a trial.

        This will only be called when the trial is in the RUNNING state.

        Arguments:
            trial_id: Identifier for the trial.
        """
        pass

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial.

        Arguments:
            trial_id: Identifier for the trial.
            result (dict): Defaults to None. A dict will
                be provided with this notification when the trial is in
                the RUNNING state AND either completes naturally or
                by manual termination.
            error (bool): Defaults to False. True if the trial is in
                the RUNNING state and errors.
            early_terminated (bool): Defaults to False. True if the trial
                is stopped while in PAUSED or PENDING state.
        """
        pass

    def is_finished(self):
        """Returns True if no trials left to be queued into TrialRunner.

        Can return True before all trials have finished executing.
        """
        raise NotImplementedError
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/sigopt.py
|
Python
|
import copy
import os
import logging
import pickle
try:
import sigopt as sgo
except ImportError:
sgo = None
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class SigOptSearch(SuggestionAlgorithm):
    """A wrapper around SigOpt to provide trial suggestions.

    Requires SigOpt to be installed. Requires user to store their SigOpt
    API key locally as an environment variable at `SIGOPT_KEY`.

    Parameters:
        space (list of dict): SigOpt configuration. Parameters will be sampled
            from this configuration and will be used to override
            parameters generated in the variant generation process.
        name (str): Name of experiment. Required by SigOpt.
        max_concurrent (int): Number of maximum concurrent trials supported
            based on the user's SigOpt plan. Defaults to 1.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.

    Example:
        >>> space = [
        >>>     {
        >>>         'name': 'width',
        >>>         'type': 'int',
        >>>         'bounds': {
        >>>             'min': 0,
        >>>             'max': 20
        >>>         },
        >>>     },
        >>>     {
        >>>         'name': 'height',
        >>>         'type': 'int',
        >>>         'bounds': {
        >>>             'min': -100,
        >>>             'max': 100
        >>>         },
        >>>     },
        >>> ]
        >>> algo = SigOptSearch(
        >>>     space, name="SigOpt Example Experiment",
        >>>     max_concurrent=1, metric="mean_loss", mode="min")
    """

    def __init__(self,
                 space,
                 name="Default Tune Experiment",
                 max_concurrent=1,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 **kwargs):
        assert sgo is not None, "SigOpt must be installed!"
        assert type(max_concurrent) is int and max_concurrent > 0
        assert "SIGOPT_KEY" in os.environ, \
            "SigOpt API key must be stored as environ variable at SIGOPT_KEY"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Backwards compatibility: `reward_attr` implies maximization.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        if "use_early_stopped_trials" in kwargs:
            logger.warning(
                "`use_early_stopped_trials` is not used in SigOptSearch.")
        self._max_concurrent = max_concurrent
        self._metric = metric
        # SigOpt maximizes the reported value, so "min" flips the sign.
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        # trial_id -> SigOpt Suggestion object for in-flight trials.
        self._live_trial_mapping = {}
        # Create a connection with SigOpt API, requires API key
        self.conn = sgo.Connection(client_token=os.environ["SIGOPT_KEY"])
        self.experiment = self.conn.experiments().create(
            name=name,
            parameters=space,
            parallel_bandwidth=self._max_concurrent,
        )
        super(SigOptSearch, self).__init__(**kwargs)

    def _suggest(self, trial_id):
        """Returns assignments for `trial_id`, or None at max concurrency."""
        if self._num_live_trials() >= self._max_concurrent:
            return None
        # Get new suggestion from SigOpt
        suggestion = self.conn.experiments(
            self.experiment.id).suggestions().create()
        self._live_trial_mapping[trial_id] = suggestion
        # Deep-copy so callers cannot mutate the stored suggestion.
        return copy.deepcopy(suggestion.assignments)

    def on_trial_result(self, trial_id, result):
        # Intermediate results are ignored; only final results are reported.
        pass

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial.

        If a trial fails, it will be reported as a failed Observation, telling
        the optimizer that the Suggestion led to a metric failure, which
        updates the feasible region and improves parameter recommendation.

        Creates SigOpt Observation object for trial.
        """
        if result:
            self.conn.experiments(self.experiment.id).observations().create(
                suggestion=self._live_trial_mapping[trial_id].id,
                value=self._metric_op * result[self._metric],
            )
            # Update the experiment object
            self.experiment = self.conn.experiments(self.experiment.id).fetch()
        elif error or early_terminated:
            # Reports a failed Observation
            self.conn.experiments(self.experiment.id).observations().create(
                failed=True, suggestion=self._live_trial_mapping[trial_id].id)
        del self._live_trial_mapping[trial_id]

    def _num_live_trials(self):
        # Count of suggestions handed out but not yet completed.
        return len(self._live_trial_mapping)

    def save(self, checkpoint_dir):
        """Pickles the connection and experiment handle to `checkpoint_dir`.

        NOTE(review): despite the name, this is treated as a file path.
        """
        trials_object = (self.conn, self.experiment)
        with open(checkpoint_dir, "wb") as outputFile:
            pickle.dump(trials_object, outputFile)

    def restore(self, checkpoint_dir):
        """Restores state previously written by `save`."""
        with open(checkpoint_dir, "rb") as inputFile:
            trials_object = pickle.load(inputFile)
        self.conn = trials_object[0]
        self.experiment = trials_object[1]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/skopt.py
|
Python
|
import logging
import pickle
try:
import skopt as sko
except ImportError:
sko = None
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
def _validate_warmstart(parameter_names, points_to_evaluate,
evaluated_rewards):
if points_to_evaluate:
if not isinstance(points_to_evaluate, list):
raise TypeError(
"points_to_evaluate expected to be a list, got {}.".format(
type(points_to_evaluate)))
for point in points_to_evaluate:
if not isinstance(point, list):
raise TypeError(
"points_to_evaluate expected to include list, got {}.".
format(point))
if not len(point) == len(parameter_names):
raise ValueError("Dim of point {}".format(point) +
" and parameter_names {}".format(
parameter_names) + " do not match.")
if points_to_evaluate and evaluated_rewards:
if not isinstance(evaluated_rewards, list):
raise TypeError(
"evaluated_rewards expected to be a list, got {}.".format(
type(evaluated_rewards)))
if not len(evaluated_rewards) == len(points_to_evaluate):
raise ValueError(
"Dim of evaluated_rewards {}".format(evaluated_rewards) +
" and points_to_evaluate {}".format(points_to_evaluate) +
" do not match.")
class SkOptSearch(SuggestionAlgorithm):
    """A wrapper around skopt to provide trial suggestions.

    Requires skopt to be installed.

    Parameters:
        optimizer (skopt.optimizer.Optimizer): Optimizer provided
            from skopt.
        parameter_names (list): List of parameter names. Should match
            the dimension of the optimizer output.
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        points_to_evaluate (list of lists): A list of points you'd like to run
            first before sampling from the optimiser, e.g. these could be
            parameter configurations you already know work well to help
            the optimiser select good values. Each point is a list of the
            parameters using the order definition given by parameter_names.
        evaluated_rewards (list): If you have previously evaluated the
            parameters passed in as points_to_evaluate you can avoid
            re-running those trials by passing in the reward attributes
            as a list so the optimiser can be told the results without
            needing to re-compute the trial. Must be the same length as
            points_to_evaluate. (See tune/examples/skopt_example.py)
        use_early_stopped_trials (bool): Whether to use early terminated
            trial results in the optimization process.

    Example:
        >>> from skopt import Optimizer
        >>> optimizer = Optimizer([(0,20),(-100,100)])
        >>> current_best_params = [[10, 0], [15, -20]]
        >>> algo = SkOptSearch(optimizer,
        >>>     ["width", "height"],
        >>>     max_concurrent=4,
        >>>     metric="mean_loss",
        >>>     mode="min",
        >>>     points_to_evaluate=current_best_params)
    """

    def __init__(self,
                 optimizer,
                 parameter_names,
                 max_concurrent=10,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 points_to_evaluate=None,
                 evaluated_rewards=None,
                 **kwargs):
        assert sko is not None, """skopt must be installed!
            You can install Skopt with the command:
            `pip install scikit-optimize`."""
        assert type(max_concurrent) is int and max_concurrent > 0
        _validate_warmstart(parameter_names, points_to_evaluate,
                            evaluated_rewards)
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Backwards compatibility: `reward_attr` implies maximization.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        self._initial_points = []
        if points_to_evaluate and evaluated_rewards:
            # Known results: tell the optimizer directly, skip re-running.
            optimizer.tell(points_to_evaluate, evaluated_rewards)
        elif points_to_evaluate:
            # Unknown results: queue these points to be suggested first.
            self._initial_points = points_to_evaluate
        self._max_concurrent = max_concurrent
        self._parameters = parameter_names
        self._metric = metric
        # Skopt internally minimizes, so "max" => -1
        if mode == "max":
            self._metric_op = -1.
        elif mode == "min":
            self._metric_op = 1.
        self._skopt_opt = optimizer
        # trial_id -> point handed out (from ask() or initial points).
        self._live_trial_mapping = {}
        super(SkOptSearch, self).__init__(**kwargs)

    def _suggest(self, trial_id):
        """Returns a config for `trial_id`, or None if at max concurrency."""
        if self._num_live_trials() >= self._max_concurrent:
            return None
        # Drain queued warm-start points before asking the optimizer.
        if self._initial_points:
            suggested_config = self._initial_points[0]
            del self._initial_points[0]
        else:
            suggested_config = self._skopt_opt.ask()
        self._live_trial_mapping[trial_id] = suggested_config
        return dict(zip(self._parameters, suggested_config))

    def on_trial_result(self, trial_id, result):
        # Intermediate results are ignored; only final results are told.
        pass

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial.

        The result is internally negated when interacting with Skopt
        so that Skopt Optimizers can "maximize" this value,
        as it minimizes on default.
        """
        if result:
            self._process_result(trial_id, result, early_terminated)
        self._live_trial_mapping.pop(trial_id)

    def _process_result(self, trial_id, result, early_terminated=False):
        """Reports a trial's final metric to the optimizer via tell()."""
        if early_terminated and self._use_early_stopped is False:
            return
        skopt_trial_info = self._live_trial_mapping[trial_id]
        self._skopt_opt.tell(skopt_trial_info,
                             self._metric_op * result[self._metric])

    def _num_live_trials(self):
        # Count of suggestions handed out but not yet completed.
        return len(self._live_trial_mapping)

    def save(self, checkpoint_dir):
        """Pickles remaining warm-start points and optimizer state.

        NOTE(review): despite the name, this is treated as a file path.
        """
        trials_object = (self._initial_points, self._skopt_opt)
        with open(checkpoint_dir, "wb") as outputFile:
            pickle.dump(trials_object, outputFile)

    def restore(self, checkpoint_dir):
        """Restores state previously written by `save`."""
        with open(checkpoint_dir, "rb") as inputFile:
            trials_object = pickle.load(inputFile)
        self._initial_points = trials_object[0]
        self._skopt_opt = trials_object[1]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/suggestion.py
|
Python
|
import itertools
import copy
from ray.tune.error import TuneError
from ray.tune.experiment import convert_to_experiment_list
from ray.tune.config_parser import make_parser, create_trial_from_spec
from ray.tune.suggest.search import SearchAlgorithm
from ray.tune.suggest.variant_generator import format_vars, resolve_nested_dict
from ray.tune.trial import Trial
from ray.tune.utils import merge_dicts, flatten_dict
class SuggestionAlgorithm(SearchAlgorithm):
    """Abstract class for suggestion-based algorithms.

    Custom search algorithms can extend this class easily by overriding the
    `_suggest` method provide generated parameters for the trials.

    To track suggestions and their corresponding evaluations, the method
    `_suggest` will be passed a trial_id, which will be used in
    subsequent notifications.

    Example:
        >>> suggester = SuggestionAlgorithm()
        >>> suggester.add_configurations({ ... })
        >>> new_parameters = suggester._suggest()
        >>> suggester.on_trial_complete(trial_id, result)
        >>> better_parameters = suggester._suggest()
    """

    def __init__(self, use_early_stopped_trials=True):
        """Constructs a generator given experiment specifications.
        """
        self._parser = make_parser()
        # Chained generator of Trial objects; extended by add_configurations.
        self._trial_generator = []
        # Monotonic counter used to build unique experiment tags.
        self._counter = 0
        self._finished = False
        self._use_early_stopped = use_early_stopped_trials

    def add_configurations(self, experiments):
        """Chains generator given experiment specifications.

        Arguments:
            experiments (Experiment | list | dict): Experiments to run.
        """
        experiment_list = convert_to_experiment_list(experiments)
        for experiment in experiment_list:
            self._trial_generator = itertools.chain(
                self._trial_generator,
                self._generate_trials(experiment.spec, experiment.name))

    def next_trials(self):
        """Provides a batch of Trial objects to be queued into the TrialRunner.

        A batch ends when self._trial_generator returns None.

        Returns:
            trials (list): Returns a list of trials.
        """
        trials = []
        for trial in self._trial_generator:
            # None is the generator's "not ready yet" signal; end the batch
            # but keep the generator alive for the next call.
            if trial is None:
                return trials
            trials += [trial]
        # Generator exhausted without yielding None: no more trials ever.
        self._finished = True
        return trials

    def _generate_trials(self, experiment_spec, output_path=""):
        """Generates trials with configurations from `_suggest`.

        Creates a trial_id that is passed into `_suggest`.

        Yields:
            Trial objects constructed according to `spec`
        """
        if "run" not in experiment_spec:
            raise TuneError("Must specify `run` in {}".format(experiment_spec))
        for _ in range(experiment_spec.get("num_samples", 1)):
            trial_id = Trial.generate_id()
            while True:
                # Poll until the algorithm can produce a config; yielding
                # None lets next_trials() return early and retry later.
                suggested_config = self._suggest(trial_id)
                if suggested_config is None:
                    yield None
                else:
                    break
            spec = copy.deepcopy(experiment_spec)
            # Suggested values override those from the variant generator.
            spec["config"] = merge_dicts(spec["config"],
                                         copy.deepcopy(suggested_config))
            flattened_config = resolve_nested_dict(spec["config"])
            self._counter += 1
            tag = "{0}_{1}".format(
                str(self._counter), format_vars(flattened_config))
            yield create_trial_from_spec(
                spec,
                output_path,
                self._parser,
                evaluated_params=flatten_dict(suggested_config),
                experiment_tag=tag,
                trial_id=trial_id)

    def is_finished(self):
        """Returns True once every queued trial has been generated."""
        return self._finished

    def _suggest(self, trial_id):
        """Queries the algorithm to retrieve the next set of parameters.

        Arguments:
            trial_id: Trial ID used for subsequent notifications.

        Returns:
            dict|None: Configuration for a trial, if possible.
                Else, returns None, which will temporarily stop the
                TrialRunner from querying.

        Example:
            >>> suggester = SuggestionAlgorithm(max_concurrent=1)
            >>> suggester.add_configurations({ ... })
            >>> parameters_1 = suggester._suggest()
            >>> parameters_2 = suggester._suggest()
            >>> parameters_2 is None
            >>> suggester.on_trial_complete(trial_id, result)
            >>> parameters_2 = suggester._suggest()
            >>> parameters_2 is not None
        """
        raise NotImplementedError

    def save(self, checkpoint_dir):
        """Persists search state; must be implemented by subclasses."""
        raise NotImplementedError

    def restore(self, checkpoint_dir):
        """Restores search state; must be implemented by subclasses."""
        raise NotImplementedError
class _MockSuggestionAlgorithm(SuggestionAlgorithm):
    """Test double that records suggestion/result bookkeeping.

    Suggests a fixed config up to `max_concurrent` live trials (or none
    while `stall` is True) and counts result/complete notifications.
    """

    def __init__(self, max_concurrent=2, **kwargs):
        self._max_concurrent = max_concurrent
        # trial_id -> 1 for every live (suggested, not completed) trial.
        self.live_trials = {}
        self.counter = {"result": 0, "complete": 0}
        # Final results kept only for early-terminated trials (see below).
        self.final_results = []
        # When True, _suggest always returns None.
        self.stall = False
        self.results = []
        super(_MockSuggestionAlgorithm, self).__init__(**kwargs)

    def _suggest(self, trial_id):
        if len(self.live_trials) < self._max_concurrent and not self.stall:
            self.live_trials[trial_id] = 1
            return {"test_variable": 2}
        return None

    def on_trial_result(self, trial_id, result):
        self.counter["result"] += 1
        self.results += [result]

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        self.counter["complete"] += 1
        if result:
            self._process_result(result, early_terminated)
        del self.live_trials[trial_id]

    def _process_result(self, result, early_terminated):
        # Only records early-terminated results when the algorithm is
        # configured to use them (mirrors real searchers' gating).
        if early_terminated and self._use_early_stopped:
            self.final_results += [result]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/variant_generator.py
|
Python
|
import copy
import logging
import numpy
import random
import types
from ray.tune import TuneError
from ray.tune.sample import sample_from
logger = logging.getLogger(__name__)
def generate_variants(unresolved_spec):
    """Generates variants from a spec (dict) with unresolved values.

    There are two types of unresolved values:

    Grid search: These define a grid search over values. For example, the
    following grid search values in a spec will produce six distinct
    variants in combination:

        "activation": grid_search(["relu", "tanh"])
        "learning_rate": grid_search([1e-3, 1e-4, 1e-5])

    Lambda functions: These are evaluated to produce a concrete value, and
    can express dependencies or conditional distributions between values.
    They can also be used to express random search (e.g., by calling
    into the `random` or `np` module).

        "cpu": lambda spec: spec.config.num_workers
        "batch_size": lambda spec: random.uniform(1, 1000)

    Finally, to support defining specs in plain JSON / YAML, grid search
    and lambda functions can also be defined alternatively as follows:

        "activation": {"grid_search": ["relu", "tanh"]}
        "cpu": {"eval": "spec.config.num_workers"}

    Use `format_vars` to format the returned dict of hyperparameters.

    Yields:
        (Dict of resolved variables, Spec object)
    """
    for resolved_vars, spec in _generate_variants(unresolved_spec):
        # Sanity check: the recursive resolver must leave nothing unresolved.
        assert not _unresolved_values(spec)
        yield resolved_vars, spec
def grid_search(values):
    """Convenience method for specifying grid search over a value.

    Arguments:
        values: An iterable whose parameters will be gridded.

    Returns:
        dict: The marker form ``{"grid_search": values}`` understood by
        the variant generator.
    """
    return dict(grid_search=values)
_STANDARD_IMPORTS = {
"random": random,
"np": numpy,
}
_MAX_RESOLUTION_PASSES = 20
def resolve_nested_dict(nested_dict):
    """Flattens a nested dict by joining keys into tuple of paths.

    Can then be passed into `format_vars`.
    """
    flat = {}
    for key, value in nested_dict.items():
        if not isinstance(value, dict):
            flat[(key, )] = value
            continue
        # Recurse and prepend this key to each child path.
        for child_path, leaf in resolve_nested_dict(value).items():
            flat[(key, ) + child_path] = leaf
    return flat
def format_vars(resolved_vars):
    """Formats the resolved variable dict into a single string."""
    rendered = []
    for path, value in sorted(resolved_vars.items()):
        # TrialRunner already includes these in the experiment_tag.
        if path[0] in ["run", "env", "resources_per_trial"]:
            continue
        # Walk the path from deepest to shallowest, keeping every integer
        # index but only the single deepest string key.
        parts = []
        want_string_key = True
        for key in reversed(path):
            if isinstance(key, int):
                parts.append(str(key))
            elif want_string_key:
                want_string_key = False
                parts.append(key)
        parts.reverse()
        rendered.append(
            _clean_value("_".join(parts)) + "=" + _clean_value(value))
    return ",".join(rendered)
def flatten_resolved_vars(resolved_vars):
    """Formats the resolved variable dict into a mapping of (str -> value)."""
    flattened = {}
    for path, value in resolved_vars.items():
        # Drop the leading "config" segment so keys are relative to it.
        segments = path[1:] if path[0] == "config" else path
        flattened["/".join(str(segment) for segment in segments)] = value
    return flattened
def _clean_value(value):
if isinstance(value, float):
return "{:.5}".format(value)
else:
return str(value).replace("/", "_")
def _generate_variants(spec):
    """Recursively resolves grid-search and lambda values in `spec`.

    Yields:
        (dict mapping key-path tuples to resolved values, resolved spec)
    """
    spec = copy.deepcopy(spec)
    unresolved = _unresolved_values(spec)
    if not unresolved:
        yield {}, spec
        return
    # Partition unresolved values: callables are evaluated lazily (they may
    # reference other spec values); everything else is a grid value list.
    grid_vars = []
    lambda_vars = []
    for path, value in unresolved.items():
        if isinstance(value, types.FunctionType):
            lambda_vars.append((path, value))
        else:
            grid_vars.append((path, value))
    grid_vars.sort()
    grid_search = _grid_search_generator(spec, grid_vars)
    for resolved_spec in grid_search:
        resolved_vars = _resolve_lambda_vars(resolved_spec, lambda_vars)
        # Recurse in case lambda evaluation produced new unresolved values.
        for resolved, spec in _generate_variants(resolved_spec):
            for path, value in grid_vars:
                resolved_vars[path] = _get_value(spec, path)
            for k, v in resolved.items():
                # A key resolved differently at two levels of recursion is
                # ambiguous and cannot be reported as a single value.
                if (k in resolved_vars and v != resolved_vars[k]
                        and _is_resolved(resolved_vars[k])):
                    raise ValueError(
                        "The variable `{}` could not be unambiguously "
                        "resolved to a single value. Consider simplifying "
                        "your configuration.".format(k))
                resolved_vars[k] = v
            yield resolved_vars, spec
def _assign_value(spec, path, value):
for k in path[:-1]:
spec = spec[k]
spec[path[-1]] = value
def _get_value(spec, path):
for k in path:
spec = spec[k]
return spec
def _resolve_lambda_vars(spec, lambda_vars):
    """Evaluates lambda variables in-place, iterating to a fixed point.

    Repeated passes let lambdas that read other lambda values succeed once
    their dependencies have been assigned; gives up after
    _MAX_RESOLUTION_PASSES passes and re-raises the dependency error.

    Returns:
        dict mapping key-path tuples to the evaluated values.
    """
    resolved = {}
    error = True
    num_passes = 0
    while error and num_passes < _MAX_RESOLUTION_PASSES:
        num_passes += 1
        error = False
        for path, fn in lambda_vars:
            try:
                # The guard raises RecursiveDependencyError if fn touches a
                # value that is itself still unresolved.
                value = fn(_UnresolvedAccessGuard(spec))
            except RecursiveDependencyError as e:
                error = e
            except Exception:
                raise ValueError(
                    "Failed to evaluate expression: {}: {}".format(path, fn))
            else:
                _assign_value(spec, path, value)
                resolved[path] = value
    if error:
        raise error
    return resolved
def _grid_search_generator(unresolved_spec, grid_vars):
    """Yields one spec copy per point in the grid-vars cross product.

    Works like an odometer: `value_indices` holds the current position
    along each grid axis; `increment` advances position 0 with carry and
    returns True once every combination has been emitted.
    """
    value_indices = [0] * len(grid_vars)

    def increment(i):
        # Advance axis i; on wrap-around, reset it and carry into axis i+1.
        value_indices[i] += 1
        if value_indices[i] >= len(grid_vars[i][1]):
            value_indices[i] = 0
            if i + 1 < len(value_indices):
                return increment(i + 1)
            else:
                return True
        return False

    if not grid_vars:
        yield unresolved_spec
        return
    while value_indices[-1] < len(grid_vars[-1][1]):
        spec = copy.deepcopy(unresolved_spec)
        for i, (path, values) in enumerate(grid_vars):
            _assign_value(spec, path, values[value_indices[i]])
        yield spec
        if grid_vars:
            done = increment(0)
            if done:
                break
def _is_resolved(v):
    """Returns True if `v` is a concrete value (not grid/eval/sample)."""
    resolved, _ = _try_resolve(v)
    return resolved
def _try_resolve(v):
    """Classifies a spec value as resolved or unresolved.

    Returns:
        (True, v) if `v` is already concrete; otherwise (False, x) where
        x is the sampling callable or the grid-search value list.

    Raises:
        TuneError: if a grid_search marker does not hold a list.
    """
    if isinstance(v, sample_from):
        # Function to sample from
        return False, v.func
    elif isinstance(v, dict) and len(v) == 1 and "eval" in v:
        # Lambda function in eval syntax
        return False, lambda spec: eval(
            v["eval"], _STANDARD_IMPORTS, {"spec": spec})
    elif isinstance(v, dict) and len(v) == 1 and "grid_search" in v:
        # Grid search values
        grid_values = v["grid_search"]
        if not isinstance(grid_values, list):
            raise TuneError(
                "Grid search expected list of values, got: {}".format(
                    grid_values))
        return False, grid_values
    return True, v
def _unresolved_values(spec):
    """Collects all unresolved values in `spec`, keyed by key-path tuple.

    Recurses into nested dicts and lists; list elements appear in the
    path as their integer index.
    """
    found = {}
    for k, v in spec.items():
        resolved, v = _try_resolve(v)
        if not resolved:
            found[(k, )] = v
        elif isinstance(v, dict):
            # Recurse into a dict
            for (path, value) in _unresolved_values(v).items():
                found[(k, ) + path] = value
        elif isinstance(v, list):
            # Recurse into a list
            for i, elem in enumerate(v):
                for (path, value) in _unresolved_values({i: elem}).items():
                    found[(k, ) + path] = value
    return found
class _UnresolvedAccessGuard(dict):
    """Dict wrapper that raises when an unresolved value is read.

    Passed to lambda variables during resolution so that a lambda touching
    a not-yet-resolved value triggers another resolution pass instead of
    silently reading a placeholder.
    """

    def __init__(self, *args, **kwds):
        super(_UnresolvedAccessGuard, self).__init__(*args, **kwds)
        # Alias __dict__ to the dict itself so keys read as attributes
        # (enables `spec.config.num_workers`-style access in lambdas).
        self.__dict__ = self

    def __getattribute__(self, item):
        value = dict.__getattribute__(self, item)
        if not _is_resolved(value):
            raise RecursiveDependencyError(
                "`{}` recursively depends on {}".format(item, value))
        elif isinstance(value, dict):
            # Wrap nested dicts so the guard applies recursively.
            return _UnresolvedAccessGuard(value)
        else:
            return value
class RecursiveDependencyError(Exception):
    """Raised when lambda spec variables depend on unresolved values."""

    def __init__(self, msg):
        super(RecursiveDependencyError, self).__init__(msg)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/sync_client.py
|
Python
|
import distutils
import distutils.spawn
import logging
import subprocess
import tempfile
import types
from shlex import quote
from ray.tune.error import TuneError
logger = logging.getLogger(__name__)
S3_PREFIX = "s3://"
GS_PREFIX = "gs://"
ALLOWED_REMOTE_PREFIXES = (S3_PREFIX, GS_PREFIX)
noop_template = ": {target}" # noop in bash
def noop(*args):
    """Do-nothing sync/delete function; accepts and ignores any arguments."""
    return None
def get_sync_client(sync_function, delete_function=None):
    """Returns a sync client.

    Args:
        sync_function (Optional[str|function]): Sync function.
        delete_function (Optional[str|function]): Delete function. Must be
            the same type as sync_function if it is provided.

    Returns:
        None if `sync_function` is None, otherwise a FunctionBasedClient
        (callable input) or CommandBasedClient (string template input).

    Raises:
        ValueError if sync_function or delete_function are malformed.
    """
    if sync_function is None:
        return None
    # Mixing a callable with a command string is ambiguous; reject it.
    if delete_function and type(sync_function) != type(delete_function):
        raise ValueError("Sync and delete functions must be of same type.")
    if isinstance(sync_function, types.FunctionType):
        delete_function = delete_function or noop
        client_cls = FunctionBasedClient
    elif isinstance(sync_function, str):
        delete_function = delete_function or noop_template
        client_cls = CommandBasedClient
    else:
        raise ValueError("Sync function {} must be string or function".format(
            sync_function))
    # The same function/template is used for both up- and down-sync.
    return client_cls(sync_function, sync_function, delete_function)
def get_cloud_sync_client(remote_path):
    """Returns a CommandBasedClient that can sync to/from remote storage.

    Args:
        remote_path (str): Path to remote storage (S3 or GS).

    Raises:
        ValueError if malformed remote_dir.
    """
    if remote_path.startswith(S3_PREFIX):
        # S3 sync requires the AWS CLI on PATH.
        if not distutils.spawn.find_executable("aws"):
            raise ValueError(
                "Upload uri starting with '{}' requires awscli tool"
                " to be installed".format(S3_PREFIX))
        template = "aws s3 sync {source} {target} --only-show-errors"
        delete_template = "aws s3 rm {target} --recursive --only-show-errors"
    elif remote_path.startswith(GS_PREFIX):
        # GCS sync requires gsutil on PATH.
        if not distutils.spawn.find_executable("gsutil"):
            raise ValueError(
                "Upload uri starting with '{}' requires gsutil tool"
                " to be installed".format(GS_PREFIX))
        template = "gsutil rsync -r {source} {target}"
        delete_template = "gsutil rm -r {target}"
    else:
        raise ValueError("Upload uri must start with one of: {}"
                         "".format(ALLOWED_REMOTE_PREFIXES))
    return CommandBasedClient(template, template, delete_template)
class SyncClient:
    """Client interface for interacting with remote storage options."""

    def sync_up(self, source, target):
        """Syncs up from source to target.

        Args:
            source (str): Source path.
            target (str): Target path.

        Returns:
            True if sync initiation successful, False otherwise.
        """
        raise NotImplementedError

    def sync_down(self, source, target):
        """Syncs down from source to target.

        Args:
            source (str): Source path.
            target (str): Target path.

        Returns:
            True if sync initiation successful, False otherwise.
        """
        raise NotImplementedError

    def delete(self, target):
        """Deletes target.

        Args:
            target (str): Target path.

        Returns:
            True if delete initiation successful, False otherwise.
        """
        raise NotImplementedError

    def wait(self):
        """Waits for current sync to complete, if asynchronously started."""
        pass

    def reset(self):
        """Resets state."""
        pass
class FunctionBasedClient(SyncClient):
    """SyncClient that delegates each operation to a user-supplied callable."""

    def __init__(self, sync_up_func, sync_down_func, delete_func=None):
        self.sync_up_func = sync_up_func
        self.sync_down_func = sync_down_func
        # Deletion is optional; fall back to a do-nothing callable.
        self.delete_func = delete_func if delete_func else noop

    def sync_up(self, source, target):
        self.sync_up_func(source, target)
        return True

    def sync_down(self, source, target):
        self.sync_down_func(source, target)
        return True

    def delete(self, target):
        self.delete_func(target)
        return True


# Shared client whose operations all do nothing.
NOOP = FunctionBasedClient(noop, noop)
class CommandBasedClient(SyncClient):
    """SyncClient that shells out to user-provided command templates."""

    def __init__(self,
                 sync_up_template,
                 sync_down_template,
                 delete_template=noop_template):
        """Syncs between two directories with the given command.

        Arguments:
            sync_up_template (str): A runnable string template; needs to
                include replacement fields '{source}' and '{target}'.
            sync_down_template (str): A runnable string template; needs to
                include replacement fields '{source}' and '{target}'.
            delete_template (Optional[str]): A runnable string template; needs
                to include replacement field '{target}'. Noop by default.
        """
        self._validate_sync_string(sync_up_template)
        self._validate_sync_string(sync_down_template)
        self.sync_up_template = sync_up_template
        self.sync_down_template = sync_down_template
        self.delete_template = delete_template
        # Command stdout is redirected here once set_logdir() is called.
        self.logfile = None
        # Popen handle of the currently running command, if any.
        self.cmd_process = None

    def set_logdir(self, logdir):
        """Sets the directory to log sync execution output in.

        Args:
            logdir (str): Log directory.
        """
        self.logfile = tempfile.NamedTemporaryFile(
            prefix="log_sync_out", dir=logdir, suffix=".log", delete=False)

    def sync_up(self, source, target):
        """Starts the sync-up command. Returns True if started."""
        return self._execute(self.sync_up_template, source, target)

    def sync_down(self, source, target):
        """Starts the sync-down command. Returns True if started."""
        return self._execute(self.sync_down_template, source, target)

    def delete(self, target):
        """Starts the delete command asynchronously.

        Returns:
            True if started, False if another command is still running.
        """
        if self.is_running:
            logger.warning("Last sync client cmd still in progress, skipping.")
            return False
        final_cmd = self.delete_template.format(target=quote(target))
        logger.debug("Running delete: {}".format(final_cmd))
        self.cmd_process = subprocess.Popen(
            final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
        return True

    def wait(self):
        """Waits for the currently running command, if any, to finish.

        Raises:
            TuneError: if the command exited with a nonzero code.
        """
        if self.cmd_process:
            _, error_msg = self.cmd_process.communicate()
            # Fix: this previously decoded as "ascii", which raised
            # UnicodeDecodeError (masking the real sync failure) whenever
            # the sync tool emitted non-ASCII output, e.g. localized error
            # text or non-ASCII file names. "replace" keeps this best-effort.
            error_msg = error_msg.decode("utf-8", errors="replace")
            code = self.cmd_process.returncode
            args = self.cmd_process.args
            self.cmd_process = None
            if code != 0:
                raise TuneError("Sync error. Ran command: {}\n"
                                "Error message ({}): {}".format(
                                    args, code, error_msg))

    def reset(self):
        """Drops the handle to the running command without waiting for it."""
        if self.is_running:
            logger.warning("Sync process still running but resetting anyways.")
        self.cmd_process = None

    @property
    def is_running(self):
        """Returns whether a sync or delete process is running."""
        if self.cmd_process:
            # poll() refreshes returncode without blocking.
            self.cmd_process.poll()
            return self.cmd_process.returncode is None
        return False

    def _execute(self, sync_template, source, target):
        """Executes sync_template on source and target."""
        if self.is_running:
            logger.warning("Last sync client cmd still in progress, skipping.")
            return False
        final_cmd = sync_template.format(
            source=quote(source), target=quote(target))
        logger.debug("Running sync: {}".format(final_cmd))
        self.cmd_process = subprocess.Popen(
            final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
        return True

    @staticmethod
    def _validate_sync_string(sync_string):
        """Raises ValueError unless sync_string is a usable sync template."""
        if not isinstance(sync_string, str):
            raise ValueError("{} is not a string.".format(sync_string))
        if "{source}" not in sync_string:
            raise ValueError("Sync template missing '{source}'.")
        if "{target}" not in sync_string:
            raise ValueError("Sync template missing '{target}'.")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/syncer.py
|
Python
|
import distutils
import logging
import os
import time
from shlex import quote
from ray import services
from ray.tune.cluster_info import get_ssh_key, get_ssh_user
from ray.tune.sync_client import (CommandBasedClient, get_sync_client,
get_cloud_sync_client, NOOP)
logger = logging.getLogger(__name__)
SYNC_PERIOD = 300
_log_sync_warned = False
_syncers = {}
def wait_for_sync():
    """Blocks until every registered syncer finishes its in-flight sync."""
    for active_syncer in _syncers.values():
        active_syncer.wait()
def log_sync_template(options=""):
    """Template enabling syncs between driver and worker when possible.

    Requires ray cluster to be started with the autoscaler. Also requires
    rsync to be installed.

    Args:
        options (str): Additional rsync options.

    Returns:
        Sync template with source and target parameters. None if rsync
        unavailable.
    """
    global _log_sync_warned
    if not distutils.spawn.find_executable("rsync"):
        logger.error("Log sync requires rsync to be installed.")
        return None
    ssh_key = get_ssh_key()
    if ssh_key is None:
        # Warn about missing cluster setup only once per process.
        if not _log_sync_warned:
            logger.debug("Log sync requires cluster to be setup with "
                         "`ray up`.")
            _log_sync_warned = True
        return None
    remote_shell = (
        "ssh -i {} -o ConnectTimeout=120s -o StrictHostKeyChecking=no".format(
            quote(ssh_key)))
    # Leave {source}/{target} as replacement fields for the sync client.
    return "rsync {} -savz -e {} {{source}} {{target}}".format(
        options, quote(remote_shell))
class Syncer:
    def __init__(self, local_dir, remote_dir, sync_client=NOOP):
        """Syncs between two directories with the sync_function.

        Arguments:
            local_dir (str): Directory to sync. Uniquely identifies the syncer.
            remote_dir (str): Remote directory to sync with.
            sync_client (SyncClient): Client for syncing between local_dir and
                remote_dir. Defaults to a Noop.
        """
        # Normalize to a trailing-slash path so rsync-style tools sync
        # directory contents rather than the directory itself.
        self._local_dir = os.path.join(local_dir, "") if local_dir \
            else local_dir
        self._remote_dir = remote_dir
        self.last_sync_up_time = float("-inf")
        self.last_sync_down_time = float("-inf")
        self.sync_client = sync_client

    def sync_up_if_needed(self):
        """Syncs up only if the last sync-up is older than SYNC_PERIOD."""
        if time.time() - self.last_sync_up_time > SYNC_PERIOD:
            self.sync_up()

    def sync_down_if_needed(self):
        """Syncs down only if the last sync-down is older than SYNC_PERIOD."""
        if time.time() - self.last_sync_down_time > SYNC_PERIOD:
            self.sync_down()

    def sync_up(self):
        """Attempts to start the sync-up to the remote path.

        Returns:
            Whether the sync (if feasible) was successfully started.
        """
        if not self.validate_hosts(self._local_dir, self._remote_path):
            return False
        try:
            started = self.sync_client.sync_up(self._local_dir,
                                               self._remote_path)
            self.last_sync_up_time = time.time()
            return started
        except Exception:
            logger.exception("Sync execution failed.")
            return False

    def sync_down(self):
        """Attempts to start the sync-down from the remote path.

        Returns:
            Whether the sync (if feasible) was successfully started.
        """
        if not self.validate_hosts(self._local_dir, self._remote_path):
            return False
        try:
            started = self.sync_client.sync_down(self._remote_path,
                                                 self._local_dir)
            self.last_sync_down_time = time.time()
            return started
        except Exception:
            logger.exception("Sync execution failed.")
            return False

    def validate_hosts(self, source, target):
        """Returns True when both endpoints are non-empty."""
        if source and target:
            return True
        logger.debug("Source or target is empty, skipping log sync for "
                     "{}".format(self._local_dir))
        return False

    def wait(self):
        """Waits for the sync client to complete the current sync."""
        self.sync_client.wait()

    def reset(self):
        """Clears sync timestamps and resets the client's state."""
        self.last_sync_up_time = float("-inf")
        self.last_sync_down_time = float("-inf")
        self.sync_client.reset()

    @property
    def _remote_path(self):
        return self._remote_dir
class NodeSyncer(Syncer):
    """Syncer for syncing files to/from a remote dir to a local dir."""

    def __init__(self, local_dir, remote_dir, sync_client):
        self.local_ip = services.get_node_ip_address()
        self.worker_ip = None
        super().__init__(local_dir, remote_dir, sync_client)

    def set_worker_ip(self, worker_ip):
        """Sets the worker IP to sync logs from."""
        self.worker_ip = worker_ip

    def has_remote_target(self):
        """Returns whether the Syncer has a remote target."""
        if not self.worker_ip:
            logger.debug("Worker IP unknown, skipping sync for %s",
                         self._local_dir)
            return False
        if self.worker_ip == self.local_ip:
            logger.debug("Worker IP is local IP, skipping sync for %s",
                         self._local_dir)
            return False
        return True

    def sync_up_if_needed(self):
        if not self.has_remote_target():
            return True
        return super().sync_up_if_needed()

    def sync_down_if_needed(self):
        if not self.has_remote_target():
            return True
        return super().sync_down_if_needed()

    def sync_up_to_new_location(self, worker_ip):
        """Re-targets the syncer at ``worker_ip`` and forces a sync-up."""
        # A same-IP call is a no-op besides the warning.
        if worker_ip == self.worker_ip:
            logger.warning("Sync attempted to same IP %s.", worker_ip)
            return
        logger.debug("Setting new worker IP to %s", worker_ip)
        self.set_worker_ip(worker_ip)
        self.reset()
        if not self.sync_up():
            logger.warning(
                "Sync up to new location skipped. This should not occur.")

    def sync_up(self):
        if not self.has_remote_target():
            return True
        return super().sync_up()

    def sync_down(self):
        if not self.has_remote_target():
            return True
        logger.debug("Syncing from %s to %s", self._remote_path,
                     self._local_dir)
        return super().sync_down()

    @property
    def _remote_path(self):
        global _log_sync_warned
        ssh_user = get_ssh_user()
        if not self.has_remote_target():
            return None
        if ssh_user is None:
            # Warn about missing cluster setup only once per process.
            if not _log_sync_warned:
                logger.error("Syncer requires cluster to be setup with "
                             "`ray up`.")
                _log_sync_warned = True
            return None
        return "{}@{}:{}/".format(ssh_user, self.worker_ip, self._remote_dir)
def get_cloud_syncer(local_dir, remote_dir=None, sync_function=None):
    """Returns a Syncer.

    This syncer is in charge of syncing the local_dir with upload_dir.

    Args:
        local_dir (str): Source directory for syncing.
        remote_dir (str): Target directory for syncing. If not provided, a
            no-op Syncer is returned.
        sync_function (func | str): Function for syncing the local_dir to
            remote_dir. If string, then it must be a string template for
            syncer to run. If not provided, it defaults
            to standard S3 or gsutil sync commands.

    Raises:
        ValueError if malformed remote_dir.
    """
    key = (local_dir, remote_dir)
    if key not in _syncers:
        if not remote_dir:
            client = NOOP
        else:
            # Prefer a user-supplied sync function; otherwise fall back to
            # the standard cloud (S3/GS) command client.
            client = get_sync_client(sync_function) or \
                get_cloud_sync_client(remote_dir)
        _syncers[key] = Syncer(local_dir, remote_dir, client)
    return _syncers[key]
def get_node_syncer(local_dir, remote_dir=None, sync_function=None):
    """Returns a NodeSyncer.

    Args:
        local_dir (str): Source directory for syncing.
        remote_dir (str): Target directory for syncing. If not provided, a
            noop Syncer is returned.
        sync_function (func|str|bool): Function for syncing the local_dir to
            remote_dir. If string, then it must be a string template for
            syncer to run. If True or not provided, it defaults rsync. If
            False, a noop Syncer is returned.
    """
    key = (local_dir, remote_dir)
    if key in _syncers:
        return _syncers[key]
    if not remote_dir or sync_function is False:
        client = NOOP
    elif sync_function and sync_function is not True:
        client = get_sync_client(sync_function)
    else:
        # Default: rsync over ssh, if available on this machine.
        template = log_sync_template()
        if template:
            client = CommandBasedClient(template, template)
            client.set_logdir(local_dir)
        else:
            client = NOOP
    _syncers[key] = NodeSyncer(local_dir, remote_dir, client)
    return _syncers[key]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/example.py
|
Python
|
# flake8: noqa
# This is an example quickstart for Tune.
# To connect to a cluster, uncomment below:
# import ray
# import argparse
# parser = argparse.ArgumentParser()
# parser.add_argument("--address")
# args = parser.parse_args()
# ray.init(address=args.address)
# __quick_start_begin__
import torch.optim as optim
from ray import tune
from ray.tune.examples.mnist_pytorch import get_data_loaders, ConvNet, train, test
def train_mnist(config):
    """Tune trainable: trains ConvNet on MNIST, reporting accuracy each epoch.

    Args:
        config (dict): Hyperparameters; only ``config["lr"]`` (the SGD
            learning rate) is used here.
    """
    train_loader, test_loader = get_data_loaders()
    model = ConvNet()
    optimizer = optim.SGD(model.parameters(), lr=config["lr"])
    for i in range(10):
        train(model, optimizer, train_loader)
        acc = test(model, test_loader)
        # Report the epoch's test accuracy back to Tune.
        tune.track.log(mean_accuracy=acc)


# Launch one trial per learning rate in the grid.
analysis = tune.run(
    train_mnist, config={"lr": tune.grid_search([0.001, 0.01, 0.1])})

print("Best config: ", analysis.get_best_config(metric="mean_accuracy"))

# Get a dataframe for analyzing trial results.
df = analysis.dataframe()
# __quick_start_end__
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_actor_reuse.py
|
Python
|
import unittest
import ray
from ray.tune import Trainable, run_experiments
from ray.tune.error import TuneError
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
class FrequentPausesScheduler(FIFOScheduler):
    """FIFO scheduler that requests a PAUSE for every reported result.

    Forcing a pause on each result repeatedly cycles trials through
    pause/resume, which is what the actor-reuse tests below exercise.
    """

    def on_trial_result(self, trial_runner, trial, result):
        # Unconditionally pause, regardless of the result contents.
        return TrialScheduler.PAUSE
def create_resettable_class():
    """Builds a Trainable subclass that counts how often it has been reset."""

    class MyResettableClass(Trainable):
        def _setup(self, config):
            self.config = config
            self.num_resets = 0
            self.iter = 0

        def _train(self):
            self.iter += 1
            # Done after the second training call.
            return dict(num_resets=self.num_resets, done=self.iter > 1)

        def _save(self, chkpt_dir):
            return dict(iter=self.iter)

        def _restore(self, item):
            self.iter = item["iter"]

        def reset_config(self, new_config):
            # The marker key simulates a trainable that cannot be reset.
            supported = "fake_reset_not_supported" not in self.config
            if supported:
                self.num_resets += 1
            return supported

    return MyResettableClass
class ActorReuseTest(unittest.TestCase):
    """Checks trial-actor reuse behavior under frequent pauses."""

    def setUp(self):
        ray.init(num_cpus=1, num_gpus=0)

    def tearDown(self):
        ray.shutdown()

    def _run_trials(self, reuse_actors, **spec_extras):
        # All tests run the same experiment shape; only the reuse flag and
        # a few spec overrides differ.
        spec = {
            "run": create_resettable_class(),
            "num_samples": 4,
            "config": {},
        }
        spec.update(spec_extras)
        return run_experiments(
            {
                "foo": spec
            },
            reuse_actors=reuse_actors,
            scheduler=FrequentPausesScheduler())

    def testTrialReuseDisabled(self):
        trials = self._run_trials(reuse_actors=False)
        self.assertEqual([t.last_result["num_resets"] for t in trials],
                         [0, 0, 0, 0])

    def testTrialReuseEnabled(self):
        trials = self._run_trials(reuse_actors=True)
        self.assertEqual([t.last_result["num_resets"] for t in trials],
                         [1, 2, 3, 4])

    def testTrialReuseEnabledError(self):
        def run():
            self._run_trials(
                reuse_actors=True,
                max_failures=1,
                config={"fake_reset_not_supported": True})

        self.assertRaises(TuneError, run)
if __name__ == "__main__":
    import pytest
    import sys
    # Run this file's tests via pytest and propagate its exit status.
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_api.py
|
Python
|
import shutil
import copy
import os
import time
import unittest
from unittest.mock import patch
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune import DurableTrainable, Trainable, TuneError
from ray.tune import register_env, register_trainable, run_experiments
from ray.tune.schedulers import TrialScheduler, FIFOScheduler
from ray.tune.trial import Trial
from ray.tune.result import (TIMESTEPS_TOTAL, DONE, HOSTNAME, NODE_IP, PID,
EPISODES_TOTAL, TRAINING_ITERATION,
TIMESTEPS_THIS_ITER, TIME_THIS_ITER_S,
TIME_TOTAL_S, TRIAL_ID, EXPERIMENT_TAG)
from ray.tune.logger import Logger
from ray.tune.experiment import Experiment
from ray.tune.resources import Resources
from ray.tune.suggest import grid_search
from ray.tune.suggest.suggestion import _MockSuggestionAlgorithm
from ray.tune.utils import (flatten_dict, get_pinned_object,
pin_in_object_store)
from ray.tune.utils.mock import mock_storage_client, MOCK_REMOTE_DIR
class TrainableFunctionApiTest(unittest.TestCase):
def setUp(self):
ray.init(num_cpus=4, num_gpus=0, object_store_memory=150 * 1024 * 1024)
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def checkAndReturnConsistentLogs(self, results, sleep_per_iter=None):
"""Checks logging is the same between APIs.
Ignore "DONE" for logging but checks that the
scheduler is notified properly with the last result.
"""
class_results = copy.deepcopy(results)
function_results = copy.deepcopy(results)
class_output = []
function_output = []
scheduler_notif = []
class MockScheduler(FIFOScheduler):
def on_trial_complete(self, runner, trial, result):
scheduler_notif.append(result)
class ClassAPILogger(Logger):
def on_result(self, result):
class_output.append(result)
class FunctionAPILogger(Logger):
def on_result(self, result):
function_output.append(result)
class _WrappedTrainable(Trainable):
def _setup(self, config):
del config
self._result_iter = copy.deepcopy(class_results)
def _train(self):
if sleep_per_iter:
time.sleep(sleep_per_iter)
res = self._result_iter.pop(0) # This should not fail
if not self._result_iter: # Mark "Done" for last result
res[DONE] = True
return res
def _function_trainable(config, reporter):
for result in function_results:
if sleep_per_iter:
time.sleep(sleep_per_iter)
reporter(**result)
class_trainable_name = "class_trainable"
register_trainable(class_trainable_name, _WrappedTrainable)
trials = run_experiments(
{
"function_api": {
"run": _function_trainable,
"loggers": [FunctionAPILogger],
},
"class_api": {
"run": class_trainable_name,
"loggers": [ClassAPILogger],
},
},
raise_on_failed_trial=False,
scheduler=MockScheduler())
# Ignore these fields
NO_COMPARE_FIELDS = {
HOSTNAME,
NODE_IP,
TRIAL_ID,
EXPERIMENT_TAG,
PID,
TIME_THIS_ITER_S,
TIME_TOTAL_S,
DONE, # This is ignored because FunctionAPI has different handling
"timestamp",
"time_since_restore",
"experiment_id",
"date",
}
self.assertEqual(len(class_output), len(results))
self.assertEqual(len(function_output), len(results))
def as_comparable_result(result):
return {
k: v
for k, v in result.items() if k not in NO_COMPARE_FIELDS
}
function_comparable = [
as_comparable_result(result) for result in function_output
]
class_comparable = [
as_comparable_result(result) for result in class_output
]
self.assertEqual(function_comparable, class_comparable)
self.assertEqual(sum(t.get(DONE) for t in scheduler_notif), 2)
self.assertEqual(
as_comparable_result(scheduler_notif[0]),
as_comparable_result(scheduler_notif[1]))
# Make sure the last result is the same.
self.assertEqual(
as_comparable_result(trials[0].last_result),
as_comparable_result(trials[1].last_result))
return function_output, trials
def testPinObject(self):
X = pin_in_object_store("hello")
@ray.remote
def f():
return get_pinned_object(X)
self.assertEqual(ray.get(f.remote()), "hello")
def testFetchPinned(self):
X = pin_in_object_store("hello")
def train(config, reporter):
get_pinned_object(X)
reporter(timesteps_total=100, done=True)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100)
def testRegisterEnv(self):
register_env("foo", lambda: None)
self.assertRaises(TypeError, lambda: register_env("foo", 2))
def testRegisterEnvOverwrite(self):
def train(config, reporter):
reporter(timesteps_total=100, done=True)
def train2(config, reporter):
reporter(timesteps_total=200, done=True)
register_trainable("f1", train)
register_trainable("f1", train2)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 200)
def testRegisterTrainable(self):
def train(config, reporter):
pass
class A:
pass
class B(Trainable):
pass
register_trainable("foo", train)
Experiment("test", train)
register_trainable("foo", B)
Experiment("test", B)
self.assertRaises(TypeError, lambda: register_trainable("foo", B()))
self.assertRaises(TuneError, lambda: Experiment("foo", B()))
self.assertRaises(TypeError, lambda: register_trainable("foo", A))
self.assertRaises(TypeError, lambda: Experiment("foo", A))
def testTrainableCallable(self):
def dummy_fn(config, reporter, steps):
reporter(timesteps_total=steps, done=True)
from functools import partial
steps = 500
register_trainable("test", partial(dummy_fn, steps=steps))
[trial] = run_experiments({
"foo": {
"run": "test",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps)
[trial] = tune.run(partial(dummy_fn, steps=steps)).trials
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], steps)
def testBuiltInTrainableResources(self):
class B(Trainable):
@classmethod
def default_resource_request(cls, config):
return Resources(cpu=config["cpu"], gpu=config["gpu"])
def _train(self):
return {"timesteps_this_iter": 1, "done": True}
register_trainable("B", B)
def f(cpus, gpus, queue_trials):
return run_experiments(
{
"foo": {
"run": "B",
"config": {
"cpu": cpus,
"gpu": gpus,
},
}
},
queue_trials=queue_trials)[0]
# Should all succeed
self.assertEqual(f(0, 0, False).status, Trial.TERMINATED)
self.assertEqual(f(1, 0, True).status, Trial.TERMINATED)
self.assertEqual(f(1, 0, True).status, Trial.TERMINATED)
# Too large resource request
self.assertRaises(TuneError, lambda: f(100, 100, False))
self.assertRaises(TuneError, lambda: f(0, 100, False))
self.assertRaises(TuneError, lambda: f(100, 0, False))
# TODO(ekl) how can we test this is queued (hangs)?
# f(100, 0, True)
def testRewriteEnv(self):
def train(config, reporter):
reporter(timesteps_total=1)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
"env": "CartPole-v0",
}
})
self.assertEqual(trial.config["env"], "CartPole-v0")
def testConfigPurity(self):
def train(config, reporter):
assert config == {"a": "b"}, config
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"config": {
"a": "b"
},
}
})
def testLogdir(self):
def train(config, reporter):
assert "/tmp/logdir/foo" in os.getcwd(), os.getcwd()
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": "/tmp/logdir",
"config": {
"a": "b"
},
}
})
def testLogdirStartingWithTilde(self):
local_dir = "~/ray_results/local_dir"
def train(config, reporter):
cwd = os.getcwd()
assert cwd.startswith(os.path.expanduser(local_dir)), cwd
assert not cwd.startswith("~"), cwd
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": local_dir,
"config": {
"a": "b"
},
}
})
def testLongFilename(self):
def train(config, reporter):
assert "/tmp/logdir/foo" in os.getcwd(), os.getcwd()
reporter(timesteps_total=1)
register_trainable("f1", train)
run_experiments({
"foo": {
"run": "f1",
"local_dir": "/tmp/logdir",
"config": {
"a" * 50: tune.sample_from(lambda spec: 5.0 / 7),
"b" * 50: tune.sample_from(lambda spec: "long" * 40),
},
}
})
def testBadParams(self):
def f():
run_experiments({"foo": {}})
self.assertRaises(TuneError, f)
def testBadParams2(self):
def f():
run_experiments({
"foo": {
"run": "asdf",
"bah": "this param is not allowed",
}
})
self.assertRaises(TuneError, f)
def testBadParams3(self):
def f():
run_experiments({
"foo": {
"run": grid_search("invalid grid search"),
}
})
self.assertRaises(TuneError, f)
def testBadParams4(self):
def f():
run_experiments({
"foo": {
"run": "asdf",
}
})
self.assertRaises(TuneError, f)
def testBadParams5(self):
def f():
run_experiments({"foo": {"run": "PPO", "stop": {"asdf": 1}}})
self.assertRaises(TuneError, f)
def testBadParams6(self):
def f():
run_experiments({
"foo": {
"run": "PPO",
"resources_per_trial": {
"asdf": 1
}
}
})
self.assertRaises(TuneError, f)
def testBadStoppingReturn(self):
def train(config, reporter):
reporter()
register_trainable("f1", train)
def f():
run_experiments({
"foo": {
"run": "f1",
"stop": {
"time": 10
},
}
})
self.assertRaises(TuneError, f)
def testNestedStoppingReturn(self):
def train(config, reporter):
for i in range(10):
reporter(test={"test1": {"test2": i}})
with self.assertRaises(TuneError):
[trial] = tune.run(
train, stop={
"test": {
"test1": {
"test2": 6
}
}
}).trials
[trial] = tune.run(train, stop={"test/test1/test2": 6}).trials
self.assertEqual(trial.last_result["training_iteration"], 7)
def testStoppingFunction(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
def stop(trial_id, result):
return result["test"] > 6
[trial] = tune.run(train, stop=stop).trials
self.assertEqual(trial.last_result["training_iteration"], 8)
def testStoppingMemberFunction(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
class Stopper:
def stop(self, trial_id, result):
return result["test"] > 6
[trial] = tune.run(train, stop=Stopper().stop).trials
self.assertEqual(trial.last_result["training_iteration"], 8)
def testBadStoppingFunction(self):
def train(config, reporter):
for i in range(10):
reporter(test=i)
class Stopper:
def stop(self, result):
return result["test"] > 6
def stop(result):
return result["test"] > 6
with self.assertRaises(ValueError):
tune.run(train, stop=Stopper().stop)
with self.assertRaises(ValueError):
tune.run(train, stop=stop)
def testEarlyReturn(self):
def train(config, reporter):
reporter(timesteps_total=100, done=True)
time.sleep(99999)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 100)
def testReporterNoUsage(self):
def run_task(config, reporter):
print("hello")
experiment = Experiment(run=run_task, name="ray_crash_repro")
[trial] = ray.tune.run(experiment).trials
print(trial.last_result)
self.assertEqual(trial.last_result[DONE], True)
def testErrorReturn(self):
def train(config, reporter):
raise Exception("uh oh")
register_trainable("f1", train)
def f():
run_experiments({
"foo": {
"run": "f1",
}
})
self.assertRaises(TuneError, f)
def testSuccess(self):
def train(config, reporter):
for i in range(100):
reporter(timesteps_total=i)
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)
def testNoRaiseFlag(self):
def train(config, reporter):
raise Exception()
register_trainable("f1", train)
[trial] = run_experiments(
{
"foo": {
"run": "f1",
}
}, raise_on_failed_trial=False)
self.assertEqual(trial.status, Trial.ERROR)
def testReportInfinity(self):
def train(config, reporter):
for i in range(100):
reporter(mean_accuracy=float("inf"))
register_trainable("f1", train)
[trial] = run_experiments({
"foo": {
"run": "f1",
}
})
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result["mean_accuracy"], float("inf"))
def testNestedResults(self):
def create_result(i):
return {"test": {"1": {"2": {"3": i, "4": False}}}}
flattened_keys = list(flatten_dict(create_result(0)))
class _MockScheduler(FIFOScheduler):
results = []
def on_trial_result(self, trial_runner, trial, result):
self.results += [result]
return TrialScheduler.CONTINUE
def on_trial_complete(self, trial_runner, trial, result):
self.complete_result = result
def train(config, reporter):
for i in range(100):
reporter(**create_result(i))
algo = _MockSuggestionAlgorithm()
scheduler = _MockScheduler()
[trial] = tune.run(
train,
scheduler=scheduler,
search_alg=algo,
stop={
"test/1/2/3": 20
}).trials
self.assertEqual(trial.status, Trial.TERMINATED)
self.assertEqual(trial.last_result["test"]["1"]["2"]["3"], 20)
self.assertEqual(trial.last_result["test"]["1"]["2"]["4"], False)
self.assertEqual(trial.last_result[TRAINING_ITERATION], 21)
self.assertEqual(len(scheduler.results), 20)
self.assertTrue(
all(
set(result) >= set(flattened_keys)
for result in scheduler.results))
self.assertTrue(set(scheduler.complete_result) >= set(flattened_keys))
self.assertEqual(len(algo.results), 20)
self.assertTrue(
all(set(result) >= set(flattened_keys) for result in algo.results))
with self.assertRaises(TuneError):
[trial] = tune.run(train, stop={"1/2/3": 20})
with self.assertRaises(TuneError):
[trial] = tune.run(train, stop={"test": 1}).trials
def testReportTimeStep(self):
# Test that no timestep count are logged if never the Trainable never
# returns any.
results1 = [dict(mean_accuracy=5, done=i == 99) for i in range(100)]
logs1, _ = self.checkAndReturnConsistentLogs(results1)
self.assertTrue(all(log[TIMESTEPS_TOTAL] is None for log in logs1))
# Test that no timesteps_this_iter are logged if only timesteps_total
# are returned.
results2 = [dict(timesteps_total=5, done=i == 9) for i in range(10)]
logs2, _ = self.checkAndReturnConsistentLogs(results2)
# Re-run the same trials but with added delay. This is to catch some
# inconsistent timestep counting that was present in the multi-threaded
# FunctionRunner. This part of the test can be removed once the
# multi-threaded FunctionRunner is removed from ray/tune.
# TODO: remove once the multi-threaded function runner is gone.
logs2, _ = self.checkAndReturnConsistentLogs(results2, 0.5)
# check all timesteps_total report the same value
self.assertTrue(all(log[TIMESTEPS_TOTAL] == 5 for log in logs2))
# check that none of the logs report timesteps_this_iter
self.assertFalse(
any(hasattr(log, TIMESTEPS_THIS_ITER) for log in logs2))
# Test that timesteps_total and episodes_total are reported when
# timesteps_this_iter and episodes_this_iter despite only return zeros.
results3 = [
dict(timesteps_this_iter=0, episodes_this_iter=0)
for i in range(10)
]
logs3, _ = self.checkAndReturnConsistentLogs(results3)
self.assertTrue(all(log[TIMESTEPS_TOTAL] == 0 for log in logs3))
self.assertTrue(all(log[EPISODES_TOTAL] == 0 for log in logs3))
# Test that timesteps_total and episodes_total are properly counted
# when timesteps_this_iter and episodes_this_iter report non-zero
# values.
results4 = [
dict(timesteps_this_iter=3, episodes_this_iter=i)
for i in range(10)
]
logs4, _ = self.checkAndReturnConsistentLogs(results4)
# The last reported result should not be double-logged.
self.assertEqual(logs4[-1][TIMESTEPS_TOTAL], 30)
self.assertNotEqual(logs4[-2][TIMESTEPS_TOTAL],
logs4[-1][TIMESTEPS_TOTAL])
self.assertEqual(logs4[-1][EPISODES_TOTAL], 45)
self.assertNotEqual(logs4[-2][EPISODES_TOTAL],
logs4[-1][EPISODES_TOTAL])
def testAllValuesReceived(self):
results1 = [
dict(timesteps_total=(i + 1), my_score=i**2, done=i == 4)
for i in range(5)
]
logs1, _ = self.checkAndReturnConsistentLogs(results1)
# check if the correct number of results were reported
self.assertEqual(len(logs1), len(results1))
def check_no_missing(reported_result, result):
common_results = [reported_result[k] == result[k] for k in result]
return all(common_results)
# check that no result was dropped or modified
complete_results = [
check_no_missing(log, result)
for log, result in zip(logs1, results1)
]
self.assertTrue(all(complete_results))
# check if done was logged exactly once
self.assertEqual(len([r for r in logs1 if r.get("done")]), 1)
def testNoDoneReceived(self):
# repeat same test but without explicitly reporting done=True
results1 = [
dict(timesteps_total=(i + 1), my_score=i**2) for i in range(5)
]
logs1, trials = self.checkAndReturnConsistentLogs(results1)
# check if the correct number of results were reported.
self.assertEqual(len(logs1), len(results1))
def check_no_missing(reported_result, result):
common_results = [reported_result[k] == result[k] for k in result]
return all(common_results)
# check that no result was dropped or modified
complete_results1 = [
check_no_missing(log, result)
for log, result in zip(logs1, results1)
]
self.assertTrue(all(complete_results1))
def testDurableTrainable(self):
class TestTrain(DurableTrainable):
def _setup(self, config):
self.state = {"hi": 1, "iter": 0}
def _train(self):
self.state["iter"] += 1
return {"timesteps_this_iter": 1, "done": True}
def _save(self, path):
return self.state
def _restore(self, state):
self.state = state
sync_client = mock_storage_client()
mock_get_client = "ray.tune.durable_trainable.get_cloud_sync_client"
with patch(mock_get_client) as mock_get_cloud_sync_client:
mock_get_cloud_sync_client.return_value = sync_client
test_trainable = TestTrain(remote_checkpoint_dir=MOCK_REMOTE_DIR)
checkpoint_path = test_trainable.save()
test_trainable.train()
test_trainable.state["hi"] = 2
test_trainable.restore(checkpoint_path)
self.assertEqual(test_trainable.state["hi"], 1)
self.addCleanup(shutil.rmtree, MOCK_REMOTE_DIR)
def testCheckpointDict(self):
    """Dict-style checkpoints round-trip through save()/restore()."""

    class TestTrain(Trainable):
        def _setup(self, config):
            self.state = {"hi": 1}

        def _train(self):
            return {"timesteps_this_iter": 1, "done": True}

        def _save(self, path):
            return self.state

        def _restore(self, state):
            self.state = state

    trainable = TestTrain()
    checkpoint = trainable.save()
    trainable.state["hi"] = 2
    # restore() must discard the in-memory mutation above.
    trainable.restore(checkpoint)
    self.assertEqual(trainable.state["hi"], 1)

    trials = run_experiments({
        "foo": {
            "run": TestTrain,
            "checkpoint_at_end": True
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertTrue(trial.has_checkpoint())
def testMultipleCheckpoints(self):
    """Distinct checkpoints capture distinct snapshots of trainable state."""

    class TestTrain(Trainable):
        def _setup(self, config):
            self.state = {"hi": 1, "iter": 0}

        def _train(self):
            self.state["iter"] += 1
            return {"timesteps_this_iter": 1, "done": True}

        def _save(self, path):
            return self.state

        def _restore(self, state):
            self.state = state

    trainable = TestTrain()
    first = trainable.save()
    trainable.train()
    second = trainable.save()
    self.assertNotEqual(first, second)

    # Each checkpoint restores the iteration count it was taken at.
    trainable.restore(second)
    self.assertEqual(trainable.state["iter"], 1)
    trainable.restore(first)
    self.assertEqual(trainable.state["iter"], 0)

    trials = run_experiments({
        "foo": {
            "run": TestTrain,
            "checkpoint_at_end": True
        }
    })
    for trial in trials:
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertTrue(trial.has_checkpoint())
def testIterationCounter(self):
    """training_iteration is counted independently of user-reported keys."""

    def train(config, reporter):
        for step in range(100):
            reporter(itr=step, timesteps_this_iter=1)

    register_trainable("exp", train)
    experiments = {
        "my_exp": {
            "run": "exp",
            "config": {
                "iterations": 100,
            },
            "stop": {
                "timesteps_total": 100
            },
        }
    }
    [trial] = run_experiments(experiments)
    self.assertEqual(trial.status, Trial.TERMINATED)
    # 100 reports -> 100 iterations; "itr" keeps the last reported value.
    self.assertEqual(trial.last_result[TRAINING_ITERATION], 100)
    self.assertEqual(trial.last_result["itr"], 99)
if __name__ == "__main__":
    # Allow running this test file directly.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_automl_searcher.py
|
Python
|
import random
import unittest
from ray.tune import register_trainable
from ray.tune.automl import SearchSpace, DiscreteSpace, GridSearch
class AutoMLSearcherTest(unittest.TestCase):
    """Exercises GridSearch expansion, round bookkeeping and best-trial pick."""

    def setUp(self):
        def dummy_train(config, reporter):
            reporter(timesteps_total=100, done=True)

        register_trainable("f1", dummy_train)

    @staticmethod
    def _searcher():
        # Builds a fresh 2x2 grid searcher preloaded with the test experiment.
        exp = {"test-exp": {"run": "f1", "config": {"a": {"d": "dummy"}}}}
        space = SearchSpace([
            DiscreteSpace("a.b.c", [1, 2]),
            DiscreteSpace("a.d", ["a", "b"]),
        ])
        searcher = GridSearch(space, "reward")
        searcher.add_configurations(exp)
        return searcher

    def testExpandSearchSpace(self):
        searcher = self._searcher()
        trials = searcher.next_trials()
        # 2 x 2 grid -> 4 generated trials.
        self.assertEqual(len(trials), 4)
        self.assertTrue(trials[0].config["a"]["b"]["c"] in [1, 2])
        self.assertTrue(trials[1].config["a"]["d"] in ["a", "b"])

    def testSearchRound(self):
        searcher = self._searcher()
        trials = searcher.next_trials()
        # The whole round is handed out at once; nothing more until done.
        self.assertEqual(len(searcher.next_trials()), 0)
        for trial in trials[1:]:
            searcher.on_trial_complete(trial.trial_id)
        # One errored trial still counts toward finishing the round.
        searcher.on_trial_complete(trials[0].trial_id, error=True)
        self.assertTrue(searcher.is_finished())

    def testBestTrial(self):
        searcher = self._searcher()
        trials = searcher.next_trials()
        self.assertEqual(len(searcher.next_trials()), 0)
        for i, trial in enumerate(trials):
            rewards = list(range(i, i + 10))
            random.shuffle(rewards)
            for reward in rewards:
                searcher.on_trial_result(trial.trial_id, {"reward": reward})
        # The last trial saw the highest rewards, so it must win.
        best_trial = searcher.get_best_trial()
        self.assertEqual(best_trial, trials[-1])
        self.assertEqual(best_trial.best_result["reward"], 3 + 10 - 1)
if __name__ == "__main__":
    # Allow running this test file directly.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_checkpoint_manager.py
|
Python
|
# coding: utf-8
import random
import sys
import unittest
from unittest.mock import patch
from ray.tune.checkpoint_manager import Checkpoint, CheckpointManager, logger
class CheckpointManagerTest(unittest.TestCase):
    """Covers ordering, retention and error handling in CheckpointManager."""

    @staticmethod
    def mock_result(i):
        return {"i": i}

    def checkpoint_manager(self, keep_checkpoints_num):
        # delete_fn is a no-op; deletion is observed via mock patching.
        return CheckpointManager(
            keep_checkpoints_num, "i", delete_fn=lambda c: None)

    def testOnCheckpointOrdered(self):
        """
        Tests increasing priorities. Also tests that the worst checkpoints
        are deleted when necessary.
        """
        keep_checkpoints_num = 2
        manager = self.checkpoint_manager(keep_checkpoints_num)
        checkpoints = [
            Checkpoint(Checkpoint.PERSISTENT, {i}, self.mock_result(i))
            for i in range(3)
        ]

        with patch.object(manager, "delete") as delete_mock:
            for j, checkpoint in enumerate(checkpoints):
                manager.on_checkpoint(checkpoint)
                # Only the third insert overflows keep_checkpoints_num=2.
                expected_deletes = 1 if j == 2 else 0
                self.assertEqual(delete_mock.call_count, expected_deletes, j)
                self.assertEqual(manager.newest_checkpoint, checkpoint)

        best = manager.best_checkpoints()
        self.assertEqual(len(best), keep_checkpoints_num)
        self.assertIn(checkpoints[1], best)
        self.assertIn(checkpoints[2], best)

    def testOnCheckpointUnordered(self):
        """
        Tests priorities that aren't inserted in ascending order. Also tests
        that the worst checkpoints are deleted when necessary.
        """
        keep_checkpoints_num = 2
        manager = self.checkpoint_manager(keep_checkpoints_num)
        checkpoints = [
            Checkpoint(Checkpoint.PERSISTENT, {i}, self.mock_result(i))
            for i in range(3, -1, -1)
        ]

        with patch.object(manager, "delete") as delete_mock:
            for j, checkpoint in enumerate(checkpoints):
                manager.on_checkpoint(checkpoint)
                # Only the fourth insert overflows keep_checkpoints_num=2.
                expected_deletes = 1 if j == 3 else 0
                self.assertEqual(delete_mock.call_count, expected_deletes)
                self.assertEqual(manager.newest_checkpoint, checkpoint)

        best = manager.best_checkpoints()
        self.assertEqual(len(best), keep_checkpoints_num)
        self.assertIn(checkpoints[0], best)
        self.assertIn(checkpoints[1], best)

    def testBestCheckpoints(self):
        """
        Tests that the best checkpoints are tracked and ordered correctly.
        """
        keep_checkpoints_num = 4
        manager = self.checkpoint_manager(keep_checkpoints_num)
        checkpoints = [
            Checkpoint(Checkpoint.MEMORY, i, self.mock_result(i))
            for i in range(16)
        ]
        random.shuffle(checkpoints)

        for checkpoint in checkpoints:
            manager.on_checkpoint(checkpoint)

        best = manager.best_checkpoints()
        self.assertEqual(len(best), keep_checkpoints_num)
        # Scores 12..15 survive, returned in ascending order.
        for rank, checkpoint in enumerate(best):
            self.assertEqual(checkpoint.value, rank + 12)

    def testOnCheckpointUnavailableAttribute(self):
        """
        Tests that an error is logged when the associated result of the
        checkpoint has no checkpoint score attribute.
        """
        manager = self.checkpoint_manager(keep_checkpoints_num=1)

        no_attr_checkpoint = Checkpoint(Checkpoint.MEMORY, 0, {})
        with patch.object(logger, "error") as log_error_mock:
            manager.on_checkpoint(no_attr_checkpoint)
            log_error_mock.assert_called_once()
            # The newest checkpoint should still be set despite this error.
            assert manager.newest_checkpoint == no_attr_checkpoint
if __name__ == "__main__":
    # Allow running this test file directly.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_cluster.py
|
Python
|
import inspect
import json
import time
import os
import pytest
import shutil
import sys
from unittest.mock import MagicMock, patch
import ray
from ray import tune
from ray.rllib import _register_all
from ray.cluster_utils import Cluster
from ray.test_utils import run_string_as_driver_nonblocking
from ray.tune import register_trainable
from ray.tune.experiment import Experiment
from ray.tune.error import TuneError
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.resources import Resources
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.syncer import Syncer
from ray.tune.trainable import TrainableUtil
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.utils.mock import (MockDurableTrainer, MockRemoteTrainer,
MockNodeSyncer, mock_storage_client,
MOCK_REMOTE_DIR)
def _start_new_cluster():
    """Starts a one-CPU head node, connects, and registers mock trainables."""
    cluster = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "num_cpus": 1,
            # Shorten the heartbeat timeout so node-failure tests run fast.
            "_internal_config": json.dumps({
                "num_heartbeats_timeout": 10
            })
        })
    # Pytest doesn't play nicely with imports
    register_trainable("__fake_remote", MockRemoteTrainer)
    register_trainable("__fake_durable", MockDurableTrainer)
    _register_all()
    return cluster
@pytest.fixture
def start_connected_cluster():
    """Yields a connected one-CPU cluster; tears everything down after."""
    # Start the Ray processes.
    cluster = _start_new_cluster()
    yield cluster
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
@pytest.fixture
def start_connected_emptyhead_cluster():
    """Starts head with no resources."""
    # Tests add worker nodes explicitly, so the head holds zero CPUs.
    cluster = Cluster(
        initialize_head=True,
        connect=True,
        head_node_args={
            "num_cpus": 0,
            # Shorten the heartbeat timeout so node-failure tests run fast.
            "_internal_config": json.dumps({
                "num_heartbeats_timeout": 10
            })
        })
    # Pytest doesn't play nicely with imports
    _register_all()
    register_trainable("__fake_remote", MockRemoteTrainer)
    register_trainable("__fake_durable", MockDurableTrainer)
    yield cluster
    # The code after the yield will run as teardown code.
    ray.shutdown()
    cluster.shutdown()
def test_counting_resources(start_connected_cluster):
    """Tests that Tune accounting is consistent with actual cluster."""
    cluster = start_connected_cluster
    nodes = []
    # The head node from the fixture contributes exactly one CPU.
    assert ray.cluster_resources()["CPU"] == 1
    runner = TrialRunner(BasicVariantGenerator())
    kwargs = {"stopping_criterion": {"training_iteration": 10}}

    trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
    for t in trials:
        runner.add_trial(t)

    runner.step()  # run 1

    # Grow to two CPUs, then shrink back to one before the next step.
    nodes += [cluster.add_node(num_cpus=1)]
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 2
    cluster.remove_node(nodes.pop())
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 1

    runner.step()  # run 2
    # With a single CPU, only one trial can be running at a time.
    assert sum(t.status == Trial.RUNNING for t in runner.get_trials()) == 1

    for i in range(5):
        nodes += [cluster.add_node(num_cpus=1)]
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 6

    runner.step()  # 1 result
    # Extra capacity lets the second trial start as well.
    assert sum(t.status == Trial.RUNNING for t in runner.get_trials()) == 2
def test_trial_processed_after_node_failure(start_connected_emptyhead_cluster):
    """Tests that Tune processes a trial as failed if its node died."""
    cluster = start_connected_emptyhead_cluster
    node = cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()

    runner = TrialRunner(BasicVariantGenerator())
    # Spy on the failure handler while preserving its original behavior.
    mock_process_failure = MagicMock(side_effect=runner._process_trial_failure)
    runner._process_trial_failure = mock_process_failure

    runner.add_trial(Trial("__fake"))
    runner.step()
    runner.step()
    assert not mock_process_failure.called

    # Kill the only worker node; subsequent step(s) must route the trial
    # through _process_trial_failure.
    cluster.remove_node(node)
    runner.step()
    if not mock_process_failure.called:
        runner.step()
    assert mock_process_failure.called
def test_remove_node_before_result(start_connected_emptyhead_cluster):
    """Tune continues when node is removed before trial returns."""
    cluster = start_connected_emptyhead_cluster
    node = cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()

    runner = TrialRunner(BasicVariantGenerator())
    kwargs = {
        "stopping_criterion": {
            "training_iteration": 3
        },
        "checkpoint_freq": 2,
        "max_failures": 2
    }
    trial = Trial("__fake", **kwargs)
    runner.add_trial(trial)

    runner.step()  # run 1
    assert trial.status == Trial.RUNNING
    # Swap the worker node out from under the running trial.
    cluster.remove_node(node)
    cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()
    assert ray.cluster_resources()["CPU"] == 1

    # The trial recovers on the replacement node and runs to completion.
    for i in range(3):
        runner.step()

    assert trial.status == Trial.TERMINATED
    # All trials done: a further step has nothing left to run.
    with pytest.raises(TuneError):
        runner.step()
def test_queue_trials(start_connected_emptyhead_cluster):
    """Tests explicit oversubscription for autoscaling.

    Tune oversubscribes a trial when `queue_trials=True`, but
    does not block other trials from running.
    """
    cluster = start_connected_emptyhead_cluster
    runner = TrialRunner()

    def create_trial(cpu, gpu=0):
        # Trial needing the given resources; stops after 3 iterations.
        kwargs = {
            "resources": Resources(cpu=cpu, gpu=gpu),
            "stopping_criterion": {
                "training_iteration": 3
            }
        }
        return Trial("__fake", **kwargs)

    runner.add_trial(create_trial(cpu=1))
    # Without queue_trials, an unsatisfiable trial is a hard error.
    with pytest.raises(TuneError):
        runner.step()  # run 1
    del runner

    executor = RayTrialExecutor(queue_trials=True)
    runner = TrialRunner(trial_executor=executor)
    cluster.add_node(num_cpus=2)
    cluster.wait_for_nodes()

    cpu_only = create_trial(cpu=1)
    runner.add_trial(cpu_only)
    runner.step()  # add cpu_only trial

    gpu_trial = create_trial(cpu=1, gpu=1)
    runner.add_trial(gpu_trial)
    runner.step()  # queue gpu_trial

    # This tests that the cpu_only trial should bypass the queued trial.
    for i in range(3):
        runner.step()
    assert cpu_only.status == Trial.TERMINATED
    assert gpu_trial.status == Trial.RUNNING

    # Scale up; the queued GPU trial can now actually run.
    cluster.add_node(num_cpus=1, num_gpus=1)
    cluster.wait_for_nodes()

    for i in range(3):
        runner.step()
    assert gpu_trial.status == Trial.TERMINATED
@pytest.mark.parametrize("trainable_id", ["__fake", "__fake_durable"])
def test_trial_migration(start_connected_emptyhead_cluster, trainable_id):
    """Removing a node while cluster has space should migrate trial.

    The trial state should also be consistent with the checkpoint.
    """
    cluster = start_connected_emptyhead_cluster
    node = cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()

    runner = TrialRunner(BasicVariantGenerator())
    kwargs = {
        "stopping_criterion": {
            "training_iteration": 4
        },
        "checkpoint_freq": 2,
        "max_failures": 2,
        "remote_checkpoint_dir": MOCK_REMOTE_DIR,
        # Durable trainables sync via cloud storage, not the driver.
        "sync_to_driver_fn": trainable_id == "__fake",
    }

    # Test recovery of trial that hasn't been checkpointed
    t = Trial(trainable_id, **kwargs)
    runner.add_trial(t)
    runner.step()  # start
    runner.step()  # 1 result
    assert t.last_result
    node2 = cluster.add_node(num_cpus=1)
    cluster.remove_node(node)
    cluster.wait_for_nodes()
    runner.step()  # Recovery step

    # TODO(rliaw): This assertion is not critical but will not pass
    # because checkpoint handling is messy and should be refactored
    # rather than hotfixed.
    # assert t.last_result is None, "Trial result not restored correctly."
    for i in range(4):
        runner.step()

    assert t.status == Trial.TERMINATED

    # Test recovery of trial that has been checkpointed
    t2 = Trial(trainable_id, **kwargs)
    runner.add_trial(t2)
    runner.step()  # start
    runner.step()  # 1 result
    runner.step()  # 2 result and checkpoint
    assert t2.has_checkpoint()
    node3 = cluster.add_node(num_cpus=1)
    cluster.remove_node(node2)
    cluster.wait_for_nodes()
    runner.step()  # 3 result + start and fail 4 result
    runner.step()  # Recovery step
    runner.step()  # Process recovery
    runner.step()  # result
    if t2.status != Trial.TERMINATED:
        runner.step()
    assert t2.status == Trial.TERMINATED, runner.debug_string()

    # Test recovery of trial that won't be checkpointed
    kwargs = {
        "stopping_criterion": {
            "training_iteration": 3
        },
        "remote_checkpoint_dir": MOCK_REMOTE_DIR,
        "sync_to_driver_fn": trainable_id == "__fake",
    }
    t3 = Trial(trainable_id, **kwargs)
    runner.add_trial(t3)
    runner.step()  # start
    runner.step()  # 1 result
    cluster.add_node(num_cpus=1)
    cluster.remove_node(node3)
    cluster.wait_for_nodes()
    runner.step()  # Error handling step
    if t3.status != Trial.ERROR:
        runner.step()
    # No checkpoint_freq: this trial cannot be recovered and errors out.
    assert t3.status == Trial.ERROR, runner.debug_string()

    with pytest.raises(TuneError):
        runner.step()
@pytest.mark.parametrize("trainable_id", ["__fake", "__fake_durable"])
def test_trial_requeue(start_connected_emptyhead_cluster, trainable_id):
    """Removing a node in full cluster causes Trial to be requeued."""
    cluster = start_connected_emptyhead_cluster
    node = cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()

    runner = TrialRunner(BasicVariantGenerator())
    kwargs = {
        "stopping_criterion": {
            "training_iteration": 5
        },
        "checkpoint_freq": 1,
        "max_failures": 1,
        "remote_checkpoint_dir": MOCK_REMOTE_DIR,
        # Durable trainables sync via cloud storage, not the driver.
        "sync_to_driver_fn": trainable_id == "__fake",
    }

    trials = [Trial(trainable_id, **kwargs), Trial(trainable_id, **kwargs)]
    for t in trials:
        runner.add_trial(t)

    runner.step()  # start
    runner.step()  # 1 result

    # With no remaining capacity, both trials fall back to PENDING.
    cluster.remove_node(node)
    cluster.wait_for_nodes()
    runner.step()
    assert all(t.status == Trial.PENDING for t in trials)

    # Nothing can run on an empty cluster, so stepping again is an error.
    with pytest.raises(TuneError):
        runner.step()
@pytest.mark.parametrize("trainable_id", ["__fake_remote", "__fake_durable"])
def test_migration_checkpoint_removal(start_connected_emptyhead_cluster,
                                      trainable_id):
    """Test checks that trial restarts if checkpoint is lost w/ node fail."""
    cluster = start_connected_emptyhead_cluster
    node = cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()

    runner = TrialRunner(BasicVariantGenerator())
    kwargs = {
        "stopping_criterion": {
            "training_iteration": 4
        },
        "checkpoint_freq": 2,
        "max_failures": 2,
        "remote_checkpoint_dir": MOCK_REMOTE_DIR,
        "sync_to_driver_fn": trainable_id == "__fake_remote",
    }

    # The following patches only affect __fake_remote.
    find_checkpoint_dir = TrainableUtil.find_checkpoint_dir
    with patch("ray.tune.logger.get_node_syncer") as mock_get_node_syncer:
        trainable_util = "ray.tune.ray_trial_executor.TrainableUtil"
        with patch(trainable_util + ".find_checkpoint_dir") as mock_find_dir:

            def mock_get_syncer_fn(local_dir, remote_dir, sync_function):
                # Route node syncing through the mocked storage client.
                client = mock_storage_client()
                return MockNodeSyncer(local_dir, remote_dir, client)

            mock_get_node_syncer.side_effect = mock_get_syncer_fn

            def mock_find_dir_fn(checkpoint_path):
                """Converts back to local path first."""
                checkpoint_path = checkpoint_path[len(MOCK_REMOTE_DIR):]
                checkpoint_path = os.path.join("/", checkpoint_path)
                return find_checkpoint_dir(checkpoint_path)

            # __fake_remote trainables save to a separate "remote" directory.
            # TrainableUtil will not check this path unless we mock it.
            mock_find_dir.side_effect = mock_find_dir_fn

            # Test recovery of trial that has been checkpointed
            t1 = Trial(trainable_id, **kwargs)
            runner.add_trial(t1)
            runner.step()  # start
            runner.step()  # 1 result
            runner.step()  # 2 result and checkpoint
            assert t1.has_checkpoint()
            cluster.add_node(num_cpus=1)
            cluster.remove_node(node)
            cluster.wait_for_nodes()
            # Simulate losing the checkpoint along with the failed node.
            shutil.rmtree(os.path.dirname(t1.checkpoint.value))
            runner.step()  # collect result 3, kick off + fail result 4
            runner.step()  # Recovery step
            runner.step()  # Process Recovery + step 4
            for i in range(3):
                if t1.status != Trial.TERMINATED:
                    runner.step()
    assert t1.status == Trial.TERMINATED, runner.debug_string()
@pytest.mark.parametrize("trainable_id", ["__fake", "__fake_durable"])
def test_cluster_down_simple(start_connected_cluster, tmpdir, trainable_id):
    """Tests that TrialRunner save/restore works on cluster shutdown."""
    cluster = start_connected_cluster
    cluster.add_node(num_cpus=1)
    cluster.wait_for_nodes()

    dirpath = str(tmpdir)
    # checkpoint_period=0 -> runner state is persisted on every step.
    runner = TrialRunner(local_checkpoint_dir=dirpath, checkpoint_period=0)
    kwargs = {
        "stopping_criterion": {
            "training_iteration": 2
        },
        "checkpoint_freq": 1,
        "max_failures": 1,
        "remote_checkpoint_dir": MOCK_REMOTE_DIR,
        "sync_to_driver_fn": trainable_id == "__fake",
    }
    trials = [Trial(trainable_id, **kwargs), Trial(trainable_id, **kwargs)]
    for t in trials:
        runner.add_trial(t)

    runner.step()  # start
    runner.step()  # start2
    runner.step()  # step
    assert all(t.status == Trial.RUNNING for t in runner.get_trials())
    runner.checkpoint()

    # Tear the whole cluster down mid-run...
    ray.shutdown()
    cluster.shutdown()

    # ...then bring up a fresh one and resume from the local checkpoint.
    cluster = _start_new_cluster()
    runner = TrialRunner(resume="LOCAL", local_checkpoint_dir=dirpath)
    runner.step()  # start
    runner.step()  # process restore
    runner.step()  # start2

    for i in range(3):
        runner.step()

    with pytest.raises(TuneError):
        runner.step()

    assert all(t.status == Trial.TERMINATED for t in runner.get_trials())
    ray.shutdown()
    cluster.shutdown()
@pytest.mark.parametrize("trainable_id", ["__fake", "__fake_durable"])
def test_cluster_down_full(start_connected_cluster, tmpdir, trainable_id):
    """Tests that run_experiment restoring works on cluster shutdown."""
    cluster = start_connected_cluster
    dirpath = str(tmpdir)

    use_default_sync = trainable_id == "__fake"
    from ray.tune.result import DEFAULT_RESULTS_DIR
    local_dir = DEFAULT_RESULTS_DIR
    upload_dir = None if use_default_sync else MOCK_REMOTE_DIR

    base_dict = dict(
        run=trainable_id,
        stop=dict(training_iteration=3),
        local_dir=local_dir,
        upload_dir=upload_dir,
        sync_to_driver=use_default_sync,
    )

    exp1_args = base_dict
    # dict(items, **overrides) copies base_dict, varying one knob each.
    exp2_args = dict(base_dict.items(), local_dir=dirpath, checkpoint_freq=1)
    exp3_args = dict(base_dict.items(), config=dict(mock_error=True))
    exp4_args = dict(
        base_dict.items(), config=dict(mock_error=True), checkpoint_freq=1)
    all_experiments = {
        "exp1": exp1_args,
        "exp2": exp2_args,
        "exp3": exp3_args,
        "exp4": exp4_args
    }

    mock_get_client = "ray.tune.trial_runner.get_cloud_syncer"
    # Substitute a mocked storage client so no real cloud access happens.
    with patch(mock_get_client) as mock_get_cloud_syncer:
        mock_syncer = Syncer(local_dir, upload_dir, mock_storage_client())
        mock_get_cloud_syncer.return_value = mock_syncer

        tune.run_experiments(all_experiments, raise_on_failed_trial=False)

        # Shut the cluster down mid-run, then restart and resume everything.
        ray.shutdown()
        cluster.shutdown()
        cluster = _start_new_cluster()

        trials = tune.run_experiments(
            all_experiments, resume=True, raise_on_failed_trial=False)

        assert len(trials) == 4
        assert all(t.status in [Trial.TERMINATED, Trial.ERROR] for t in trials)
    ray.shutdown()
    cluster.shutdown()
@pytest.mark.skip(reason="Not very consistent.")
def test_cluster_rllib_restore(start_connected_cluster, tmpdir):
    """Tests resuming an RLlib (PG) Tune run after a full cluster restart.

    A driver script launches the run in a subprocess; once the TrialRunner
    checkpoint shows progress, the cluster is torn down and the experiment
    is resumed from the on-disk checkpoint.
    """
    cluster = start_connected_cluster
    dirpath = str(tmpdir)
    # NOTE(review): the previous version of this script contained a stray
    # positional argument (`dict(experiment=kwargs)`) after keyword
    # arguments, which would be a SyntaxError in the generated driver
    # (and `kwargs` was undefined there); it has been removed.
    script = """
import time
import ray
from ray import tune

ray.init(address="{address}")

tune.run(
    "PG",
    name="experiment",
    config=dict(env="CartPole-v1"),
    stop=dict(training_iteration=10),
    local_dir="{checkpoint_dir}",
    checkpoint_freq=1,
    max_failures=1,
    raise_on_failed_trial=False)
""".format(
        address=cluster.address, checkpoint_dir=dirpath)
    run_string_as_driver_nonblocking(script)
    # Wait until the right checkpoint is saved.
    # The trainable returns every 0.5 seconds, so this should not miss
    # the checkpoint.
    local_checkpoint_dir = os.path.join(dirpath, "experiment")
    for i in range(100):
        if TrialRunner.checkpoint_exists(local_checkpoint_dir):
            # Inspect the internal trialrunner
            runner = TrialRunner(
                resume="LOCAL", local_checkpoint_dir=local_checkpoint_dir)
            trials = runner.get_trials()
            last_res = trials[0].last_result
            if last_res and last_res.get("training_iteration"):
                break
        time.sleep(0.3)

    if not TrialRunner.checkpoint_exists(local_checkpoint_dir):
        raise RuntimeError("Checkpoint file didn't appear.")

    # Kill the driver mid-run, then restart the cluster.
    ray.shutdown()
    cluster.shutdown()
    cluster = _start_new_cluster()
    cluster.wait_for_nodes()

    # Restore properly from checkpoint
    trials2 = tune.run_experiments(
        {
            "experiment": {
                "run": "PG",
                "checkpoint_freq": 1,
                "local_dir": dirpath
            }
        },
        resume=True)
    assert all(t.status == Trial.TERMINATED for t in trials2)
    ray.shutdown()
    cluster.shutdown()
def test_cluster_interrupt(start_connected_cluster, tmpdir):
    """Tests run_experiment on cluster shutdown with actual interrupt.

    This is an end-to-end test.
    """
    cluster = start_connected_cluster
    dirpath = str(tmpdir)

    # Needs to be in scope for pytest
    class _Mock(tune.Trainable):
        """Finishes on the 4th iteration."""

        def _setup(self, config):
            self.state = {"hi": 0}

        def _train(self):
            self.state["hi"] += 1
            time.sleep(0.5)
            return {"done": self.state["hi"] >= 4}

        def _save(self, path):
            return self.state

        def _restore(self, state):
            self.state = state

    # Removes indent from class.
    reformatted = "\n".join(line[4:] if len(line) else line
                            for line in inspect.getsource(_Mock).split("\n"))

    # The driver script embeds _Mock's (dedented) source so the subprocess
    # can define and run the same trainable.
    script = """
import time
import ray
from ray import tune

ray.init(address="{address}")

{fail_class_code}

tune.run(
    {fail_class},
    name="experiment",
    stop=dict(training_iteration=5),
    local_dir="{checkpoint_dir}",
    checkpoint_freq=1,
    global_checkpoint_period=0,
    max_failures=1,
    raise_on_failed_trial=False)
""".format(
        address=cluster.address,
        checkpoint_dir=dirpath,
        fail_class_code=reformatted,
        fail_class=_Mock.__name__)
    run_string_as_driver_nonblocking(script)

    # Wait until the right checkpoint is saved.
    # The trainable returns every 0.5 seconds, so this should not miss
    # the checkpoint.
    local_checkpoint_dir = os.path.join(dirpath, "experiment")
    for i in range(50):
        if TrialRunner.checkpoint_exists(local_checkpoint_dir):
            # Inspect the internal trialrunner
            runner = TrialRunner(
                resume="LOCAL", local_checkpoint_dir=local_checkpoint_dir)
            trials = runner.get_trials()
            last_res = trials[0].last_result
            if last_res and last_res.get("training_iteration") == 3:
                break
        time.sleep(0.2)

    if not TrialRunner.checkpoint_exists(local_checkpoint_dir):
        raise RuntimeError("Checkpoint file didn't appear.")

    # Interrupt the run by tearing the cluster down, then restart it.
    ray.shutdown()
    cluster.shutdown()
    cluster = _start_new_cluster()
    Experiment.register_if_needed(_Mock)

    # Inspect the internal trialrunner
    runner = TrialRunner(
        resume="LOCAL", local_checkpoint_dir=local_checkpoint_dir)
    trials = runner.get_trials()
    assert trials[0].last_result["training_iteration"] == 3
    assert trials[0].status == Trial.PENDING

    # Restore properly from checkpoint
    trials2 = tune.run_experiments(
        {
            "experiment": {
                "run": _Mock,
                "local_dir": dirpath,
                "checkpoint_freq": 1
            }
        },
        resume=True,
        raise_on_failed_trial=False)
    assert all(t.status == Trial.TERMINATED for t in trials2)
    assert {t.trial_id for t in trials2} == {t.trial_id for t in trials}
    ray.shutdown()
    cluster.shutdown()
if __name__ == "__main__":
    # Allow running this test file directly.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_commands.py
|
Python
|
import click
import os
import pytest
import subprocess
import sys
import time
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import commands
from ray.tune.result import CONFIG_PREFIX
class Capturing:
    """Context manager that captures everything written to stdout.

    Inside the ``with`` block, ``sys.stdout`` is swapped for an in-memory
    buffer; on exit the buffer is split into lines and exposed via the
    ``captured`` attribute, and the real stdout is restored.
    """

    def __enter__(self):
        self._stdout = sys.stdout
        self._stringio = StringIO()
        sys.stdout = self._stringio
        self.captured = []
        return self

    def __exit__(self, *args):
        text = self._stringio.getvalue()
        self.captured.extend(text.splitlines())
        del self._stringio  # free up some memory
        sys.stdout = self._stdout
@pytest.fixture
def start_ray():
    # Local-mode Ray shared by the tests below; shut down after each test.
    ray.init(log_to_driver=False, local_mode=True)
    _register_all()
    yield
    ray.shutdown()
def test_time(start_ray, tmpdir):
    """`tune ls` should answer quickly even right after an experiment."""
    experiment_name = "test_time"
    experiment_path = os.path.join(str(tmpdir), experiment_name)
    num_samples = 2
    tune.run_experiments({
        experiment_name: {
            "run": "__fake",
            "stop": {
                "training_iteration": 1
            },
            "num_samples": num_samples,
            "local_dir": str(tmpdir)
        }
    })

    durations = []
    for _ in range(5):
        started = time.time()
        subprocess.check_call(["tune", "ls", experiment_path])
        durations.append(time.time() - started)

    # Average over several invocations to smooth out noise.
    assert sum(durations) / len(durations) < 2.0, "CLI is taking too long!"
def test_ls(start_ray, tmpdir):
    """This test captures output of list_trials."""
    experiment_name = "test_ls"
    experiment_path = os.path.join(str(tmpdir), experiment_name)
    num_samples = 3
    tune.run(
        "__fake",
        name=experiment_name,
        stop={"training_iteration": 1},
        num_samples=num_samples,
        local_dir=str(tmpdir))

    columns = ["episode_reward_mean", "training_iteration", "trial_id"]
    limit = 2
    with Capturing() as output:
        commands.list_trials(experiment_path, info_keys=columns, limit=limit)
    lines = output.captured
    # Header row mentions every requested column, with matching separators.
    assert all(col in lines[1] for col in columns)
    assert lines[1].count("|") == len(columns) + 1
    # 3 header/border lines + `limit` data rows + 1 closing border line.
    assert len(lines) == 3 + limit + 1

    with Capturing() as output:
        commands.list_trials(
            experiment_path,
            sort=["trial_id"],
            info_keys=("trial_id", "training_iteration"),
            filter_op="training_iteration == 1")
    lines = output.captured
    assert len(lines) == 3 + num_samples + 1

    # Sorting on a column that is not displayed must be rejected.
    with pytest.raises(click.ClickException):
        commands.list_trials(
            experiment_path,
            sort=["trial_id"],
            info_keys=("training_iteration", ))

    # Unknown info keys must be rejected as well.
    with pytest.raises(click.ClickException):
        commands.list_trials(experiment_path, info_keys=("asdf", ))
def test_ls_with_cfg(start_ray, tmpdir):
    # Config columns (keys prefixed with CONFIG_PREFIX) must be listable.
    experiment_name = "test_ls_with_cfg"
    experiment_path = os.path.join(str(tmpdir), experiment_name)
    tune.run(
        "__fake",
        name=experiment_name,
        stop={"training_iteration": 1},
        config={"test_variable": tune.grid_search(list(range(5)))},
        local_dir=str(tmpdir))

    columns = [CONFIG_PREFIX + "test_variable", "trial_id"]
    limit = 4
    with Capturing() as output:
        commands.list_trials(experiment_path, info_keys=columns, limit=limit)
    lines = output.captured
    assert all(col in lines[1] for col in columns)
    assert lines[1].count("|") == len(columns) + 1
    # 3 header/border lines + `limit` data rows + 1 closing border line.
    assert len(lines) == 3 + limit + 1
def test_lsx(start_ray, tmpdir):
    """This test captures output of list_experiments."""
    project_path = str(tmpdir)
    num_experiments = 3
    for i in range(num_experiments):
        experiment_name = "test_lsx{}".format(i)
        tune.run(
            "__fake",
            name=experiment_name,
            stop={"training_iteration": 1},
            num_samples=1,
            local_dir=project_path)

    limit = 2
    with Capturing() as output:
        commands.list_experiments(
            project_path, info_keys=("total_trials", ), limit=limit)
    lines = output.captured
    assert "total_trials" in lines[1]
    assert lines[1].count("|") == 2
    # 3 header/border lines + `limit` data rows + 1 closing border line.
    assert len(lines) == 3 + limit + 1

    with Capturing() as output:
        commands.list_experiments(
            project_path,
            sort=["total_trials"],
            info_keys=("total_trials", ),
            filter_op="total_trials == 1")
    lines = output.captured
    # Every experiment has exactly one trial, so all rows pass the filter.
    assert sum("1" in line for line in lines) >= num_experiments
    assert len(lines) == 3 + num_experiments + 1
if __name__ == "__main__":
    # Make click happy in bazel.
    # (Click refuses to run under an ASCII locale on Python 3.)
    os.environ["LC_ALL"] = "en_US.UTF-8"
    os.environ["LANG"] = "en_US.UTF-8"
    sys.exit(pytest.main([__file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_dependency.py
|
Python
|
#!/usr/bin/env python
import sys
import ray
from ray.tune import register_trainable, run_experiments
def f(config, reporter):
    """Minimal function trainable: report a single timestep and finish."""
    del config  # unused; required by the trainable-function signature
    reporter(timesteps_total=1)
if __name__ == "__main__":
    ray.init()
    register_trainable("my_class", f)

    run_experiments({
        "test": {
            "run": "my_class",
            "stop": {
                "training_iteration": 1
            }
        }
    })

    # The whole point of this script: running a plain function trainable
    # must not pull in RLlib as a side effect.
    assert "ray.rllib" not in sys.modules, "RLlib should not be imported"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_experiment.py
|
Python
|
import unittest
import ray
from ray.rllib import _register_all
from ray.tune import register_trainable
from ray.tune.experiment import Experiment, convert_to_experiment_list
from ray.tune.error import TuneError
class ExperimentTest(unittest.TestCase):
    """Tests conversion of various specs into lists of Experiment objects."""

    def setUp(self):
        def train(config, reporter):
            for i in range(100):
                reporter(timesteps_total=i)

        register_trainable("f1", train)

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    @staticmethod
    def _make_experiment():
        # One-trial experiment spec shared across the tests below.
        return Experiment(
            name="foo",
            run="f1",
            config={"script_min_iter_time_s": 0})

    def testConvertExperimentFromExperiment(self):
        result = convert_to_experiment_list(self._make_experiment())
        self.assertEqual(len(result), 1)
        self.assertEqual(type(result), list)

    def testConvertExperimentNone(self):
        result = convert_to_experiment_list(None)
        self.assertEqual(len(result), 0)
        self.assertEqual(type(result), list)

    def testConvertExperimentList(self):
        exp = self._make_experiment()
        result = convert_to_experiment_list([exp, exp])
        self.assertEqual(len(result), 2)
        self.assertEqual(type(result), list)

    def testConvertExperimentJSON(self):
        spec = {"script_min_iter_time_s": 0}
        experiment = {
            "name": {
                "run": "f1",
                "config": dict(spec)
            },
            "named": {
                "run": "f1",
                "config": dict(spec)
            }
        }
        result = convert_to_experiment_list(experiment)
        self.assertEqual(len(result), 2)
        self.assertEqual(type(result), list)

    def testConvertExperimentIncorrect(self):
        self.assertRaises(TuneError, lambda: convert_to_experiment_list("hi"))
if __name__ == "__main__":
    # Allow running this test file directly.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_experiment_analysis.py
|
Python
|
import unittest
import shutil
import tempfile
import random
import os
import pandas as pd
import ray
from ray.tune import run, sample_from
from ray.tune.examples.async_hyperband_example import MyTrainableClass
class ExperimentAnalysisSuite(unittest.TestCase):
    """End-to-end checks for ExperimentAnalysis built from a small Tune run."""

    def setUp(self):
        ray.init(local_mode=False)
        self.test_dir = tempfile.mkdtemp()
        self.test_name = "analysis_exp"
        self.num_samples = 10
        self.metric = "episode_reward_mean"
        self.test_path = os.path.join(self.test_dir, self.test_name)
        self.run_test_exp()

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)
        ray.shutdown()

    def run_test_exp(self):
        # One checkpointed iteration per trial keeps the suite fast.
        self.ea = run(
            MyTrainableClass,
            name=self.test_name,
            local_dir=self.test_dir,
            stop={"training_iteration": 1},
            checkpoint_freq=1,
            num_samples=self.num_samples,
            config={
                "width": sample_from(
                    lambda spec: 10 + int(90 * random.random())),
                "height": sample_from(lambda spec: int(100 * random.random())),
            })

    def testDataframe(self):
        df = self.ea.dataframe()
        self.assertTrue(isinstance(df, pd.DataFrame))
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(df.shape[0], self.num_samples)

    def testStats(self):
        assert self.ea.stats()
        assert self.ea.runner_data()

    def testTrialDataframe(self):
        checkpoints = self.ea._checkpoints
        idx = random.randint(0, len(checkpoints) - 1)
        trial_df = self.ea.trial_dataframes[checkpoints[idx]["logdir"]]
        self.assertTrue(isinstance(trial_df, pd.DataFrame))
        self.assertEqual(trial_df.shape[0], 1)

    def testBestConfig(self):
        best_config = self.ea.get_best_config(self.metric)
        self.assertTrue(isinstance(best_config, dict))
        self.assertTrue("width" in best_config)
        self.assertTrue("height" in best_config)

    def testBestLogdir(self):
        logdir = self.ea.get_best_logdir(self.metric)
        self.assertTrue(logdir.startswith(self.test_path))
        logdir2 = self.ea.get_best_logdir(self.metric, mode="min")
        self.assertTrue(logdir2.startswith(self.test_path))
        # assertNotEqual: assertNotEquals is a deprecated alias.
        self.assertNotEqual(logdir, logdir2)

    def testGetTrialCheckpointsPathsByTrial(self):
        best_trial = self.ea.get_best_trial(self.metric)
        checkpoints_metrics = self.ea.get_trial_checkpoints_paths(best_trial)
        logdir = self.ea.get_best_logdir(self.metric)
        expected_path = os.path.join(logdir, "checkpoint_1", "checkpoint")
        assert checkpoints_metrics[0][0] == expected_path
        assert checkpoints_metrics[0][1] == 1

    def testGetTrialCheckpointsPathsByPath(self):
        logdir = self.ea.get_best_logdir(self.metric)
        checkpoints_metrics = self.ea.get_trial_checkpoints_paths(logdir)
        # "checkpoint_1" (no trailing slash) for consistency with the sibling
        # tests; os.path.join yields the same joined path either way.
        expected_path = os.path.join(logdir, "checkpoint_1", "checkpoint")
        assert checkpoints_metrics[0][0] == expected_path
        assert checkpoints_metrics[0][1] == 1

    def testGetTrialCheckpointsPathsWithMetricByTrial(self):
        best_trial = self.ea.get_best_trial(self.metric)
        paths = self.ea.get_trial_checkpoints_paths(best_trial, self.metric)
        logdir = self.ea.get_best_logdir(self.metric)
        expected_path = os.path.join(logdir, "checkpoint_1", "checkpoint")
        assert paths[0][0] == expected_path
        assert paths[0][1] == best_trial.metric_analysis[self.metric]["last"]

    def testGetTrialCheckpointsPathsWithMetricByPath(self):
        best_trial = self.ea.get_best_trial(self.metric)
        logdir = self.ea.get_best_logdir(self.metric)
        paths = self.ea.get_trial_checkpoints_paths(best_trial, self.metric)
        expected_path = os.path.join(logdir, "checkpoint_1", "checkpoint")
        assert paths[0][0] == expected_path
        assert paths[0][1] == best_trial.metric_analysis[self.metric]["last"]

    def testAllDataframes(self):
        dataframes = self.ea.trial_dataframes
        self.assertTrue(len(dataframes) == self.num_samples)
        self.assertTrue(isinstance(dataframes, dict))
        for df in dataframes.values():
            self.assertEqual(df.training_iteration.max(), 1)

    def testIgnoreOtherExperiment(self):
        # A second experiment in the same local_dir must not leak into the
        # analysis object returned by this run.
        analysis = run(
            MyTrainableClass,
            name="test_example",
            local_dir=self.test_dir,
            return_trials=False,
            stop={"training_iteration": 1},
            num_samples=1,
            config={
                "width": sample_from(
                    lambda spec: 10 + int(90 * random.random())),
                "height": sample_from(lambda spec: int(100 * random.random())),
            })
        df = analysis.dataframe()
        self.assertEqual(df.shape[0], 1)
if __name__ == "__main__":
    # Allow running this module directly through pytest's CLI.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_experiment_analysis_mem.py
|
Python
|
import unittest
import shutil
import tempfile
import random
import pandas as pd
import ray
from ray.tune import run, Trainable, sample_from, Analysis, grid_search
from ray.tune.examples.async_hyperband_example import MyTrainableClass
class ExperimentAnalysisInMemorySuite(unittest.TestCase):
    """Checks best-trial selection on an in-memory analysis object."""

    def setUp(self):
        class MockTrainable(Trainable):
            # Deterministic trainable: scores come from a fixed table
            # keyed by the trial's "id" config value.
            def _setup(self, config):
                self.id = config["id"]
                self.idx = 0
                self.scores_dict = {
                    0: [5, 0],
                    1: [4, 1],
                    2: [2, 8],
                    3: [9, 6],
                    4: [7, 3]
                }

            def _train(self):
                val = self.scores_dict[self.id][self.idx]
                self.idx += 1
                return {"score": val}

            def _save(self, checkpoint_dir):
                pass

            def _restore(self, checkpoint_path):
                pass

        self.MockTrainable = MockTrainable
        # Create the scratch dir here rather than inside the test body so
        # tearDown can always rely on self.test_dir existing.
        self.test_dir = tempfile.mkdtemp()
        ray.init(local_mode=False, num_cpus=1)

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)
        ray.shutdown()

    def testCompareTrials(self):
        # Flattened score table from MockTrainable: first iteration values
        # followed by second iteration values.
        scores_all = [5, 4, 2, 9, 7, 0, 1, 8, 6, 3]
        scores_last = scores_all[5:]
        ea = run(
            self.MockTrainable,
            name="analysis_exp",
            local_dir=self.test_dir,
            stop={"training_iteration": 2},
            num_samples=1,
            config={"id": grid_search(list(range(5)))})
        max_all = ea.get_best_trial("score",
                                    "max").metric_analysis["score"]["max"]
        min_all = ea.get_best_trial("score",
                                    "min").metric_analysis["score"]["min"]
        max_last = ea.get_best_trial("score", "max",
                                     "last").metric_analysis["score"]["last"]
        self.assertEqual(max_all, max(scores_all))
        self.assertEqual(min_all, min(scores_all))
        self.assertEqual(max_last, max(scores_last))
        self.assertNotEqual(max_last, max(scores_all))
class AnalysisSuite(unittest.TestCase):
    """Checks the Analysis object rebuilt from on-disk experiment state."""

    def setUp(self):
        ray.init(local_mode=True)
        self.test_dir = tempfile.mkdtemp()
        self.num_samples = 10
        self.metric = "episode_reward_mean"
        # Two experiments under the same local_dir; Analysis should see both.
        self.run_test_exp(test_name="analysis_exp1")
        self.run_test_exp(test_name="analysis_exp2")

    def run_test_exp(self, test_name=None):
        run(MyTrainableClass,
            name=test_name,
            local_dir=self.test_dir,
            return_trials=False,
            stop={"training_iteration": 1},
            num_samples=self.num_samples,
            config={
                "width": sample_from(
                    lambda spec: 10 + int(90 * random.random())),
                "height": sample_from(lambda spec: int(100 * random.random())),
            })

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)
        ray.shutdown()

    def testDataframe(self):
        analysis = Analysis(self.test_dir)
        df = analysis.dataframe()
        self.assertTrue(isinstance(df, pd.DataFrame))
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(df.shape[0], self.num_samples * 2)

    def testBestLogdir(self):
        analysis = Analysis(self.test_dir)
        logdir = analysis.get_best_logdir(self.metric)
        self.assertTrue(logdir.startswith(self.test_dir))
        logdir2 = analysis.get_best_logdir(self.metric, mode="min")
        self.assertTrue(logdir2.startswith(self.test_dir))
        # assertNotEqual: assertNotEquals is a deprecated alias.
        self.assertNotEqual(logdir, logdir2)

    def testBestConfigIsLogdir(self):
        analysis = Analysis(self.test_dir)
        for metric, mode in [(self.metric, "min"), (self.metric, "max")]:
            logdir = analysis.get_best_logdir(metric, mode=mode)
            best_config = analysis.get_best_config(metric, mode=mode)
            self.assertEqual(analysis.get_all_configs()[logdir], best_config)
if __name__ == "__main__":
    # Allow running this module directly through pytest's CLI.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_logger.py
|
Python
|
from collections import namedtuple
import unittest
import tempfile
import shutil
from ray.tune.logger import tf2_compat_logger, JsonLogger, CSVLogger, TBXLogger
# Lightweight stand-in for ray.tune.trial.Trial: the loggers only read
# evaluated_params and trial_id from it.
Trial = namedtuple("MockTrial", ["evaluated_params", "trial_id"])
def result(t, rew):
    """Build a minimal Tune result dict for elapsed time *t* and reward *rew*."""
    fake_result = {
        "time_total_s": t,
        "episode_reward_mean": rew,
        "mean_accuracy": rew * 2,
        "training_iteration": int(t),
    }
    return fake_result
class LoggerSuite(unittest.TestCase):
    """Test built-in loggers."""

    def setUp(self):
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir, ignore_errors=True)

    def _run_logger(self, logger_factory, trial_id):
        # Shared driver: the four logger tests only differed in the logger
        # class and the trial id, so the duplicated body lives here once.
        config = {"a": 2, "b": 5}
        t = Trial(evaluated_params=config, trial_id=trial_id)
        logger = logger_factory(config=config, logdir=self.test_dir, trial=t)
        logger.on_result(result(2, 4))
        logger.on_result(result(2, 4))
        logger.close()

    def testTensorBoardLogger(self):
        self._run_logger(tf2_compat_logger, 5342)

    def testCSV(self):
        self._run_logger(CSVLogger, "csv")

    def testJSON(self):
        self._run_logger(JsonLogger, "json")

    def testTBX(self):
        self._run_logger(TBXLogger, "tbx")
if __name__ == "__main__":
    # Allow running this module directly through pytest's CLI.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_progress_reporter.py
|
Python
|
import collections
import time
import unittest
from unittest.mock import MagicMock
from ray.tune.trial import Trial
from ray.tune.progress_reporter import _fair_filter_trials
class ProgressReporterTest(unittest.TestCase):
def mock_trial(self, status, start_time):
mock = MagicMock()
mock.status = status
mock.start_time = start_time
return mock
def testFairFilterTrials(self):
"""Tests that trials are represented fairly."""
trials_by_state = collections.defaultdict(list)
# States for which trials are under and overrepresented
states_under = (Trial.PAUSED, Trial.ERROR)
states_over = (Trial.PENDING, Trial.RUNNING, Trial.TERMINATED)
max_trials = 13
num_trials_under = 2 # num of trials for each underrepresented state
num_trials_over = 10 # num of trials for each overrepresented state
for state in states_under:
for _ in range(num_trials_under):
trials_by_state[state].append(
self.mock_trial(state, time.time()))
for state in states_over:
for _ in range(num_trials_over):
trials_by_state[state].append(
self.mock_trial(state, time.time()))
filtered_trials_by_state = _fair_filter_trials(
trials_by_state, max_trials=max_trials)
for state in trials_by_state:
if state in states_under:
expected_num_trials = num_trials_under
else:
expected_num_trials = (max_trials - num_trials_under *
len(states_under)) / len(states_over)
state_trials = filtered_trials_by_state[state]
self.assertEqual(len(state_trials), expected_num_trials)
# Make sure trials are sorted newest-first within state.
for i in range(len(state_trials) - 1):
self.assertGreaterEqual(state_trials[i].start_time,
state_trials[i + 1].start_time)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_ray_trial_executor.py
|
Python
|
# coding: utf-8
import json
import unittest
import ray
from ray.rllib import _register_all
from ray.tune import Trainable
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial, Checkpoint
from ray.tune.resources import Resources
from ray.cluster_utils import Cluster
class RayTrialExecutorTest(unittest.TestCase):
    """Lifecycle tests for RayTrialExecutor (start/stop/pause/save/restore/
    reset) against a live Ray instance using the "__fake" trainable."""
    def setUp(self):
        self.trial_executor = RayTrialExecutor(queue_trials=False)
        ray.init()
        _register_all()  # Needed for flaky tests
    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects
    def testStartStop(self):
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        running = self.trial_executor.get_running_trials()
        self.assertEqual(1, len(running))
        self.trial_executor.stop_trial(trial)
    def testSaveRestore(self):
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.save(trial, Checkpoint.PERSISTENT)
        self.trial_executor.restore(trial)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testPauseResume(self):
        """Tests that pausing works for trials in flight."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.pause_trial(trial)
        self.assertEqual(Trial.PAUSED, trial.status)
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testStartFailure(self):
        # "asdf" maps to None, so starting the trial must end in ERROR.
        _global_registry.register(TRAINABLE_CLASS, "asdf", None)
        trial = Trial("asdf", resources=Resources(1, 0))
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.ERROR, trial.status)
    def testPauseResume2(self):
        """Tests that pausing works for trials being processed."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        # fetch_result leaves the trial mid-processing before the pause.
        self.trial_executor.fetch_result(trial)
        self.trial_executor.pause_trial(trial)
        self.assertEqual(Trial.PAUSED, trial.status)
        self.trial_executor.start_trial(trial)
        self.assertEqual(Trial.RUNNING, trial.status)
        self.trial_executor.stop_trial(trial)
        self.assertEqual(Trial.TERMINATED, trial.status)
    def testNoResetTrial(self):
        """Tests that reset handles NotImplemented properly."""
        trial = Trial("__fake")
        self.trial_executor.start_trial(trial)
        exists = self.trial_executor.reset_trial(trial, {}, "modified_mock")
        self.assertEqual(exists, False)
        self.assertEqual(Trial.RUNNING, trial.status)
    def testResetTrial(self):
        """Tests that reset works as expected."""
        class B(Trainable):
            def _train(self):
                return dict(timesteps_this_iter=1, done=True)
            def reset_config(self, config):
                self.config = config
                return True
        trials = self.generate_trials({
            "run": B,
            "config": {
                "foo": 0
            },
        }, "grid_search")
        trial = trials[0]
        self.trial_executor.start_trial(trial)
        exists = self.trial_executor.reset_trial(trial, {"hi": 1},
                                                 "modified_mock")
        self.assertEqual(exists, True)
        self.assertEqual(trial.config.get("hi"), 1)
        self.assertEqual(trial.experiment_tag, "modified_mock")
        self.assertEqual(Trial.RUNNING, trial.status)
    def generate_trials(self, spec, name):
        # Expand an experiment spec into concrete Trial objects.
        suggester = BasicVariantGenerator()
        suggester.add_configurations({name: spec})
        return suggester.next_trials()
class RayExecutorQueueTest(unittest.TestCase):
    """Checks queue_trials=True behavior on a single-CPU simulated cluster."""
    def setUp(self):
        self.trial_executor = RayTrialExecutor(
            queue_trials=True, refresh_period=0)
        # One-CPU head node; extra nodes are added by individual tests.
        self.cluster = Cluster(
            initialize_head=True,
            connect=True,
            head_node_args={
                "num_cpus": 1,
                "_internal_config": json.dumps({
                    "num_heartbeats_timeout": 10
                })
            })
        # Pytest doesn't play nicely with imports
        _register_all()
    def tearDown(self):
        ray.shutdown()
        self.cluster.shutdown()
        _register_all()  # re-register the evicted objects
    def testQueueTrial(self):
        """Tests that a GPU trial is still reported as schedulable (queued)
        on a CPU-only head node when queue_trials=True."""
        def create_trial(cpu, gpu=0):
            return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
        cpu_only = create_trial(1, 0)
        self.assertTrue(self.trial_executor.has_resources(cpu_only.resources))
        self.trial_executor.start_trial(cpu_only)
        gpu_only = create_trial(0, 1)
        self.assertTrue(self.trial_executor.has_resources(gpu_only.resources))
    def testHeadBlocking(self):
        def create_trial(cpu, gpu=0):
            return Trial("__fake", resources=Resources(cpu=cpu, gpu=gpu))
        gpu_trial = create_trial(1, 1)
        self.assertTrue(self.trial_executor.has_resources(gpu_trial.resources))
        self.trial_executor.start_trial(gpu_trial)
        # TODO(rliaw): This behavior is probably undesirable, but right now
        # trials with different resource requirements is not often used.
        cpu_only_trial = create_trial(1, 0)
        self.assertFalse(
            self.trial_executor.has_resources(cpu_only_trial.resources))
        self.cluster.add_node(num_cpus=1, num_gpus=1)
        self.cluster.wait_for_nodes()
        self.assertTrue(
            self.trial_executor.has_resources(cpu_only_trial.resources))
        self.trial_executor.start_trial(cpu_only_trial)
        cpu_only_trial2 = create_trial(1, 0)
        self.assertTrue(
            self.trial_executor.has_resources(cpu_only_trial2.resources))
        self.trial_executor.start_trial(cpu_only_trial2)
        cpu_only_trial3 = create_trial(1, 0)
        self.assertFalse(
            self.trial_executor.has_resources(cpu_only_trial3.resources))
class LocalModeExecutorTest(RayTrialExecutorTest):
    """Re-runs RayTrialExecutorTest against Ray in local mode.

    Only setUp differs (local_mode=True); the previous tearDown override
    was byte-identical to the parent's, so it is now simply inherited.
    """

    def setUp(self):
        self.trial_executor = RayTrialExecutor(queue_trials=False)
        ray.init(local_mode=True)
if __name__ == "__main__":
    # Allow running this module directly through pytest's CLI.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_run_experiment.py
|
Python
|
import os
import unittest
import ray
from ray.rllib import _register_all
from ray.tune.result import TIMESTEPS_TOTAL
from ray.tune import Trainable, TuneError
from ray.tune import register_trainable, run_experiments
from ray.tune.logger import Logger
from ray.tune.experiment import Experiment
from ray.tune.trial import Trial, ExportFormat
class RunExperimentTest(unittest.TestCase):
    """Covers run_experiments() entry points: dict specs, Experiment objects,
    lists, auto-registration, checkpointing, export, resources, loggers."""

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def testDict(self):
        def train(config, reporter):
            for i in range(100):
                reporter(timesteps_total=i)

        register_trainable("f1", train)
        trials = run_experiments({
            "foo": {
                "run": "f1",
            },
            "bar": {
                "run": "f1",
            }
        })
        for trial in trials:
            self.assertEqual(trial.status, Trial.TERMINATED)
            self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)

    def testExperiment(self):
        def train(config, reporter):
            for i in range(100):
                reporter(timesteps_total=i)

        register_trainable("f1", train)
        exp1 = Experiment(**{
            "name": "foo",
            "run": "f1",
        })
        [trial] = run_experiments(exp1)
        self.assertEqual(trial.status, Trial.TERMINATED)
        self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)

    def testExperimentList(self):
        def train(config, reporter):
            for i in range(100):
                reporter(timesteps_total=i)

        register_trainable("f1", train)
        exp1 = Experiment(**{
            "name": "foo",
            "run": "f1",
        })
        exp2 = Experiment(**{
            "name": "bar",
            "run": "f1",
        })
        trials = run_experiments([exp1, exp2])
        for trial in trials:
            self.assertEqual(trial.status, Trial.TERMINATED)
            self.assertEqual(trial.last_result[TIMESTEPS_TOTAL], 99)

    def testAutoregisterTrainable(self):
        # Functions and Trainable classes passed via "run" should be
        # registered automatically.
        def train(config, reporter):
            for i in range(100):
                reporter(timesteps_total=i)

        class B(Trainable):
            def _train(self):
                return {"timesteps_this_iter": 1, "done": True}

        register_trainable("f1", train)
        trials = run_experiments({
            "foo": {
                "run": train,
            },
            "bar": {
                "run": B
            }
        })
        for trial in trials:
            self.assertEqual(trial.status, Trial.TERMINATED)

    def testCheckpointAtEnd(self):
        class train(Trainable):
            def _train(self):
                return {"timesteps_this_iter": 1, "done": True}

            def _save(self, path):
                # os.path.join instead of string concatenation.
                checkpoint = os.path.join(path, "checkpoint")
                with open(checkpoint, "w") as f:
                    f.write("OK")
                return checkpoint

        trials = run_experiments({
            "foo": {
                "run": train,
                "checkpoint_at_end": True
            }
        })
        for trial in trials:
            self.assertEqual(trial.status, Trial.TERMINATED)
            self.assertTrue(trial.has_checkpoint())

    def testExportFormats(self):
        class train(Trainable):
            def _train(self):
                return {"timesteps_this_iter": 1, "done": True}

            def _export_model(self, export_formats, export_dir):
                # os.path.join instead of string concatenation.
                path = os.path.join(export_dir, "exported")
                with open(path, "w") as f:
                    f.write("OK")
                return {export_formats[0]: path}

        trials = run_experiments({
            "foo": {
                "run": train,
                "export_formats": ["format"]
            }
        })
        for trial in trials:
            self.assertEqual(trial.status, Trial.TERMINATED)
            self.assertTrue(
                os.path.exists(os.path.join(trial.logdir, "exported")))

    def testInvalidExportFormats(self):
        class train(Trainable):
            def _train(self):
                return {"timesteps_this_iter": 1, "done": True}

            def _export_model(self, export_formats, export_dir):
                ExportFormat.validate(export_formats)
                return {}

        def fail_trial():
            run_experiments({
                "foo": {
                    "run": train,
                    "export_formats": ["format"]
                }
            })

        self.assertRaises(TuneError, fail_trial)

    def testCustomResources(self):
        # Restart Ray with a custom resource available.
        ray.shutdown()
        ray.init(resources={"hi": 3})

        class train(Trainable):
            def _train(self):
                return {"timesteps_this_iter": 1, "done": True}

        trials = run_experiments({
            "foo": {
                "run": train,
                "resources_per_trial": {
                    "cpu": 1,
                    "custom_resources": {
                        "hi": 2
                    }
                }
            }
        })
        for trial in trials:
            self.assertEqual(trial.status, Trial.TERMINATED)

    def testCustomLogger(self):
        class CustomLogger(Logger):
            def on_result(self, result):
                with open(os.path.join(self.logdir, "test.log"), "w") as f:
                    f.write("hi")

        # Explicit loggers replace the defaults entirely.
        [trial] = run_experiments({
            "foo": {
                "run": "__fake",
                "stop": {
                    "training_iteration": 1
                },
                "loggers": [CustomLogger]
            }
        })
        self.assertTrue(os.path.exists(os.path.join(trial.logdir, "test.log")))
        self.assertFalse(
            os.path.exists(os.path.join(trial.logdir, "params.json")))
        # Omitting "loggers" restores the default loggers.
        [trial] = run_experiments({
            "foo": {
                "run": "__fake",
                "stop": {
                    "training_iteration": 1
                }
            }
        })
        self.assertTrue(
            os.path.exists(os.path.join(trial.logdir, "params.json")))
        # An explicitly empty list disables logging output.
        [trial] = run_experiments({
            "foo": {
                "run": "__fake",
                "stop": {
                    "training_iteration": 1
                },
                "loggers": []
            }
        })
        self.assertFalse(
            os.path.exists(os.path.join(trial.logdir, "params.json")))

    def testCustomTrialString(self):
        [trial] = run_experiments({
            "foo": {
                "run": "__fake",
                "stop": {
                    "training_iteration": 1
                },
                "trial_name_creator":
                lambda t: "{}_{}_321".format(t.trainable_name, t.trial_id)
            }
        })
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(
            str(trial), "{}_{}_321".format(trial.trainable_name,
                                           trial.trial_id))
if __name__ == "__main__":
    # Allow running this module directly through pytest's CLI.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_sync.py
|
Python
|
import glob
import os
import shutil
import sys
import tempfile
import unittest
from unittest.mock import patch
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune import TuneError
from ray.tune.syncer import CommandBasedClient
class TestSyncFunctionality(unittest.TestCase):
    """Covers sync_to_cloud / sync_to_driver wiring through tune.run:
    template validation, custom sync functions, and single-node no-op."""
    def setUp(self):
        ray.init()
    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects
    @patch("ray.tune.sync_client.S3_PREFIX", "test")
    def testNoUploadDir(self):
        """No Upload Dir is given."""
        with self.assertRaises(AssertionError):
            [trial] = tune.run(
                "__fake",
                name="foo",
                max_failures=0,
                **{
                    "stop": {
                        "training_iteration": 1
                    },
                    "sync_to_cloud": "echo {source} {target}"
                }).trials
    @patch("ray.tune.sync_client.S3_PREFIX", "test")
    def testCloudProperString(self):
        # Templates missing either {source} or {target} must be rejected.
        with self.assertRaises(ValueError):
            [trial] = tune.run(
                "__fake",
                name="foo",
                max_failures=0,
                **{
                    "stop": {
                        "training_iteration": 1
                    },
                    "upload_dir": "test",
                    "sync_to_cloud": "ls {target}"
                }).trials
        with self.assertRaises(ValueError):
            [trial] = tune.run(
                "__fake",
                name="foo",
                max_failures=0,
                **{
                    "stop": {
                        "training_iteration": 1
                    },
                    "upload_dir": "test",
                    "sync_to_cloud": "ls {source}"
                }).trials
        # A template with both placeholders is accepted and executed.
        tmpdir = tempfile.mkdtemp()
        logfile = os.path.join(tmpdir, "test.log")
        [trial] = tune.run(
            "__fake",
            name="foo",
            max_failures=0,
            **{
                "stop": {
                    "training_iteration": 1
                },
                "upload_dir": "test",
                "sync_to_cloud": "echo {source} {target} > " + logfile
            }).trials
        with open(logfile) as f:
            lines = f.read()
            self.assertTrue("test" in lines)
        shutil.rmtree(tmpdir)
    def testClusterProperString(self):
        """Tests that invalid commands throw.."""
        with self.assertRaises(TuneError):
            # This raises TuneError because logger is init in safe zone.
            [trial] = tune.run(
                "__fake",
                name="foo",
                max_failures=0,
                **{
                    "stop": {
                        "training_iteration": 1
                    },
                    "sync_to_driver": "ls {target}"
                }).trials
        with self.assertRaises(TuneError):
            # This raises TuneError because logger is init in safe zone.
            [trial] = tune.run(
                "__fake",
                name="foo",
                max_failures=0,
                **{
                    "stop": {
                        "training_iteration": 1
                    },
                    "sync_to_driver": "ls {source}"
                }).trials
        with patch.object(CommandBasedClient, "_execute") as mock_fn:
            with patch("ray.services.get_node_ip_address") as mock_sync:
                # Fake a remote node IP so the driver sync path is taken.
                mock_sync.return_value = "0.0.0.0"
                [trial] = tune.run(
                    "__fake",
                    name="foo",
                    max_failures=0,
                    **{
                        "stop": {
                            "training_iteration": 1
                        },
                        "sync_to_driver": "echo {source} {target}"
                    }).trials
                self.assertGreater(mock_fn.call_count, 0)
    def testCloudFunctions(self):
        tmpdir = tempfile.mkdtemp()
        tmpdir2 = tempfile.mkdtemp()
        os.mkdir(os.path.join(tmpdir2, "foo"))
        def sync_func(local, remote):
            for filename in glob.glob(os.path.join(local, "*.json")):
                shutil.copy(filename, remote)
        [trial] = tune.run(
            "__fake",
            name="foo",
            max_failures=0,
            local_dir=tmpdir,
            stop={
                "training_iteration": 1
            },
            upload_dir=tmpdir2,
            sync_to_cloud=sync_func).trials
        test_file_path = glob.glob(os.path.join(tmpdir2, "foo", "*.json"))
        self.assertTrue(test_file_path)
        shutil.rmtree(tmpdir)
        shutil.rmtree(tmpdir2)
    def testClusterSyncFunction(self):
        def sync_func_driver(source, target):
            assert ":" in source, "Source {} not a remote path.".format(source)
            assert ":" not in target, "Target is supposed to be local."
            with open(os.path.join(target, "test.log2"), "w") as f:
                print("writing to", f.name)
                f.write(source)
        # On a single node, the custom sync function should not fire.
        [trial] = tune.run(
            "__fake",
            name="foo",
            max_failures=0,
            stop={
                "training_iteration": 1
            },
            sync_to_driver=sync_func_driver).trials
        test_file_path = os.path.join(trial.logdir, "test.log2")
        self.assertFalse(os.path.exists(test_file_path))
        with patch("ray.services.get_node_ip_address") as mock_sync:
            # Fake a remote node IP so the custom sync function fires.
            mock_sync.return_value = "0.0.0.0"
            [trial] = tune.run(
                "__fake",
                name="foo",
                max_failures=0,
                stop={
                    "training_iteration": 1
                },
                sync_to_driver=sync_func_driver).trials
        test_file_path = os.path.join(trial.logdir, "test.log2")
        self.assertTrue(os.path.exists(test_file_path))
        os.remove(test_file_path)
    def testNoSync(self):
        """Sync should not run on a single node."""
        def sync_func(source, target):
            pass
        with patch.object(CommandBasedClient, "_execute") as mock_sync:
            [trial] = tune.run(
                "__fake",
                name="foo",
                max_failures=0,
                **{
                    "stop": {
                        "training_iteration": 1
                    },
                    "sync_to_driver": sync_func
                }).trials
            self.assertEqual(mock_sync.call_count, 0)
if __name__ == "__main__":
    # Allow running this module directly through pytest's CLI.
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_track.py
|
Python
|
import os
import pandas as pd
import unittest
import ray
from ray import tune
from ray.tune import track
from ray.tune.result import EXPR_PARAM_FILE, EXPR_RESULT_FILE
def _check_json_val(fname, key, val):
with open(fname, "r") as f:
df = pd.read_json(f, typ="frame", lines=True)
return key in df.columns and (df[key].tail(n=1) == val).all()
class TrackApiTest(unittest.TestCase):
    """Exercises the ray.tune.track session API: init/shutdown lifecycle,
    log-file creation, and metric logging in local and remote modes."""
    def tearDown(self):
        track.shutdown()
        ray.shutdown()
    def testSessionInitShutdown(self):
        self.assertTrue(track._session is None)
        # Checks that the singleton _session is created/destroyed
        # by track.init() and track.shutdown()
        for _ in range(2):
            # do it twice to see that we can reopen the session
            track.init(trial_name="test_init")
            self.assertTrue(track._session is not None)
            track.shutdown()
            self.assertTrue(track._session is None)
    def testLogCreation(self):
        """Checks that track.init() starts logger and creates log files."""
        track.init(trial_name="test_init")
        session = track.get_session()
        self.assertTrue(session is not None)
        self.assertTrue(os.path.isdir(session.logdir))
        params_path = os.path.join(session.logdir, EXPR_PARAM_FILE)
        result_path = os.path.join(session.logdir, EXPR_RESULT_FILE)
        self.assertTrue(os.path.exists(params_path))
        self.assertTrue(os.path.exists(result_path))
        self.assertTrue(session.logdir == track.trial_dir())
    def testMetric(self):
        track.init(trial_name="test_log")
        session = track.get_session()
        for i in range(5):
            track.log(test=i)
        # Only the last logged value is checked against the result file.
        result_path = os.path.join(session.logdir, EXPR_RESULT_FILE)
        self.assertTrue(_check_json_val(result_path, "test", i))
    def testRayOutput(self):
        """Checks that local and remote log format are the same."""
        ray.init()
        def testme(config):
            for i in range(config["iters"]):
                track.log(iteration=i, hi="test")
        trials = tune.run(testme, config={"iters": 5}).trials
        trial_res = trials[0].last_result
        self.assertTrue(trial_res["hi"], "test")
        self.assertTrue(trial_res["training_iteration"], 5)
    def testLocalMetrics(self):
        """Checks that metric state is updated correctly."""
        track.init(trial_name="test_logs")
        session = track.get_session()
        self.assertEqual(set(session.trial_config.keys()), {"trial_id"})
        result_path = os.path.join(session.logdir, EXPR_RESULT_FILE)
        track.log(test=1)
        self.assertTrue(_check_json_val(result_path, "test", 1))
        track.log(iteration=1, test=2)
        self.assertTrue(_check_json_val(result_path, "test", 2))
if __name__ == "__main__":
    # Allow running this module directly through pytest's CLI.
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_trainable_util.py
|
Python
|
import os
import pickle
import shutil
import unittest
from ray.tune.trainable import TrainableUtil
class TrainableUtilTest(unittest.TestCase):
    """Covers checkpoint-directory discovery and checkpoint pickling."""

    def setUp(self):
        self.checkpoint_dir = "/tmp/tune/MyTrainable123"
        TrainableUtil.make_checkpoint_dir(self.checkpoint_dir)

    def tearDown(self):
        self.addCleanup(shutil.rmtree, self.checkpoint_dir)

    def testFindCheckpointDir(self):
        checkpoint_path = os.path.join(self.checkpoint_dir, "my/nested/chkpt")
        os.makedirs(checkpoint_path)
        found_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(self.checkpoint_dir, found_dir)
        with self.assertRaises(FileNotFoundError):
            parent = os.path.dirname(found_dir)
            TrainableUtil.find_checkpoint_dir(parent)

    def testPickleCheckpoint(self):
        for i in range(5):
            path = os.path.join(self.checkpoint_dir, str(i))
            with open(path, "w") as f:
                f.write(str(i))
        checkpoint_path = os.path.join(self.checkpoint_dir, "0")
        data_dict = TrainableUtil.pickle_checkpoint(checkpoint_path)
        loaded = pickle.loads(data_dict)
        checkpoint_name = os.path.basename(checkpoint_path)
        self.assertEqual(loaded["checkpoint_name"], checkpoint_name)
        for i in range(5):
            path = os.path.join(self.checkpoint_dir, str(i))
            # Close the handle explicitly instead of leaking it.
            with open(path, "rb") as f:
                self.assertEqual(loaded["data"][str(i)], f.read())
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_trial_runner.py
|
Python
|
import sys
import unittest
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune import TuneError, register_trainable
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.schedulers import TrialScheduler, FIFOScheduler
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.resources import Resources
from ray.tune.suggest import BasicVariantGenerator
class TrialRunnerTest(unittest.TestCase):
    """TrialRunner / RayTrialExecutor tests: trial lifecycle transitions and
    resource accounting (extra, custom, and fractional CPU/GPU resources).

    NOTE(review): this extract had lost all indentation; block structure below
    is reconstructed from the statement logic — verify against upstream.
    """

    def setUp(self):
        _register_all()  # re-register the evicted objects

    def tearDown(self):
        ray.shutdown()

    def testTrialStatus(self):
        """A trial walks PENDING -> RUNNING -> TERMINATED, or ERROR on stop(error=True)."""
        ray.init()
        trial = Trial("__fake")
        trial_executor = RayTrialExecutor()
        self.assertEqual(trial.status, Trial.PENDING)
        trial_executor.start_trial(trial)
        self.assertEqual(trial.status, Trial.RUNNING)
        trial_executor.stop_trial(trial)
        self.assertEqual(trial.status, Trial.TERMINATED)
        trial_executor.stop_trial(trial, error=True)
        self.assertEqual(trial.status, Trial.ERROR)

    def testExperimentTagTruncation(self):
        """Very long config keys/values keep the trial logdir name <= 200 chars."""
        ray.init()

        def train(config, reporter):
            reporter(timesteps_total=1)

        trial_executor = RayTrialExecutor()
        register_trainable("f1", train)
        experiments = {
            "foo": {
                "run": "f1",
                "config": {
                    "a" * 50: tune.sample_from(lambda spec: 5.0 / 7),
                    "b" * 50: tune.sample_from(lambda spec: "long" * 40)
                },
            }
        }
        for name, spec in experiments.items():
            trial_generator = BasicVariantGenerator()
            trial_generator.add_configurations({name: spec})
            for trial in trial_generator.next_trials():
                trial_executor.start_trial(trial)
                self.assertLessEqual(len(trial.logdir), 200)
                trial_executor.stop_trial(trial)

    def testExtraResources(self):
        """extra_cpu/extra_gpu reservations keep the second trial PENDING."""
        ray.init(num_cpus=4, num_gpus=2)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=0, extra_cpu=3, extra_gpu=1),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)

    def testCustomResources(self):
        """Custom resource demands are honored when scheduling trials."""
        ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=0, custom_resources={"a": 2}),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)

    def testExtraCustomResources(self):
        """extra_custom_resources block a second concurrent trial."""
        ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(
                cpu=1, gpu=0, extra_custom_resources={"a": 2}),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertTrue(sum(t.status == Trial.RUNNING for t in trials) < 2)
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)

    def testCustomResources2(self):
        """has_resources() accounts for both custom and extra_custom demands."""
        ray.init(num_cpus=4, num_gpus=2, resources={"a": 2})
        runner = TrialRunner()
        resource1 = Resources(cpu=1, gpu=0, extra_custom_resources={"a": 2})
        self.assertTrue(runner.has_resources(resource1))
        resource2 = Resources(cpu=1, gpu=0, custom_resources={"a": 2})
        self.assertTrue(runner.has_resources(resource2))
        resource3 = Resources(cpu=1, gpu=0, custom_resources={"a": 3})
        self.assertFalse(runner.has_resources(resource3))
        resource4 = Resources(cpu=1, gpu=0, extra_custom_resources={"a": 3})
        self.assertFalse(runner.has_resources(resource4))

    def testFractionalGpus(self):
        """Two gpu=0.5 trials share one GPU; further trials stay PENDING."""
        ray.init(num_cpus=4, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "resources": Resources(cpu=1, gpu=0.5),
        }
        trials = [
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs)
        ]
        for t in trials:
            runner.add_trial(t)
        for _ in range(10):
            runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        self.assertEqual(trials[2].status, Trial.PENDING)
        self.assertEqual(trials[3].status, Trial.PENDING)

    def testResourceNumericalError(self):
        """Repeated float subtraction must not drive resources negative."""
        resource = Resources(cpu=0.99, gpu=0.99, custom_resources={"a": 0.99})
        small_resource = Resources(
            cpu=0.33, gpu=0.33, custom_resources={"a": 0.33})
        for i in range(3):
            resource = Resources.subtract(resource, small_resource)
        self.assertTrue(resource.is_nonnegative())

    def testResourceScheduler(self):
        """With one GPU, two gpu=1 trials run strictly one after another."""
        ray.init(num_cpus=4, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.TERMINATED)

    def testMultiStepRun(self):
        """Both trials stay RUNNING across several steps when capacity allows."""
        ray.init(num_cpus=4, num_gpus=2)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 5
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.RUNNING)

    def testMultiStepRun2(self):
        """Checks that runner.step throws when overstepping."""
        ray.init(num_cpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=0),
        }
        trials = [Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertRaises(TuneError, runner.step)

    def testChangeResources(self):
        """Checks that resource requirements can be changed on fly."""
        ray.init(num_cpus=2)

        class ChangingScheduler(FIFOScheduler):
            def on_trial_result(self, trial_runner, trial, result):
                if result["training_iteration"] == 1:
                    executor = trial_runner.trial_executor
                    executor.stop_trial(trial, stop_logger=False)
                    trial.update_resources(2, 0)
                    executor.start_trial(trial)
                return TrialScheduler.CONTINUE

        runner = TrialRunner(scheduler=ChangingScheduler())
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=0),
        }
        trials = [Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(runner.trial_executor._committed_resources.cpu, 1)
        # A RUNNING trial may not change its own resources directly.
        self.assertRaises(ValueError, lambda: trials[0].update_resources(2, 0))
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(runner.trial_executor._committed_resources.cpu, 2)
if __name__ == "__main__":
    # Run this module's tests under pytest; propagate its exit code.
    import pytest
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_trial_runner_2.py
|
Python
|
import os
import sys
import unittest
from unittest.mock import patch
import ray
from ray.rllib import _register_all
from ray.tune import TuneError
from ray.tune.schedulers import FIFOScheduler
from ray.tune.result import DONE
from ray.tune.registry import _global_registry, TRAINABLE_CLASS
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.resources import Resources
from ray.tune.suggest import BasicVariantGenerator
def create_mock_components():
    """Build a ``(search_alg, scheduler)`` pair of recording test doubles.

    Each double appends to its ``errored_trials`` list, so tests can assert
    how many trials were reported as failed to each component.

    Returns:
        Tuple of (_MockSearchAlg instance, _MockScheduler instance).
    """

    class _MockScheduler(FIFOScheduler):
        # Class-level list; safe here because fresh classes are created on
        # every create_mock_components() call.
        errored_trials = []

        def on_trial_error(self, trial_runner, trial):
            self.errored_trials += [trial]

    class _MockSearchAlg(BasicVariantGenerator):
        errored_trials = []

        def on_trial_complete(self, trial_id, error=False, **kwargs):
            if error:
                self.errored_trials += [trial_id]

    searchalg = _MockSearchAlg()
    scheduler = _MockScheduler()
    return searchalg, scheduler
class TrialRunnerTest2(unittest.TestCase):
    """TrialRunner error handling, failure recovery, and checkpointing tests.

    NOTE(review): this extract had lost all indentation; block structure below
    is reconstructed from the statement logic — verify against upstream.
    """

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def testErrorHandling(self):
        """A trainable registered as None errors out; other trials still run."""
        ray.init(num_cpus=4, num_gpus=2)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        _global_registry.register(TRAINABLE_CLASS, "asdf", None)
        trials = [Trial("asdf", **kwargs), Trial("__fake", **kwargs)]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[1].status, Trial.RUNNING)

    def testThrowOnOverstep(self):
        """Stepping a runner with no remaining work raises TuneError."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        runner.step()
        self.assertRaises(TuneError, runner.step)

    def testFailureRecoveryDisabled(self):
        """max_failures=0: first error surfaces to searcher and scheduler."""
        ray.init(num_cpus=1, num_gpus=1)
        searchalg, scheduler = create_mock_components()
        runner = TrialRunner(searchalg, scheduler=scheduler)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 0,
            "config": {
                "mock_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[0].num_failures, 1)
        self.assertEqual(len(searchalg.errored_trials), 1)
        self.assertEqual(len(scheduler.errored_trials), 1)

    def testFailureRecoveryEnabled(self):
        """max_failures=1: the trial recovers and no error is reported."""
        ray.init(num_cpus=1, num_gpus=1)
        searchalg, scheduler = create_mock_components()
        runner = TrialRunner(searchalg, scheduler=scheduler)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 1,
            "config": {
                "mock_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[0].num_failures, 1)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(len(searchalg.errored_trials), 0)
        self.assertEqual(len(scheduler.errored_trials), 0)

    def testFailureRecoveryNodeRemoval(self):
        """A cluster shrink mid-run sends the trial back to PENDING once."""
        ray.init(num_cpus=1, num_gpus=1)
        searchalg, scheduler = create_mock_components()
        runner = TrialRunner(searchalg, scheduler=scheduler)
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 1,
            "config": {
                "mock_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        with patch("ray.cluster_resources") as resource_mock:
            resource_mock.return_value = {"CPU": 1, "GPU": 1}
            runner.step()
            self.assertEqual(trials[0].status, Trial.RUNNING)
            runner.step()
            self.assertEqual(trials[0].status, Trial.RUNNING)
            # Mimic a node failure
            resource_mock.return_value = {"CPU": 0, "GPU": 0}
            runner.step()
            self.assertEqual(trials[0].status, Trial.PENDING)
            self.assertEqual(trials[0].num_failures, 1)
            self.assertEqual(len(searchalg.errored_trials), 0)
            self.assertEqual(len(scheduler.errored_trials), 1)

    def testFailureRecoveryMaxFailures(self):
        """A persistently failing trial errors after exhausting max_failures."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
            "checkpoint_freq": 1,
            "max_failures": 2,
            "config": {
                "mock_error": True,
                "persistent_error": True,
            },
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[0].num_failures, 1)
        runner.step()  # Restore step
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[0].num_failures, 2)
        runner.step()  # Restore step
        runner.step()
        self.assertEqual(trials[0].status, Trial.ERROR)
        self.assertEqual(trials[0].num_failures, 3)

    def testCheckpointing(self):
        """A saved checkpoint can seed a second trial via restore_path."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 1
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
        path = runner.trial_executor.save(trials[0])
        kwargs["restore_path"] = path
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        # The restored trial sees the info stored before checkpointing.
        self.assertEqual(ray.get(trials[1].runner.get_info.remote()), 1)
        self.addCleanup(os.remove, path)

    def testRestoreMetricsAfterCheckpointing(self):
        """*_since_restore metrics restart counting after a restore."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
        path = runner.trial_executor.save(trials[0])
        runner.trial_executor.stop_trial(trials[0])
        kwargs["restore_path"] = path
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        runner.step()  # Restore step
        runner.step()
        self.assertEqual(trials[1].last_result["timesteps_since_restore"], 10)
        self.assertEqual(trials[1].last_result["iterations_since_restore"], 1)
        self.assertGreater(trials[1].last_result["time_since_restore"], 0)
        runner.step()
        self.assertEqual(trials[1].last_result["timesteps_since_restore"], 20)
        self.assertEqual(trials[1].last_result["iterations_since_restore"], 2)
        self.assertGreater(trials[1].last_result["time_since_restore"], 0)
        self.addCleanup(os.remove, path)

    def testCheckpointingAtEnd(self):
        """checkpoint_at_end=True leaves a checkpoint once the trial is DONE."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "checkpoint_at_end": True,
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        runner.step()
        self.assertEqual(trials[0].last_result[DONE], True)
        self.assertEqual(trials[0].has_checkpoint(), True)

    def testResultDone(self):
        """Tests that last_result is marked `done` after trial is complete."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertNotEqual(trials[0].last_result[DONE], True)
        runner.step()
        self.assertEqual(trials[0].last_result[DONE], True)

    def testPauseThenResume(self):
        """Pausing and resuming a trial preserves its in-actor state."""
        ray.init(num_cpus=1, num_gpus=1)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 2
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.get_info.remote()), None)
        self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
        runner.trial_executor.pause_trial(trials[0])
        self.assertEqual(trials[0].status, Trial.PAUSED)
        runner.trial_executor.resume_trial(trials[0])
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.get_info.remote()), 1)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
if __name__ == "__main__":
    # Run this module's tests under pytest; propagate its exit code.
    import pytest
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_trial_runner_3.py
|
Python
|
import os
import shutil
import sys
import tempfile
import unittest
import ray
from ray.rllib import _register_all
from ray.tune import TuneError
from ray.tune.schedulers import TrialScheduler, FIFOScheduler
from ray.tune.experiment import Experiment
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.resources import Resources, json_to_resources, resources_to_json
from ray.tune.suggest.suggestion import (_MockSuggestionAlgorithm,
SuggestionAlgorithm)
class TrialRunnerTest3(unittest.TestCase):
    """TrialRunner tests: executor hooks, SearchAlgorithm interplay, and
    experiment-state checkpoint/restore.

    NOTE(review): this extract had lost all indentation; block structure below
    is reconstructed from the statement logic — verify against upstream.
    Deprecated ``assertEquals`` calls were replaced with ``assertEqual``
    (the alias is removed in Python 3.12).
    """

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def testStepHook(self):
        """Executor on_step_begin/on_step_end hooks fire once per step."""
        ray.init(num_cpus=4, num_gpus=2)
        runner = TrialRunner()

        def on_step_begin(self, trialrunner):
            self._update_avail_resources()
            cnt = self.pre_step if hasattr(self, "pre_step") else 0
            setattr(self, "pre_step", cnt + 1)

        def on_step_end(self, trialrunner):
            # NOTE(review): guards on "post_step" but reads pre_step — likely
            # intended self.post_step; harmless for this single-step assertion.
            cnt = self.pre_step if hasattr(self, "post_step") else 0
            setattr(self, "post_step", 1 + cnt)

        import types
        runner.trial_executor.on_step_begin = types.MethodType(
            on_step_begin, runner.trial_executor)
        runner.trial_executor.on_step_end = types.MethodType(
            on_step_end, runner.trial_executor)
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 5
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        runner.add_trial(Trial("__fake", **kwargs))
        runner.step()
        self.assertEqual(runner.trial_executor.pre_step, 1)
        self.assertEqual(runner.trial_executor.post_step, 1)

    def testStopTrial(self):
        """stop_trial terminates both RUNNING and PENDING trials."""
        ray.init(num_cpus=4, num_gpus=2)
        runner = TrialRunner()
        kwargs = {
            "stopping_criterion": {
                "training_iteration": 5
            },
            "resources": Resources(cpu=1, gpu=1),
        }
        trials = [
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs),
            Trial("__fake", **kwargs)
        ]
        for t in trials:
            runner.add_trial(t)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)
        # Stop trial while running
        runner.stop_trial(trials[0])
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.PENDING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        self.assertEqual(trials[-1].status, Trial.PENDING)
        # Stop trial while pending
        runner.stop_trial(trials[-1])
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        self.assertEqual(trials[-1].status, Trial.TERMINATED)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(trials[1].status, Trial.RUNNING)
        self.assertEqual(trials[2].status, Trial.RUNNING)
        self.assertEqual(trials[-1].status, Trial.TERMINATED)

    def testSearchAlgNotification(self):
        """Checks notification of trial to the Search Algorithm."""
        ray.init(num_cpus=4, num_gpus=2)
        experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}}
        experiments = [Experiment.from_json("test", experiment_spec)]
        searcher = _MockSuggestionAlgorithm(max_concurrent=10)
        searcher.add_configurations(experiments)
        runner = TrialRunner(search_alg=searcher)
        runner.step()
        trials = runner.get_trials()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(searcher.counter["result"], 1)
        self.assertEqual(searcher.counter["complete"], 1)

    def testSearchAlgFinished(self):
        """Checks that SearchAlg is Finished before all trials are done."""
        ray.init(num_cpus=4, num_gpus=2)
        experiment_spec = {"run": "__fake", "stop": {"training_iteration": 1}}
        experiments = [Experiment.from_json("test", experiment_spec)]
        searcher = _MockSuggestionAlgorithm(max_concurrent=10)
        searcher.add_configurations(experiments)
        runner = TrialRunner(search_alg=searcher)
        runner.step()
        trials = runner.get_trials()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertTrue(searcher.is_finished())
        self.assertFalse(runner.is_finished())
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(len(searcher.live_trials), 0)
        self.assertTrue(searcher.is_finished())
        self.assertTrue(runner.is_finished())

    def testSearchAlgSchedulerInteraction(self):
        """Checks that TrialScheduler killing trial will notify SearchAlg."""

        class _MockScheduler(FIFOScheduler):
            def on_trial_result(self, *args, **kwargs):
                return TrialScheduler.STOP

        ray.init(num_cpus=4, num_gpus=2)
        experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}}
        experiments = [Experiment.from_json("test", experiment_spec)]
        searcher = _MockSuggestionAlgorithm(max_concurrent=10)
        searcher.add_configurations(experiments)
        runner = TrialRunner(search_alg=searcher, scheduler=_MockScheduler())
        runner.step()
        trials = runner.get_trials()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertTrue(searcher.is_finished())
        self.assertFalse(runner.is_finished())
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        self.assertEqual(len(searcher.live_trials), 0)
        self.assertTrue(searcher.is_finished())
        self.assertTrue(runner.is_finished())

    def testSearchAlgSchedulerEarlyStop(self):
        """Early termination notif to Searcher can be turned off."""

        class _MockScheduler(FIFOScheduler):
            def on_trial_result(self, *args, **kwargs):
                return TrialScheduler.STOP

        ray.init(num_cpus=4, num_gpus=2)
        experiment_spec = {"run": "__fake", "stop": {"training_iteration": 2}}
        experiments = [Experiment.from_json("test", experiment_spec)]
        searcher = _MockSuggestionAlgorithm(use_early_stopped_trials=True)
        searcher.add_configurations(experiments)
        runner = TrialRunner(search_alg=searcher, scheduler=_MockScheduler())
        runner.step()
        runner.step()
        self.assertEqual(len(searcher.final_results), 1)
        searcher = _MockSuggestionAlgorithm(use_early_stopped_trials=False)
        searcher.add_configurations(experiments)
        runner = TrialRunner(search_alg=searcher, scheduler=_MockScheduler())
        runner.step()
        runner.step()
        self.assertEqual(len(searcher.final_results), 0)

    def testSearchAlgStalled(self):
        """Checks that runner and searcher state is maintained when stalled."""
        ray.init(num_cpus=4, num_gpus=2)
        experiment_spec = {
            "run": "__fake",
            "num_samples": 3,
            "stop": {
                "training_iteration": 1
            }
        }
        experiments = [Experiment.from_json("test", experiment_spec)]
        searcher = _MockSuggestionAlgorithm(max_concurrent=1)
        searcher.add_configurations(experiments)
        runner = TrialRunner(search_alg=searcher)
        runner.step()
        trials = runner.get_trials()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[1].status, Trial.RUNNING)
        self.assertEqual(len(searcher.live_trials), 1)
        searcher.stall = True
        runner.step()
        self.assertEqual(trials[1].status, Trial.TERMINATED)
        self.assertEqual(len(searcher.live_trials), 0)
        self.assertTrue(all(trial.is_finished() for trial in trials))
        self.assertFalse(searcher.is_finished())
        self.assertFalse(runner.is_finished())
        searcher.stall = False
        runner.step()
        trials = runner.get_trials()
        self.assertEqual(trials[2].status, Trial.RUNNING)
        self.assertEqual(len(searcher.live_trials), 1)
        runner.step()
        self.assertEqual(trials[2].status, Trial.TERMINATED)
        self.assertEqual(len(searcher.live_trials), 0)
        self.assertTrue(searcher.is_finished())
        self.assertTrue(runner.is_finished())

    def testSearchAlgFinishes(self):
        """Empty SearchAlg changing state in `next_trials` does not crash."""

        class FinishFastAlg(SuggestionAlgorithm):
            _index = 0

            def next_trials(self):
                trials = []
                self._index += 1
                for trial in self._trial_generator:
                    trials += [trial]
                    break
                if self._index > 4:
                    self._finished = True
                return trials

            def _suggest(self, trial_id):
                return {}

        ray.init(num_cpus=2)
        experiment_spec = {
            "run": "__fake",
            "num_samples": 2,
            "stop": {
                "training_iteration": 1
            }
        }
        searcher = FinishFastAlg()
        experiments = [Experiment.from_json("test", experiment_spec)]
        searcher.add_configurations(experiments)
        runner = TrialRunner(search_alg=searcher)
        self.assertFalse(runner.is_finished())
        runner.step()  # This launches a new run
        runner.step()  # This launches a 2nd run
        self.assertFalse(searcher.is_finished())
        self.assertFalse(runner.is_finished())
        runner.step()  # This kills the first run
        self.assertFalse(searcher.is_finished())
        self.assertFalse(runner.is_finished())
        runner.step()  # This kills the 2nd run
        self.assertFalse(searcher.is_finished())
        self.assertFalse(runner.is_finished())
        runner.step()  # this converts self._finished to True
        self.assertTrue(searcher.is_finished())
        self.assertRaises(TuneError, runner.step)

    def testTrialSaveRestore(self):
        """Creates different trials to test runner.checkpoint/restore."""
        ray.init(num_cpus=3)
        tmpdir = tempfile.mkdtemp()
        runner = TrialRunner(local_checkpoint_dir=tmpdir, checkpoint_period=0)
        trials = [
            Trial(
                "__fake",
                trial_id="trial_terminate",
                stopping_criterion={"training_iteration": 1},
                checkpoint_freq=1)
        ]
        runner.add_trial(trials[0])
        runner.step()  # start
        runner.step()
        self.assertEqual(trials[0].status, Trial.TERMINATED)
        trials += [
            Trial(
                "__fake",
                trial_id="trial_fail",
                stopping_criterion={"training_iteration": 3},
                checkpoint_freq=1,
                config={"mock_error": True})
        ]
        runner.add_trial(trials[1])
        runner.step()
        runner.step()
        runner.step()
        self.assertEqual(trials[1].status, Trial.ERROR)
        trials += [
            Trial(
                "__fake",
                trial_id="trial_succ",
                stopping_criterion={"training_iteration": 2},
                checkpoint_freq=1)
        ]
        runner.add_trial(trials[2])
        runner.step()
        self.assertEqual(len(runner.trial_executor.get_checkpoints()), 3)
        self.assertEqual(trials[2].status, Trial.RUNNING)
        runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=tmpdir)
        for tid in ["trial_terminate", "trial_fail"]:
            original_trial = runner.get_trial(tid)
            restored_trial = runner2.get_trial(tid)
            self.assertEqual(original_trial.status, restored_trial.status)
        restored_trial = runner2.get_trial("trial_succ")
        self.assertEqual(Trial.PENDING, restored_trial.status)
        runner2.step()
        runner2.step()
        runner2.step()
        self.assertRaises(TuneError, runner2.step)
        shutil.rmtree(tmpdir)

    def testTrialNoSave(self):
        """Check that non-checkpointing trials are not saved."""
        ray.init(num_cpus=3)
        tmpdir = tempfile.mkdtemp()
        runner = TrialRunner(local_checkpoint_dir=tmpdir, checkpoint_period=0)
        runner.add_trial(
            Trial(
                "__fake",
                trial_id="non_checkpoint",
                stopping_criterion={"training_iteration": 2}))
        while not all(t.status == Trial.TERMINATED
                      for t in runner.get_trials()):
            runner.step()
        runner.add_trial(
            Trial(
                "__fake",
                trial_id="checkpoint",
                checkpoint_at_end=True,
                stopping_criterion={"training_iteration": 2}))
        while not all(t.status == Trial.TERMINATED
                      for t in runner.get_trials()):
            runner.step()
        runner.add_trial(
            Trial(
                "__fake",
                trial_id="pending",
                stopping_criterion={"training_iteration": 2}))
        runner.step()
        runner.step()
        runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=tmpdir)
        new_trials = runner2.get_trials()
        self.assertEqual(len(new_trials), 3)
        self.assertTrue(
            runner2.get_trial("non_checkpoint").status == Trial.TERMINATED)
        self.assertTrue(
            runner2.get_trial("checkpoint").status == Trial.TERMINATED)
        self.assertTrue(runner2.get_trial("pending").status == Trial.PENDING)
        self.assertTrue(not runner2.get_trial("pending").last_result)
        runner2.step()
        shutil.rmtree(tmpdir)

    def testCheckpointWithFunction(self):
        """Configs containing lambdas survive runner checkpoint/restore."""
        ray.init()
        trial = Trial(
            "__fake",
            config={"callbacks": {
                "on_episode_start": lambda i: i,
            }},
            checkpoint_freq=1)
        tmpdir = tempfile.mkdtemp()
        runner = TrialRunner(local_checkpoint_dir=tmpdir, checkpoint_period=0)
        runner.add_trial(trial)
        for i in range(5):
            runner.step()
        # force checkpoint
        runner.checkpoint()
        runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=tmpdir)
        new_trial = runner2.get_trials()[0]
        self.assertTrue("callbacks" in new_trial.config)
        self.assertTrue("on_episode_start" in new_trial.config["callbacks"])
        shutil.rmtree(tmpdir)

    def testCheckpointOverwrite(self):
        """Each runner keeps exactly one experiment_state file, overwritten in place."""

        def count_checkpoints(cdir):
            return sum((fname.startswith("experiment_state")
                        and fname.endswith(".json"))
                       for fname in os.listdir(cdir))

        ray.init()
        trial = Trial("__fake", checkpoint_freq=1)
        tmpdir = tempfile.mkdtemp()
        runner = TrialRunner(local_checkpoint_dir=tmpdir, checkpoint_period=0)
        runner.add_trial(trial)
        for i in range(5):
            runner.step()
        # force checkpoint
        runner.checkpoint()
        self.assertEqual(count_checkpoints(tmpdir), 1)
        runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=tmpdir)
        for i in range(5):
            runner2.step()
        self.assertEqual(count_checkpoints(tmpdir), 2)
        runner2.checkpoint()
        self.assertEqual(count_checkpoints(tmpdir), 2)
        shutil.rmtree(tmpdir)

    def testUserCheckpoint(self):
        """user_checkpoint_freq triggers a checkpoint on the right iteration."""
        ray.init(num_cpus=3)
        tmpdir = tempfile.mkdtemp()
        runner = TrialRunner(local_checkpoint_dir=tmpdir, checkpoint_period=0)
        runner.add_trial(Trial("__fake", config={"user_checkpoint_freq": 2}))
        trials = runner.get_trials()
        runner.step()
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(ray.get(trials[0].runner.set_info.remote(1)), 1)
        runner.step()  # 0
        self.assertFalse(trials[0].has_checkpoint())
        runner.step()  # 1
        self.assertFalse(trials[0].has_checkpoint())
        runner.step()  # 2
        self.assertTrue(trials[0].has_checkpoint())
        runner2 = TrialRunner(resume="LOCAL", local_checkpoint_dir=tmpdir)
        runner2.step()
        trials2 = runner2.get_trials()
        self.assertEqual(ray.get(trials2[0].runner.get_info.remote()), 1)
        shutil.rmtree(tmpdir)
class SearchAlgorithmTest(unittest.TestCase):
    """Behavior of SuggestionAlgorithm configuration handling."""

    def testNestedSuggestion(self):
        """Nested config dicts are flattened into the trial's experiment tag."""

        class TestSuggestion(SuggestionAlgorithm):
            def _suggest(self, trial_id):
                return {"a": {"b": {"c": {"d": 4, "e": 5}}}}

        alg = TestSuggestion()
        alg.add_configurations({"test": {"run": "__fake"}})
        trial = alg.next_trials()[0]
        self.assertTrue("e=5" in trial.experiment_tag)
        self.assertTrue("d=4" in trial.experiment_tag)
class ResourcesTest(unittest.TestCase):
    """Arithmetic and (de)serialization behavior of tune.Resources."""

    def testSubtraction(self):
        """Subtracting two equal resource sets yields all-zero resources."""
        resource_1 = Resources(
            1,
            0,
            0,
            1,
            custom_resources={
                "a": 1,
                "b": 2
            },
            extra_custom_resources={
                "a": 1,
                "b": 1
            })
        resource_2 = Resources(
            1,
            0,
            0,
            1,
            custom_resources={
                "a": 1,
                "b": 2
            },
            extra_custom_resources={
                "a": 1,
                "b": 1
            })
        new_res = Resources.subtract(resource_1, resource_2)
        self.assertTrue(new_res.cpu == 0)
        self.assertTrue(new_res.gpu == 0)
        self.assertTrue(new_res.extra_cpu == 0)
        self.assertTrue(new_res.extra_gpu == 0)
        self.assertTrue(all(k == 0 for k in new_res.custom_resources.values()))
        self.assertTrue(
            all(k == 0 for k in new_res.extra_custom_resources.values()))

    def testDifferentResources(self):
        """Subtraction merges differing custom-resource keys into the result."""
        resource_1 = Resources(1, 0, 0, 1, custom_resources={"a": 1, "b": 2})
        resource_2 = Resources(1, 0, 0, 1, custom_resources={"a": 1, "c": 2})
        new_res = Resources.subtract(resource_1, resource_2)
        assert "c" in new_res.custom_resources
        assert "b" in new_res.custom_resources
        self.assertTrue(new_res.cpu == 0)
        self.assertTrue(new_res.gpu == 0)
        self.assertTrue(new_res.extra_cpu == 0)
        self.assertTrue(new_res.extra_gpu == 0)
        self.assertTrue(new_res.get("a") == 0)

    def testSerialization(self):
        """Resources survive a json round-trip unchanged."""
        original = Resources(1, 0, 0, 1, custom_resources={"a": 1, "b": 2})
        jsoned = resources_to_json(original)
        new_resource = json_to_resources(jsoned)
        # assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual(original, new_resource)
if __name__ == "__main__":
    # Run this module's tests under pytest; propagate its exit code.
    import pytest
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_trial_scheduler.py
|
Python
|
import os
import json
import random
import unittest
import numpy as np
import sys
import tempfile
import shutil
from unittest.mock import MagicMock
import ray
from ray.tune.result import TRAINING_ITERATION
from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
PopulationBasedTraining, MedianStoppingRule,
TrialScheduler, HyperBandForBOHB)
from ray.tune.schedulers.pbt import explore
from ray.tune.trial import Trial, Checkpoint
from ray.tune.trial_executor import TrialExecutor
from ray.tune.resources import Resources
from ray.rllib import _register_all
# Eagerly register RLlib trainables at import time so Trial("PPO") below
# can resolve the trainable by name.
_register_all()
def result(t, rew):
    """Build a minimal Tune result dict for the scheduler tests.

    ``training_iteration`` is derived from the (possibly fractional)
    elapsed time ``t``.
    """
    return {
        "time_total_s": t,
        "episode_reward_mean": rew,
        "training_iteration": int(t),
    }
def mock_trial_runner(trials=None):
    """Return a MagicMock standing in for a TrialRunner.

    ``get_trials()`` yields the supplied trials, or an empty list when
    none (or an empty collection) is given.
    """
    runner = MagicMock()
    if not trials:
        trials = []
    runner.get_trials.return_value = trials
    return runner
class EarlyStoppingSuite(unittest.TestCase):
def setUp(self):
    """Start a fresh Ray runtime for each test."""
    ray.init()
def tearDown(self):
    """Shut Ray down and restore trainables evicted by the shutdown."""
    ray.shutdown()
    _register_all()  # re-register the evicted objects
def basicSetup(self, rule):
    """Feed two synthetic trials through ``rule`` and return them.

    t1 reports rewards 0..900 over t=0..9 (mean 450, max 900); t2 reports
    a constant 450 over t=0..4. Every intermediate result must be allowed
    to CONTINUE.
    """
    t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
    t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
    runner = mock_trial_runner()
    for i in range(10):
        r1 = result(i, i * 100)
        print("basicSetup:", i)  # NOTE(review): debug print retained as-is
        self.assertEqual(
            rule.on_trial_result(runner, t1, r1), TrialScheduler.CONTINUE)
    for i in range(5):
        r2 = result(i, 450)
        self.assertEqual(
            rule.on_trial_result(runner, t2, r2), TrialScheduler.CONTINUE)
    return t1, t2
def testMedianStoppingConstantPerf(self):
rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
t1, t2 = self.basicSetup(rule)
runner = mock_trial_runner()
rule.on_trial_complete(runner, t1, result(10, 1000))
self.assertEqual(
rule.on_trial_result(runner, t2, result(5, 450)),
TrialScheduler.CONTINUE)
self.assertEqual(
rule.on_trial_result(runner, t2, result(6, 0)),
TrialScheduler.CONTINUE)
self.assertEqual(
rule.on_trial_result(runner, t2, result(10, 450)),
TrialScheduler.STOP)
def testMedianStoppingOnCompleteOnly(self):
rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
t1, t2 = self.basicSetup(rule)
runner = mock_trial_runner()
self.assertEqual(
rule.on_trial_result(runner, t2, result(100, 0)),
TrialScheduler.CONTINUE)
rule.on_trial_complete(runner, t1, result(101, 1000))
self.assertEqual(
rule.on_trial_result(runner, t2, result(101, 0)),
TrialScheduler.STOP)
def testMedianStoppingGracePeriod(self):
rule = MedianStoppingRule(grace_period=2.5, min_samples_required=1)
t1, t2 = self.basicSetup(rule)
runner = mock_trial_runner()
rule.on_trial_complete(runner, t1, result(10, 1000))
rule.on_trial_complete(runner, t2, result(10, 1000))
t3 = Trial("PPO")
self.assertEqual(
rule.on_trial_result(runner, t3, result(1, 10)),
TrialScheduler.CONTINUE)
self.assertEqual(
rule.on_trial_result(runner, t3, result(2, 10)),
TrialScheduler.CONTINUE)
self.assertEqual(
rule.on_trial_result(runner, t3, result(3, 10)),
TrialScheduler.STOP)
def testMedianStoppingMinSamples(self):
rule = MedianStoppingRule(grace_period=0, min_samples_required=2)
t1, t2 = self.basicSetup(rule)
runner = mock_trial_runner()
rule.on_trial_complete(runner, t1, result(10, 1000))
t3 = Trial("PPO")
# Insufficient samples to evaluate t3
self.assertEqual(
rule.on_trial_result(runner, t3, result(5, 10)),
TrialScheduler.CONTINUE)
rule.on_trial_complete(runner, t2, result(5, 1000))
# Sufficient samples to evaluate t3
self.assertEqual(
rule.on_trial_result(runner, t3, result(5, 10)),
TrialScheduler.STOP)
def testMedianStoppingUsesMedian(self):
rule = MedianStoppingRule(grace_period=0, min_samples_required=1)
t1, t2 = self.basicSetup(rule)
runner = mock_trial_runner()
rule.on_trial_complete(runner, t1, result(10, 1000))
rule.on_trial_complete(runner, t2, result(10, 1000))
t3 = Trial("PPO")
self.assertEqual(
rule.on_trial_result(runner, t3, result(1, 260)),
TrialScheduler.CONTINUE)
self.assertEqual(
rule.on_trial_result(runner, t3, result(2, 260)),
TrialScheduler.STOP)
def testMedianStoppingSoftStop(self):
rule = MedianStoppingRule(
grace_period=0, min_samples_required=1, hard_stop=False)
t1, t2 = self.basicSetup(rule)
runner = mock_trial_runner()
rule.on_trial_complete(runner, t1, result(10, 1000))
rule.on_trial_complete(runner, t2, result(10, 1000))
t3 = Trial("PPO")
self.assertEqual(
rule.on_trial_result(runner, t3, result(1, 260)),
TrialScheduler.CONTINUE)
self.assertEqual(
rule.on_trial_result(runner, t3, result(2, 260)),
TrialScheduler.PAUSE)
def _test_metrics(self, result_func, metric, mode):
rule = MedianStoppingRule(
grace_period=0,
min_samples_required=1,
time_attr="training_iteration",
metric=metric,
mode=mode)
t1 = Trial("PPO") # mean is 450, max 900, t_max=10
t2 = Trial("PPO") # mean is 450, max 450, t_max=5
runner = mock_trial_runner()
for i in range(10):
self.assertEqual(
rule.on_trial_result(runner, t1, result_func(i, i * 100)),
TrialScheduler.CONTINUE)
for i in range(5):
self.assertEqual(
rule.on_trial_result(runner, t2, result_func(i, 450)),
TrialScheduler.CONTINUE)
rule.on_trial_complete(runner, t1, result_func(10, 1000))
self.assertEqual(
rule.on_trial_result(runner, t2, result_func(5, 450)),
TrialScheduler.CONTINUE)
self.assertEqual(
rule.on_trial_result(runner, t2, result_func(6, 0)),
TrialScheduler.CONTINUE)
def testAlternateMetrics(self):
def result2(t, rew):
return dict(training_iteration=t, neg_mean_loss=rew)
self._test_metrics(result2, "neg_mean_loss", "max")
def testAlternateMetricsMin(self):
def result2(t, rew):
return dict(training_iteration=t, mean_loss=-rew)
self._test_metrics(result2, "mean_loss", "min")
class _MockTrialExecutor(TrialExecutor):
    """Minimal TrialExecutor stub that only flips trial status flags."""

    def start_trial(self, trial, checkpoint_obj=None):
        # Record that the trial is "running" and which checkpoint it restored.
        trial.logger_running = True
        trial.restored_checkpoint = checkpoint_obj.value
        trial.status = Trial.RUNNING

    def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
        trial.status = Trial.ERROR if error else Trial.TERMINATED
        if stop_logger:
            trial.logger_running = False

    def restore(self, trial, checkpoint=None):
        # No-op: tests only care about status transitions.
        pass

    def save(self, trial, type=Checkpoint.PERSISTENT, result=None):
        # Use the trainable name as a stand-in checkpoint value.
        return trial.trainable_name

    def reset_trial(self, trial, new_config, new_experiment_tag):
        return False
class _MockTrialRunner():
    """Minimal TrialRunner stand-in wiring a scheduler to the mock executor."""

    def __init__(self, scheduler):
        self._scheduler_alg = scheduler
        self.trials = []
        self.trial_executor = _MockTrialExecutor()

    def process_action(self, trial, action):
        # Apply a TrialScheduler decision to the trial.
        if action == TrialScheduler.CONTINUE:
            pass
        elif action == TrialScheduler.PAUSE:
            self._pause_trial(trial)
        elif action == TrialScheduler.STOP:
            self.trial_executor.stop_trial(trial)

    def stop_trial(self, trial):
        if trial.status in [Trial.ERROR, Trial.TERMINATED]:
            return
        elif trial.status in [Trial.PENDING, Trial.PAUSED]:
            self._scheduler_alg.on_trial_remove(self, trial)
        else:
            self._scheduler_alg.on_trial_complete(self, trial, result(100, 10))

    def add_trial(self, trial):
        self.trials.append(trial)
        self._scheduler_alg.on_trial_add(self, trial)

    def get_trials(self):
        return self.trials

    def has_resources(self, resources):
        # Pretend resources are always available.
        return True

    def _pause_trial(self, trial):
        trial.status = Trial.PAUSED

    def _launch_trial(self, trial):
        trial.status = Trial.RUNNING
class HyperbandSuite(unittest.TestCase):
    """Tests for HyperBandScheduler bracketing and successive halving."""

    def setUp(self):
        ray.init(object_store_memory=int(1e8))

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def schedulerSetup(self, num_trials, max_t=81):
        """Setup a scheduler and Runner with max Iter = 9.

        Bracketing is placed as follows:
        (5, 81);
        (8, 27) -> (3, 54);
        (15, 9) -> (5, 27) -> (2, 45);
        (34, 3) -> (12, 9) -> (4, 27) -> (2, 42);
        (81, 1) -> (27, 3) -> (9, 9) -> (3, 27) -> (1, 41);"""
        sched = HyperBandScheduler(max_t=max_t)
        for i in range(num_trials):
            t = Trial("__fake")
            sched.on_trial_add(None, t)
        runner = _MockTrialRunner(sched)
        return sched, runner

    def default_statistics(self):
        """Default statistics for HyperBand."""
        sched = HyperBandScheduler()
        res = {
            str(s): {
                "n": sched._get_n0(s),
                "r": sched._get_r0(s)
            }
            for s in range(sched._s_max_1)
        }
        res["max_trials"] = sum(v["n"] for v in res.values())
        res["brack_count"] = sched._s_max_1
        res["s_max"] = sched._s_max_1 - 1
        return res

    def downscale(self, n, sched):
        # Trials surviving one halving round: ceil(n / eta).
        return int(np.ceil(n / sched._eta))

    def basicSetup(self):
        """Setup and verify full band."""
        stats = self.default_statistics()
        sched, _ = self.schedulerSetup(stats["max_trials"])

        self.assertEqual(len(sched._hyperbands), 1)
        self.assertEqual(sched._cur_band_filled(), True)

        filled_band = sched._hyperbands[0]
        for bracket in filled_band:
            self.assertEqual(bracket.filled(), True)
        return sched

    def advancedSetup(self):
        # Fill one band, then start a second (unfilled) band with 4 trials.
        sched = self.basicSetup()
        for i in range(4):
            t = Trial("__fake")
            sched.on_trial_add(None, t)

        self.assertEqual(sched._cur_band_filled(), False)

        unfilled_band = sched._hyperbands[-1]
        self.assertEqual(len(unfilled_band), 2)
        bracket = unfilled_band[-1]
        self.assertEqual(bracket.filled(), False)
        self.assertEqual(len(bracket.current_trials()), 7)
        return sched

    def testConfigSameEta(self):
        # Default eta=3: verify bracket counts/resources for two max_t values.
        sched = HyperBandScheduler()
        i = 0
        while not sched._cur_band_filled():
            t = Trial("__fake")
            sched.on_trial_add(None, t)
            i += 1
        self.assertEqual(len(sched._hyperbands[0]), 5)
        self.assertEqual(sched._hyperbands[0][0]._n, 5)
        self.assertEqual(sched._hyperbands[0][0]._r, 81)
        self.assertEqual(sched._hyperbands[0][-1]._n, 81)
        self.assertEqual(sched._hyperbands[0][-1]._r, 1)

        reduction_factor = 10
        sched = HyperBandScheduler(
            max_t=1000, reduction_factor=reduction_factor)
        i = 0
        while not sched._cur_band_filled():
            t = Trial("__fake")
            sched.on_trial_add(None, t)
            i += 1
        self.assertEqual(len(sched._hyperbands[0]), 4)
        self.assertEqual(sched._hyperbands[0][0]._n, 4)
        self.assertEqual(sched._hyperbands[0][0]._r, 1000)
        self.assertEqual(sched._hyperbands[0][-1]._n, 1000)
        self.assertEqual(sched._hyperbands[0][-1]._r, 1)

    def testConfigSameEtaSmall(self):
        # Degenerate case: max_t=1 gives a single-bracket band.
        sched = HyperBandScheduler(max_t=1)
        i = 0
        while len(sched._hyperbands) < 2:
            t = Trial("__fake")
            sched.on_trial_add(None, t)
            i += 1
        self.assertEqual(len(sched._hyperbands[0]), 1)

    def testSuccessiveHalving(self):
        """Setup full band, then iterate through last bracket (n=81)
        to make sure successive halving is correct."""

        stats = self.default_statistics()
        sched, mock_runner = self.schedulerSetup(stats["max_trials"])
        big_bracket = sched._state["bracket"]
        cur_units = stats[str(stats["s_max"])]["r"]
        # The last bracket will downscale 4 times
        for x in range(stats["brack_count"] - 1):
            trials = big_bracket.current_trials()
            current_length = len(trials)
            for trl in trials:
                mock_runner._launch_trial(trl)

            # Provides results from 0 to 8 in order, keeping last one running
            for i, trl in enumerate(trials):
                action = sched.on_trial_result(mock_runner, trl,
                                               result(cur_units, i))
                if i < current_length - 1:
                    self.assertEqual(action, TrialScheduler.PAUSE)
                mock_runner.process_action(trl, action)

            self.assertEqual(action, TrialScheduler.CONTINUE)
            new_length = len(big_bracket.current_trials())
            self.assertEqual(new_length, self.downscale(current_length, sched))
            cur_units = int(cur_units * sched._eta)
        self.assertEqual(len(big_bracket.current_trials()), 1)

    def testHalvingStop(self):
        stats = self.default_statistics()
        num_trials = stats[str(0)]["n"] + stats[str(1)]["n"]
        sched, mock_runner = self.schedulerSetup(num_trials)
        big_bracket = sched._state["bracket"]
        for trl in big_bracket.current_trials():
            mock_runner._launch_trial(trl)

        # Provides result in reverse order, killing the last one
        cur_units = stats[str(1)]["r"]
        for i, trl in reversed(list(enumerate(big_bracket.current_trials()))):
            action = sched.on_trial_result(mock_runner, trl,
                                           result(cur_units, i))
            mock_runner.process_action(trl, action)

        self.assertEqual(action, TrialScheduler.STOP)

    def testStopsLastOne(self):
        stats = self.default_statistics()
        num_trials = stats[str(0)]["n"]  # setup one bracket
        sched, mock_runner = self.schedulerSetup(num_trials)
        big_bracket = sched._state["bracket"]
        for trl in big_bracket.current_trials():
            mock_runner._launch_trial(trl)

        # Provides result in reverse order, killing the last one
        cur_units = stats[str(0)]["r"]
        for i, trl in enumerate(big_bracket.current_trials()):
            action = sched.on_trial_result(mock_runner, trl,
                                           result(cur_units, i))
            mock_runner.process_action(trl, action)

        self.assertEqual(action, TrialScheduler.STOP)

    def testTrialErrored(self):
        """If a trial errored, make sure successive halving still happens"""

        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + 3
        sched, mock_runner = self.schedulerSetup(trial_count)
        t1, t2, t3 = sched._state["bracket"].current_trials()
        for t in [t1, t2, t3]:
            mock_runner._launch_trial(t)

        sched.on_trial_error(mock_runner, t3)
        self.assertEqual(
            TrialScheduler.PAUSE,
            sched.on_trial_result(mock_runner, t1,
                                  result(stats[str(1)]["r"], 10)))
        self.assertEqual(
            TrialScheduler.CONTINUE,
            sched.on_trial_result(mock_runner, t2,
                                  result(stats[str(1)]["r"], 10)))

    def testTrialErrored2(self):
        """Check successive halving happened even when last trial failed"""
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + stats[str(1)]["n"]
        sched, mock_runner = self.schedulerSetup(trial_count)
        trials = sched._state["bracket"].current_trials()
        for t in trials[:-1]:
            mock_runner._launch_trial(t)
            sched.on_trial_result(mock_runner, t, result(
                stats[str(1)]["r"], 10))

        mock_runner._launch_trial(trials[-1])
        sched.on_trial_error(mock_runner, trials[-1])
        self.assertEqual(
            len(sched._state["bracket"].current_trials()),
            self.downscale(stats[str(1)]["n"], sched))

    def testTrialEndedEarly(self):
        """Check successive halving happened even when one trial failed"""
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + 3
        sched, mock_runner = self.schedulerSetup(trial_count)

        t1, t2, t3 = sched._state["bracket"].current_trials()
        for t in [t1, t2, t3]:
            mock_runner._launch_trial(t)

        sched.on_trial_complete(mock_runner, t3, result(1, 12))
        self.assertEqual(
            TrialScheduler.PAUSE,
            sched.on_trial_result(mock_runner, t1,
                                  result(stats[str(1)]["r"], 10)))
        self.assertEqual(
            TrialScheduler.CONTINUE,
            sched.on_trial_result(mock_runner, t2,
                                  result(stats[str(1)]["r"], 10)))

    def testTrialEndedEarly2(self):
        """Check successive halving happened even when last trial failed"""
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + stats[str(1)]["n"]
        sched, mock_runner = self.schedulerSetup(trial_count)
        trials = sched._state["bracket"].current_trials()
        for t in trials[:-1]:
            mock_runner._launch_trial(t)
            sched.on_trial_result(mock_runner, t, result(
                stats[str(1)]["r"], 10))

        mock_runner._launch_trial(trials[-1])
        sched.on_trial_complete(mock_runner, trials[-1], result(100, 12))
        self.assertEqual(
            len(sched._state["bracket"].current_trials()),
            self.downscale(stats[str(1)]["n"], sched))

    def testAddAfterHalving(self):
        stats = self.default_statistics()
        trial_count = stats[str(0)]["n"] + 1
        sched, mock_runner = self.schedulerSetup(trial_count)
        bracket_trials = sched._state["bracket"].current_trials()
        init_units = stats[str(1)]["r"]

        for t in bracket_trials:
            mock_runner._launch_trial(t)

        for i, t in enumerate(bracket_trials):
            action = sched.on_trial_result(mock_runner, t, result(
                init_units, i))
        self.assertEqual(action, TrialScheduler.CONTINUE)
        t = Trial("__fake")
        sched.on_trial_add(None, t)
        mock_runner._launch_trial(t)
        self.assertEqual(len(sched._state["bracket"].current_trials()), 2)

        # Make sure that newly added trial gets fair computation (not just 1)
        self.assertEqual(
            TrialScheduler.CONTINUE,
            sched.on_trial_result(mock_runner, t, result(init_units, 12)))
        new_units = init_units + int(init_units * sched._eta)
        self.assertEqual(
            TrialScheduler.PAUSE,
            sched.on_trial_result(mock_runner, t, result(new_units, 12)))

    def _test_metrics(self, result_func, metric, mode):
        """Run one halving round using a custom metric/mode combination."""
        sched = HyperBandScheduler(
            time_attr="time_total_s", metric=metric, mode=mode)
        stats = self.default_statistics()

        for i in range(stats["max_trials"]):
            t = Trial("__fake")
            sched.on_trial_add(None, t)
        runner = _MockTrialRunner(sched)

        big_bracket = sched._hyperbands[0][-1]

        for trl in big_bracket.current_trials():
            runner._launch_trial(trl)
        current_length = len(big_bracket.current_trials())

        # Provides results from 0 to 8 in order, keeping the last one running
        for i, trl in enumerate(big_bracket.current_trials()):
            action = sched.on_trial_result(runner, trl, result_func(1, i))
            runner.process_action(trl, action)

        new_length = len(big_bracket.current_trials())
        self.assertEqual(action, TrialScheduler.CONTINUE)
        self.assertEqual(new_length, self.downscale(current_length, sched))

    def testAlternateMetrics(self):
        """Checking that alternate metrics will pass."""

        def result2(t, rew):
            return dict(time_total_s=t, neg_mean_loss=rew)

        self._test_metrics(result2, "neg_mean_loss", "max")

    def testAlternateMetricsMin(self):
        """Checking that alternate metrics will pass."""

        def result2(t, rew):
            return dict(time_total_s=t, mean_loss=-rew)

        self._test_metrics(result2, "mean_loss", "min")

    def testJumpingTime(self):
        sched, mock_runner = self.schedulerSetup(81)
        big_bracket = sched._hyperbands[0][-1]

        for trl in big_bracket.current_trials():
            mock_runner._launch_trial(trl)

        # Provides results from 0 to 8 in order, keeping the last one running
        main_trials = big_bracket.current_trials()[:-1]
        jump = big_bracket.current_trials()[-1]
        for i, trl in enumerate(main_trials):
            action = sched.on_trial_result(mock_runner, trl, result(1, i))
            mock_runner.process_action(trl, action)

        # One trial reports a much later time; it should still be paused.
        action = sched.on_trial_result(mock_runner, jump, result(4, i))
        self.assertEqual(action, TrialScheduler.PAUSE)

        current_length = len(big_bracket.current_trials())
        self.assertLess(current_length, 27)

    def testRemove(self):
        """Test with 4: start 1, remove 1 pending, add 2, remove 1 pending."""
        sched, runner = self.schedulerSetup(4)
        trials = sorted(sched._trial_info, key=lambda t: t.trial_id)
        runner._launch_trial(trials[0])
        sched.on_trial_result(runner, trials[0], result(1, 5))
        self.assertEqual(trials[0].status, Trial.RUNNING)
        self.assertEqual(trials[1].status, Trial.PENDING)

        bracket, _ = sched._trial_info[trials[1]]
        self.assertTrue(trials[1] in bracket._live_trials)
        sched.on_trial_remove(runner, trials[1])
        self.assertFalse(trials[1] in bracket._live_trials)

        for i in range(2):
            trial = Trial("__fake")
            sched.on_trial_add(None, trial)

        bracket, _ = sched._trial_info[trial]
        self.assertTrue(trial in bracket._live_trials)
        sched.on_trial_remove(runner, trial)  # where trial is not running
        self.assertFalse(trial in bracket._live_trials)

    def testFilterNoneBracket(self):
        sched, runner = self.schedulerSetup(100, 20)
        # "sched" should contains None brackets
        non_brackets = [
            b for hyperband in sched._hyperbands for b in hyperband
            if b is None
        ]
        self.assertTrue(non_brackets)

        # Make sure "choose_trial_to_run" still works
        trial = sched.choose_trial_to_run(runner)
        self.assertIsNotNone(trial)
class BOHBSuite(unittest.TestCase):
    """Tests for HyperBandForBOHB bracket creation and pause/unpause flow."""

    def setUp(self):
        ray.init(object_store_memory=int(1e8))

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def testLargestBracketFirst(self):
        sched = HyperBandForBOHB(max_t=3, reduction_factor=3)
        runner = _MockTrialRunner(sched)
        for i in range(3):
            t = Trial("__fake")
            sched.on_trial_add(runner, t)
            runner._launch_trial(t)

        self.assertEqual(sched.state()["num_brackets"], 1)
        sched.on_trial_add(runner, Trial("__fake"))
        self.assertEqual(sched.state()["num_brackets"], 2)

    def testCheckTrialInfoUpdate(self):
        def result(score, ts):
            return {"episode_reward_mean": score, TRAINING_ITERATION: ts}

        sched = HyperBandForBOHB(max_t=3, reduction_factor=3)
        runner = _MockTrialRunner(sched)
        runner._search_alg = MagicMock()
        trials = [Trial("__fake") for i in range(3)]
        for t in trials:
            runner.add_trial(t)
            runner._launch_trial(t)

        for trial, trial_result in zip(trials, [result(1, 1), result(2, 1)]):
            decision = sched.on_trial_result(runner, trial, trial_result)
            self.assertEqual(decision, TrialScheduler.PAUSE)
            runner._pause_trial(trial)
        spy_result = result(0, 1)
        decision = sched.on_trial_result(runner, trials[-1], spy_result)
        self.assertEqual(decision, TrialScheduler.STOP)
        sched.choose_trial_to_run(runner)
        self.assertEqual(runner._search_alg.on_pause.call_count, 2)
        self.assertEqual(runner._search_alg.on_unpause.call_count, 1)
        self.assertTrue("hyperband_info" in spy_result)
        # assertEqual: assertEquals is a deprecated alias, removed in Py 3.12.
        self.assertEqual(spy_result["hyperband_info"]["budget"], 1)

    def testCheckTrialInfoUpdateMin(self):
        def result(score, ts):
            return {"episode_reward_mean": score, TRAINING_ITERATION: ts}

        sched = HyperBandForBOHB(max_t=3, reduction_factor=3, mode="min")
        runner = _MockTrialRunner(sched)
        runner._search_alg = MagicMock()
        trials = [Trial("__fake") for i in range(3)]
        for t in trials:
            runner.add_trial(t)
            runner._launch_trial(t)

        for trial, trial_result in zip(trials, [result(1, 1), result(2, 1)]):
            decision = sched.on_trial_result(runner, trial, trial_result)
            self.assertEqual(decision, TrialScheduler.PAUSE)
            runner._pause_trial(trial)
        spy_result = result(0, 1)
        # In "min" mode, score 0 is best, so the reporting trial continues.
        decision = sched.on_trial_result(runner, trials[-1], spy_result)
        self.assertEqual(decision, TrialScheduler.CONTINUE)
        sched.choose_trial_to_run(runner)
        self.assertEqual(runner._search_alg.on_pause.call_count, 2)
        self.assertTrue("hyperband_info" in spy_result)
        # assertEqual: assertEquals is a deprecated alias, removed in Py 3.12.
        self.assertEqual(spy_result["hyperband_info"]["budget"], 1)
class _MockTrial(Trial):
    """Lightweight Trial stand-in for PBT tests.

    NOTE: deliberately does NOT call Trial.__init__; it only sets the
    attributes the schedulers under test actually touch.
    """

    def __init__(self, i, config):
        self.trainable_name = "trial_{}".format(i)
        self.trial_id = Trial.generate_id()
        self.config = config
        self.experiment_tag = "{}tag".format(i)
        self.trial_name_creator = None
        self.logger_running = False
        self.restored_checkpoint = None
        self.resources = Resources(1, 0)
        self.custom_trial_name = None
class PopulationBasedTestingSuite(unittest.TestCase):
    """Tests for PopulationBasedTraining checkpoint/perturb/explore logic."""

    def setUp(self):
        ray.init()

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def basicSetup(self,
                   resample_prob=0.0,
                   explore=None,
                   perturbation_interval=10,
                   log_config=False,
                   step_once=True):
        """Create a PBT scheduler with 5 mock trials scoring 0..200."""
        pbt = PopulationBasedTraining(
            time_attr="training_iteration",
            perturbation_interval=perturbation_interval,
            resample_probability=resample_prob,
            quantile_fraction=0.25,
            hyperparam_mutations={
                "id_factor": [100],
                "float_factor": lambda: 100.0,
                "int_factor": lambda: 10,
            },
            custom_explore_fn=explore,
            log_config=log_config)
        runner = _MockTrialRunner(pbt)
        for i in range(5):
            trial = _MockTrial(
                i, {
                    "id_factor": i,
                    "float_factor": 2.0,
                    "const_factor": 3,
                    "int_factor": 10
                })
            runner.add_trial(trial)
            trial.status = Trial.RUNNING
            if step_once:
                self.assertEqual(
                    pbt.on_trial_result(runner, trial, result(10, 50 * i)),
                    TrialScheduler.CONTINUE)
        pbt.reset_stats()
        return pbt, runner

    def testCheckpointsMostPromisingTrials(self):
        pbt, runner = self.basicSetup()
        trials = runner.get_trials()

        # no checkpoint: haven't hit next perturbation interval yet
        self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
        self.assertEqual(
            pbt.on_trial_result(runner, trials[0], result(15, 200)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
        self.assertEqual(pbt._num_checkpoints, 0)

        # checkpoint: both past interval and upper quantile
        self.assertEqual(
            pbt.on_trial_result(runner, trials[0], result(20, 200)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt.last_scores(trials), [200, 50, 100, 150, 200])
        self.assertEqual(pbt._num_checkpoints, 1)
        self.assertEqual(
            pbt.on_trial_result(runner, trials[1], result(30, 201)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt.last_scores(trials), [200, 201, 100, 150, 200])
        self.assertEqual(pbt._num_checkpoints, 2)

        # not upper quantile any more
        self.assertEqual(
            pbt.on_trial_result(runner, trials[4], result(30, 199)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt._num_checkpoints, 2)
        self.assertEqual(pbt._num_perturbations, 0)

    def testPerturbsLowPerformingTrials(self):
        pbt, runner = self.basicSetup()
        trials = runner.get_trials()

        # no perturbation: haven't hit next perturbation interval
        self.assertEqual(
            pbt.on_trial_result(runner, trials[0], result(15, -100)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt.last_scores(trials), [0, 50, 100, 150, 200])
        self.assertTrue("@perturbed" not in trials[0].experiment_tag)
        self.assertEqual(pbt._num_perturbations, 0)

        # perturb since it's lower quantile
        self.assertEqual(
            pbt.on_trial_result(runner, trials[0], result(20, -100)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt.last_scores(trials), [-100, 50, 100, 150, 200])
        self.assertTrue("@perturbed" in trials[0].experiment_tag)
        self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
        self.assertEqual(pbt._num_perturbations, 1)

        # also perturbed
        self.assertEqual(
            pbt.on_trial_result(runner, trials[2], result(20, 40)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt.last_scores(trials), [-100, 50, 40, 150, 200])
        self.assertEqual(pbt._num_perturbations, 2)
        self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
        self.assertTrue("@perturbed" in trials[2].experiment_tag)

    def testPerturbWithoutResample(self):
        # resample_prob=0: values are nudged by the 0.8/1.2 factors.
        pbt, runner = self.basicSetup(resample_prob=0.0)
        trials = runner.get_trials()
        self.assertEqual(
            pbt.on_trial_result(runner, trials[0], result(20, -100)),
            TrialScheduler.CONTINUE)
        self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
        self.assertIn(trials[0].config["id_factor"], [100])
        self.assertIn(trials[0].config["float_factor"], [2.4, 1.6])
        self.assertEqual(type(trials[0].config["float_factor"]), float)
        self.assertIn(trials[0].config["int_factor"], [8, 12])
        self.assertEqual(type(trials[0].config["int_factor"]), int)
        self.assertEqual(trials[0].config["const_factor"], 3)

    def testPerturbWithResample(self):
        # resample_prob=1: values are re-drawn from the mutation spec.
        pbt, runner = self.basicSetup(resample_prob=1.0)
        trials = runner.get_trials()
        self.assertEqual(
            pbt.on_trial_result(runner, trials[0], result(20, -100)),
            TrialScheduler.CONTINUE)
        self.assertIn(trials[0].restored_checkpoint, ["trial_3", "trial_4"])
        self.assertEqual(trials[0].config["id_factor"], 100)
        self.assertEqual(trials[0].config["float_factor"], 100.0)
        self.assertEqual(type(trials[0].config["float_factor"]), float)
        self.assertEqual(trials[0].config["int_factor"], 10)
        self.assertEqual(type(trials[0].config["int_factor"]), int)
        self.assertEqual(trials[0].config["const_factor"], 3)

    def testPerturbationValues(self):
        def assertProduces(fn, values):
            # Sample fn() 100 times with a fixed seed and compare the set
            # of observed values.
            random.seed(0)
            seen = set()
            for _ in range(100):
                seen.add(fn()["v"])
            self.assertEqual(seen, values)

        # Categorical case
        assertProduces(
            lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
            {3, 8})
        assertProduces(
            lambda: explore({"v": 3}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
            {3, 4})
        assertProduces(
            lambda: explore({"v": 10}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
            {8, 10})
        assertProduces(
            lambda: explore({"v": 7}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
            {3, 4, 8, 10})
        assertProduces(
            lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 1.0, lambda x: x),
            {3, 4, 8, 10})

        # Continuous case
        assertProduces(
            lambda: explore({"v": 100}, {
                "v": lambda: random.choice([10, 100])
            }, 0.0, lambda x: x), {80, 120})
        assertProduces(
            lambda: explore({"v": 100.0}, {
                "v": lambda: random.choice([10, 100])
            }, 0.0, lambda x: x), {80.0, 120.0})
        assertProduces(
            lambda: explore({"v": 100.0}, {
                "v": lambda: random.choice([10, 100])
            }, 1.0, lambda x: x), {10.0, 100.0})

        def deep_add(seen, new_values):
            # Merge new_values into seen, collecting leaves into sets.
            for k, new_value in new_values.items():
                if isinstance(new_value, dict):
                    if k not in seen:
                        seen[k] = {}
                    seen[k].update(deep_add(seen[k], new_value))
                else:
                    if k not in seen:
                        seen[k] = set()
                    seen[k].add(new_value)
            return seen

        def assertNestedProduces(fn, values):
            random.seed(0)
            seen = {}
            for _ in range(100):
                new_config = fn()
                seen = deep_add(seen, new_config)
            self.assertEqual(seen, values)

        # Nested mutation and spec
        assertNestedProduces(
            lambda: explore({
                "a": {
                    "b": 4
                },
                "1": {
                    "2": {
                        "3": 100
                    }
                },
            }, {
                "a": {
                    "b": [3, 4, 8, 10]
                },
                "1": {
                    "2": {
                        "3": lambda: random.choice([10, 100])
                    }
                },
            }, 0.0, lambda x: x), {
                "a": {
                    "b": {3, 8}
                },
                "1": {
                    "2": {
                        "3": {80, 120}
                    }
                },
            })

        custom_explore_fn = MagicMock(side_effect=lambda x: x)

        # Nested mutation and spec
        assertNestedProduces(
            lambda: explore({
                "a": {
                    "b": 4
                },
                "1": {
                    "2": {
                        "3": 100
                    }
                },
            }, {
                "a": {
                    "b": [3, 4, 8, 10]
                },
                "1": {
                    "2": {
                        "3": lambda: random.choice([10, 100])
                    }
                },
            }, 0.0, custom_explore_fn), {
                "a": {
                    "b": {3, 8}
                },
                "1": {
                    "2": {
                        "3": {80, 120}
                    }
                },
            })

        # Expect call count to be 100 because we call explore 100 times
        self.assertEqual(custom_explore_fn.call_count, 100)

    def testYieldsTimeToOtherTrials(self):
        pbt, runner = self.basicSetup()
        trials = runner.get_trials()
        trials[0].status = Trial.PENDING  # simulate not enough resources

        self.assertEqual(
            pbt.on_trial_result(runner, trials[1], result(20, 1000)),
            TrialScheduler.PAUSE)
        self.assertEqual(pbt.last_scores(trials), [0, 1000, 100, 150, 200])
        self.assertEqual(pbt.choose_trial_to_run(runner), trials[0])

    def testSchedulesMostBehindTrialToRun(self):
        pbt, runner = self.basicSetup()
        trials = runner.get_trials()
        pbt.on_trial_result(runner, trials[0], result(800, 1000))
        pbt.on_trial_result(runner, trials[1], result(700, 1001))
        pbt.on_trial_result(runner, trials[2], result(600, 1002))
        pbt.on_trial_result(runner, trials[3], result(500, 1003))
        pbt.on_trial_result(runner, trials[4], result(700, 1004))
        self.assertEqual(pbt.choose_trial_to_run(runner), None)
        for i in range(5):
            trials[i].status = Trial.PENDING
        self.assertEqual(pbt.choose_trial_to_run(runner), trials[3])

    def testPerturbationResetsLastPerturbTime(self):
        pbt, runner = self.basicSetup()
        trials = runner.get_trials()
        pbt.on_trial_result(runner, trials[0], result(10000, 1005))
        pbt.on_trial_result(runner, trials[1], result(10000, 1004))
        pbt.on_trial_result(runner, trials[2], result(600, 1003))
        self.assertEqual(pbt._num_perturbations, 0)
        pbt.on_trial_result(runner, trials[3], result(500, 1002))
        self.assertEqual(pbt._num_perturbations, 1)
        pbt.on_trial_result(runner, trials[3], result(600, 100))
        self.assertEqual(pbt._num_perturbations, 1)
        pbt.on_trial_result(runner, trials[3], result(11000, 100))
        self.assertEqual(pbt._num_perturbations, 2)

    def testLogConfig(self):
        def check_policy(policy):
            self.assertIsInstance(policy[2], int)
            self.assertIsInstance(policy[3], int)
            self.assertIn(policy[0], ["0tag", "2tag", "3tag", "4tag"])
            self.assertIn(policy[1], ["0tag", "2tag", "3tag", "4tag"])
            self.assertIn(policy[2], [0, 2, 3, 4])
            self.assertIn(policy[3], [0, 2, 3, 4])
            for i in [4, 5]:
                self.assertIsInstance(policy[i], dict)
                for key in [
                        "const_factor", "int_factor", "float_factor",
                        "id_factor"
                ]:
                    self.assertIn(key, policy[i])
                self.assertIsInstance(policy[i]["float_factor"], float)
                self.assertIsInstance(policy[i]["int_factor"], int)
                self.assertIn(policy[i]["const_factor"], [3])
                self.assertIn(policy[i]["int_factor"], [8, 10, 12])
                self.assertIn(policy[i]["float_factor"], [2.4, 2, 1.6])
                self.assertIn(policy[i]["id_factor"], [3, 4, 100])

        pbt, runner = self.basicSetup(log_config=True)
        trials = runner.get_trials()
        tmpdir = tempfile.mkdtemp()
        for i, trial in enumerate(trials):
            trial.local_dir = tmpdir
            trial.last_result = {TRAINING_ITERATION: i}
        pbt.on_trial_result(runner, trials[0], result(15, -100))
        pbt.on_trial_result(runner, trials[0], result(20, -100))
        pbt.on_trial_result(runner, trials[2], result(20, 40))
        log_files = ["pbt_global.txt", "pbt_policy_0.txt", "pbt_policy_2.txt"]
        for log_file in log_files:
            self.assertTrue(os.path.exists(os.path.join(tmpdir, log_file)))
            # Fix: close the file handle (original leaked an open file via
            # open(...).readlines()).
            with open(os.path.join(tmpdir, log_file), "r") as fp:
                raw_policy = fp.readlines()
            for line in raw_policy:
                check_policy(json.loads(line))
        shutil.rmtree(tmpdir)

    def testPostprocessingHook(self):
        def explore(new_config):
            new_config["id_factor"] = 42
            new_config["float_factor"] = 43
            return new_config

        pbt, runner = self.basicSetup(resample_prob=0.0, explore=explore)
        trials = runner.get_trials()
        self.assertEqual(
            pbt.on_trial_result(runner, trials[0], result(20, -100)),
            TrialScheduler.CONTINUE)
        self.assertEqual(trials[0].config["id_factor"], 42)
        self.assertEqual(trials[0].config["float_factor"], 43)

    def testFastPerturb(self):
        pbt, runner = self.basicSetup(
            perturbation_interval=1, step_once=False, log_config=True)
        trials = runner.get_trials()

        tmpdir = tempfile.mkdtemp()
        for i, trial in enumerate(trials):
            trial.local_dir = tmpdir
            trial.last_result = {}
        pbt.on_trial_result(runner, trials[0], result(1, 10))
        self.assertEqual(
            pbt.on_trial_result(runner, trials[2], result(1, 200)),
            TrialScheduler.CONTINUE)
        self.assertEqual(pbt._num_checkpoints, 1)

        pbt._exploit(runner.trial_executor, trials[1], trials[2])
        shutil.rmtree(tmpdir)
class AsyncHyperBandSuite(unittest.TestCase):
    """Tests for AsyncHyperBandScheduler (ASHA) stop/continue decisions."""

    def setUp(self):
        ray.init()

    def tearDown(self):
        ray.shutdown()
        _register_all()  # re-register the evicted objects

    def basicSetup(self, scheduler):
        """Feed two trials into *scheduler*: t1 improves linearly, t2 flat."""
        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
        scheduler.on_trial_add(None, t1)
        scheduler.on_trial_add(None, t2)
        for i in range(10):
            self.assertEqual(
                scheduler.on_trial_result(None, t1, result(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                scheduler.on_trial_result(None, t2, result(i, 450)),
                TrialScheduler.CONTINUE)
        return t1, t2

    def testAsyncHBOnComplete(self):
        scheduler = AsyncHyperBandScheduler(max_t=10, brackets=1)
        t1, t2 = self.basicSetup(scheduler)
        t3 = Trial("PPO")
        scheduler.on_trial_add(None, t3)
        scheduler.on_trial_complete(None, t3, result(10, 1000))
        self.assertEqual(
            scheduler.on_trial_result(None, t2, result(101, 0)),
            TrialScheduler.STOP)

    def testAsyncHBGracePeriod(self):
        # No stopping before the grace period (2.5) has elapsed.
        scheduler = AsyncHyperBandScheduler(
            grace_period=2.5, reduction_factor=3, brackets=1)
        t1, t2 = self.basicSetup(scheduler)
        scheduler.on_trial_complete(None, t1, result(10, 1000))
        scheduler.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        scheduler.on_trial_add(None, t3)
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(1, 10)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(2, 10)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(3, 10)),
            TrialScheduler.STOP)

    def testAsyncHBAllCompletes(self):
        # Every trial reporting the worst score is eventually stopped.
        scheduler = AsyncHyperBandScheduler(max_t=10, brackets=10)
        trials = [Trial("PPO") for i in range(10)]
        for t in trials:
            scheduler.on_trial_add(None, t)

        for t in trials:
            self.assertEqual(
                scheduler.on_trial_result(None, t, result(10, -2)),
                TrialScheduler.STOP)

    def testAsyncHBUsesPercentile(self):
        scheduler = AsyncHyperBandScheduler(
            grace_period=1, max_t=10, reduction_factor=2, brackets=1)
        t1, t2 = self.basicSetup(scheduler)
        scheduler.on_trial_complete(None, t1, result(10, 1000))
        scheduler.on_trial_complete(None, t2, result(10, 1000))
        t3 = Trial("PPO")
        scheduler.on_trial_add(None, t3)
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(1, 260)),
            TrialScheduler.STOP)
        self.assertEqual(
            scheduler.on_trial_result(None, t3, result(2, 260)),
            TrialScheduler.STOP)

    def _test_metrics(self, result_func, metric, mode):
        """Run the basic scenario with a custom metric/mode combination."""
        scheduler = AsyncHyperBandScheduler(
            grace_period=1,
            time_attr="training_iteration",
            metric=metric,
            mode=mode,
            brackets=1)
        t1 = Trial("PPO")  # mean is 450, max 900, t_max=10
        t2 = Trial("PPO")  # mean is 450, max 450, t_max=5
        scheduler.on_trial_add(None, t1)
        scheduler.on_trial_add(None, t2)
        for i in range(10):
            self.assertEqual(
                scheduler.on_trial_result(None, t1, result_func(i, i * 100)),
                TrialScheduler.CONTINUE)
        for i in range(5):
            self.assertEqual(
                scheduler.on_trial_result(None, t2, result_func(i, 450)),
                TrialScheduler.CONTINUE)
        scheduler.on_trial_complete(None, t1, result_func(10, 1000))
        self.assertEqual(
            scheduler.on_trial_result(None, t2, result_func(5, 450)),
            TrialScheduler.CONTINUE)
        self.assertEqual(
            scheduler.on_trial_result(None, t2, result_func(6, 0)),
            TrialScheduler.CONTINUE)

    def testAlternateMetrics(self):
        def result2(t, rew):
            return dict(training_iteration=t, neg_mean_loss=rew)

        self._test_metrics(result2, "neg_mean_loss", "max")

    def testAlternateMetricsMin(self):
        def result2(t, rew):
            return dict(training_iteration=t, mean_loss=-rew)

        self._test_metrics(result2, "mean_loss", "min")
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_tune_restore.py
|
Python
|
# coding: utf-8
import os
import shutil
import tempfile
import unittest
import skopt
import numpy as np
from hyperopt import hp
from nevergrad.optimization import optimizerlib
import ray
from ray import tune
from ray.test_utils import recursive_fnmatch
from ray.rllib import _register_all
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune.suggest.bayesopt import BayesOptSearch
from ray.tune.suggest.skopt import SkOptSearch
from ray.tune.suggest.nevergrad import NevergradSearch
from ray.tune.suggest.sigopt import SigOptSearch
from ray.tune.utils import validate_save_restore
class TuneRestoreTest(unittest.TestCase):
    """Checks that a Tune trial can be resumed from a saved checkpoint."""
    def setUp(self):
        ray.init(num_cpus=1, num_gpus=0, local_mode=True)
        tmpdir = tempfile.mkdtemp()
        test_name = "TuneRestoreTest"
        # Train PG on CartPole for one iteration with per-iteration
        # checkpointing, so a "checkpoint-1" file exists to restore from.
        tune.run(
            "PG",
            name=test_name,
            stop={"training_iteration": 1},
            checkpoint_freq=1,
            local_dir=tmpdir,
            config={
                "env": "CartPole-v0",
            },
        )
        logdir = os.path.expanduser(os.path.join(tmpdir, test_name))
        self.logdir = logdir
        # First (only) checkpoint file found under the experiment dir.
        self.checkpoint_path = recursive_fnmatch(logdir, "checkpoint-1")[0]
    def tearDown(self):
        shutil.rmtree(self.logdir)
        ray.shutdown()
        _register_all()
    def testTuneRestore(self):
        """Resume from checkpoint-1 and train one more iteration."""
        self.assertTrue(os.path.isfile(self.checkpoint_path))
        tune.run(
            "PG",
            name="TuneRestoreTest",
            stop={"training_iteration": 2},  # train one more iteration.
            checkpoint_freq=1,
            restore=self.checkpoint_path,  # Restore the checkpoint
            config={
                "env": "CartPole-v0",
            },
        )
class TuneExampleTest(unittest.TestCase):
    """Smoke-tests save/restore for the bundled example Trainables."""
    def setUp(self):
        ray.init()
    def tearDown(self):
        ray.shutdown()
        _register_all()
    def testPBTKeras(self):
        from ray.tune.examples.pbt_tune_cifar10_with_keras import Cifar10Model
        from tensorflow.python.keras.datasets import cifar10
        # Download the dataset up front so the Trainable's setup is fast.
        cifar10.load_data()
        validate_save_restore(Cifar10Model)
        validate_save_restore(Cifar10Model, use_object_store=True)
    def testPyTorchMNIST(self):
        from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
        from torchvision import datasets
        # Pre-download MNIST for the same reason as above.
        datasets.MNIST("~/data", train=True, download=True)
        validate_save_restore(TrainMNIST)
        validate_save_restore(TrainMNIST, use_object_store=True)
    def testLogging(self):
        from ray.tune.examples.logging_example import MyTrainableClass
        validate_save_restore(MyTrainableClass)
        validate_save_restore(MyTrainableClass, use_object_store=True)
    def testHyperbandExample(self):
        from ray.tune.examples.hyperband_example import MyTrainableClass
        validate_save_restore(MyTrainableClass)
        validate_save_restore(MyTrainableClass, use_object_store=True)
    def testAsyncHyperbandExample(self):
        from ray.tune.examples.async_hyperband_example import MyTrainableClass
        validate_save_restore(MyTrainableClass)
        validate_save_restore(MyTrainableClass, use_object_store=True)
class AutoInitTest(unittest.TestCase):
    """Verifies that `tune.run(..., ray_auto_init=True)` initializes Ray."""
    def testTuneRestore(self):
        # NOTE(review): the method name is misleading — this tests
        # auto-initialization, not checkpoint restore.
        self.assertFalse(ray.is_initialized())
        tune.run(
            "__fake",
            name="TestAutoInit",
            stop={"training_iteration": 1},
            ray_auto_init=True)
        self.assertTrue(ray.is_initialized())
    def tearDown(self):
        ray.shutdown()
        _register_all()
class AbstractWarmStartTest:
    """Shared harness verifying a searcher resumes exactly where it stopped.

    Subclasses implement `set_basic_conf` to return (search_alg, cost_fn).
    The invariant tested: 15 samples + save + restore + 15 more samples must
    produce the same trial configs as one uninterrupted 30-sample run under
    the same numpy seed.
    """
    def setUp(self):
        ray.init(local_mode=True)
        self.tmpdir = tempfile.mkdtemp()
    def tearDown(self):
        shutil.rmtree(self.tmpdir)
        ray.shutdown()
        _register_all()
    def set_basic_conf(self):
        # Subclass hook: return (search_alg, cost_function).
        raise NotImplementedError()
    def run_exp_1(self):
        # First half of the split run; persists the searcher state to disk.
        np.random.seed(162)
        search_alg, cost = self.set_basic_conf()
        results_exp_1 = tune.run(cost, num_samples=15, search_alg=search_alg)
        self.log_dir = os.path.join(self.tmpdir, "warmStartTest.pkl")
        search_alg.save(self.log_dir)
        return results_exp_1
    def run_exp_2(self):
        # Second half: a fresh searcher restored from the saved state.
        search_alg2, cost = self.set_basic_conf()
        search_alg2.restore(self.log_dir)
        return tune.run(cost, num_samples=15, search_alg=search_alg2)
    def run_exp_3(self):
        # Reference: one uninterrupted 30-sample run with the same seed.
        np.random.seed(162)
        search_alg3, cost = self.set_basic_conf()
        return tune.run(cost, num_samples=30, search_alg=search_alg3)
    def testWarmStart(self):
        results_exp_1 = self.run_exp_1()
        results_exp_2 = self.run_exp_2()
        results_exp_3 = self.run_exp_3()
        trials_1_config = [trial.config for trial in results_exp_1.trials]
        trials_2_config = [trial.config for trial in results_exp_2.trials]
        trials_3_config = [trial.config for trial in results_exp_3.trials]
        # The split run (15 + 15) must reproduce the uninterrupted 30-run.
        self.assertEqual(trials_1_config + trials_2_config, trials_3_config)
class HyperoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
    """Warm-start test for HyperOptSearch over a 3D quadratic bowl."""
    def set_basic_conf(self):
        space = {
            "x": hp.uniform("x", 0, 10),
            "y": hp.uniform("y", -10, 10),
            "z": hp.uniform("z", -10, 0)
        }
        def cost(space, reporter):
            # Convex objective; minimum at the origin.
            loss = space["x"]**2 + space["y"]**2 + space["z"]**2
            reporter(loss=loss)
        search_alg = HyperOptSearch(
            space,
            max_concurrent=1,
            metric="loss",
            mode="min",
            # Fixed seed so split and uninterrupted runs are comparable.
            random_state_seed=5)
        return search_alg, cost
class BayesoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
    """Warm-start test for BayesOptSearch."""
    def set_basic_conf(self):
        # (low, high) bounds per parameter, in BayesOpt's tuple format.
        space = {"width": (0, 20), "height": (-100, 100)}
        def cost(space, reporter):
            reporter(loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
        search_alg = BayesOptSearch(
            space,
            max_concurrent=1,
            metric="loss",
            mode="min",
            # Upper-confidence-bound acquisition parameters.
            utility_kwargs={
                "kind": "ucb",
                "kappa": 2.5,
                "xi": 0.0
            })
        return search_alg, cost
class SkoptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
    """Warm-start test for SkOptSearch, seeded with pre-evaluated points."""
    def set_basic_conf(self):
        optimizer = skopt.Optimizer([(0, 20), (-100, 100)])
        # Two already-evaluated (width, height) points and their rewards,
        # handed to the searcher so it starts with prior observations.
        previously_run_params = [[10, 0], [15, -20]]
        known_rewards = [-189, -1144]
        def cost(space, reporter):
            reporter(loss=(space["height"]**2 + space["width"]**2))
        search_alg = SkOptSearch(
            optimizer, ["width", "height"],
            max_concurrent=1,
            metric="loss",
            mode="min",
            points_to_evaluate=previously_run_params,
            evaluated_rewards=known_rewards)
        return search_alg, cost
class NevergradWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
    """Warm-start test for NevergradSearch with a OnePlusOne optimizer."""
    def set_basic_conf(self):
        # Two scalar parameters, mapped positionally onto the optimizer's
        # 2-dimensional instrumentation.
        instrumentation = 2
        parameter_names = ["height", "width"]
        optimizer = optimizerlib.OnePlusOne(instrumentation)
        def cost(space, reporter):
            reporter(
                mean_loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
        search_alg = NevergradSearch(
            optimizer,
            parameter_names,
            max_concurrent=1,
            metric="mean_loss",
            mode="min")
        return search_alg, cost
class SigOptWarmStartTest(AbstractWarmStartTest, unittest.TestCase):
    """Warm-start test for SigOptSearch.

    Requires a SigOpt API key in the SIGOPT_KEY environment variable; when
    the key is absent the test is reported as skipped rather than silently
    passing.
    """
    def set_basic_conf(self):
        # Two integer parameters in SigOpt's experiment-definition format.
        space = [
            {
                "name": "width",
                "type": "int",
                "bounds": {
                    "min": 0,
                    "max": 20
                },
            },
            {
                "name": "height",
                "type": "int",
                "bounds": {
                    "min": -100,
                    "max": 100
                },
            },
        ]
        def cost(space, reporter):
            reporter(
                mean_loss=(space["height"] - 14)**2 - abs(space["width"] - 3))
        search_alg = SigOptSearch(
            space,
            name="SigOpt Example Experiment",
            max_concurrent=1,
            metric="mean_loss",
            mode="min")
        return search_alg, cost
    def testWarmStart(self):
        # Previously this returned silently, so the test showed up as
        # "passed" without exercising anything. Report a proper skip.
        if "SIGOPT_KEY" not in os.environ:
            self.skipTest("SIGOPT_KEY not set in the environment.")
        super().testWarmStart()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_tune_save_restore.py
|
Python
|
# coding: utf-8
import os
import pickle
import shutil
import tempfile
import unittest
import ray
from ray import tune
from ray.rllib import _register_all
from ray.tune import Trainable
class SerialTuneRelativeLocalDirTest(unittest.TestCase):
    """Checks tune checkpointing/restoring under dotted-relative, relative,
    tilde-absolute and tempdir `local_dir` values.

    Subclasses may override `local_mode`/`prefix` to rerun the suite under a
    different Ray execution mode.
    """
    local_mode = True
    prefix = "Serial"
    class MockTrainable(Trainable):
        """Minimal Trainable that checkpoints a small dict via pickle."""
        _name = "MockTrainable"
        def _setup(self, config):
            self.state = {"hi": 1}
        def _train(self):
            # Single-step training: report done immediately.
            return {"timesteps_this_iter": 1, "done": True}
        def _save(self, checkpoint_dir):
            checkpoint_path = os.path.join(
                checkpoint_dir, "checkpoint-{}".format(self._iteration))
            with open(checkpoint_path, "wb") as f:
                pickle.dump(self.state, f)
            return checkpoint_path
        def _restore(self, checkpoint_path):
            with open(checkpoint_path, "rb") as f:
                extra_data = pickle.load(f)
            self.state.update(extra_data)
    def setUp(self):
        ray.init(num_cpus=1, num_gpus=0, local_mode=self.local_mode)
    def tearDown(self):
        shutil.rmtree(self.absolute_local_dir, ignore_errors=True)
        self.absolute_local_dir = None
        ray.shutdown()
        # Without this line, test_tune_server.testAddTrial would fail.
        _register_all()
    def _get_trial_dir(self, absoulte_exp_dir):
        # Locate the single trial directory created under the experiment dir
        # (its name starts with the Trainable's registered name).
        trial_dirname = next(
            (child_dir for child_dir in os.listdir(absoulte_exp_dir)
             if (os.path.isdir(os.path.join(absoulte_exp_dir, child_dir))
                 and child_dir.startswith(self.MockTrainable._name))))
        trial_absolute_dir = os.path.join(absoulte_exp_dir, trial_dirname)
        return trial_dirname, trial_absolute_dir
    def _train(self, exp_name, local_dir, absolute_local_dir):
        """Run one checkpointed iteration and assert the on-disk layout."""
        trial, = tune.run(
            self.MockTrainable,
            name=exp_name,
            stop={
                "training_iteration": 1
            },
            checkpoint_freq=1,
            local_dir=local_dir,
            config={
                "env": "CartPole-v0",
                "log_level": "DEBUG"
            }).trials
        exp_dir = os.path.join(absolute_local_dir, exp_name)
        _, abs_trial_dir = self._get_trial_dir(exp_dir)
        self.assertIsNone(trial.error_file)
        self.assertEqual(trial.local_dir, exp_dir)
        self.assertEqual(trial.logdir, abs_trial_dir)
        self.assertTrue(os.path.isdir(absolute_local_dir), absolute_local_dir)
        self.assertTrue(os.path.isdir(exp_dir))
        self.assertTrue(os.path.isdir(abs_trial_dir))
        self.assertTrue(
            os.path.isfile(
                os.path.join(abs_trial_dir, "checkpoint_1/checkpoint-1")))
    def _restore(self, exp_name, local_dir, absolute_local_dir):
        """Resume from the (possibly relative) checkpoint path and assert
        the restored trial finishes without error."""
        trial_name, abs_trial_dir = self._get_trial_dir(
            os.path.join(absolute_local_dir, exp_name))
        checkpoint_path = os.path.join(
            local_dir, exp_name, trial_name,
            "checkpoint_1/checkpoint-1")  # Relative checkpoint path
        # The file tune would find. The absolute checkpoint path.
        tune_find_file = os.path.abspath(os.path.expanduser(checkpoint_path))
        self.assertTrue(
            os.path.isfile(tune_find_file),
            # Fix: the failure message previously read "{} is not exist!".
            "{} does not exist!".format(tune_find_file))
        trial, = tune.run(
            self.MockTrainable,
            name=exp_name,
            stop={
                "training_iteration": 2
            },  # train one more iteration.
            restore=checkpoint_path,  # Restore the checkpoint
            config={
                "env": "CartPole-v0",
                "log_level": "DEBUG"
            }).trials
        self.assertIsNone(trial.error_file)
    def testDottedRelativePath(self):
        local_dir = "./test_dotted_relative_local_dir"
        exp_name = self.prefix + "DottedRelativeLocalDir"
        absolute_local_dir = os.path.abspath(local_dir)
        self.absolute_local_dir = absolute_local_dir
        self.assertFalse(os.path.exists(absolute_local_dir))
        self._train(exp_name, local_dir, absolute_local_dir)
        self._restore(exp_name, local_dir, absolute_local_dir)
    def testRelativePath(self):
        local_dir = "test_relative_local_dir"
        exp_name = self.prefix + "RelativePath"
        absolute_local_dir = os.path.abspath(local_dir)
        self.absolute_local_dir = absolute_local_dir
        self.assertFalse(os.path.exists(absolute_local_dir))
        self._train(exp_name, local_dir, absolute_local_dir)
        self._restore(exp_name, local_dir, absolute_local_dir)
    def testTildeAbsolutePath(self):
        local_dir = "~/test_tilde_absolute_local_dir"
        exp_name = self.prefix + "TildeAbsolutePath"
        absolute_local_dir = os.path.abspath(os.path.expanduser(local_dir))
        self.absolute_local_dir = absolute_local_dir
        self.assertFalse(os.path.exists(absolute_local_dir))
        self._train(exp_name, local_dir, absolute_local_dir)
        self._restore(exp_name, local_dir, absolute_local_dir)
    def testTempfile(self):
        local_dir = tempfile.mkdtemp()
        exp_name = self.prefix + "Tempfile"
        self.absolute_local_dir = local_dir
        self._train(exp_name, local_dir, local_dir)
        self._restore(exp_name, local_dir, local_dir)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_tune_server.py
|
Python
|
import unittest
import socket
import subprocess
import json
import ray
from ray.rllib import _register_all
from ray.tune.trial import Trial, Resources
from ray.tune.web_server import TuneClient
from ray.tune.trial_runner import TrialRunner
def get_valid_port():
    """Return the first free TCP port on 127.0.0.1 at or above 4321.

    Probes ports sequentially by attempting to bind. The probe socket is
    always closed — including when ``bind()`` raises — via the socket's
    context manager; the original open/bind/close sequence leaked a file
    descriptor for every occupied port it tried.
    """
    port = 4321
    while True:
        print("Trying port", port)
        try:
            # `with` guarantees closure even if bind() raises.
            with socket.socket() as port_test_socket:
                port_test_socket.bind(("127.0.0.1", port))
            return port
        except socket.error:
            port += 1
class TuneServerSuite(unittest.TestCase):
def basicSetup(self):
ray.init(num_cpus=4, num_gpus=1)
port = get_valid_port()
self.runner = TrialRunner(launch_web_server=True, server_port=port)
runner = self.runner
kwargs = {
"stopping_criterion": {
"training_iteration": 3
},
"resources": Resources(cpu=1, gpu=1),
}
trials = [Trial("__fake", **kwargs), Trial("__fake", **kwargs)]
for t in trials:
runner.add_trial(t)
client = TuneClient("localhost", port)
return runner, client
def tearDown(self):
print("Tearing down....")
try:
self.runner._server.shutdown()
self.runner = None
except Exception as e:
print(e)
ray.shutdown()
_register_all()
def testAddTrial(self):
runner, client = self.basicSetup()
for i in range(3):
runner.step()
spec = {
"run": "__fake",
"stop": {
"training_iteration": 3
},
"resources_per_trial": {
"cpu": 1,
"gpu": 1
},
}
client.add_trial("test", spec)
runner.step()
all_trials = client.get_all_trials()["trials"]
runner.step()
self.assertEqual(len(all_trials), 3)
def testGetTrials(self):
runner, client = self.basicSetup()
for i in range(3):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(len(all_trials), 2)
tid = all_trials[0]["id"]
client.get_trial(tid)
runner.step()
self.assertEqual(len(all_trials), 2)
def testGetTrialsWithFunction(self):
runner, client = self.basicSetup()
test_trial = Trial(
"__fake",
trial_id="function_trial",
stopping_criterion={"training_iteration": 3},
config={"callbacks": {
"on_episode_start": lambda x: None
}})
runner.add_trial(test_trial)
for i in range(3):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(len(all_trials), 3)
client.get_trial("function_trial")
runner.step()
self.assertEqual(len(all_trials), 3)
def testStopTrial(self):
"""Check if Stop Trial works."""
runner, client = self.basicSetup()
for i in range(2):
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 1)
tid = [t for t in all_trials if t["status"] == Trial.RUNNING][0]["id"]
client.stop_trial(tid)
runner.step()
all_trials = client.get_all_trials()["trials"]
self.assertEqual(
len([t for t in all_trials if t["status"] == Trial.RUNNING]), 0)
def testCurlCommand(self):
"""Check if Stop Trial works."""
runner, client = self.basicSetup()
for i in range(2):
runner.step()
stdout = subprocess.check_output(
"curl \"http://{}:{}/trials\"".format(client.server_address,
client.server_port),
shell=True)
self.assertNotEqual(stdout, None)
curl_trials = json.loads(stdout.decode())["trials"]
client_trials = client.get_all_trials()["trials"]
for curl_trial, client_trial in zip(curl_trials, client_trials):
self.assertEqual(curl_trial.keys(), client_trial.keys())
self.assertEqual(curl_trial["id"], client_trial["id"])
self.assertEqual(curl_trial["trainable_name"],
client_trial["trainable_name"])
self.assertEqual(curl_trial["status"], client_trial["status"])
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/test_var.py
|
Python
|
import os
import numpy as np
import unittest
import ray
from ray.rllib import _register_all
from ray import tune
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.experiment import Experiment
from ray.tune.suggest import grid_search, BasicVariantGenerator
from ray.tune.suggest.suggestion import _MockSuggestionAlgorithm
from ray.tune.suggest.variant_generator import (RecursiveDependencyError,
resolve_nested_dict)
class VariantGeneratorTest(unittest.TestCase):
def setUp(self):
ray.init()
def tearDown(self):
ray.shutdown()
_register_all() # re-register the evicted objects
def generate_trials(self, spec, name):
suggester = BasicVariantGenerator()
suggester.add_configurations({name: spec})
return suggester.next_trials()
def testParseToTrials(self):
trials = self.generate_trials({
"run": "PPO",
"num_samples": 2,
"max_failures": 5,
"config": {
"env": "Pong-v0",
"foo": "bar"
},
}, "tune-pong")
trials = list(trials)
self.assertEqual(len(trials), 2)
self.assertTrue("PPO_Pong-v0" in str(trials[0]))
self.assertEqual(trials[0].config, {"foo": "bar", "env": "Pong-v0"})
self.assertEqual(trials[0].trainable_name, "PPO")
self.assertEqual(trials[0].experiment_tag, "0")
self.assertEqual(trials[0].max_failures, 5)
self.assertEqual(trials[0].evaluated_params, {})
self.assertEqual(trials[0].local_dir,
os.path.join(DEFAULT_RESULTS_DIR, "tune-pong"))
self.assertEqual(trials[1].experiment_tag, "1")
def testEval(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"foo": {
"eval": "2 + 2"
},
},
}, "eval")
trials = list(trials)
self.assertEqual(len(trials), 1)
self.assertEqual(trials[0].config, {"foo": 4})
self.assertEqual(trials[0].evaluated_params, {"foo": 4})
self.assertEqual(trials[0].experiment_tag, "0_foo=4")
def testGridSearch(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"bar": {
"grid_search": [True, False]
},
"foo": {
"grid_search": [1, 2, 3]
},
"baz": "asd",
},
}, "grid_search")
trials = list(trials)
self.assertEqual(len(trials), 6)
self.assertEqual(trials[0].config, {
"bar": True,
"foo": 1,
"baz": "asd",
})
self.assertEqual(trials[0].evaluated_params, {
"bar": True,
"foo": 1,
})
self.assertEqual(trials[0].experiment_tag, "0_bar=True,foo=1")
self.assertEqual(trials[1].config, {
"bar": False,
"foo": 1,
"baz": "asd",
})
self.assertEqual(trials[1].evaluated_params, {
"bar": False,
"foo": 1,
})
self.assertEqual(trials[1].experiment_tag, "1_bar=False,foo=1")
self.assertEqual(trials[2].config, {
"bar": True,
"foo": 2,
"baz": "asd",
})
self.assertEqual(trials[2].evaluated_params, {
"bar": True,
"foo": 2,
})
self.assertEqual(trials[3].config, {
"bar": False,
"foo": 2,
"baz": "asd",
})
self.assertEqual(trials[3].evaluated_params, {
"bar": False,
"foo": 2,
})
self.assertEqual(trials[4].config, {
"bar": True,
"foo": 3,
"baz": "asd",
})
self.assertEqual(trials[4].evaluated_params, {
"bar": True,
"foo": 3,
})
self.assertEqual(trials[5].config, {
"bar": False,
"foo": 3,
"baz": "asd",
})
self.assertEqual(trials[5].evaluated_params, {
"bar": False,
"foo": 3,
})
def testGridSearchAndEval(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"qux": tune.sample_from(lambda spec: 2 + 2),
"bar": grid_search([True, False]),
"foo": grid_search([1, 2, 3]),
"baz": "asd",
},
}, "grid_eval")
trials = list(trials)
self.assertEqual(len(trials), 6)
self.assertEqual(trials[0].config, {
"bar": True,
"foo": 1,
"qux": 4,
"baz": "asd",
})
self.assertEqual(trials[0].evaluated_params, {
"bar": True,
"foo": 1,
"qux": 4,
})
self.assertEqual(trials[0].experiment_tag, "0_bar=True,foo=1,qux=4")
def testConditionResolution(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"x": 1,
"y": tune.sample_from(lambda spec: spec.config.x + 1),
"z": tune.sample_from(lambda spec: spec.config.y + 1),
},
}, "condition_resolution")
trials = list(trials)
self.assertEqual(len(trials), 1)
self.assertEqual(trials[0].config, {"x": 1, "y": 2, "z": 3})
self.assertEqual(trials[0].evaluated_params, {"y": 2, "z": 3})
self.assertEqual(trials[0].experiment_tag, "0_y=2,z=3")
def testDependentLambda(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"x": grid_search([1, 2]),
"y": tune.sample_from(lambda spec: spec.config.x * 100),
},
}, "dependent_lambda")
trials = list(trials)
self.assertEqual(len(trials), 2)
self.assertEqual(trials[0].config, {"x": 1, "y": 100})
self.assertEqual(trials[1].config, {"x": 2, "y": 200})
def testDependentGridSearch(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"x": grid_search([
tune.sample_from(lambda spec: spec.config.y * 100),
tune.sample_from(lambda spec: spec.config.y * 200)
]),
"y": tune.sample_from(lambda spec: 1),
},
}, "dependent_grid_search")
trials = list(trials)
self.assertEqual(len(trials), 2)
self.assertEqual(trials[0].config, {"x": 100, "y": 1})
self.assertEqual(trials[1].config, {"x": 200, "y": 1})
def testNestedValues(self):
trials = self.generate_trials({
"run": "PPO",
"config": {
"x": {
"y": {
"z": tune.sample_from(lambda spec: 1)
}
},
"y": tune.sample_from(lambda spec: 12),
"z": tune.sample_from(lambda spec: spec.config.x.y.z * 100),
},
}, "nested_values")
trials = list(trials)
self.assertEqual(len(trials), 1)
self.assertEqual(trials[0].config, {
"x": {
"y": {
"z": 1
}
},
"y": 12,
"z": 100
})
self.assertEqual(trials[0].evaluated_params, {
"x/y/z": 1,
"y": 12,
"z": 100
})
def testLogUniform(self):
sampler = tune.loguniform(1e-10, 1e-1).func
results = [sampler(None) for i in range(1000)]
assert abs(np.log(min(results)) / np.log(10) - -10) < 0.1
assert abs(np.log(max(results)) / np.log(10) - -1) < 0.1
sampler_e = tune.loguniform(np.e**-4, np.e, base=np.e).func
results_e = [sampler_e(None) for i in range(1000)]
assert abs(np.log(min(results_e)) - -4) < 0.1
assert abs(np.log(max(results_e)) - 1) < 0.1
def test_resolve_dict(self):
config = {
"a": {
"b": 1,
"c": 2,
},
"b": {
"a": 3
}
}
resolved = resolve_nested_dict(config)
for k, v in [(("a", "b"), 1), (("a", "c"), 2), (("b", "a"), 3)]:
self.assertEqual(resolved.get(k), v)
def testRecursiveDep(self):
try:
list(
self.generate_trials({
"run": "PPO",
"config": {
"foo": tune.sample_from(lambda spec: spec.config.foo),
},
}, "recursive_dep"))
except RecursiveDependencyError as e:
assert "`foo` recursively depends on" in str(e), e
else:
assert False
def testMaxConcurrentSuggestions(self):
"""Checks that next_trials() supports throttling."""
experiment_spec = {
"run": "PPO",
"num_samples": 6,
}
experiments = [Experiment.from_json("test", experiment_spec)]
searcher = _MockSuggestionAlgorithm(max_concurrent=4)
searcher.add_configurations(experiments)
trials = searcher.next_trials()
self.assertEqual(len(trials), 4)
self.assertEqual(searcher.next_trials(), [])
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
self.assertEqual(len(searcher.next_trials()), 1)
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
finished_trial = trials.pop()
searcher.on_trial_complete(finished_trial.trial_id)
self.assertEqual(len(searcher.next_trials()), 1)
self.assertEqual(len(searcher.next_trials()), 0)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tests/tutorial.py
|
Python
|
# flake8: noqa
# Original Code: https://github.com/pytorch/examples/blob/master/mnist/main.py
# yapf: disable
# __tutorial_imports_begin__
import numpy as np
import torch
import torch.optim as optim
from torchvision import datasets
from ray import tune
from ray.tune import track
from ray.tune.schedulers import ASHAScheduler
from ray.tune.examples.mnist_pytorch import get_data_loaders, ConvNet, train, test
# __tutorial_imports_end__
# yapf: enable
# yapf: disable
# __train_func_begin__
def train_mnist(config):
    """Train ConvNet on MNIST for 10 epochs, reporting accuracy to Tune.

    Args:
        config: dict with "lr" and "momentum" for the SGD optimizer.
    """
    model = ConvNet()
    train_loader, test_loader = get_data_loaders()
    optimizer = optim.SGD(
        model.parameters(), lr=config["lr"], momentum=config["momentum"])
    for i in range(10):
        train(model, optimizer, train_loader)
        acc = test(model, test_loader)
        track.log(mean_accuracy=acc)
        if i % 5 == 0:
            # This saves the model to the trial directory
            torch.save(model, "./model.pth")
# __train_func_end__
# yapf: enable
# __eval_func_begin__
search_space = {
"lr": tune.sample_from(lambda spec: 10**(-10 * np.random.rand())),
"momentum": tune.uniform(0.1, 0.9)
}
# Uncomment this to enable distributed execution
# `ray.init(address=...)`
analysis = tune.run(train_mnist, config=search_space)
# __eval_func_end__
#__plot_begin__
dfs = analysis.trial_dataframes
[d.mean_accuracy.plot() for d in dfs.values()]
#__plot_end__
# __run_scheduler_begin__
analysis = tune.run(
train_mnist,
num_samples=30,
scheduler=ASHAScheduler(metric="mean_accuracy", mode="max"),
config=search_space)
# Obtain a trial dataframe from all run trials of this `tune.run` call.
dfs = analysis.trial_dataframes
# __run_scheduler_end__
# yapf: disable
# __plot_scheduler_begin__
# Plot by epoch
ax = None # This plots everything on the same plot
for d in dfs.values():
ax = d.mean_accuracy.plot(ax=ax, legend=False)
# __plot_scheduler_end__
# yapf: enable
# __run_searchalg_begin__
from hyperopt import hp
from ray.tune.suggest.hyperopt import HyperOptSearch
space = {
"lr": hp.loguniform("lr", 1e-10, 0.1),
"momentum": hp.uniform("momentum", 0.1, 0.9),
}
hyperopt_search = HyperOptSearch(
space, max_concurrent=2, reward_attr="mean_accuracy")
analysis = tune.run(train_mnist, num_samples=10, search_alg=hyperopt_search)
# __run_searchalg_end__
# __run_analysis_begin__
import os
df = analysis.dataframe()
logdir = analysis.get_best_logdir("mean_accuracy", mode="max")
model = torch.load(os.path.join(logdir, "model.pth"))
# __run_analysis_end__
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
# __trainable_run_begin__
search_space = {
"lr": tune.sample_from(lambda spec: 10**(-10 * np.random.rand())),
"momentum": tune.uniform(0.1, 0.9)
}
analysis = tune.run(
TrainMNIST, config=search_space, stop={"training_iteration": 10})
# __trainable_run_end__
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/track/__init__.py
|
Python
|
import logging
from ray.tune.track.session import TrackSession
logger = logging.getLogger(__name__)
_session = None
def get_session():
    """Return the process-global TrackSession, raising if none exists."""
    global _session
    if _session:
        return _session
    raise ValueError("Session not detected. Try `track.init()`?")
def init(ignore_reinit_error=True, **session_kwargs):
    """Initializes the global trial context for this process.

    Creates a TrackSession object and the corresponding hooks for logging.
    If a session already exists, either warns and returns (the default) or
    raises, depending on `ignore_reinit_error`.

    Examples:
        >>> from ray.tune import track
        >>> track.init()
    """
    global _session
    if not _session:
        _session = TrackSession(**session_kwargs)
        return
    # TODO(ng): would be nice to stack crawl at creation time to report
    # where that initial trial was created, and that creation line
    # info is helpful to keep around anyway.
    reinit_msg = "A session already exists in the current context."
    if not ignore_reinit_error:
        raise ValueError(reinit_msg)
    if not _session.is_tune_session:
        logger.warning(reinit_msg)
def shutdown():
    """Close the active session, if any, and clear the global context."""
    global _session
    if not _session:
        return
    _session.close()
    _session = None
def log(**kwargs):
    """Forwards the named metrics to the current session's ``log``."""
    return get_session().log(**kwargs)
def trial_dir():
    """Returns the directory where trial results are saved.

    This includes json data containing the session's parameters and metrics.
    """
    return get_session().logdir
__all__ = ["TrackSession", "session", "log", "trial_dir", "init", "shutdown"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/track/session.py
|
Python
|
import os
from datetime import datetime
from ray.tune.trial import Trial
from ray.tune.result import DEFAULT_RESULTS_DIR, TRAINING_ITERATION
from ray.tune.logger import UnifiedLogger, Logger
class _ReporterHook(Logger):
    """Logger adapter that reroutes results into a Tune reporter callable."""
    def __init__(self, tune_reporter):
        # Reporter supplied by Tune; invoked once per result dict.
        self.tune_reporter = tune_reporter
    def on_result(self, metrics):
        # Unpack the metrics dict into the reporter's keyword interface.
        reporter = self.tune_reporter
        return reporter(**metrics)
class TrackSession:
    """Manages results for a single session.

    Represents a single Trial in an experiment.

    Attributes:
        trial_name (str): Custom trial name.
        experiment_dir (str): Directory where results for all trials
            are stored. Each session is stored into a unique directory
            inside experiment_dir.
        upload_dir (str): Directory to sync results to.
        trial_config (dict): Parameters that will be logged to disk.
        _tune_reporter (StatusReporter): For rerouting when using Tune.
            Will not instantiate logging if not None.
    """
    def __init__(self,
                 trial_name="",
                 experiment_dir=None,
                 upload_dir=None,
                 trial_config=None,
                 _tune_reporter=None):
        self._experiment_dir = None
        self._logdir = None
        self._upload_dir = None
        self.trial_config = None
        # Incremented on every `log` call; starts at -1 so the first call
        # reports iteration 0.
        self._iteration = -1
        self.is_tune_session = bool(_tune_reporter)
        self.trial_id = Trial.generate_id()
        if trial_name:
            self.trial_id = trial_name + "_" + self.trial_id
        if self.is_tune_session:
            # Running inside Tune: reroute results to the provided reporter
            # instead of creating our own logger.
            self._logger = _ReporterHook(_tune_reporter)
            self._logdir = _tune_reporter.logdir
        else:
            self._initialize_logging(trial_name, experiment_dir, upload_dir,
                                     trial_config)
    def _initialize_logging(self,
                            trial_name="",
                            experiment_dir=None,
                            upload_dir=None,
                            trial_config=None):
        """Set up a standalone UnifiedLogger under `experiment_dir`."""
        if upload_dir:
            raise NotImplementedError("Upload Dir is not yet implemented.")
        # TODO(rliaw): In other parts of the code, this is `local_dir`.
        if experiment_dir is None:
            experiment_dir = os.path.join(DEFAULT_RESULTS_DIR, "default")
        self._experiment_dir = os.path.expanduser(experiment_dir)
        # TODO(rliaw): Refactor `logdir` to `trial_dir`.
        self._logdir = Trial.create_logdir(trial_name, self._experiment_dir)
        self._upload_dir = upload_dir
        self.trial_config = trial_config or {}
        # misc metadata to save as well
        self.trial_config["trial_id"] = self.trial_id
        self._logger = UnifiedLogger(self.trial_config, self._logdir)
    def log(self, **metrics):
        """Logs all named arguments specified in `metrics`.

        This will log trial metrics locally, and they will be synchronized
        with the driver periodically through ray.

        Arguments:
            metrics: named arguments with corresponding values to log.
        """
        self._iteration += 1
        # TODO: Implement a batching mechanism for multiple calls to `log`
        # within the same iteration.
        metrics_dict = metrics.copy()
        metrics_dict.update({"trial_id": self.trial_id})
        # TODO: Move Trainable autopopulation to a util function
        # Only filled in when the caller didn't supply it explicitly.
        metrics_dict.setdefault(TRAINING_ITERATION, self._iteration)
        self._logger.on_result(metrics_dict)
    def close(self):
        """Mark the trial complete, flush and close the logger."""
        self.trial_config["trial_completed"] = True
        self.trial_config["end_time"] = datetime.now().isoformat()
        # TODO(rliaw): Have Tune support updated configs
        self._logger.update_config(self.trial_config)
        self._logger.flush()
        self._logger.close()
    @property
    def logdir(self):
        """Trial logdir (subdir of given experiment directory)"""
        return self._logdir
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/trainable.py
|
Python
|
from datetime import datetime
import copy
import io
import logging
import glob
import os
import pickle
import pandas as pd
from six import string_types
import shutil
import tempfile
import time
import uuid
import ray
from ray.tune.logger import UnifiedLogger
from ray.tune.result import (DEFAULT_RESULTS_DIR, TIME_THIS_ITER_S,
TIMESTEPS_THIS_ITER, DONE, TIMESTEPS_TOTAL,
EPISODES_THIS_ITER, EPISODES_TOTAL,
TRAINING_ITERATION, RESULT_DUPLICATE)
from ray.tune.utils import UtilMonitor
logger = logging.getLogger(__name__)
# _setup() calls slower than this many seconds trigger a hint suggesting
# reuse_actors=True (see Trainable.__init__).
SETUP_TIME_THRESHOLD = 10
class TrainableUtil:
    """Static helpers for packaging and locating Trainable checkpoints."""

    @staticmethod
    def pickle_checkpoint(checkpoint_path):
        """Pickles checkpoint data.

        Serializes every file under the checkpoint directory (keyed by
        relative path) together with the checkpoint name into one blob.
        """
        checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
        file_data = {}
        for dirpath, _, filenames in os.walk(checkpoint_dir):
            for fname in filenames:
                abs_path = os.path.join(dirpath, fname)
                with open(abs_path, "rb") as fh:
                    file_data[os.path.relpath(abs_path,
                                              checkpoint_dir)] = fh.read()
        # Use normpath so that a directory path isn't mapped to empty string.
        checkpoint_name = os.path.basename(os.path.normpath(checkpoint_path))
        if os.path.isdir(checkpoint_path):
            checkpoint_name += os.path.sep
        return pickle.dumps({
            "checkpoint_name": checkpoint_name,
            "data": file_data,
        })

    @staticmethod
    def find_checkpoint_dir(checkpoint_path):
        """Returns the directory containing the checkpoint path.
        Raises:
            FileNotFoundError if the directory is not found.
        """
        if not os.path.exists(checkpoint_path):
            raise FileNotFoundError("Path does not exist", checkpoint_path)
        current = (checkpoint_path if os.path.isdir(checkpoint_path) else
                   os.path.dirname(checkpoint_path))
        # Walk upward until the marker file appears; stop at the FS root,
        # where dirname() is a fixed point.
        while current != os.path.dirname(current):
            if os.path.exists(os.path.join(current, ".is_checkpoint")):
                return current
            current = os.path.dirname(current)
        raise FileNotFoundError("Checkpoint directory not found for {}"
                                .format(checkpoint_path))

    @staticmethod
    def make_checkpoint_dir(checkpoint_dir):
        """Creates a checkpoint directory at the provided path."""
        os.makedirs(checkpoint_dir, exist_ok=True)
        # Drop marker in directory to identify it as a checkpoint dir.
        with open(os.path.join(checkpoint_dir, ".is_checkpoint"), "a"):
            pass

    @staticmethod
    def get_checkpoints_paths(logdir):
        """ Finds the checkpoints within a specific folder.
        Returns a pandas DataFrame of training iterations and checkpoint
        paths within a specific folder.
        Raises:
            FileNotFoundError if the directory is not found.
        """
        iter_chkpt_pairs = []
        for marker_path in glob.glob(
                os.path.join(logdir, "checkpoint_*/.is_checkpoint")):
            chkpt_dir = os.path.dirname(marker_path)
            metadata_files = glob.glob(
                os.path.join(chkpt_dir, "*.tune_metadata"))
            # Exactly one metadata sidecar is expected per checkpoint dir.
            if len(metadata_files) != 1:
                raise ValueError(
                    "{} has zero or more than one tune_metadata.".format(
                        chkpt_dir))
            chkpt_path = metadata_files[0][:-len(".tune_metadata")]
            # Directory names look like "checkpoint_<iteration>".
            chkpt_iter = int(chkpt_dir[chkpt_dir.rfind("_") + 1:])
            iter_chkpt_pairs.append([chkpt_iter, chkpt_path])
        return pd.DataFrame(
            iter_chkpt_pairs, columns=["training_iteration", "chkpt_path"])
class Trainable:
    """Abstract class for trainable models, functions, etc.
    A call to ``train()`` on a trainable will execute one logical iteration of
    training. As a rule of thumb, the execution time of one train call should
    be large enough to avoid overheads (i.e. more than a few seconds), but
    short enough to report progress periodically (i.e. at most a few minutes).
    Calling ``save()`` should save the training state of a trainable to disk,
    and ``restore(path)`` should restore a trainable to the given state.
    Generally you only need to implement ``_setup``, ``_train``,
    ``_save``, and ``_restore`` when subclassing Trainable.
    Other implementation methods that may be helpful to override are
    ``_log_result``, ``reset_config``, ``_stop``, and ``_export_model``.
    When using Tune, Tune will convert this class into a Ray actor, which
    runs on a separate process. Tune will also change the current working
    directory of this process to `self.logdir`.
    """
    def __init__(self, config=None, logger_creator=None):
        """Initialize an Trainable.
        Sets up logging and points ``self.logdir`` to a directory in which
        training outputs should be placed.
        Subclasses should prefer defining ``_setup()`` instead of overriding
        ``__init__()`` directly.
        Args:
            config (dict): Trainable-specific configuration data. By default
                will be saved as ``self.config``.
            logger_creator (func): Function that creates a ray.tune.Logger
                object. If unspecified, a default logger is created.
        """
        # Experiment id is preserved across checkpoint/restore (see restore).
        self._experiment_id = uuid.uuid4().hex
        self.config = config or {}
        if logger_creator:
            self._result_logger = logger_creator(self.config)
            self._logdir = self._result_logger.logdir
        else:
            # No creator given: log into a fresh timestamped temp directory
            # under the default results dir.
            logdir_prefix = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
            ray.utils.try_to_create_directory(DEFAULT_RESULTS_DIR)
            self._logdir = tempfile.mkdtemp(
                prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
            self._result_logger = UnifiedLogger(
                self.config, self._logdir, loggers=None)
        # Counters feeding the auto-filled result fields in train().
        self._iteration = 0
        self._time_total = 0.0
        self._timesteps_total = None
        self._episodes_total = None
        self._time_since_restore = 0.0
        self._timesteps_since_restore = 0
        self._iterations_since_restore = 0
        self._restored = False
        start_time = time.time()
        self._setup(copy.deepcopy(self.config))
        setup_time = time.time() - start_time
        # Warn about slow setup since actor creation repeats it per trial.
        if setup_time > SETUP_TIME_THRESHOLD:
            logger.info("_setup took {:.3f} seconds. If your trainable is "
                        "slow to initialize, consider setting "
                        "reuse_actors=True to reduce actor creation "
                        "overheads.".format(setup_time))
        self._local_ip = ray.services.get_node_ip_address()
        log_sys_usage = self.config.get("log_sys_usage", False)
        self._monitor = UtilMonitor(start=log_sys_usage)
    @classmethod
    def default_resource_request(cls, config):
        """Returns the resource requirement for the given configuration.
        This can be overriden by sub-classes to set the correct trial resource
        allocation, so the user does not need to.
        Example:
            >>> def default_resource_request(cls, config):
            >>>     return Resources(
            >>>         cpu=0,
            >>>         gpu=0,
            >>>         extra_cpu=config["workers"],
            >>>         extra_gpu=int(config["use_gpu"]) * config["workers"])
        """
        return None
    @classmethod
    def resource_help(cls, config):
        """Returns a help string for configuring this trainable's resources.
        Args:
            config (dict): The Trainer's config dict.
        """
        return ""
    def current_ip(self):
        """Refreshes and returns the IP address of the hosting node."""
        logger.warning("Getting current IP.")
        self._local_ip = ray.services.get_node_ip_address()
        return self._local_ip
    def train(self):
        """Runs one logical iteration of training.
        Subclasses should override ``_train()`` instead to return results.
        This class automatically fills the following fields in the result:
        `done` (bool): training is terminated. Filled only if not provided.
        `time_this_iter_s` (float): Time in seconds this iteration
        took to run. This may be overriden in order to override the
        system-computed time difference.
        `time_total_s` (float): Accumulated time in seconds for this
        entire experiment.
        `experiment_id` (str): Unique string identifier
        for this experiment. This id is preserved
        across checkpoint / restore calls.
        `training_iteration` (int): The index of this
        training iteration, e.g. call to train(). This is incremented
        after `_train()` is called.
        `pid` (str): The pid of the training process.
        `date` (str): A formatted date of when the result was processed.
        `timestamp` (str): A UNIX timestamp of when the result
        was processed.
        `hostname` (str): Hostname of the machine hosting the training
        process.
        `node_ip` (str): Node ip of the machine hosting the training
        process.
        Returns:
            A dict that describes training progress.
        """
        start = time.time()
        result = self._train()
        assert isinstance(result, dict), "_train() needs to return a dict."
        # We do not modify internal state nor update this result if duplicate.
        if RESULT_DUPLICATE in result:
            return result
        result = result.copy()
        self._iteration += 1
        self._iterations_since_restore += 1
        # Subclasses may report their own iteration time; otherwise use the
        # wall-clock time around _train().
        if result.get(TIME_THIS_ITER_S) is not None:
            time_this_iter = result[TIME_THIS_ITER_S]
        else:
            time_this_iter = time.time() - start
        self._time_total += time_this_iter
        self._time_since_restore += time_this_iter
        result.setdefault(DONE, False)
        # self._timesteps_total should only be tracked if increments provided
        if result.get(TIMESTEPS_THIS_ITER) is not None:
            if self._timesteps_total is None:
                self._timesteps_total = 0
            self._timesteps_total += result[TIMESTEPS_THIS_ITER]
            self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
        # self._episodes_total should only be tracked if increments provided
        if result.get(EPISODES_THIS_ITER) is not None:
            if self._episodes_total is None:
                self._episodes_total = 0
            self._episodes_total += result[EPISODES_THIS_ITER]
        # self._timesteps_total should not override user-provided total
        result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
        result.setdefault(EPISODES_TOTAL, self._episodes_total)
        result.setdefault(TRAINING_ITERATION, self._iteration)
        # Provides auto-filled neg_mean_loss for avoiding regressions
        if result.get("mean_loss"):
            result.setdefault("neg_mean_loss", -result["mean_loss"])
        now = datetime.today()
        result.update(
            experiment_id=self._experiment_id,
            date=now.strftime("%Y-%m-%d_%H-%M-%S"),
            timestamp=int(time.mktime(now.timetuple())),
            time_this_iter_s=time_this_iter,
            time_total_s=self._time_total,
            pid=os.getpid(),
            hostname=os.uname()[1],
            node_ip=self._local_ip,
            config=self.config,
            time_since_restore=self._time_since_restore,
            timesteps_since_restore=self._timesteps_since_restore,
            iterations_since_restore=self._iterations_since_restore)
        # Attach system utilization stats when monitoring is enabled.
        monitor_data = self._monitor.get_data()
        if monitor_data:
            result.update(monitor_data)
        self._log_result(result)
        return result
    def save(self, checkpoint_dir=None):
        """Saves the current model state to a checkpoint.
        Subclasses should override ``_save()`` instead to save state.
        This method dumps additional metadata alongside the saved path.
        Args:
            checkpoint_dir (str): Optional dir to place the checkpoint.
        Returns:
            Checkpoint path or prefix that may be passed to restore().
        """
        checkpoint_dir = os.path.join(checkpoint_dir or self.logdir,
                                      "checkpoint_{}".format(self._iteration))
        TrainableUtil.make_checkpoint_dir(checkpoint_dir)
        checkpoint = self._save(checkpoint_dir)
        saved_as_dict = False
        if isinstance(checkpoint, string_types):
            if not checkpoint.startswith(checkpoint_dir):
                raise ValueError(
                    "The returned checkpoint path must be within the "
                    "given checkpoint dir {}: {}".format(
                        checkpoint_dir, checkpoint))
            checkpoint_path = checkpoint
            if os.path.isdir(checkpoint_path):
                # Add trailing slash to prevent tune metadata from
                # being written outside the directory.
                checkpoint_path = os.path.join(checkpoint_path, "")
        elif isinstance(checkpoint, dict):
            saved_as_dict = True
            checkpoint_path = os.path.join(checkpoint_dir, "checkpoint")
            with open(checkpoint_path, "wb") as f:
                pickle.dump(checkpoint, f)
        else:
            raise ValueError("Returned unexpected type {}. "
                             "Expected str or dict.".format(type(checkpoint)))
        # Sidecar metadata file lets restore() recover the counters below.
        with open(checkpoint_path + ".tune_metadata", "wb") as f:
            pickle.dump({
                "experiment_id": self._experiment_id,
                "iteration": self._iteration,
                "timesteps_total": self._timesteps_total,
                "time_total": self._time_total,
                "episodes_total": self._episodes_total,
                "saved_as_dict": saved_as_dict,
                "ray_version": ray.__version__,
            }, f)
        return checkpoint_path
    def save_to_object(self):
        """Saves the current model state to a Python object.
        It also saves to disk but does not return the checkpoint path.
        Returns:
            Object holding checkpoint data.
        """
        tmpdir = tempfile.mkdtemp("save_to_object", dir=self.logdir)
        checkpoint_path = self.save(tmpdir)
        # Save all files in subtree.
        data_dict = TrainableUtil.pickle_checkpoint(checkpoint_path)
        out = io.BytesIO()
        if len(data_dict) > 10e6:  # getting pretty large
            logger.info("Checkpoint size is {} bytes".format(len(data_dict)))
        out.write(data_dict)
        shutil.rmtree(tmpdir)
        return out.getvalue()
    def restore(self, checkpoint_path):
        """Restores training state from a given model checkpoint.
        These checkpoints are returned from calls to save().
        Subclasses should override ``_restore()`` instead to restore state.
        This method restores additional metadata saved with the checkpoint.
        """
        # Recover the counters written by save()'s metadata sidecar.
        with open(checkpoint_path + ".tune_metadata", "rb") as f:
            metadata = pickle.load(f)
        self._experiment_id = metadata["experiment_id"]
        self._iteration = metadata["iteration"]
        self._timesteps_total = metadata["timesteps_total"]
        self._time_total = metadata["time_total"]
        self._episodes_total = metadata["episodes_total"]
        saved_as_dict = metadata["saved_as_dict"]
        if saved_as_dict:
            with open(checkpoint_path, "rb") as loaded_state:
                checkpoint_dict = pickle.load(loaded_state)
            checkpoint_dict.update(tune_checkpoint_path=checkpoint_path)
            self._restore(checkpoint_dict)
        else:
            self._restore(checkpoint_path)
        self._time_since_restore = 0.0
        self._timesteps_since_restore = 0
        self._iterations_since_restore = 0
        self._restored = True
        logger.info("Restored on %s from checkpoint: %s", self.current_ip(),
                    checkpoint_path)
        state = {
            "_iteration": self._iteration,
            "_timesteps_total": self._timesteps_total,
            "_time_total": self._time_total,
            "_episodes_total": self._episodes_total,
        }
        logger.info("Current state after restoring: %s", state)
    def restore_from_object(self, obj):
        """Restores training state from a checkpoint object.
        These checkpoints are returned from calls to save_to_object().
        """
        info = pickle.loads(obj)
        data = info["data"]
        # Unpack each file into a temp dir mirroring the original layout,
        # then delegate to restore().
        tmpdir = tempfile.mkdtemp("restore_from_object", dir=self.logdir)
        checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"])
        for relpath_name, file_contents in data.items():
            path = os.path.join(tmpdir, relpath_name)
            # This may be a subdirectory, hence not just using tmpdir
            os.makedirs(os.path.dirname(path), exist_ok=True)
            with open(path, "wb") as f:
                f.write(file_contents)
        self.restore(checkpoint_path)
        shutil.rmtree(tmpdir)
    def delete_checkpoint(self, checkpoint_path):
        """Deletes local copy of checkpoint.
        Args:
            checkpoint_path (str): Path to checkpoint.
        """
        try:
            checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
        except FileNotFoundError:
            # The checkpoint won't exist locally if the
            # trial was rescheduled to another worker.
            logger.debug("Checkpoint not found during garbage collection.")
            return
        if os.path.exists(checkpoint_dir):
            shutil.rmtree(checkpoint_dir)
    def export_model(self, export_formats, export_dir=None):
        """Exports model based on export_formats.
        Subclasses should override _export_model() to actually
        export model to local directory.
        Args:
            export_formats (list): List of formats that should be exported.
            export_dir (str): Optional dir to place the exported model.
                Defaults to self.logdir.
        Returns:
            A dict that maps ExportFormats to successfully exported models.
        """
        export_dir = export_dir or self.logdir
        return self._export_model(export_formats, export_dir)
    def reset_config(self, new_config):
        """Resets configuration without restarting the trial.
        This method is optional, but can be implemented to speed up algorithms
        such as PBT, and to allow performance optimizations such as running
        experiments with reuse_actors=True. Note that self.config need to
        be updated to reflect the latest parameter information in Ray logs.
        Args:
            new_config (dir): Updated hyperparameter configuration
                for the trainable.
        Returns:
            True if reset was successful else False.
        """
        return False
    def stop(self):
        """Releases all resources used by this trainable."""
        # Flush pending results before tearing down subclass resources.
        self._result_logger.flush()
        self._result_logger.close()
        self._stop()
    @property
    def logdir(self):
        """Directory of the results and checkpoints for this Trainable.
        Tune will automatically sync this folder with the driver if execution
        is distributed.
        Note that the current working directory will also be changed to this.
        """
        return os.path.join(self._logdir, "")
    @property
    def iteration(self):
        """Current training iteration.
        This value is automatically incremented every time `train()` is called
        and is automatically inserted into the training result dict.
        """
        return self._iteration
    def get_config(self):
        """Returns configuration passed in by Tune."""
        return self.config
    def _train(self):
        """Subclasses should override this to implement train().
        The return value will be automatically passed to the loggers. Users
        can also return `tune.result.DONE` or `tune.result.SHOULD_CHECKPOINT`
        as a key to manually trigger termination or checkpointing of this
        trial. Note that manual checkpointing only works when subclassing
        Trainables.
        Returns:
            A dict that describes training progress.
        """
        raise NotImplementedError
    def _save(self, tmp_checkpoint_dir):
        """Subclasses should override this to implement ``save()``.
        Warning:
            Do not rely on absolute paths in the implementation of ``_save``
            and ``_restore``.
        Use ``validate_save_restore`` to catch ``_save``/``_restore`` errors
        before execution.
        >>> from ray.tune.utils import validate_save_restore
        >>> validate_save_restore(MyTrainableClass)
        >>> validate_save_restore(MyTrainableClass, use_object_store=True)
        Args:
            tmp_checkpoint_dir (str): The directory where the checkpoint
                file must be stored. In a Tune run, if the trial is paused,
                the provided path may be temporary and moved.
        Returns:
            A dict or string. If string, the return value is expected to be
            prefixed by `tmp_checkpoint_dir`. If dict, the return value will
            be automatically serialized by Tune and passed to `_restore()`.
        Examples:
            >>> print(trainable1._save("/tmp/checkpoint_1"))
            "/tmp/checkpoint_1/my_checkpoint_file"
            >>> print(trainable2._save("/tmp/checkpoint_2"))
            {"some": "data"}
            >>> trainable._save("/tmp/bad_example")
            "/tmp/NEW_CHECKPOINT_PATH/my_checkpoint_file" # This will error.
        """
        raise NotImplementedError
    def _restore(self, checkpoint):
        """Subclasses should override this to implement restore().
        Warning:
            In this method, do not rely on absolute paths. The absolute
            path of the checkpoint_dir used in ``_save`` may be changed.
        If ``_save`` returned a prefixed string, the prefix of the checkpoint
        string returned by ``_save`` may be changed. This is because trial
        pausing depends on temporary directories.
        The directory structure under the checkpoint_dir provided to ``_save``
        is preserved.
        See the example below.
        .. code-block:: python
            class Example(Trainable):
                def _save(self, checkpoint_path):
                    print(checkpoint_path)
                    return os.path.join(checkpoint_path, "my/check/point")
                def _restore(self, checkpoint):
                    print(checkpoint)
            >>> trainer = Example()
            >>> obj = trainer.save_to_object() # This is used when PAUSED.
            <logdir>/tmpc8k_c_6hsave_to_object/checkpoint_0/my/check/point
            >>> trainer.restore_from_object(obj) # Note the different prefix.
            <logdir>/tmpb87b5axfrestore_from_object/checkpoint_0/my/check/point
        Args:
            checkpoint (str|dict): If dict, the return value is as
                returned by `_save`. If a string, then it is a checkpoint path
                that may have a different prefix than that returned by `_save`.
                The directory structure underneath the `checkpoint_dir`
                `_save` is preserved.
        """
        raise NotImplementedError
    def _setup(self, config):
        """Subclasses should override this for custom initialization.
        Args:
            config (dict): Hyperparameters and other configs given.
                Copy of `self.config`.
        """
        pass
    def _log_result(self, result):
        """Subclasses can optionally override this to customize logging.
        Args:
            result (dict): Training result returned by _train().
        """
        self._result_logger.on_result(result)
    def _stop(self):
        """Subclasses should override this for any cleanup on stop.
        If any Ray actors are launched in the Trainable (i.e., with a RLlib
        trainer), be sure to kill the Ray actor process here.
        You can kill a Ray actor by calling `actor.__ray_terminate__.remote()`
        on the actor.
        """
        pass
    def _export_model(self, export_formats, export_dir):
        """Subclasses should override this to export model.
        Args:
            export_formats (list): List of formats that should be exported.
            export_dir (str): Directory to place exported models.
        Return:
            A dict that maps ExportFormats to successfully exported models.
        """
        return {}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/trial.py
|
Python
|
import ray.cloudpickle as cloudpickle
import copy
from datetime import datetime
import logging
import shutil
import uuid
import time
import tempfile
import os
from numbers import Number
from ray.tune import TuneError
from ray.tune.checkpoint_manager import Checkpoint, CheckpointManager
from ray.tune.durable_trainable import DurableTrainable
from ray.tune.logger import pretty_print, UnifiedLogger
# NOTE(rkn): We import ray.tune.registry here instead of importing the names we
# need because there are cyclic imports that may cause specific names to not
# have been defined yet. See https://github.com/ray-project/ray/issues/1716.
from ray.tune.registry import get_trainable_cls, validate_trainable
from ray.tune.result import DEFAULT_RESULTS_DIR, DONE, TRAINING_ITERATION
from ray.tune.resources import Resources, json_to_resources, resources_to_json
from ray.tune.trainable import TrainableUtil
from ray.tune.utils import flatten_dict
from ray.utils import binary_to_hex, hex_to_binary
# Minimum seconds between verbose result printouts for a trial
# (see Trial.update_last_result).
DEBUG_PRINT_INTERVAL = 5
# Trial logdir names are truncated to this many characters; override
# via the MAX_LEN_IDENTIFIER environment variable.
MAX_LEN_IDENTIFIER = int(os.environ.get("MAX_LEN_IDENTIFIER", 130))
logger = logging.getLogger(__name__)
def date_str():
    """Returns the current local time formatted as ``YYYY-MM-DD_HH-MM-SS``."""
    now = datetime.today()
    return now.strftime("%Y-%m-%d_%H-%M-%S")
class Location:
    """Describes the location at which Trial is placed to run."""

    def __init__(self, hostname=None, pid=None):
        self.hostname = hostname
        self.pid = pid

    def __str__(self):
        # No pid means the trial has not been placed anywhere yet.
        if not self.pid:
            return ""
        # Omit the hostname when the trial runs on this very machine.
        if self.hostname == os.uname()[1]:
            return "pid={}".format(self.pid)
        return "{}:{}".format(self.hostname, self.pid)
class ExportFormat:
    """Describes the format to export the trial Trainable.
    This may correspond to different file formats based on the
    Trainable implementation.
    """
    CHECKPOINT = "checkpoint"
    MODEL = "model"

    @staticmethod
    def validate(export_formats):
        """Normalizes `export_formats` in place and validates each entry.
        Raises:
            ValueError if the format is unknown.
        """
        known = (ExportFormat.CHECKPOINT, ExportFormat.MODEL)
        for idx, fmt in enumerate(export_formats):
            normalized = fmt.strip().lower()
            export_formats[idx] = normalized
            if normalized not in known:
                raise TuneError("Unsupported export format: " + normalized)
def checkpoint_deleter(trial_id, runner):
    """Returns a checkpoint deleter callback for a runner."""
    if not runner:
        # Without a runner there is nothing to delete; return a no-op.
        return lambda checkpoint: None

    def delete(checkpoint):
        """Requests checkpoint deletion asynchronously.
        Args:
            checkpoint (Checkpoint): Checkpoint to delete.
        """
        # Only persistent checkpoints with a concrete path can be deleted.
        if checkpoint.storage != Checkpoint.PERSISTENT or not checkpoint.value:
            return
        logger.debug("Trial %s: Deleting checkpoint %s", trial_id,
                     checkpoint.value)
        checkpoint_path = checkpoint.value
        # Delete local copy, if any exists.
        if os.path.exists(checkpoint_path):
            try:
                shutil.rmtree(
                    TrainableUtil.find_checkpoint_dir(checkpoint_path))
            except FileNotFoundError:
                logger.warning("Checkpoint dir not found during deletion.")
        # TODO(ujvl): Batch remote deletes.
        runner.delete_checkpoint.remote(checkpoint.value)

    return delete
class Trial:
    """A trial object holds the state for one model training run.
    Trials are themselves managed by the TrialRunner class, which implements
    the event loop for submitting trial runs to a Ray cluster.
    Trials start in the PENDING state, and transition to RUNNING once started.
    On error it transitions to ERROR, otherwise TERMINATED on success.
    """
    # Valid trial status values; plain strings, compared with equality.
    PENDING = "PENDING"
    RUNNING = "RUNNING"
    PAUSED = "PAUSED"
    TERMINATED = "TERMINATED"
    ERROR = "ERROR"
    def __init__(self,
                 trainable_name,
                 config=None,
                 trial_id=None,
                 local_dir=DEFAULT_RESULTS_DIR,
                 evaluated_params=None,
                 experiment_tag="",
                 resources=None,
                 stopping_criterion=None,
                 remote_checkpoint_dir=None,
                 checkpoint_freq=0,
                 checkpoint_at_end=False,
                 sync_on_checkpoint=True,
                 keep_checkpoints_num=None,
                 checkpoint_score_attr=TRAINING_ITERATION,
                 export_formats=None,
                 restore_path=None,
                 trial_name_creator=None,
                 loggers=None,
                 sync_to_driver_fn=None,
                 max_failures=0):
        """Initialize a new trial.
        The args here take the same meaning as the command line flags defined
        in ray.tune.config_parser.
        """
        validate_trainable(trainable_name)
        # Trial config
        self.trainable_name = trainable_name
        self.trial_id = Trial.generate_id() if trial_id is None else trial_id
        self.config = config or {}
        self.local_dir = local_dir  # This remains unexpanded for syncing.
        #: Parameters that Tune varies across searches.
        self.evaluated_params = evaluated_params or {}
        self.experiment_tag = experiment_tag
        # A trainable class may dictate its own resource requirements; this
        # takes precedence and must not conflict with an explicit `resources`.
        trainable_cls = self.get_trainable_cls()
        if trainable_cls and hasattr(trainable_cls,
                                     "default_resource_request"):
            default_resources = trainable_cls.default_resource_request(
                self.config)
            if default_resources:
                if resources:
                    raise ValueError(
                        "Resources for {} have been automatically set to {} "
                        "by its `default_resource_request()` method. Please "
                        "clear the `resources_per_trial` option.".format(
                            trainable_cls, default_resources))
                resources = default_resources
        self.location = Location()
        self.resources = resources or Resources(cpu=1, gpu=0)
        self.stopping_criterion = stopping_criterion or {}
        self.loggers = loggers
        self.sync_to_driver_fn = sync_to_driver_fn
        self.verbose = True
        self.max_failures = max_failures
        # Local trial state that is updated during the run
        self.last_result = {}
        self.last_update_time = -float("inf")
        # stores in memory max/min/last result for each metric by trial
        self.metric_analysis = {}
        self.export_formats = export_formats
        self.status = Trial.PENDING
        self.start_time = None
        # logdir and logger are created lazily in init_logger().
        self.logdir = None
        self.runner = None
        self.result_logger = None
        self.last_debug = 0
        self.error_file = None
        self.error_msg = None
        self.custom_trial_name = None
        # Checkpointing fields
        if remote_checkpoint_dir:
            self.remote_checkpoint_dir_prefix = remote_checkpoint_dir
        else:
            self.remote_checkpoint_dir_prefix = None
        self.checkpoint_freq = checkpoint_freq
        self.checkpoint_at_end = checkpoint_at_end
        self.sync_on_checkpoint = sync_on_checkpoint
        # Seed the manager with the restore path (if any) as the newest
        # persistent checkpoint so has_checkpoint()/restore work immediately.
        newest_checkpoint = Checkpoint(Checkpoint.PERSISTENT, restore_path)
        self.checkpoint_manager = CheckpointManager(
            keep_checkpoints_num, checkpoint_score_attr,
            checkpoint_deleter(str(self), self.runner))
        self.checkpoint_manager.newest_checkpoint = newest_checkpoint
        # Restoration fields
        self.restoring_from = None
        self.num_failures = 0
        self.num_consecutive_start_attempts = 0
        # AutoML fields
        self.results = None
        self.best_result = None
        self.param_config = None
        self.extra_arg = None
        # Fields excluded from JSON serialization of the trial state.
        self._nonjson_fields = [
            "loggers",
            "sync_to_driver_fn",
            "results",
            "best_result",
            "param_config",
            "extra_arg",
        ]
        if trial_name_creator:
            self.custom_trial_name = trial_name_creator(self)
    @property
    def node_ip(self):
        """Hostname/IP of the node this trial is currently placed on."""
        return self.location.hostname
    @property
    def checkpoint(self):
        """The most recent checkpoint tracked by the checkpoint manager."""
        return self.checkpoint_manager.newest_checkpoint
@classmethod
def generate_id(cls):
return str(uuid.uuid1().hex)[:8]
    @property
    def remote_checkpoint_dir(self):
        """Remote dir for this trial's checkpoints, or None if not syncing."""
        assert self.logdir, "Trial {}: logdir not initialized.".format(self)
        if not self.remote_checkpoint_dir_prefix:
            return None
        # Mirror the local logdir name under the remote prefix.
        logdir_name = os.path.basename(self.logdir)
        return os.path.join(self.remote_checkpoint_dir_prefix, logdir_name)
    @classmethod
    def create_logdir(cls, identifier, local_dir):
        """Creates a unique timestamped logdir for `identifier` in local_dir."""
        local_dir = os.path.expanduser(local_dir)
        os.makedirs(local_dir, exist_ok=True)
        # mkdtemp guarantees uniqueness even for identical identifiers.
        return tempfile.mkdtemp(
            prefix="{}_{}".format(identifier[:MAX_LEN_IDENTIFIER], date_str()),
            dir=local_dir)
    def init_logger(self):
        """Init logger.
        Lazily creates the trial logdir and a UnifiedLogger; a no-op if the
        logger already exists.
        """
        if not self.result_logger:
            if not self.logdir:
                self.logdir = Trial.create_logdir(str(self), self.local_dir)
            else:
                os.makedirs(self.logdir, exist_ok=True)
            self.result_logger = UnifiedLogger(
                self.config,
                self.logdir,
                trial=self,
                loggers=self.loggers,
                sync_function=self.sync_to_driver_fn)
def update_resources(self, cpu, gpu, **kwargs):
"""EXPERIMENTAL: Updates the resource requirements.
Should only be called when the trial is not running.
Raises:
ValueError if trial status is running.
"""
if self.status is Trial.RUNNING:
raise ValueError("Cannot update resources while Trial is running.")
self.resources = Resources(cpu, gpu, **kwargs)
    def set_runner(self, runner):
        """Attaches the remote runner actor and rebinds checkpoint deletion."""
        self.runner = runner
        # Deleter captured the old runner; recreate it for the new one.
        self.checkpoint_manager.delete = checkpoint_deleter(str(self), runner)
    def set_location(self, location):
        """Sets the location of the trial."""
        self.location = location
    def set_status(self, status):
        """Sets the status of the trial."""
        self.status = status
        if status == Trial.RUNNING:
            # Record the first transition to RUNNING only.
            if self.start_time is None:
                self.start_time = time.time()
    def close_logger(self):
        """Closes logger."""
        if self.result_logger:
            self.result_logger.close()
            self.result_logger = None
    def write_error_log(self, error_msg):
        """Appends `error_msg` to the trial's error.txt and bumps failures."""
        if error_msg and self.logdir:
            self.num_failures += 1
            self.error_file = os.path.join(self.logdir, "error.txt")
            # Append so earlier failures are preserved across retries.
            with open(self.error_file, "a+") as f:
                f.write("Failure # {} (occurred at {})\n".format(
                    self.num_failures, date_str()))
                f.write(error_msg + "\n")
            self.error_msg = error_msg
def should_stop(self, result):
"""Whether the given result meets this trial's stopping criteria."""
if result.get(DONE):
return True
if callable(self.stopping_criterion):
return self.stopping_criterion(self.trial_id, result)
for criteria, stop_value in self.stopping_criterion.items():
if criteria not in result:
raise TuneError(
"Stopping criteria {} not provided in result {}.".format(
criteria, result))
elif isinstance(criteria, dict):
raise ValueError(
"Stopping criteria is now flattened by default. "
"Use forward slashes to nest values `key1/key2/key3`.")
elif result[criteria] >= stop_value:
return True
return False
    def should_checkpoint(self):
        """Whether this trial is due for checkpointing."""
        result = self.last_result or {}
        # Always checkpoint a finished trial when checkpoint_at_end is set.
        if result.get(DONE) and self.checkpoint_at_end:
            return True
        # Otherwise checkpoint on every checkpoint_freq-th iteration.
        return (self.checkpoint_freq and
                result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0)
    def has_checkpoint(self):
        """Returns True if the newest checkpoint has a concrete value."""
        return self.checkpoint.value is not None
    def clear_checkpoint(self):
        """Drops the newest checkpoint value and any in-flight restore."""
        self.checkpoint.value = None
        self.restoring_from = None
    def on_checkpoint(self, checkpoint):
        """Hook for handling checkpoints taken by the Trainable.
        Args:
            checkpoint (Checkpoint): Checkpoint taken.
        """
        if checkpoint.storage == Checkpoint.MEMORY:
            # In-memory checkpoints need no sync; track them directly.
            # TODO(ujvl): Handle this separately to avoid restoration failure.
            self.checkpoint_manager.on_checkpoint(checkpoint)
            return
        if self.sync_on_checkpoint:
            try:
                # Wait for any other syncs to finish. We need to sync again
                # after this to handle checkpoints taken mid-sync.
                self.result_logger.wait()
            except TuneError as e:
                # Errors occurring during this wait are not fatal for this
                # checkpoint, so it should just be logged.
                logger.error(
                    "Trial %s: An error occurred during the "
                    "checkpoint pre-sync wait.", str(e))
            # Force sync down and wait before tracking the new checkpoint.
            try:
                if self.result_logger.sync_down():
                    self.result_logger.wait()
                else:
                    logger.error(
                        "Trial %s: Checkpoint sync skipped. "
                        "This should not happen.", self)
            except TuneError as e:
                if issubclass(self.get_trainable_cls(), DurableTrainable):
                    # Even though rsync failed the trainable can restore
                    # from remote durable storage.
                    logger.error("Trial %s: Sync error - %s", self, str(e))
                else:
                    # If the trainable didn't have remote storage to upload
                    # to then this checkpoint may have been lost, so we
                    # shouldn't track it with the checkpoint_manager.
                    raise e
            if not issubclass(self.get_trainable_cls(), DurableTrainable):
                # Sanity check: the synced-down file must exist locally.
                if not os.path.exists(checkpoint.value):
                    raise TuneError("Trial {}: Checkpoint path {} not "
                                    "found after successful sync down.".format(
                                        self, checkpoint.value))
        self.checkpoint_manager.on_checkpoint(checkpoint)
    def on_restore(self):
        """Handles restoration completion."""
        assert self.is_restoring
        # Adopt the result recorded with the checkpoint being restored.
        self.last_result = self.restoring_from.result
        self.restoring_from = None
    def should_recover(self):
        """Returns whether the trial qualifies for retrying.
        This is if the trial has not failed more than max_failures. Note this
        may return true even when there is no checkpoint, either because
        `self.checkpoint_freq` is `0` or because the trial failed before
        a checkpoint has been made.
        """
        # A negative max_failures means retry indefinitely.
        return self.num_failures < self.max_failures or self.max_failures < 0
    def update_last_result(self, result, terminate=False):
        """Records `result` as this trial's latest result.

        Side effects, in order: tags the result with trial_id/done (and
        the experiment tag if set), optionally pretty-prints it to stdout
        (throttled by DEBUG_PRINT_INTERVAL unless terminating), updates
        the trial's location from the reported node_ip/pid, forwards the
        result to the trial's logger, and folds every numeric metric into
        `metric_analysis` (running max/min/last per flattened metric key).

        Args:
            result (dict): Latest training result; mutated in place.
            terminate (bool): Marks this result as the final one (`done`).
        """
        result.update(trial_id=self.trial_id, done=terminate)
        if self.experiment_tag:
            result.update(experiment_tag=self.experiment_tag)
        if self.verbose and (terminate or time.time() - self.last_debug >
                             DEBUG_PRINT_INTERVAL):
            print("Result for {}:".format(self))
            print("  {}".format(pretty_print(result).replace("\n", "\n  ")))
            self.last_debug = time.time()
        self.set_location(Location(result.get("node_ip"), result.get("pid")))
        self.last_result = result
        self.last_update_time = time.time()
        self.result_logger.on_result(self.last_result)
        # Track per-metric running statistics over flattened (dotted) keys;
        # non-numeric values are ignored.
        for metric, value in flatten_dict(result).items():
            if isinstance(value, Number):
                if metric not in self.metric_analysis:
                    self.metric_analysis[metric] = {
                        "max": value,
                        "min": value,
                        "last": value
                    }
                else:
                    self.metric_analysis[metric]["max"] = max(
                        value, self.metric_analysis[metric]["max"])
                    self.metric_analysis[metric]["min"] = min(
                        value, self.metric_analysis[metric]["min"])
                    self.metric_analysis[metric]["last"] = value
    def get_trainable_cls(self):
        """Resolves ``trainable_name`` to its registered trainable class."""
        return get_trainable_cls(self.trainable_name)
    def set_verbose(self, verbose):
        # Toggles per-result console printing (see update_last_result).
        self.verbose = verbose
    def is_finished(self):
        """Returns True if the trial is in a terminal state (ERROR/TERMINATED)."""
        return self.status in [Trial.ERROR, Trial.TERMINATED]
    @property
    def is_restoring(self):
        """True while a restore from checkpoint is in flight for this trial."""
        return self.restoring_from is not None
    def __repr__(self):
        # Reuse __str__ so logs and REPL output are consistent.
        return str(self)
def __str__(self):
"""Combines ``env`` with ``trainable_name`` and ``trial_id``.
Can be overridden with a custom string creator.
"""
if self.custom_trial_name:
return self.custom_trial_name
if "env" in self.config:
env = self.config["env"]
if isinstance(env, type):
env = env.__name__
identifier = "{}_{}".format(self.trainable_name, env)
else:
identifier = self.trainable_name
identifier += "_" + self.trial_id
return identifier.replace("/", "_")
def __getstate__(self):
"""Memento generator for Trial.
Sets RUNNING trials to PENDING, and flushes the result logger.
Note this can only occur if the trial holds a PERSISTENT checkpoint.
"""
assert self.checkpoint.storage == Checkpoint.PERSISTENT, (
"Checkpoint must not be in-memory.")
state = self.__dict__.copy()
state["resources"] = resources_to_json(self.resources)
for key in self._nonjson_fields:
state[key] = binary_to_hex(cloudpickle.dumps(state.get(key)))
state["runner"] = None
state["result_logger"] = None
if self.result_logger:
self.result_logger.flush(sync_down=False)
state["__logger_started__"] = True
else:
state["__logger_started__"] = False
return copy.deepcopy(state)
def __setstate__(self, state):
logger_started = state.pop("__logger_started__")
state["resources"] = json_to_resources(state["resources"])
if state["status"] == Trial.RUNNING:
state["status"] = Trial.PENDING
for key in self._nonjson_fields:
state[key] = cloudpickle.loads(hex_to_binary(state[key]))
self.__dict__.update(state)
validate_trainable(self.trainable_name)
if logger_started:
self.init_logger()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/trial_executor.py
|
Python
|
# coding: utf-8
import logging
from ray.tune.trial import Trial, Checkpoint
from ray.tune.error import TuneError
logger = logging.getLogger(__name__)
class TrialExecutor:
    """Manages platform-specific details such as resource handling
    and starting/stopping trials.

    Abstract base: resource queries and trial start/stop/save/restore are
    implemented by subclasses (e.g. RayTrialExecutor).
    """
    def __init__(self, queue_trials=False):
        """Initializes a new TrialExecutor.
        Args:
            queue_trials (bool): Whether to queue trials when the cluster does
                not currently have enough resources to launch one. This should
                be set to True when running on an autoscaling cluster to enable
                automatic scale-up.
        """
        self._queue_trials = queue_trials
        # trial_id -> latest trial.__getstate__() snapshot; this is the
        # mapping served by get_checkpoints().
        self._cached_trial_state = {}
    def set_status(self, trial, status):
        """Sets status and checkpoints metadata if needed.
        Only checkpoints metadata if trial status is a terminal condition.
        PENDING, PAUSED, and RUNNING switches have checkpoints taken care of
        in the TrialRunner.
        Args:
            trial (Trial): Trial to checkpoint.
            status (Trial.status): Status to set trial to.
        """
        if trial.status == status:
            logger.debug("Trial %s: Status %s unchanged.", trial, trial.status)
        else:
            logger.debug("Trial %s: Changing status from %s to %s.", trial,
                         trial.status, status)
        trial.set_status(status)
        # Terminal outcomes are persisted immediately so a resumed run does
        # not lose the final state of this trial.
        if status in [Trial.TERMINATED, Trial.ERROR]:
            self.try_checkpoint_metadata(trial)
    def try_checkpoint_metadata(self, trial):
        """Checkpoints metadata.

        Best-effort: failures are logged, never raised.
        Args:
            trial (Trial): Trial to checkpoint.
        """
        if trial.checkpoint.storage == Checkpoint.MEMORY:
            logger.debug("Trial %s: Not saving data for memory checkpoint.",
                         trial)
            return
        try:
            logger.debug("Trial %s: Saving trial metadata.", trial)
            self._cached_trial_state[trial.trial_id] = trial.__getstate__()
        except Exception:
            logger.exception("Trial %s: Error checkpointing trial metadata.",
                             trial)
    def get_checkpoints(self):
        """Returns a copy of mapping of the trial ID to pickled metadata."""
        return self._cached_trial_state.copy()
    def has_resources(self, resources):
        """Returns whether this runner has at least the specified resources."""
        raise NotImplementedError("Subclasses of TrialExecutor must provide "
                                  "has_resources() method")
    def start_trial(self, trial, checkpoint=None):
        """Starts the trial restoring from checkpoint if checkpoint is provided.
        Args:
            trial (Trial): Trial to be started.
            checkpoint(Checkpoint): A Python object or path storing the state
            of trial.
        """
        raise NotImplementedError("Subclasses of TrialExecutor must provide "
                                  "start_trial() method")
    def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
        """Stops the trial.
        Stops this trial, releasing all allocating resources.
        If stopping the trial fails, the run will be marked as terminated
        in error, but no exception will be thrown.
        Args:
            trial (Trial): Trial to stop.
            error (bool): Whether to mark this trial as terminated in error.
            error_msg (str): Optional error message.
            stop_logger (bool): Whether to shut down the trial logger.
        """
        raise NotImplementedError("Subclasses of TrialExecutor must provide "
                                  "stop_trial() method")
    def continue_training(self, trial):
        """Continues the training of this trial."""
        pass
    def pause_trial(self, trial):
        """Pauses the trial.
        We want to release resources (specifically GPUs) when pausing an
        experiment. This results in PAUSED state that similar to TERMINATED.
        """
        assert trial.status == Trial.RUNNING, trial.status
        try:
            # Snapshot to memory so the trial can resume in-process, then
            # release its resources by stopping it. The logger stays open
            # since the trial is expected to continue later.
            self.save(trial, Checkpoint.MEMORY)
            self.stop_trial(trial, stop_logger=False)
            self.set_status(trial, Trial.PAUSED)
        except Exception:
            logger.exception("Error pausing runner.")
            self.set_status(trial, Trial.ERROR)
    def unpause_trial(self, trial):
        """Sets PAUSED trial to pending to allow scheduler to start."""
        assert trial.status == Trial.PAUSED, trial.status
        self.set_status(trial, Trial.PENDING)
    def resume_trial(self, trial):
        """Resumes PAUSED trials. This is a blocking call."""
        assert trial.status == Trial.PAUSED, trial.status
        self.start_trial(trial)
    def reset_trial(self, trial, new_config, new_experiment_tag):
        """Tries to invoke `Trainable.reset_config()` to reset trial.
        Args:
            trial (Trial): Trial to be reset.
            new_config (dict): New configuration for Trial
                trainable.
            new_experiment_tag (str): New experiment name
                for trial.
        Returns:
            True if `reset_config` is successful else False.
        """
        raise NotImplementedError
    def get_running_trials(self):
        """Returns all running trials."""
        raise NotImplementedError("Subclasses of TrialExecutor must provide "
                                  "get_running_trials() method")
    def on_step_begin(self, trial_runner):
        """A hook called before running one step of the trial event loop."""
        pass
    def on_step_end(self, trial_runner):
        """A hook called after running one step of the trial event loop."""
        pass
    def on_no_available_trials(self, trial_runner):
        """Raises guidance when no trial can be launched.

        With `queue_trials` enabled this is a no-op (trials simply wait for
        the autoscaler). Otherwise a PENDING trial the cluster cannot fit,
        or a stranded PAUSED trial, is surfaced as a TuneError.
        """
        if self._queue_trials:
            return
        for trial in trial_runner.get_trials():
            if trial.status == Trial.PENDING:
                if not self.has_resources(trial.resources):
                    raise TuneError(
                        ("Insufficient cluster resources to launch trial: "
                         "trial requested {} but the cluster has only {}. "
                         "Pass `queue_trials=True` in "
                         "ray.tune.run() or on the command "
                         "line to queue trials until the cluster scales "
                         "up or resources become available. {}").format(
                             trial.resources.summary_string(),
                             self.resource_string(),
                             trial.get_trainable_cls().resource_help(
                                 trial.config)))
            elif trial.status == Trial.PAUSED:
                raise TuneError("There are paused trials, but no more pending "
                                "trials with sufficient resources.")
    def get_next_available_trial(self):
        """Blocking call that waits until one result is ready.
        Returns:
            Trial object that is ready for intermediate processing.
        """
        raise NotImplementedError
    def get_next_failed_trial(self):
        """Non-blocking call that detects and returns one failed trial.
        Returns:
            A Trial object that is ready for failure processing. None if
            no failure detected.
        """
        raise NotImplementedError
    def fetch_result(self, trial):
        """Fetches one result for the trial.
        Assumes the trial is running.
        Return:
            Result object for the trial.
        """
        raise NotImplementedError
    def debug_string(self):
        """Returns a human readable message for printing to the console."""
        raise NotImplementedError
    def resource_string(self):
        """Returns a string describing the total resources available."""
        raise NotImplementedError
    def restore(self, trial, checkpoint=None):
        """Restores training state from a checkpoint.
        If checkpoint is None, try to restore from trial.checkpoint.
        If restoring fails, the trial status will be set to ERROR.
        Args:
            trial (Trial): Trial to be restored.
            checkpoint (Checkpoint): Checkpoint to restore from.
        Return:
            False if error occurred, otherwise return True.
        """
        raise NotImplementedError("Subclasses of TrialExecutor must provide "
                                  "restore() method")
    def save(self, trial, storage=Checkpoint.PERSISTENT, result=None):
        """Saves training state of this trial to a checkpoint.
        If result is None, this trial's last result will be used.
        Args:
            trial (Trial): The state of this trial to be saved.
            storage (str): Where to store the checkpoint. Defaults to
                PERSISTENT.
            result (dict): The state of this trial as a dictionary to be saved.
        Return:
            A Python object if storage==Checkpoint.MEMORY otherwise
            a path to the checkpoint.
        """
        raise NotImplementedError("Subclasses of TrialExecutor must provide "
                                  "save() method")
    def export_trial_if_needed(self, trial):
        """Exports model of this trial based on trial.export_formats.
        Args:
            trial (Trial): The state of this trial to be saved.
        Return:
            A dict that maps ExportFormats to successfully exported models.
        """
        raise NotImplementedError("Subclasses of TrialExecutor must provide "
                                  "export_trial_if_needed() method")
    def has_gpus(self):
        """Returns True if GPUs are detected on the cluster."""
        # NOTE(review): base implementation returns None (falsy), not False —
        # subclasses are expected to override with a real probe.
        return None
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/trial_runner.py
|
Python
|
import click
from datetime import datetime
import json
import logging
import os
import time
import traceback
import types
import ray.cloudpickle as cloudpickle
from ray.tune import TuneError
from ray.tune.progress_reporter import trial_progress_str
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.result import (TIME_THIS_ITER_S, RESULT_DUPLICATE,
SHOULD_CHECKPOINT)
from ray.tune.syncer import get_cloud_syncer
from ray.tune.trial import Checkpoint, Trial
from ray.tune.schedulers import FIFOScheduler, TrialScheduler
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.utils import warn_if_slow, flatten_dict
from ray.tune.web_server import TuneServer
from ray.utils import binary_to_hex, hex_to_binary
MAX_DEBUG_TRIALS = 20
logger = logging.getLogger(__name__)
def _find_newest_ckpt(ckpt_dir):
"""Returns path to most recently modified checkpoint."""
full_paths = [
os.path.join(ckpt_dir, fname) for fname in os.listdir(ckpt_dir)
if fname.startswith("experiment_state") and fname.endswith(".json")
]
return max(full_paths)
class _TuneFunctionEncoder(json.JSONEncoder):
    """JSON encoder that cloudpickles objects the default encoder rejects.

    Functions are always cloudpickled; any other non-serializable object
    falls back to cloudpickle only after the standard encoder fails. The
    payload is a dict tagged ``_type == "CLOUDPICKLE_FALLBACK"`` that
    ``_TuneFunctionDecoder`` knows how to revive.
    """

    def default(self, obj):
        if not isinstance(obj, types.FunctionType):
            try:
                return super(_TuneFunctionEncoder, self).default(obj)
            except Exception:
                logger.debug("Unable to encode. Falling back to cloudpickle.")
        return self._to_cloudpickle(obj)

    def _to_cloudpickle(self, obj):
        payload = binary_to_hex(cloudpickle.dumps(obj))
        return {
            "_type": "CLOUDPICKLE_FALLBACK",
            "value": payload
        }
class _TuneFunctionDecoder(json.JSONDecoder):
    """JSON decoder that revives ``CLOUDPICKLE_FALLBACK`` payloads.

    Counterpart to ``_TuneFunctionEncoder``: dicts tagged with
    ``_type == "CLOUDPICKLE_FALLBACK"`` are unpickled back into the
    original objects; every other dict is returned untouched.
    """

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(
            self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        if obj.get("_type") != "CLOUDPICKLE_FALLBACK":
            return obj
        return self._from_cloudpickle(obj)

    def _from_cloudpickle(self, obj):
        blob = hex_to_binary(obj["value"])
        return cloudpickle.loads(blob)
class TrialRunner:
    """A TrialRunner implements the event loop for scheduling trials on Ray.
    Example:
        runner = TrialRunner()
        runner.add_trial(Trial(...))
        runner.add_trial(Trial(...))
        while not runner.is_finished():
            runner.step()
            print(runner.debug_string())
    The main job of TrialRunner is scheduling trials to efficiently use cluster
    resources, without overloading the cluster.
    While Ray itself provides resource management for tasks and actors, this is
    not sufficient when scheduling trials that may instantiate multiple actors.
    This is because if insufficient resources are available, concurrent trials
    could deadlock waiting for new resources to become available. Furthermore,
    oversubscribing the cluster could degrade training performance, leading to
    misleading benchmark results.
    """
    # Filename template for experiment-state checkpoints; formatted with the
    # session timestamp, so lexicographic order equals chronological order.
    CKPT_FILE_TMPL = "experiment_state-{}.json"
    VALID_RESUME_TYPES = [True, "LOCAL", "REMOTE", "PROMPT"]
    def __init__(self,
                 search_alg=None,
                 scheduler=None,
                 launch_web_server=False,
                 local_checkpoint_dir=None,
                 remote_checkpoint_dir=None,
                 sync_to_cloud=None,
                 resume=False,
                 server_port=TuneServer.DEFAULT_PORT,
                 verbose=True,
                 checkpoint_period=10,
                 trial_executor=None):
        """Initializes a new TrialRunner.
        Args:
            search_alg (SearchAlgorithm): SearchAlgorithm for generating
                Trial objects.
            scheduler (TrialScheduler): Defaults to FIFOScheduler.
            launch_web_server (bool): Flag for starting TuneServer
            local_checkpoint_dir (str): Path where
                global checkpoints are stored and restored from.
            remote_checkpoint_dir (str): Remote path where
                global checkpoints are stored and restored from. Used
                if `resume` == REMOTE.
            resume (str|False): see `tune.py:run`.
            sync_to_cloud (func|str): See `tune.py:run`.
            server_port (int): Port number for launching TuneServer.
            verbose (bool): Flag for verbosity. If False, trial results
                will not be output.
            checkpoint_period (int): Minimum seconds between experiment
                checkpoints (see `checkpoint()`).
            trial_executor (TrialExecutor): Defaults to RayTrialExecutor.
        """
        self._search_alg = search_alg or BasicVariantGenerator()
        self._scheduler_alg = scheduler or FIFOScheduler()
        self.trial_executor = trial_executor or RayTrialExecutor()
        # For debugging, it may be useful to halt trials after some time has
        # elapsed. TODO(ekl) consider exposing this in the API.
        self._global_time_limit = float(
            os.environ.get("TRIALRUNNER_WALLTIME_LIMIT", float("inf")))
        self._total_time = 0
        self._iteration = 0
        self._verbose = verbose
        self._server = None
        self._server_port = server_port
        if launch_web_server:
            self._server = TuneServer(self, self._server_port)
        self._trials = []
        self._stop_queue = []
        self._local_checkpoint_dir = local_checkpoint_dir
        if self._local_checkpoint_dir:
            os.makedirs(self._local_checkpoint_dir, exist_ok=True)
        self._remote_checkpoint_dir = remote_checkpoint_dir
        self._syncer = get_cloud_syncer(local_checkpoint_dir,
                                        remote_checkpoint_dir, sync_to_cloud)
        self._resumed = False
        if self._validate_resume(resume_type=resume):
            try:
                self.resume()
                logger.info("Resuming trial.")
                self._resumed = True
            except Exception:
                logger.exception(
                    "Runner restore failed. Restarting experiment.")
        else:
            logger.debug("Starting a new experiment.")
        self._start_time = time.time()
        # -inf forces the very first checkpoint() call to write immediately.
        self._last_checkpoint_time = -float("inf")
        self._checkpoint_period = checkpoint_period
        self._session_str = datetime.fromtimestamp(
            self._start_time).strftime("%Y-%m-%d_%H-%M-%S")
        self.checkpoint_file = None
        if self._local_checkpoint_dir:
            self.checkpoint_file = os.path.join(
                self._local_checkpoint_dir,
                TrialRunner.CKPT_FILE_TMPL.format(self._session_str))
    @property
    def scheduler_alg(self):
        return self._scheduler_alg
    def _validate_resume(self, resume_type):
        """Checks whether to resume experiment.
        Args:
            resume_type: One of True, "REMOTE", "LOCAL", "PROMPT".
        Returns:
            bool: True if a resume should be attempted.
        """
        if not resume_type:
            return False
        assert resume_type in self.VALID_RESUME_TYPES, (
            "resume_type {} is not one of {}".format(resume_type,
                                                     self.VALID_RESUME_TYPES))
        # Not clear if we need this assertion, since we should always have a
        # local checkpoint dir.
        assert self._local_checkpoint_dir or self._remote_checkpoint_dir
        if resume_type in [True, "LOCAL", "PROMPT"]:
            if not self.checkpoint_exists(self._local_checkpoint_dir):
                raise ValueError("Called resume when no checkpoint exists "
                                 "in local directory.")
            elif resume_type == "PROMPT":
                if click.confirm("Resume from local directory?"):
                    return True
        if resume_type in ["REMOTE", "PROMPT"]:
            if resume_type == "PROMPT" and not click.confirm(
                    "Try downloading from remote directory?"):
                return False
            if not self._remote_checkpoint_dir:
                raise ValueError(
                    "Called resume from remote without remote directory.")
            # Try syncing down the upload directory.
            logger.info("Downloading from %s", self._remote_checkpoint_dir)
            # TODO(ujvl): Note that this syncs down the entire directory,
            # which may also contain trial checkpoints. We should selectively
            # sync the necessary files instead.
            self._syncer.sync_down_if_needed()
            if not self.checkpoint_exists(self._local_checkpoint_dir):
                raise ValueError("Called resume when no checkpoint exists "
                                 "in remote or local directory.")
        return True
    @classmethod
    def checkpoint_exists(cls, directory):
        # True iff `directory` contains an experiment_state*.json file.
        if not os.path.exists(directory):
            return False
        return any(
            (fname.startswith("experiment_state") and fname.endswith(".json"))
            for fname in os.listdir(directory))
    def add_experiment(self, experiment):
        # On resume, trials come from the restored checkpoint instead of the
        # (possibly changed) experiment specification.
        if not self._resumed:
            self._search_alg.add_configurations([experiment])
        else:
            logger.info("TrialRunner resumed, ignoring new add_experiment.")
    def checkpoint(self, force=False):
        """Saves execution state to `self._local_checkpoint_dir`.
        Overwrites the current session checkpoint, which starts when self
        is instantiated. Throttle depends on self._checkpoint_period.
        Args:
            force (bool): Forces a checkpoint despite checkpoint_period.
        """
        if not self._local_checkpoint_dir:
            return
        now = time.time()
        if now - self._last_checkpoint_time < self._checkpoint_period and (
                not force):
            return
        self._last_checkpoint_time = now
        runner_state = {
            "checkpoints": list(
                self.trial_executor.get_checkpoints().values()),
            "runner_data": self.__getstate__(),
            "stats": {
                "start_time": self._start_time,
                "timestamp": self._last_checkpoint_time
            }
        }
        # Write to a temp file then rename so readers never observe a
        # partially written checkpoint.
        tmp_file_name = os.path.join(self._local_checkpoint_dir,
                                     ".tmp_checkpoint")
        with open(tmp_file_name, "w") as f:
            json.dump(runner_state, f, indent=2, cls=_TuneFunctionEncoder)
        os.rename(tmp_file_name, self.checkpoint_file)
        if force:
            self._syncer.sync_up()
        else:
            self._syncer.sync_up_if_needed()
        return self._local_checkpoint_dir
    def resume(self):
        """Resumes all checkpointed trials from previous run.
        Requires user to manually re-register their objects. Also stops
        all ongoing trials.
        """
        newest_ckpt_path = _find_newest_ckpt(self._local_checkpoint_dir)
        with open(newest_ckpt_path, "r") as f:
            runner_state = json.load(f, cls=_TuneFunctionDecoder)
            self.checkpoint_file = newest_ckpt_path
        logger.warning("".join([
            "Attempting to resume experiment from {}. ".format(
                self._local_checkpoint_dir), "This feature is experimental, "
            "and may not work with all search algorithms. ",
            "This will ignore any new changes to the specification."
        ]))
        self.__setstate__(runner_state["runner_data"])
        trials = []
        for trial_cp in runner_state["checkpoints"]:
            new_trial = Trial(trial_cp["trainable_name"])
            new_trial.__setstate__(trial_cp)
            trials += [new_trial]
        # Most recently updated trials are re-added first.
        for trial in sorted(
                trials, key=lambda t: t.last_update_time, reverse=True):
            self.add_trial(trial)
    def is_finished(self):
        """Returns whether all trials have finished running."""
        if self._total_time > self._global_time_limit:
            logger.warning("Exceeded global time limit {} / {}".format(
                self._total_time, self._global_time_limit))
            return True
        trials_done = all(trial.is_finished() for trial in self._trials)
        return trials_done and self._search_alg.is_finished()
    def step(self):
        """Runs one step of the trial event loop.
        Callers should typically run this method repeatedly in a loop. They
        may inspect or modify the runner's state in between calls to step().
        """
        if self.is_finished():
            raise TuneError("Called step when all trials finished?")
        with warn_if_slow("on_step_begin"):
            self.trial_executor.on_step_begin(self)
        next_trial = self._get_next_trial()  # blocking
        if next_trial is not None:
            with warn_if_slow("start_trial"):
                self.trial_executor.start_trial(next_trial)
        elif self.trial_executor.get_running_trials():
            self._process_events()  # blocking
        else:
            self.trial_executor.on_no_available_trials(self)
        # Checkpointing failures must not kill the experiment loop.
        try:
            with warn_if_slow("experiment_checkpoint"):
                self.checkpoint()
        except Exception:
            logger.exception("Trial Runner checkpointing failed.")
        self._iteration += 1
        if self._server:
            with warn_if_slow("server"):
                self._process_requests()
            if self.is_finished():
                self._server.shutdown()
        with warn_if_slow("on_step_end"):
            self.trial_executor.on_step_end(self)
    def get_trial(self, tid):
        # Returns the trial with the given trial_id, or None.
        trial = [t for t in self._trials if t.trial_id == tid]
        return trial[0] if trial else None
    def get_trials(self):
        """Returns the list of trials managed by this TrialRunner.
        Note that the caller usually should not mutate trial state directly.
        """
        return self._trials
    def add_trial(self, trial):
        """Adds a new trial to this TrialRunner.
        Trials may be added at any time.
        Args:
            trial (Trial): Trial to queue.
        """
        trial.set_verbose(self._verbose)
        self._trials.append(trial)
        with warn_if_slow("scheduler.on_trial_add"):
            self._scheduler_alg.on_trial_add(self, trial)
        self.trial_executor.try_checkpoint_metadata(trial)
    def debug_string(self, delim="\n"):
        # Aggregates scheduler, executor, and per-trial progress summaries.
        messages = [
            self._scheduler_alg.debug_string(),
            self.trial_executor.debug_string(),
            trial_progress_str(self.get_trials()),
        ]
        return delim.join(messages)
    def has_resources(self, resources):
        """Returns whether this runner has at least the specified resources."""
        return self.trial_executor.has_resources(resources)
    def _get_next_trial(self):
        """Replenishes queue.
        Blocks if all trials queued have finished, but search algorithm is
        still not finished.
        Returns:
            Trial to run, or None.
        """
        trials_done = all(trial.is_finished() for trial in self._trials)
        wait_for_trial = trials_done and not self._search_alg.is_finished()
        self._update_trial_queue(blocking=wait_for_trial)
        with warn_if_slow("choose_trial_to_run"):
            trial = self._scheduler_alg.choose_trial_to_run(self)
        return trial
    def _process_events(self):
        # Prefer handling a detected failure (e.g. lost node) over waiting
        # for the next regular result.
        failed_trial = self.trial_executor.get_next_failed_trial()
        if failed_trial:
            error_msg = (
                "{} (IP: {}) detected as stale. This is likely because the "
                "node was lost").format(failed_trial, failed_trial.node_ip)
            logger.info(error_msg)
            with warn_if_slow("process_failed_trial"):
                self._process_trial_failure(failed_trial, error_msg=error_msg)
        else:
            # TODO(ujvl): Consider combining get_next_available_trial and
            # fetch_result functionality so that we don't timeout on fetch.
            trial = self.trial_executor.get_next_available_trial()  # blocking
            if trial.is_restoring:
                with warn_if_slow("process_trial_restore"):
                    self._process_trial_restore(trial)
            else:
                with warn_if_slow("process_trial"):
                    self._process_trial(trial)
    def _process_trial(self, trial):
        """Processes a trial result.

        Fetches the result, notifies scheduler and search algorithm,
        checkpoints the trial if needed, and applies the scheduler's
        decision (CONTINUE/PAUSE/STOP). Any error routes the trial into
        failure handling.
        """
        try:
            result = self.trial_executor.fetch_result(trial)
            is_duplicate = RESULT_DUPLICATE in result
            # TrialScheduler and SearchAlgorithm still receive a
            # notification because there may be special handling for
            # the `on_trial_complete` hook.
            if is_duplicate:
                logger.debug("Trial finished without logging 'done'.")
                result = trial.last_result
                result.update(done=True)
            self._total_time += result.get(TIME_THIS_ITER_S, 0)
            flat_result = flatten_dict(result)
            if trial.should_stop(flat_result):
                # Hook into scheduler
                self._scheduler_alg.on_trial_complete(self, trial, flat_result)
                self._search_alg.on_trial_complete(
                    trial.trial_id, result=flat_result)
                decision = TrialScheduler.STOP
            else:
                with warn_if_slow("scheduler.on_trial_result"):
                    decision = self._scheduler_alg.on_trial_result(
                        self, trial, flat_result)
                with warn_if_slow("search_alg.on_trial_result"):
                    self._search_alg.on_trial_result(trial.trial_id,
                                                     flat_result)
                if decision == TrialScheduler.STOP:
                    with warn_if_slow("search_alg.on_trial_complete"):
                        self._search_alg.on_trial_complete(
                            trial.trial_id,
                            result=flat_result,
                            early_terminated=True)
            if not is_duplicate:
                trial.update_last_result(
                    result, terminate=(decision == TrialScheduler.STOP))
            # Checkpoints to disk. This should be checked even if
            # the scheduler decision is STOP or PAUSE. Note that
            # PAUSE only checkpoints to memory and does not update
            # the global checkpoint state.
            self._checkpoint_trial_if_needed(
                trial, force=result.get(SHOULD_CHECKPOINT, False))
            if decision == TrialScheduler.CONTINUE:
                self.trial_executor.continue_training(trial)
            elif decision == TrialScheduler.PAUSE:
                self.trial_executor.pause_trial(trial)
            elif decision == TrialScheduler.STOP:
                self.trial_executor.export_trial_if_needed(trial)
                self.trial_executor.stop_trial(trial)
            else:
                assert False, "Invalid scheduling decision: {}".format(
                    decision)
        except Exception:
            logger.exception("Trial %s: Error processing event.", trial)
            self._process_trial_failure(trial, traceback.format_exc())
    def _process_trial_restore(self, trial):
        """Processes a trial restore.
        Args:
            trial: Trial being restored.
        """
        logger.debug("Trial %s: Processing trial restore.", trial)
        try:
            self.trial_executor.fetch_result(trial)
            trial.on_restore()
            logger.debug("Trial %s: Restore processed successfully", trial)
            self.trial_executor.set_status(trial, Trial.RUNNING)
            self.trial_executor.continue_training(trial)
        except Exception:
            logger.exception("Trial %s: Error processing restore.", trial)
            self._process_trial_failure(trial, traceback.format_exc())
    def _process_trial_failure(self, trial, error_msg):
        """Handle trial failure.
        Attempt trial recovery if possible, clean up state otherwise.
        Args:
            trial (Trial): Failed trial.
            error_msg (str): Error message prior to invoking this method.
        """
        if trial.status == Trial.RUNNING:
            if trial.should_recover():
                self._try_recover(trial, error_msg)
            else:
                self._scheduler_alg.on_trial_error(self, trial)
                self._search_alg.on_trial_complete(trial.trial_id, error=True)
                self.trial_executor.stop_trial(
                    trial, error=True, error_msg=error_msg)
    def _checkpoint_trial_if_needed(self, trial, force=False):
        """Checkpoints trial based off trial.last_result."""
        if trial.should_checkpoint() or force:
            # Save trial runtime if possible
            if trial.runner:
                self.trial_executor.save(trial, storage=Checkpoint.PERSISTENT)
            self.trial_executor.try_checkpoint_metadata(trial)
    def _try_recover(self, trial, error_msg):
        """Tries to recover trial.
        Notifies SearchAlgorithm and Scheduler if failure to recover.
        Args:
            trial (Trial): Trial to recover.
            error_msg (str): Error message from prior to invoking this method.
        """
        if trial.is_restoring:
            # Restore was unsuccessful, try again without checkpoint.
            trial.clear_checkpoint()
        self.trial_executor.stop_trial(
            trial,
            error=error_msg is not None,
            error_msg=error_msg,
            stop_logger=False)
        trial.result_logger.flush()
        if self.trial_executor.has_resources(trial.resources):
            logger.info(
                "Trial %s: Attempting to restore "
                "trial state from last checkpoint.", trial)
            self.trial_executor.start_trial(trial)
            if trial.status == Trial.ERROR:
                logger.exception(
                    "Trial %s: Error restoring trial from checkpoint, abort.",
                    trial)
                self._scheduler_alg.on_trial_error(self, trial)
                self._search_alg.on_trial_complete(trial.trial_id, error=True)
            else:
                logger.debug("Trial %s: Restore dispatched correctly.", trial)
        else:
            logger.debug("Trial %s: Notifying Scheduler and requeueing.",
                         trial)
            self._requeue_trial(trial)
    def _requeue_trial(self, trial):
        """Notification to TrialScheduler and requeue trial.
        This does not notify the SearchAlgorithm because the function
        evaluation is still in progress.
        """
        self._scheduler_alg.on_trial_error(self, trial)
        self.trial_executor.set_status(trial, Trial.PENDING)
        # TODO(rliaw): Right now, this pushes the trial to the end of queue
        # because restoration can be expensive. However, this is not
        # ideal since it just hides the issue - a better fix would
        # be to use an actor table to detect the IP of the Trainable
        # and rsync the files there.
        # See https://github.com/ray-project/ray/issues/5168
        self._trials.pop(self._trials.index(trial))
        self._trials.append(trial)
        with warn_if_slow("scheduler.on_trial_add"):
            self._scheduler_alg.on_trial_add(self, trial)
    def _update_trial_queue(self, blocking=False, timeout=600):
        """Adds next trials to queue if possible.
        Note that the timeout is currently unexposed to the user.
        Args:
            blocking (bool): Blocks until either a trial is available
                or is_finished (timeout or search algorithm finishes).
            timeout (int): Seconds before blocking times out.
        """
        trials = self._search_alg.next_trials()
        if blocking and not trials:
            start = time.time()
            # Checking `is_finished` instead of _search_alg.is_finished
            # is fine because blocking only occurs if all trials are
            # finished and search_algorithm is not yet finished
            while (not trials and not self.is_finished()
                   and time.time() - start < timeout):
                logger.info("Blocking for next trial...")
                trials = self._search_alg.next_trials()
                time.sleep(1)
        for trial in trials:
            self.add_trial(trial)
    def request_stop_trial(self, trial):
        # Queues a stop request; serviced by _process_requests().
        self._stop_queue.append(trial)
    def _process_requests(self):
        while self._stop_queue:
            t = self._stop_queue.pop()
            self.stop_trial(t)
    def stop_trial(self, trial):
        """Stops trial.
        Trials may be stopped at any time. If trial is in state PENDING
        or PAUSED, calls `on_trial_remove` for scheduler and
        `on_trial_complete(..., early_terminated=True) for search_alg.
        Otherwise waits for result for the trial and calls
        `on_trial_complete` for scheduler and search_alg if RUNNING.
        """
        error = False
        error_msg = None
        if trial.status in [Trial.ERROR, Trial.TERMINATED]:
            return
        elif trial.status in [Trial.PENDING, Trial.PAUSED]:
            self._scheduler_alg.on_trial_remove(self, trial)
            self._search_alg.on_trial_complete(
                trial.trial_id, early_terminated=True)
        elif trial.status is Trial.RUNNING:
            try:
                result = self.trial_executor.fetch_result(trial)
                trial.update_last_result(result, terminate=True)
                self._scheduler_alg.on_trial_complete(self, trial, result)
                self._search_alg.on_trial_complete(
                    trial.trial_id, result=result)
            except Exception:
                error_msg = traceback.format_exc()
                logger.exception("Error processing event.")
                self._scheduler_alg.on_trial_error(self, trial)
                self._search_alg.on_trial_complete(trial.trial_id, error=True)
                error = True
        self.trial_executor.stop_trial(trial, error=error, error_msg=error_msg)
    def __getstate__(self):
        """Gets state for trial.
        Note that this is not used as a pickling override as it
        does not have all fields.
        """
        state = self.__dict__.copy()
        # Drop live/unpicklable members; __setstate__ reconstructs or the
        # caller (resume) re-creates them.
        for k in [
                "_trials",
                "_stop_queue",
                "_server",
                "_search_alg",
                "_scheduler_alg",
                "trial_executor",
                "_syncer",
        ]:
            del state[k]
        state["launch_web_server"] = bool(self._server)
        return state
    def __setstate__(self, state):
        launch_web_server = state.pop("launch_web_server")
        # Use session_str from previous checkpoint if does not exist
        session_str = state.pop("_session_str")
        self.__dict__.setdefault("_session_str", session_str)
        # Use start_time from previous checkpoint if does not exist
        start_time = state.pop("_start_time")
        self.__dict__.setdefault("_start_time", start_time)
        self.__dict__.update(state)
        if launch_web_server:
            self._server = TuneServer(self, self._server_port)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/tune.py
|
Python
|
import logging
import time
import six
from ray.tune.error import TuneError
from ray.tune.experiment import convert_to_experiment_list, Experiment
from ray.tune.analysis import ExperimentAnalysis
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial, DEBUG_PRINT_INTERVAL
from ray.tune.trainable import Trainable
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.registry import get_trainable_cls
from ray.tune.syncer import wait_for_sync
from ray.tune.trial_runner import TrialRunner
from ray.tune.progress_reporter import CLIReporter, JupyterNotebookReporter
from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
FIFOScheduler, MedianStoppingRule)
from ray.tune.web_server import TuneServer
logger = logging.getLogger(__name__)
# Mapping of user-facing scheduler names (accepted by `_make_scheduler`)
# to their TrialScheduler implementations.
_SCHEDULERS = {
    "FIFO": FIFOScheduler,
    "MedianStopping": MedianStoppingRule,
    "HyperBand": HyperBandScheduler,
    "AsyncHyperBand": AsyncHyperBandScheduler,
}
# Detect whether we are running inside a Jupyter notebook: `get_ipython`
# only exists in IPython environments, and a "Terminal" shell class name
# means plain IPython rather than a notebook frontend.
try:
    class_name = get_ipython().__class__.__name__
    IS_NOTEBOOK = True if "Terminal" not in class_name else False
except NameError:
    IS_NOTEBOOK = False
def _make_scheduler(args):
    """Instantiate the trial scheduler named by ``args.scheduler``.

    Args:
        args: Namespace with a ``scheduler`` name (a key of ``_SCHEDULERS``)
            and a ``scheduler_config`` kwargs dict.

    Raises:
        TuneError: If the scheduler name is not recognized.
    """
    scheduler_cls = _SCHEDULERS.get(args.scheduler)
    if scheduler_cls is None:
        raise TuneError("Unknown scheduler: {}, should be one of {}".format(
            args.scheduler, _SCHEDULERS.keys()))
    return scheduler_cls(**args.scheduler_config)
def _check_default_resources_override(run_identifier):
    """Return True if the trainable overrides `default_resource_request`.

    Args:
        run_identifier: A registered trainable name (string); any other
            type is conservatively treated as an override.
    """
    if not isinstance(run_identifier, six.string_types):
        # If obscure dtype, assume it is overriden.
        return True
    trainable_cls = get_trainable_cls(run_identifier)
    # Compare code objects to detect whether the subclass replaced the
    # base Trainable implementation (a simple attribute check would also
    # match the inherited method).
    return hasattr(trainable_cls, "default_resource_request") and (
        trainable_cls.default_resource_request.__code__ !=
        Trainable.default_resource_request.__code__)
def run(run_or_experiment,
        name=None,
        stop=None,
        config=None,
        resources_per_trial=None,
        num_samples=1,
        local_dir=None,
        upload_dir=None,
        trial_name_creator=None,
        loggers=None,
        sync_to_cloud=None,
        sync_to_driver=None,
        checkpoint_freq=0,
        checkpoint_at_end=False,
        sync_on_checkpoint=True,
        keep_checkpoints_num=None,
        checkpoint_score_attr=None,
        global_checkpoint_period=10,
        export_formats=None,
        max_failures=0,
        restore=None,
        search_alg=None,
        scheduler=None,
        with_server=False,
        server_port=TuneServer.DEFAULT_PORT,
        verbose=2,
        resume=False,
        queue_trials=False,
        reuse_actors=False,
        trial_executor=None,
        raise_on_failed_trial=True,
        return_trials=False,
        ray_auto_init=True,
        sync_function=None):
    """Executes training.

    Args:
        run_or_experiment (function|class|str|Experiment): If
            function|class|str, this is the algorithm or model to train.
            This may refer to the name of a built-on algorithm
            (e.g. RLLib's DQN or PPO), a user-defined trainable
            function or class, or the string identifier of a
            trainable function or class registered in the tune registry.
            If Experiment, then Tune will execute training based on
            Experiment.spec.
        name (str): Name of experiment.
        stop (dict|func): The stopping criteria. If dict, the keys may be
            any field in the return result of 'train()', whichever is
            reached first. If function, it must take (trial_id, result) as
            arguments and return a boolean (True if trial should be stopped,
            False otherwise).
        config (dict): Algorithm-specific configuration for Tune variant
            generation (e.g. env, hyperparams). Defaults to empty dict.
            Custom search algorithms may ignore this.
        resources_per_trial (dict): Machine resources to allocate per trial,
            e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be
            assigned unless you specify them here. Defaults to 1 CPU and 0
            GPUs in ``Trainable.default_resource_request()``.
        num_samples (int): Number of times to sample from the
            hyperparameter space. Defaults to 1. If `grid_search` is
            provided as an argument, the grid will be repeated
            `num_samples` of times.
        local_dir (str): Local dir to save training results to.
            Defaults to ``~/ray_results``.
        upload_dir (str): Optional URI to sync training results and checkpoints
            to (e.g. ``s3://bucket`` or ``gs://bucket``).
        trial_name_creator (func): Optional function for generating
            the trial string representation.
        loggers (list): List of logger creators to be used with
            each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.
            See `ray/tune/logger.py`.
        sync_to_cloud (func|str): Function for syncing the local_dir to and
            from upload_dir. If string, then it must be a string template that
            includes `{source}` and `{target}` for the syncer to run. If not
            provided, the sync command defaults to standard S3 or gsutil sync
            commands.
        sync_to_driver (func|str|bool): Function for syncing trial logdir from
            remote node to local. If string, then it must be a string template
            that includes `{source}` and `{target}` for the syncer to run.
            If True or not provided, it defaults to using rsync. If False,
            syncing to driver is disabled.
        checkpoint_freq (int): How many training iterations between
            checkpoints. A value of 0 (default) disables checkpointing.
        checkpoint_at_end (bool): Whether to checkpoint at the end of the
            experiment regardless of the checkpoint_freq. Default is False.
        sync_on_checkpoint (bool): Force sync-down of trial checkpoint to
            driver. If set to False, checkpoint syncing from worker to driver
            is asynchronous and best-effort. This does not affect persistent
            storage syncing. Defaults to True.
        keep_checkpoints_num (int): Number of checkpoints to keep. A value of
            `None` keeps all checkpoints. Defaults to `None`. If set, need
            to provide `checkpoint_score_attr`.
        checkpoint_score_attr (str): Specifies by which attribute to rank the
            best checkpoint. Default is increasing order. If attribute starts
            with `min-` it will rank attribute in decreasing order, i.e.
            `min-validation_loss`.
        global_checkpoint_period (int): Seconds between global checkpointing.
            This does not affect `checkpoint_freq`, which specifies frequency
            for individual trials.
        export_formats (list): List of formats that exported at the end of
            the experiment. Default is None.
        max_failures (int): Try to recover a trial at least this many times.
            Ray will recover from the latest checkpoint if present.
            Setting to -1 will lead to infinite recovery retries.
            Setting to 0 will disable retries. Defaults to 0.
        restore (str): Path to checkpoint. Only makes sense to set if
            running 1 trial. Defaults to None.
        search_alg (SearchAlgorithm): Search Algorithm. Defaults to
            BasicVariantGenerator.
        scheduler (TrialScheduler): Scheduler for executing
            the experiment. Choose among FIFO (default), MedianStopping,
            AsyncHyperBand, HyperBand and PopulationBasedTraining. Refer to
            ray.tune.schedulers for more options.
        with_server (bool): Starts a background Tune server. Needed for
            using the Client API.
        server_port (int): Port number for launching TuneServer.
        verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,
            1 = only status updates, 2 = status and trial results.
        resume (str|bool): One of "LOCAL", "REMOTE", "PROMPT", or bool.
            LOCAL/True restores the checkpoint from the local_checkpoint_dir.
            REMOTE restores the checkpoint from remote_checkpoint_dir.
            PROMPT provides CLI feedback. False forces a new
            experiment. If resume is set but checkpoint does not exist,
            ValueError will be thrown.
        queue_trials (bool): Whether to queue trials when the cluster does
            not currently have enough resources to launch one. This should
            be set to True when running on an autoscaling cluster to enable
            automatic scale-up.
        reuse_actors (bool): Whether to reuse actors between different trials
            when possible. This can drastically speed up experiments that start
            and stop actors often (e.g., PBT in time-multiplexing mode). This
            requires trials to have the same resource requirements.
        trial_executor (TrialExecutor): Manage the execution of trials.
        raise_on_failed_trial (bool): Raise TuneError if there exists failed
            trial (of ERROR state) when the experiments complete.
        return_trials (bool): If True, return the list of Trial objects
            instead of an ExperimentAnalysis object. Defaults to False.
        ray_auto_init (bool): Automatically starts a local Ray cluster
            if using a RayTrialExecutor (which is the default) and
            if Ray is not initialized. Defaults to True.
        sync_function: Deprecated. See `sync_to_cloud` and
            `sync_to_driver`.

    Returns:
        List of Trial objects.

    Raises:
        TuneError if any trials failed and `raise_on_failed_trial` is True.

    Examples:
        >>> tune.run(mytrainable, scheduler=PopulationBasedTraining())

        >>> tune.run(mytrainable, num_samples=5, reuse_actors=True)

        >>> tune.run(
        >>>     "PG",
        >>>     num_samples=5,
        >>>     config={
        >>>         "env": "CartPole-v0",
        >>>         "lr": tune.sample_from(lambda _: np.random.rand())
        >>>     }
        >>> )
    """
    trial_executor = trial_executor or RayTrialExecutor(
        queue_trials=queue_trials,
        reuse_actors=reuse_actors,
        ray_auto_init=ray_auto_init)
    # Normalize the input into a list of Experiment objects.
    if isinstance(run_or_experiment, list):
        experiments = run_or_experiment
    else:
        experiments = [run_or_experiment]
    if len(experiments) > 1:
        logger.info(
            "Running multiple concurrent experiments is experimental and may "
            "not work with certain features.")
    for i, exp in enumerate(experiments):
        if not isinstance(exp, Experiment):
            # Registers functions/classes under a string identifier so the
            # Experiment spec can refer to them.
            run_identifier = Experiment.register_if_needed(exp)
            experiments[i] = Experiment(
                name=name,
                run=run_identifier,
                stop=stop,
                config=config,
                resources_per_trial=resources_per_trial,
                num_samples=num_samples,
                local_dir=local_dir,
                upload_dir=upload_dir,
                sync_to_driver=sync_to_driver,
                trial_name_creator=trial_name_creator,
                loggers=loggers,
                checkpoint_freq=checkpoint_freq,
                checkpoint_at_end=checkpoint_at_end,
                sync_on_checkpoint=sync_on_checkpoint,
                keep_checkpoints_num=keep_checkpoints_num,
                checkpoint_score_attr=checkpoint_score_attr,
                export_formats=export_formats,
                max_failures=max_failures,
                restore=restore,
                sync_function=sync_function)
        else:
            # Pre-built Experiment objects carry their own spec; the
            # keyword arguments above are not re-applied to them.
            logger.debug("Ignoring some parameters passed into tune.run.")
    if sync_to_cloud:
        for exp in experiments:
            assert exp.remote_checkpoint_dir, (
                "Need `upload_dir` if `sync_to_cloud` given.")
    # Note: runner-level checkpoint dirs come from the first experiment only.
    runner = TrialRunner(
        search_alg=search_alg or BasicVariantGenerator(),
        scheduler=scheduler or FIFOScheduler(),
        local_checkpoint_dir=experiments[0].checkpoint_dir,
        remote_checkpoint_dir=experiments[0].remote_checkpoint_dir,
        sync_to_cloud=sync_to_cloud,
        checkpoint_period=global_checkpoint_period,
        resume=resume,
        launch_web_server=with_server,
        server_port=server_port,
        verbose=bool(verbose > 1),
        trial_executor=trial_executor)
    for exp in experiments:
        runner.add_experiment(exp)
    if IS_NOTEBOOK:
        reporter = JupyterNotebookReporter(overwrite=verbose < 2)
    else:
        reporter = CLIReporter()
    # User Warning for GPUs
    if trial_executor.has_gpus():
        if isinstance(resources_per_trial,
                      dict) and "gpu" in resources_per_trial:
            # "gpu" is manually set.
            pass
        elif _check_default_resources_override(experiments[0].run_identifier):
            # "default_resources" is manually overriden.
            pass
        else:
            logger.warning("Tune detects GPUs, but no trials are using GPUs. "
                           "To enable trials to use GPUs, set "
                           "tune.run(resources_per_trial={'gpu': 1}...) "
                           "which allows Tune to expose 1 GPU to each trial. "
                           "You can also override "
                           "`Trainable.default_resource_request` if using the "
                           "Trainable API.")
    # Main event loop: step the runner and periodically report status.
    last_debug = 0
    while not runner.is_finished():
        runner.step()
        if time.time() - last_debug > DEBUG_PRINT_INTERVAL:
            if verbose:
                reporter.report(runner)
            last_debug = time.time()
    # Final checkpoint is best-effort; a failure here should not mask results.
    try:
        runner.checkpoint(force=True)
    except Exception:
        logger.exception("Trial Runner checkpointing failed.")
    if verbose:
        reporter.report(runner)
    # Block until any in-flight log syncing completes.
    wait_for_sync()
    errored_trials = []
    for trial in runner.get_trials():
        if trial.status != Trial.TERMINATED:
            errored_trials += [trial]
    if errored_trials:
        if raise_on_failed_trial:
            raise TuneError("Trials did not complete", errored_trials)
        else:
            logger.error("Trials did not complete: %s", errored_trials)
    trials = runner.get_trials()
    if return_trials:
        return trials
    logger.info("Returning an analysis object by default. You can call "
                "`analysis.trials` to retrieve a list of trials. "
                "This message will be removed in future versions of Tune.")
    return ExperimentAnalysis(runner.checkpoint_file, trials=trials)
def run_experiments(experiments,
                    search_alg=None,
                    scheduler=None,
                    with_server=False,
                    server_port=TuneServer.DEFAULT_PORT,
                    verbose=2,
                    resume=False,
                    queue_trials=False,
                    reuse_actors=False,
                    trial_executor=None,
                    raise_on_failed_trial=True,
                    concurrent=False):
    """Runs and blocks until all trials finish.

    If ``concurrent`` is True, all experiments are submitted to a single
    ``tune.run`` call; otherwise they are executed one after another.

    Examples:
        >>> experiment_spec = Experiment("experiment", my_func)
        >>> run_experiments(experiments=experiment_spec)

        >>> experiment_spec = {"experiment": {"run": my_func}}
        >>> run_experiments(experiments=experiment_spec)

        >>> run_experiments(
        >>>     experiments=experiment_spec,
        >>>     scheduler=MedianStoppingRule(...))

        >>> run_experiments(
        >>>     experiments=experiment_spec,
        >>>     search_alg=SearchAlgorithm(),
        >>>     scheduler=MedianStoppingRule(...))

    Returns:
        List of Trial objects, holding data for each executed trial.
    """
    # This is important to do this here, because it schematizes the
    # experiments and conducts the implicit registration.
    experiments = convert_to_experiment_list(experiments)
    # Every call to `run` below shares the same settings.
    shared_kwargs = dict(
        search_alg=search_alg,
        scheduler=scheduler,
        with_server=with_server,
        server_port=server_port,
        verbose=verbose,
        resume=resume,
        queue_trials=queue_trials,
        reuse_actors=reuse_actors,
        trial_executor=trial_executor,
        raise_on_failed_trial=raise_on_failed_trial,
        return_trials=True)
    if concurrent:
        return run(experiments, **shared_kwargs)
    all_trials = []
    for experiment in experiments:
        all_trials.extend(run(experiment, **shared_kwargs))
    return all_trials
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/utils/__init__.py
|
Python
|
from ray.tune.utils.util import (deep_update, flatten_dict, get_pinned_object,
merge_dicts, pin_in_object_store, UtilMonitor,
validate_save_restore, warn_if_slow)
__all__ = [
"deep_update", "flatten_dict", "get_pinned_object", "merge_dicts",
"pin_in_object_store", "UtilMonitor", "validate_save_restore",
"warn_if_slow"
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/utils/mock.py
|
Python
|
import os
from ray.rllib.agents.mock import _MockTrainer
from ray.tune import DurableTrainable
from ray.tune.sync_client import get_sync_client
from ray.tune.syncer import NodeSyncer
MOCK_REMOTE_DIR = "/tmp/mock-tune-remote/"
# Sync and delete templates that operate on local directories.
LOCAL_SYNC_TEMPLATE = "mkdir -p {target} && rsync -avz {source}/ {target}/"
LOCAL_DELETE_TEMPLATE = "rm -rf {target}"
def mock_storage_client():
    """Mocks storage client that treats a local dir as durable storage.

    Returns a sync client built from the local rsync/rm templates, so
    "remote" operations are plain filesystem copies.
    """
    sync_template = LOCAL_SYNC_TEMPLATE
    delete_template = LOCAL_DELETE_TEMPLATE
    return get_sync_client(sync_template, delete_template)
class MockNodeSyncer(NodeSyncer):
    """Mock NodeSyncer that syncs to and from /tmp."""

    def has_remote_target(self):
        # The mock target is a local directory, so it is always reachable.
        return True

    @property
    def _remote_path(self):
        # Re-root the remote dir under MOCK_REMOTE_DIR; a single leading
        # "/" is stripped so os.path.join does not discard the prefix.
        if self._remote_dir.startswith("/"):
            self._remote_dir = self._remote_dir[1:]
        remote_dir = self._remote_dir
        return os.path.join(MOCK_REMOTE_DIR, remote_dir)
class MockRemoteTrainer(_MockTrainer):
    """Mock Trainable that saves at tmp for simulated clusters."""

    def __init__(self, *args, **kwargs):
        """Re-root this trainable's logdir under MOCK_REMOTE_DIR.

        The original logdir (minus a single leading "/") is moved below
        MOCK_REMOTE_DIR so checkpoints land in the simulated remote store.
        """
        super(MockRemoteTrainer, self).__init__(*args, **kwargs)
        if self._logdir.startswith("/"):
            self._logdir = self._logdir[1:]
        self._logdir = os.path.join(MOCK_REMOTE_DIR, self._logdir)
        # exist_ok avoids the check-then-create race of the previous
        # `if not os.path.exists(...)` guard.
        os.makedirs(self._logdir, exist_ok=True)
class MockDurableTrainer(DurableTrainable, _MockTrainer):
    """Mock DurableTrainable that saves at tmp for simulated clusters.

    Combines the mock training logic of `_MockTrainer` with the durable
    checkpointing of `DurableTrainable`, but swaps the real storage client
    for a local-filesystem mock.
    """

    # TODO(ujvl): This class uses multiple inheritance; it should be cleaned
    # up once the durable training API converges.

    def __init__(self, remote_checkpoint_dir, *args, **kwargs):
        # Both bases are initialized explicitly (rather than via super())
        # because their __init__ signatures differ.
        _MockTrainer.__init__(self, *args, **kwargs)
        DurableTrainable.__init__(self, remote_checkpoint_dir, *args, **kwargs)

    def _create_storage_client(self):
        # Override: use the local-dir mock instead of a real cloud client.
        return mock_storage_client()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/utils/util.py
|
Python
|
import copy
import logging
import threading
import time
from collections import defaultdict
from threading import Thread
import numpy as np
import ray
logger = logging.getLogger(__name__)
try:
import psutil
except ImportError:
psutil = None
try:
import GPUtil
except ImportError:
GPUtil = None
# Object IDs pinned via `pin_in_object_store`; held for the process lifetime
# so the corresponding objects are never evicted from the object store.
_pinned_objects = []
PINNED_OBJECT_PREFIX = "ray.tune.PinnedObject:"
# Process start time; used by `warn_if_slow` to suppress startup-phase noise.
START_OF_TIME = time.time()
class UtilMonitor(Thread):
    """Class for system usage utilization monitoring.

    It keeps track of CPU, RAM, GPU, VRAM usage (each gpu separately) by
    pinging for information every x seconds in a separate thread.

    Requires psutil and GPUtil to be installed. Can be enabled with
    tune.run(config={"log_sys_usage": True}).
    """

    def __init__(self, start=True, delay=0.7):
        # `stopped` doubles as the "not running" sentinel; get_data()
        # returns {} while it is True.
        self.stopped = True
        if GPUtil is None and start:
            logger.warning("Install gputil for GPU system monitoring.")
        if psutil is None and start:
            logger.warning("Install psutil to monitor system performance.")
        if GPUtil is None and psutil is None:
            # Nothing to monitor; Thread.__init__ is intentionally skipped,
            # leaving a dormant object whose get_data() always returns {}.
            return
        super(UtilMonitor, self).__init__()
        self.delay = delay  # Time between calls to GPUtil
        self.values = defaultdict(list)
        # Guards `values`, which is written by the monitor thread and
        # read/cleared by get_data() on the caller's thread.
        self.lock = threading.Lock()
        self.daemon = True
        if start:
            self.start()

    def _read_utilization(self):
        # Runs on the monitor thread; appends one sample per metric.
        with self.lock:
            if psutil is not None:
                self.values["cpu_util_percent"].append(
                    float(psutil.cpu_percent(interval=None)))
                self.values["ram_util_percent"].append(
                    float(getattr(psutil.virtual_memory(), "percent")))
            if GPUtil is not None:
                for gpu in GPUtil.getGPUs():
                    self.values["gpu_util_percent" + str(gpu.id)].append(
                        float(gpu.load))
                    self.values["vram_util_percent" + str(gpu.id)].append(
                        float(gpu.memoryUtil))

    def get_data(self):
        """Return mean utilization per metric and clear collected samples."""
        if self.stopped:
            return {}
        with self.lock:
            # Copy, then clear in place so the monitor thread keeps
            # appending to the same lists.
            ret_values = copy.deepcopy(self.values)
            for key, val in self.values.items():
                del val[:]
        return {
            "perf": {
                k: np.mean(v)
                for k, v in ret_values.items() if len(v) > 0
            }
        }

    def run(self):
        # Thread entry point: sample every `delay` seconds until stop().
        self.stopped = False
        while not self.stopped:
            self._read_utilization()
            time.sleep(self.delay)

    def stop(self):
        # Signals the sampling loop to exit after its current sleep.
        self.stopped = True
def pin_in_object_store(obj):
    """Deprecated, use ray.put(value, weakref=False) instead.

    Puts `obj` into the Ray object store and appends the ID to the
    module-level `_pinned_objects` list so the value is never evicted.

    Returns:
        The object ID of the stored value.
    """
    obj_id = ray.put(obj, weakref=False)
    _pinned_objects.append(obj_id)
    return obj_id
def get_pinned_object(pinned_id):
    """Deprecated. Fetch a value stored with `pin_in_object_store`."""
    return ray.get(pinned_id)
class warn_if_slow:
    """Context manager that warns when the wrapped block runs slowly.

    A warning is logged when the block exceeds ``threshold`` seconds
    (default 0.5) and the process has been alive for more than a minute,
    which suppresses noise during startup.

    Example:
        >>> with warn_if_slow("some_operation"):
        ...    ray.get(something)
    """

    DEFAULT_THRESHOLD = 0.5

    def __init__(self, name, threshold=None):
        self.name = name
        # Falsy thresholds (None, 0) fall back to the default.
        self.threshold = threshold or self.DEFAULT_THRESHOLD
        self.too_slow = False

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, tb):
        now = time.time()
        elapsed = now - self.start
        if elapsed > self.threshold and now - START_OF_TIME > 60.0:
            self.too_slow = True
            logger.warning(
                "The `%s` operation took %s seconds to complete, "
                "which may be a performance bottleneck.", self.name,
                elapsed)
def merge_dicts(d1, d2):
    """Return a new dict that deep-merges ``d2`` on top of a copy of ``d1``.

    Neither input is mutated; values from ``d2`` win on conflicts.
    """
    merged_copy = copy.deepcopy(d1)
    return deep_update(merged_copy, d2, True, [])
def deep_update(original, new_dict, new_keys_allowed, whitelist):
    """Recursively update ``original`` in place with ``new_dict``.

    A top-level key missing from ``original`` raises unless
    ``new_keys_allowed`` is True. For keys in ``whitelist`` whose values
    are dicts, new sub-keys are always permitted.

    Args:
        original (dict): Dictionary with default values.
        new_dict (dict): Dictionary with values to be updated.
        new_keys_allowed (bool): Whether new keys are allowed.
        whitelist (list): Keys whose dict values may gain new sub-keys;
            only consulted at the top level.

    Returns:
        dict: The mutated ``original``.
    """
    for key, new_value in new_dict.items():
        if key not in original and not new_keys_allowed:
            raise Exception("Unknown config parameter `{}` ".format(key))
        if isinstance(original.get(key), dict):
            # Whitelisted sub-dicts may freely introduce new keys.
            allow_new = True if key in whitelist else new_keys_allowed
            deep_update(original[key], new_value, allow_new, [])
        else:
            original[key] = new_value
    return original
def flatten_dict(dt, delimiter="/"):
    """Return a flat copy of ``dt``, joining nested keys with ``delimiter``.

    The input is deep-copied first, so neither ``dt`` nor its values are
    shared with the result. Nesting of any depth is collapsed, one level
    per pass.
    """
    dt = copy.deepcopy(dt)
    while any(isinstance(value, dict) for value in dt.values()):
        new_entries = {}
        keys_to_drop = []
        for key, value in dt.items():
            if isinstance(value, dict):
                for subkey, subvalue in value.items():
                    new_entries[delimiter.join([key, subkey])] = subvalue
                keys_to_drop.append(key)
        dt.update(new_entries)
        for key in keys_to_drop:
            del dt[key]
    return dt
def _to_pinnable(obj):
"""Converts obj to a form that can be pinned in object store memory.
Currently only numpy arrays are pinned in memory, if you have a strong
reference to the array value.
"""
return (obj, np.zeros(1))
def _from_pinnable(obj):
"""Retrieve from _to_pinnable format."""
return obj[0]
def validate_save_restore(trainable_cls,
                          config=None,
                          num_gpus=0,
                          use_object_store=False):
    """Helper method to check if your Trainable class will resume correctly.

    Args:
        trainable_cls: Trainable class for evaluation.
        config (dict): Config to pass to Trainable when testing.
        num_gpus (int): GPU resources to allocate when testing.
        use_object_store (bool): Whether to save and restore to Ray's object
            store. Recommended to set this to True if planning to use
            algorithms that pause training (i.e., PBT, HyperBand).

    Returns:
        True if the three-step train/save/restore round trip succeeds
        (otherwise an assertion fails).
    """
    assert ray.is_initialized(), "Need Ray to be initialized."
    remote_cls = ray.remote(num_gpus=num_gpus)(trainable_cls)
    trainable_1 = remote_cls.remote(config=config)
    trainable_2 = remote_cls.remote(config=config)
    from ray.tune.result import TRAINING_ITERATION
    # Train the first actor for 3 iterations before checkpointing.
    for _ in range(3):
        res = ray.get(trainable_1.train.remote())
    assert res.get(TRAINING_ITERATION), (
        "Validation will not pass because it requires `training_iteration` "
        "to be returned.")
    if use_object_store:
        restore_check = trainable_2.restore_from_object.remote(
            trainable_1.save_to_object.remote())
        ray.get(restore_check)
    else:
        # NOTE(review): the result of ray.get here is unused; the call is
        # kept for its blocking side effect of completing the restore.
        restore_check = ray.get(
            trainable_2.restore.remote(trainable_1.save.remote()))
    # The restored actor must continue from iteration 3, i.e. produce 4 and 5.
    res = ray.get(trainable_2.train.remote())
    assert res[TRAINING_ITERATION] == 4
    res = ray.get(trainable_2.train.remote())
    assert res[TRAINING_ITERATION] == 5
    return True
if __name__ == "__main__":
    # Smoke test: pin a value in the object store and read it back.
    ray.init()
    X = pin_in_object_store("hello")
    print(X)
    result = get_pinned_object(X)
    print(result)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/utils/visual_utils.py
|
Python
|
import pandas as pd
from pandas.api.types import is_string_dtype, is_numeric_dtype
import logging
import os
import os.path as osp
import numpy as np
import json
from ray.tune.utils import flatten_dict
logger = logging.getLogger(__name__)
logger.warning("This module will be deprecated in a future version of Tune.")
def _parse_results(res_path):
    """Parse the last line of a newline-delimited JSON results file.

    Args:
        res_path (str): Path to the trial's result file.

    Returns:
        dict: Flattened dict of the final result, or {} on any failure
        (missing/empty file, malformed JSON).
    """
    res_dict = {}
    try:
        last_line = None
        with open(res_path) as f:
            # Only the final line (the latest result) is of interest.
            for last_line in f:
                pass
        # Guard the empty-file case explicitly; previously this relied on
        # `line` being unbound and the resulting NameError being caught.
        if last_line is not None:
            res_dict = flatten_dict(json.loads(last_line.strip()))
    except Exception:
        # Lazy %-style args instead of eager string formatting.
        logger.exception("Importing %s failed...Perhaps empty?", res_path)
    return res_dict
def _parse_configs(cfg_path):
    """Parse a trial's params.json into a flat dict.

    Args:
        cfg_path (str): Path to the params.json file.

    Returns:
        dict: Flattened config dict, or {} if the file cannot be read or
        parsed. (Previously `cfg_dict` was left unbound on failure, so the
        return statement raised UnboundLocalError after logging.)
    """
    cfg_dict = {}
    try:
        with open(cfg_path) as f:
            cfg_dict = flatten_dict(json.load(f))
    except Exception:
        logger.exception("Config parsing failed.")
    return cfg_dict
def _resolve(directory, result_fname):
    """Merge one trial directory's config and final result into one dict.

    Returns None if either file cannot be processed; result values
    override config values on key collisions.
    """
    try:
        res_dict = _parse_results(osp.join(directory, result_fname))
        cfg_dict = _parse_configs(osp.join(directory, "params.json"))
        cfg_dict.update(res_dict)
        return cfg_dict
    except Exception:
        return None
def load_results_to_df(directory, result_name="result.json"):
    """Collect all trial results under `directory` into a DataFrame.

    Walks the tree for directories containing `result_name`, merges each
    trial's config and final result via `_resolve`, and drops trials that
    could not be parsed.
    """
    exp_directories = [
        dirpath
        for dirpath, _, files in os.walk(directory)
        if result_name in files
    ]
    rows = (_resolve(d, result_name) for d in exp_directories)
    data = [row for row in rows if row]
    return pd.DataFrame(data)
def generate_plotly_dim_dict(df, field):
    """Build a plotly parallel-coordinates dimension dict for one column.

    Numeric columns are passed through; string columns are encoded as
    integer indices into their unique values, with matching tick labels.

    Raises:
        Exception: If the column is neither numeric nor string-typed.
    """
    column = df[field]
    dim_dict = {"label": field}
    if is_numeric_dtype(column):
        dim_dict["values"] = column
    elif is_string_dtype(column):
        texts = column.unique()
        index_of = {text: index for index, text in enumerate(texts)}
        dim_dict["values"] = [index_of[x] for x in column]
        dim_dict["tickvals"] = list(range(len(texts)))
        dim_dict["ticktext"] = texts
    else:
        raise Exception("Unidentifiable Type")
    return dim_dict
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/web_server.py
|
Python
|
import json
import logging
import threading
from urllib.parse import urljoin, urlparse
from http.server import SimpleHTTPRequestHandler, HTTPServer
import ray.cloudpickle as cloudpickle
from ray.tune import TuneError
from ray.tune.suggest import BasicVariantGenerator
from ray.utils import binary_to_hex, hex_to_binary
logger = logging.getLogger(__name__)
try:
import requests # `requests` is not part of stdlib.
except ImportError:
requests = None
logger.exception("Couldn't import `requests` library. "
"Be sure to install it on the client side.")
class TuneClient:
    """Client to interact with an ongoing Tune experiment.

    Requires a TuneServer to have started running.

    Attributes:
        tune_address (str): Address of running TuneServer
        port_forward (int): Port number of running TuneServer
    """

    def __init__(self, tune_address, port_forward):
        self._tune_address = tune_address
        self._port_forward = port_forward
        self._path = "http://{}:{}".format(tune_address, port_forward)

    def get_all_trials(self):
        """Returns a list of all trials' information."""
        response = requests.get(urljoin(self._path, "trials"))
        return self._deserialize(response)

    def get_trial(self, trial_id):
        """Returns trial information by trial_id."""
        response = requests.get(
            urljoin(self._path, "trials/{}".format(trial_id)))
        return self._deserialize(response)

    def add_trial(self, name, specification):
        """Adds a trial by name and specification (dict)."""
        payload = {"name": name, "spec": specification}
        response = requests.post(urljoin(self._path, "trials"), json=payload)
        return self._deserialize(response)

    def stop_trial(self, trial_id):
        """Requests to stop trial by trial_id."""
        response = requests.put(
            urljoin(self._path, "trials/{}".format(trial_id)))
        return self._deserialize(response)

    @property
    def server_address(self):
        return self._tune_address

    @property
    def server_port(self):
        return self._port_forward

    def _load_trial_info(self, trial_info):
        # Config and result travel as hex-encoded cloudpickle blobs
        # (see the server's `_trial_info`); decode them in place.
        trial_info["config"] = cloudpickle.loads(
            hex_to_binary(trial_info["config"]))
        trial_info["result"] = cloudpickle.loads(
            hex_to_binary(trial_info["result"]))

    def _deserialize(self, response):
        """Parse a server response and decode embedded trial payloads."""
        parsed = response.json()
        if "trial" in parsed:
            self._load_trial_info(parsed["trial"])
        elif "trials" in parsed:
            for trial_info in parsed["trials"]:
                self._load_trial_info(trial_info)
        return parsed
def RunnerHandler(runner):
    """Build a request-handler class bound to the given TrialRunner.

    The `runner` is captured in the closure because http.server
    instantiates the handler class itself and cannot pass extra args.
    """

    class Handler(SimpleHTTPRequestHandler):
        """A Handler is a custom handler for TuneServer.

        Handles all requests and responses coming into and from
        the TuneServer.
        """

        def _do_header(self, response_code=200, headers=None):
            """Sends the header portion of the HTTP response.

            Parameters:
                response_code (int): Standard HTTP response code
                headers (list[tuples]): Standard HTTP response headers
            """
            if headers is None:
                headers = [("Content-type", "application/json")]
            self.send_response(response_code)
            for key, value in headers:
                self.send_header(key, value)
            self.end_headers()

        def do_HEAD(self):
            """HTTP HEAD handler method."""
            self._do_header()

        def do_GET(self):
            """HTTP GET handler method: fetch one trial or all trials."""
            response_code = 200
            message = ""
            try:
                result = self._get_trial_by_url(self.path)
                resource = {}
                if result:
                    if isinstance(result, list):
                        infos = [self._trial_info(t) for t in result]
                        resource["trials"] = infos
                    else:
                        resource["trial"] = self._trial_info(result)
                message = json.dumps(resource)
            except TuneError as e:
                response_code = 404
                message = str(e)
            self._do_header(response_code=response_code)
            self.wfile.write(message.encode())

        def do_PUT(self):
            """HTTP PUT handler method: request a stop for the trial(s)."""
            response_code = 200
            message = ""
            try:
                result = self._get_trial_by_url(self.path)
                resource = {}
                if result:
                    if isinstance(result, list):
                        infos = [self._trial_info(t) for t in result]
                        resource["trials"] = infos
                        for t in result:
                            runner.request_stop_trial(t)
                    else:
                        resource["trial"] = self._trial_info(result)
                        runner.request_stop_trial(result)
                message = json.dumps(resource)
            except TuneError as e:
                response_code = 404
                message = str(e)
            self._do_header(response_code=response_code)
            self.wfile.write(message.encode())

        def do_POST(self):
            """HTTP POST handler method: add trials from a JSON spec."""
            response_code = 201
            # FIX: the default 0 belongs to headers.get, not as the base
            # argument of int() -- int(None, 0) raised a TypeError when the
            # Content-Length header was missing.
            content_len = int(self.headers.get("Content-Length", 0))
            raw_body = self.rfile.read(content_len)
            parsed_input = json.loads(raw_body.decode())
            resource = self._add_trials(parsed_input["name"],
                                        parsed_input["spec"])
            headers = [("Content-type", "application/json"), ("Location",
                                                              "/trials/")]
            self._do_header(response_code=response_code, headers=headers)
            self.wfile.write(json.dumps(resource).encode())

        def _trial_info(self, trial):
            """Returns trial information as JSON."""
            if trial.last_result:
                result = trial.last_result.copy()
            else:
                result = None
            # Config/result are pickled then hex-encoded so they survive
            # the JSON transport; TuneClient._load_trial_info reverses this.
            info_dict = {
                "id": trial.trial_id,
                "trainable_name": trial.trainable_name,
                "config": binary_to_hex(cloudpickle.dumps(trial.config)),
                "status": trial.status,
                "result": binary_to_hex(cloudpickle.dumps(result))
            }
            return info_dict

        def _get_trial_by_url(self, url):
            """Parses url to get either all trials or trial by trial_id."""
            parts = urlparse(url)
            path = parts.path
            if path == "/trials":
                return list(runner.get_trials())
            else:
                # Last path segment is the trial id, e.g. /trials/<id>.
                trial_id = path.split("/")[-1]
                return runner.get_trial(trial_id)

        def _add_trials(self, name, spec):
            """Add trial by invoking TrialRunner."""
            resource = {}
            resource["trials"] = []
            trial_generator = BasicVariantGenerator()
            trial_generator.add_configurations({name: spec})
            for trial in trial_generator.next_trials():
                runner.add_trial(trial)
                resource["trials"].append(self._trial_info(trial))
            return resource

    return Handler
class TuneServer(threading.Thread):
    """A TuneServer is a thread that initializes and runs a HTTPServer.

    The server handles requests from a TuneClient.

    Attributes:
        runner (TrialRunner): Runner that modifies and accesses trials.
        port_forward (int): Port number of TuneServer.
    """

    DEFAULT_PORT = 4321

    def __init__(self, runner, port=None):
        """Initialize HTTPServer and serve forever by invoking self.run()"""
        threading.Thread.__init__(self)
        self._port = port if port else self.DEFAULT_PORT
        address = ("localhost", self._port)
        logger.info("Starting Tune Server...")
        self._server = HTTPServer(address, RunnerHandler(runner))
        # Daemon thread: a still-running server never blocks process exit.
        self.daemon = True
        self.start()

    def run(self):
        # Thread entry point; blocks serving requests until shutdown().
        self._server.serve_forever()

    def shutdown(self):
        """Shutdown the underlying server."""
        self._server.shutdown()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/utils.py
|
Python
|
import binascii
import errno
import hashlib
import inspect
import logging
import numpy as np
import os
import six
import subprocess
import sys
import threading
import time
import uuid
import ray.gcs_utils
import ray.ray_constants as ray_constants
def _random_string():
    """Return ID_SIZE random bytes derived from SHA1 of a fresh UUID."""
    digest = hashlib.sha1(uuid.uuid4().bytes).digest()
    assert len(digest) == ray_constants.ID_SIZE
    return digest
def format_error_message(exception_message, task_exception=False):
    """Improve the formatting of an exception thrown by a remote function.

    This method takes a traceback from an exception and makes it nicer by
    removing a few uninformative lines and adding some space to indent the
    remaining lines nicely.

    Args:
        exception_message (str): A message generated by traceback.format_exc().
        task_exception (bool): If True, drop lines 1 and 2, which for
            in-task errors only describe worker-side plumbing.

    Returns:
        A string of the formatted exception message.
    """
    lines = exception_message.split("\n")
    if task_exception:
        # For errors that occur inside of tasks, remove lines 1 and 2 which are
        # always the same, they just contain information about the worker code.
        lines = lines[0:1] + lines[3:]
    # (A stray dead `pass` statement was removed here.)
    return "\n".join(lines)
def push_error_to_driver(worker, error_type, message, job_id=None):
    """Push an error message to the driver to be printed in the background.

    Args:
        worker: The worker to use.
        error_type (str): The type of the error.
        message (str): The message that will be printed in the background
            on the driver.
        job_id: The ID of the driver to push the error message to. If this
            is None, then the message will be pushed to all drivers.
    """
    if job_id is None:
        # The nil job ID addresses all drivers.
        job_id = ray.JobID.nil()
    assert isinstance(job_id, ray.JobID)
    worker.raylet_client.push_error(job_id, error_type, message, time.time())
def push_error_to_driver_through_redis(redis_client,
                                       error_type,
                                       message,
                                       job_id=None):
    """Push an error message to the driver via Redis directly.

    Normally the push_error_to_driver function should be used. However, in
    some instances, the raylet client is not available, e.g., because the
    error happens in Python before the driver or worker has connected to the
    backend processes.

    Args:
        redis_client: The redis client to use.
        error_type (str): The type of the error.
        message (str): The message that will be printed in the background
            on the driver.
        job_id: The ID of the driver to push the error message to. If this
            is None, then the message will be pushed to all drivers.
    """
    if job_id is None:
        job_id = ray.JobID.nil()
    assert isinstance(job_id, ray.JobID)
    # Build the error entry and append it purely through the Python Redis
    # client instead of going through the raylet.
    error_data = ray.gcs_utils.construct_error_message(job_id, error_type,
                                                       message, time.time())
    table_prefix = ray.gcs_utils.TablePrefix.Value("ERROR_INFO")
    table_pubsub = ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB")
    redis_client.execute_command("RAY.TABLE_APPEND", table_prefix,
                                 table_pubsub, job_id.binary(), error_data)
def is_cython(obj):
    """Check if an object is a Cython function or method."""
    # Cython-compiled callables expose the type name
    # "cython_function_or_method"; there is no public Cython type we can use
    # with isinstance, so compare the type name directly.
    def _looks_cython(candidate):
        return type(candidate).__name__ == "cython_function_or_method"

    if _looks_cython(obj):
        return True
    # Bound methods wrap the underlying function in __func__.
    return hasattr(obj, "__func__") and _looks_cython(obj.__func__)
def is_function_or_method(obj):
    """Check if an object is a function or method.

    Args:
        obj: The Python object in question.

    Returns:
        True if the object is an function or method.
    """
    # Plain functions, bound/unbound methods, and Cython callables all count.
    if inspect.isfunction(obj) or inspect.ismethod(obj):
        return True
    return is_cython(obj)
def is_class_method(f):
    """Returns whether the given method is a class_method."""
    # Bound callables carry __self__; it must also be non-None.
    return getattr(f, "__self__", None) is not None
def is_static_method(cls, f_name):
    """Returns whether the class has a static method with the given name.

    Args:
        cls: The Python class (i.e. object of type `type`) to
            search for the method in.
        f_name: The name of the method to look up in this class
            and check whether or not it is static.
    """
    # Walk the MRO; the first class that defines the name decides.
    for klass in inspect.getmro(cls):
        try:
            return isinstance(klass.__dict__[f_name], staticmethod)
        except KeyError:
            continue
    return False
def random_string():
    """Generate a random string to use as an ID.

    Note that users may seed numpy, which could cause this function to
    generate duplicate IDs. Therefore, we need to seed numpy ourselves, but
    we can't interfere with the state of the user's random number generator,
    so we extract the state of the random number generator and reset it
    after we are done.

    TODO(rkn): If we want to later guarantee that these are generated in a
    deterministic manner, then we will need to make some changes here.

    Returns:
        A random byte string of length ray_constants.ID_SIZE.
    """
    # Save the user's numpy RNG state so our reseeding below is invisible.
    numpy_state = np.random.get_state()
    try:
        # Seed from OS entropy so a user-level np.random.seed() cannot cause
        # duplicate IDs.
        np.random.seed(None)
        random_id = np.random.bytes(ray_constants.ID_SIZE)
    finally:
        # Restore the user's RNG state even if ID generation raises
        # (previously the state was only restored on the success path).
        np.random.set_state(numpy_state)
    return random_id
def decode(byte_str, allow_none=False):
    """Make this unicode in Python 3, otherwise leave it as bytes.

    Args:
        byte_str: The byte string to decode.
        allow_none: If true, then we will allow byte_str to be None in which
            case we will return an empty string. TODO(rkn): Remove this flag.
            This is only here to simplify upgrading to flatbuffers 1.10.0.

    Returns:
        A byte string in Python 2 and a unicode string in Python 3.
    """
    if byte_str is None and allow_none:
        return ""
    if not isinstance(byte_str, bytes):
        raise ValueError(
            "The argument {} must be a bytes object.".format(byte_str))
    # Python 2 keeps bytes as-is; Python 3 decodes to str.
    if sys.version_info < (3, 0):
        return byte_str
    return byte_str.decode("ascii")
def ensure_str(s, encoding="utf-8", errors="strict"):
    """Coerce *s* to `str`.

    To keep six with lower version, see Issue 4169, we copy this function
    from six == 1.12.0.
    TODO(yuhguo): remove this function when six >= 1.12.0.

    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`
    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`

    Args:
        s: The text or binary string to coerce.
        encoding (str): Codec used for the encode/decode step.
        errors (str): Error-handling scheme passed to encode/decode.

    Returns:
        The native `str` type for the running Python version.

    Raises:
        TypeError: If `s` is neither the text type nor the binary type.
    """
    # Pick the text/binary types for the running interpreter.
    if six.PY3:
        text_type = str
        binary_type = bytes
    else:
        text_type = unicode  # noqa: F821 (py2-only builtin)
        binary_type = str
    if not isinstance(s, (text_type, binary_type)):
        raise TypeError("not expecting type '%s'" % type(s))
    # On py2 the native str is bytes (encode); on py3 it is text (decode).
    if six.PY2 and isinstance(s, text_type):
        s = s.encode(encoding, errors)
    elif six.PY3 and isinstance(s, binary_type):
        s = s.decode(encoding, errors)
    return s
def binary_to_object_id(binary_object_id):
    """Wrap raw object-ID bytes in a ray.ObjectID."""
    return ray.ObjectID(binary_object_id)
def binary_to_task_id(binary_task_id):
    """Wrap raw task-ID bytes in a ray.TaskID."""
    return ray.TaskID(binary_task_id)
def binary_to_hex(identifier):
    """Return the hex-string representation of an identifier's raw bytes."""
    hex_bytes = binascii.hexlify(identifier)
    # hexlify returns bytes; callers expect a native str on Python 3.
    if sys.version_info < (3, 0):
        return hex_bytes
    return hex_bytes.decode()
def hex_to_binary(hex_identifier):
    """Inverse of binary_to_hex: decode a hex string back to raw bytes."""
    return binascii.unhexlify(hex_identifier)
# TODO(qwang): Remove these helper functions
# once we separate `WorkerID` from `UniqueID`.
def compute_job_id_from_driver(driver_id):
    """Derive the JobID embedded as the prefix of a driver's WorkerID."""
    assert isinstance(driver_id, ray.WorkerID)
    prefix = driver_id.binary()[:ray.JobID.size()]
    return ray.JobID(prefix)
def compute_driver_id_from_job(job_id):
    """Inverse of compute_job_id_from_driver: pad a JobID to WorkerID size.

    The JobID bytes are kept as the prefix and the remainder is filled with
    0xff bytes.
    """
    assert isinstance(job_id, ray.JobID)
    padding = b"\xff" * (ray_constants.ID_SIZE - job_id.size())
    return ray.WorkerID(job_id.binary() + padding)
def get_cuda_visible_devices():
    """Get the device IDs in the CUDA_VISIBLE_DEVICES environment variable.

    Returns:
        If CUDA_VISIBLE_DEVICES is set, a list of integer GPU IDs (empty if
        the variable is set to the empty string). If it is not set, None.
    """
    gpu_ids_str = os.environ.get("CUDA_VISIBLE_DEVICES")
    if gpu_ids_str is None:
        return None
    if not gpu_ids_str:
        # An explicitly empty variable means "no GPUs visible".
        return []
    return [int(device) for device in gpu_ids_str.split(",")]
# Cache of the most recently applied GPU-ID list, used to skip redundant
# environment updates.
last_set_gpu_ids = None


def set_cuda_visible_devices(gpu_ids):
    """Set the CUDA_VISIBLE_DEVICES environment variable.

    Args:
        gpu_ids: This is a list of integers representing GPU IDs.
    """
    global last_set_gpu_ids
    if last_set_gpu_ids == gpu_ids:
        # Optimization: the variable already holds this exact value.
        return
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(gpu) for gpu in gpu_ids)
    last_set_gpu_ids = gpu_ids
def resources_from_resource_arguments(
        default_num_cpus, default_num_gpus, default_memory,
        default_object_store_memory, default_resources, runtime_num_cpus,
        runtime_num_gpus, runtime_memory, runtime_object_store_memory,
        runtime_resources):
    """Determine a task's resource requirements.

    Per-invocation ("runtime") values take precedence over the function or
    actor defaults for every resource kind.

    Args:
        default_num_cpus: The default number of CPUs required by this function
            or actor method.
        default_num_gpus: The default number of GPUs required by this function
            or actor method.
        default_memory: The default heap memory required by this function
            or actor method.
        default_object_store_memory: The default object store memory required
            by this function or actor method.
        default_resources: The default custom resources required by this
            function or actor method.
        runtime_num_cpus: The number of CPUs requested when the task was
            invoked.
        runtime_num_gpus: The number of GPUs requested when the task was
            invoked.
        runtime_memory: The heap memory requested when the task was invoked.
        runtime_object_store_memory: The object store memory requested when
            the task was invoked.
        runtime_resources: The custom resources requested when the task was
            invoked.

    Returns:
        A dictionary of the resource requirements for the task.

    Raises:
        ValueError: If the custom resources dictionary contains any of the
            reserved keys.
    """
    if runtime_resources is not None:
        resources = runtime_resources.copy()
    elif default_resources is not None:
        resources = default_resources.copy()
    else:
        resources = {}

    # CPU/GPU/memory are specified through dedicated arguments, never through
    # the custom resources dictionary.
    if "CPU" in resources or "GPU" in resources:
        raise ValueError("The resources dictionary must not "
                         "contain the key 'CPU' or 'GPU'")
    elif "memory" in resources or "object_store_memory" in resources:
        raise ValueError("The resources dictionary must not "
                         "contain the key 'memory' or 'object_store_memory'")

    assert default_num_cpus is not None
    resources["CPU"] = (default_num_cpus
                        if runtime_num_cpus is None else runtime_num_cpus)

    if runtime_num_gpus is not None:
        resources["GPU"] = runtime_num_gpus
    elif default_num_gpus is not None:
        resources["GPU"] = default_num_gpus

    # BUG FIX: like CPU/GPU above, the runtime value must win. The previous
    # `default_memory or runtime_memory` expressions silently discarded
    # per-invocation memory requests whenever a default was configured.
    memory = default_memory if runtime_memory is None else runtime_memory
    object_store_memory = (default_object_store_memory
                           if runtime_object_store_memory is None else
                           runtime_object_store_memory)
    if memory is not None:
        resources["memory"] = ray_constants.to_memory_units(
            memory, round_up=True)
    if object_store_memory is not None:
        resources["object_store_memory"] = ray_constants.to_memory_units(
            object_store_memory, round_up=True)

    return resources
# The single StreamHandler installed on the "ray" logger; created lazily by
# setup_logger so repeated calls never add duplicate handlers.
_default_handler = None


def setup_logger(logging_level, logging_format):
    """Setup default logging for ray.

    Args:
        logging_level: A logging level name (e.g. "info", "DEBUG") or a
            numeric logging level.
        logging_format: A format string for the handler's Formatter.
    """
    logger = logging.getLogger("ray")
    # Accept level names as well as numeric levels. isinstance (rather than
    # the previous `type(...) is str`) also covers str subclasses.
    if isinstance(logging_level, str):
        logging_level = logging.getLevelName(logging_level.upper())
    logger.setLevel(logging_level)
    global _default_handler
    if _default_handler is None:
        _default_handler = logging.StreamHandler()
        logger.addHandler(_default_handler)
    _default_handler.setFormatter(logging.Formatter(logging_format))
    # Ray manages its own handler; don't also emit through the root logger.
    logger.propagate = False
# This function is copied and modified from
# https://github.com/giampaolo/psutil/blob/5bd44f8afcecbfb0db479ce230c790fc2c56569a/psutil/tests/test_linux.py#L132-L138 # noqa: E501
def vmstat(stat):
    """Run vmstat and get a particular statistic.

    Args:
        stat: The statistic that we are interested in retrieving.

    Returns:
        The parsed (integer) value for the statistic.

    Raises:
        ValueError: If the statistic is not found in the vmstat output.
    """
    out = subprocess.check_output(["vmstat", "-s"])
    needle = stat.encode("ascii")
    for raw_line in out.split(b"\n"):
        line = raw_line.strip()
        # Each "vmstat -s" line looks like "<number> <description>".
        if needle in line:
            return int(line.split(b" ")[0])
    raise ValueError("Can't find {} in 'vmstat' output.".format(stat))
# This function is copied and modified from
# https://github.com/giampaolo/psutil/blob/5e90b0a7f3fccb177445a186cc4fac62cfadb510/psutil/tests/test_osx.py#L29-L38 # noqa: E501
def sysctl(command):
    """Run a sysctl command and parse the output.

    Args:
        command: A sysctl command with an argument, for example,
            ["sysctl", "hw.memsize"].

    Returns:
        The parsed output: an int when the value is numeric, otherwise the
        raw bytes.
    """
    out = subprocess.check_output(command)
    # Output looks like b"<name>: <value>"; take the value token.
    value = out.split(b" ")[1]
    try:
        return int(value)
    except ValueError:
        return value
def get_system_memory():
    """Return the total amount of system memory in bytes.

    Returns:
        The total amount of system memory in bytes.
    """
    # If we are inside a cgroup (e.g. a docker container), a memory limit may
    # be configured. Note that this file is not docker-specific and often
    # holds a huge sentinel value when no real limit applies.
    docker_limit = None
    memory_limit_filename = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
    if os.path.exists(memory_limit_filename):
        with open(memory_limit_filename, "r") as f:
            docker_limit = int(f.read())

    # Prefer psutil when available; otherwise fall back to platform tools.
    try:
        import psutil
        memory_in_bytes = psutil.virtual_memory().total
    except ImportError:
        if sys.platform == "linux" or sys.platform == "linux2":
            # Handle Linux: vmstat reports kilobytes.
            bytes_in_kilobyte = 1024
            memory_in_bytes = vmstat("total memory") * bytes_in_kilobyte
        else:
            # Handle MacOS.
            memory_in_bytes = sysctl(["sysctl", "hw.memsize"])

    if docker_limit is None:
        return memory_in_bytes
    return min(docker_limit, memory_in_bytes)
def estimate_available_memory():
    """Return the currently available amount of system memory in bytes.

    Returns:
        The total amount of available memory in bytes. It may be an
        overestimate if psutil is not installed.
    """
    # psutil gives the most accurate answer when it is installed.
    try:
        import psutil
    except ImportError:
        psutil = None
    if psutil is not None:
        return psutil.virtual_memory().available

    # Handle Linux via vmstat (which reports kilobytes).
    if sys.platform in ("linux", "linux2"):
        bytes_in_kilobyte = 1024
        free_kb = vmstat("total memory") - vmstat("used memory")
        return free_kb * bytes_in_kilobyte

    # Give up: fall back to total system memory (an overestimate).
    return get_system_memory()
def get_shared_memory_bytes():
    """Get the size of the shared memory file system.

    Returns:
        The size of the shared memory file system in bytes.
    """
    # /dev/shm only exists on Linux; make sure this is only called there.
    assert sys.platform == "linux" or sys.platform == "linux2"
    shm_fd = os.open("/dev/shm", os.O_RDONLY)
    try:
        stats = os.fstatvfs(shm_fd)
        # Available bytes = block size (f_bsize) * available blocks
        # (f_bavail).
        shm_avail = stats.f_bsize * stats.f_bavail
    finally:
        os.close(shm_fd)
    return shm_avail
def check_oversized_pickle(pickled, name, obj_type, worker):
    """Send a warning message if the pickled object is too large.

    Args:
        pickled: the pickled object.
        name: name of the pickled object.
        obj_type: type of the pickled object, can be 'function',
            'remote function', 'actor', or 'object'.
        worker: the worker used to send warning message.
    """
    if len(pickled) <= ray_constants.PICKLE_OBJECT_WARNING_SIZE:
        return
    # The object is large enough to be worth warning about; push the warning
    # to the driver in the background.
    warning_message = (
        "Warning: The {} {} has size {} when pickled. "
        "It will be stored in Redis, which could cause memory issues. "
        "This may mean that its definition uses a large array or other object."
    ).format(obj_type, name, len(pickled))
    push_error_to_driver(
        worker,
        ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR,
        warning_message,
        job_id=worker.current_job_id)
def is_main_thread():
    """Return True when called from this process's main thread."""
    # `Thread.getName()` is a deprecated Java-style alias; use the `name`
    # attribute instead (identical value).
    return threading.current_thread().name == "MainThread"
def try_make_directory_shared(directory_path):
    """Best-effort chmod of a directory to 0777 so other users can share it."""
    try:
        os.chmod(directory_path, 0o0777)
    except OSError as e:
        # The user attempting to change the permissions may not own the
        # directory, in which case chmod raises a permission error. The chmod
        # is attempted whether the directory is new or not to avoid race
        # conditions, so permission errors are silently suppressed.
        # See ray-project/ray/#3591.
        if e.errno not in (errno.EACCES, errno.EPERM):
            raise
def try_to_create_directory(directory_path):
    """Attempt to create a directory that is globally readable/writable.

    Args:
        directory_path: The path of the directory to create.
    """
    expanded = os.path.expanduser(directory_path)
    os.makedirs(expanded, exist_ok=True)
    # Open up permissions so other users on the same machine can reuse the
    # directory (important for shared log/temp directories).
    try_make_directory_shared(expanded)
def try_to_symlink(symlink_path, target_path):
    """Attempt to create a symlink.

    If the symlink path exists and isn't a symlink, the symlink will not be
    created. If a symlink exists in the path, it will be attempted to be
    removed and replaced.

    Args:
        symlink_path: The path at which to create the symlink.
        target_path: The path the symlink should point to.
    """
    symlink_path = os.path.expanduser(symlink_path)
    target_path = os.path.expanduser(target_path)

    # BUG FIX: check islink() before exists(). os.path.exists follows the
    # link and returns False for a dangling symlink, so the old
    # exists-then-islink logic skipped removal and the os.symlink below
    # failed silently, leaving stale dangling links in place forever.
    if os.path.islink(symlink_path):
        # Try to remove the existing symlink so it can be replaced.
        try:
            os.remove(symlink_path)
        except OSError:
            return
    elif os.path.exists(symlink_path):
        # There's an existing non-symlink file, don't overwrite it.
        return

    try:
        os.symlink(target_path, symlink_path)
    except OSError:
        return
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/worker.py
|
Python
|
from contextlib import contextmanager
import colorama
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import signal
from six.moves import queue
import sys
import threading
import time
import traceback
import random
# Ray modules
import ray.cloudpickle as pickle
import ray.gcs_utils
import ray.memory_monitor as memory_monitor
import ray.node
import ray.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray.services as services
import ray.signature
import ray.state
from ray import (
ActorID,
JobID,
ObjectID,
)
from ray import import_thread
from ray import profiling
from ray.exceptions import (
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray.function_manager import FunctionActorManager
from ray.utils import (
_random_string,
check_oversized_pickle,
is_cython,
setup_logger,
)
from ray.local_mode_manager import LocalModeManager
# Worker modes (see Worker.set_mode): SCRIPT_MODE for a driver run as a
# script/shell, WORKER_MODE for a non-driver worker process, and LOCAL_MODE
# for a driver that executes tasks serially for debugging.
SCRIPT_MODE = 0
WORKER_MODE = 1
LOCAL_MODE = 2
# Key prefix under which error entries are stored (presumably in Redis —
# see ERROR_KEY_PREFIX's consumers outside this chunk to confirm).
ERROR_KEY_PREFIX = b"Error:"
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
try:
    import setproctitle
except ImportError:
    # setproctitle is optional; when unavailable, process titles are simply
    # left unchanged.
    setproctitle = None
class ActorCheckpointInfo:
    """Bookkeeping state for maintaining an actor's checkpoints."""

    __slots__ = [
        # Number of tasks executed since last checkpoint.
        "num_tasks_since_last_checkpoint",
        # Timestamp of the last checkpoint, in milliseconds.
        "last_checkpoint_timestamp",
        # IDs of the previous checkpoints.
        "checkpoint_ids",
    ]

    def __init__(self, num_tasks_since_last_checkpoint,
                 last_checkpoint_timestamp, checkpoint_ids):
        """Store the given checkpoint bookkeeping values as-is."""
        self.checkpoint_ids = checkpoint_ids
        self.last_checkpoint_timestamp = last_checkpoint_timestamp
        self.num_tasks_since_last_checkpoint = num_tasks_since_last_checkpoint
class Worker:
    """A class used to define the control flow of a worker process.

    Note:
        The methods in this class are considered unexposed to the user. The
        functions outside of this class are considered exposed.

    Attributes:
        connected (bool): True if Ray has been started and False otherwise.
        node (ray.node.Node): The node this worker is attached to.
        mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
            WORKER_MODE.
        cached_functions_to_run (List): A list of functions to run on all of
            the workers that should be exported as soon as connect is called.
    """

    def __init__(self):
        """Initialize a Worker object."""
        self.node = None
        self.mode = None
        self.cached_functions_to_run = []
        self.actor_init_error = None
        self.make_actor = None
        self.actors = {}
        # Information used to maintain actor checkpoints.
        self.actor_checkpoint_info = {}
        self.actor_task_counter = 0
        # When the worker is constructed. Record the original value of the
        # CUDA_VISIBLE_DEVICES environment variable.
        self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
        self.memory_monitor = memory_monitor.MemoryMonitor()
        # A dictionary that maps from driver id to SerializationContext
        # TODO: clean up the SerializationContext once the job finished.
        self.serialization_context_map = {}
        self.function_actor_manager = FunctionActorManager(self)
        # BUG FIX: this reentrant lock is required by
        # get_serialization_context() (`with self.lock:` and its comment
        # "we use RLock here") but was never initialized, which caused an
        # AttributeError on first use.
        self.lock = threading.RLock()
        # This event is checked regularly by all of the threads so that they
        # know when to exit.
        self.threads_stopped = threading.Event()
        # Index of the current session. This number will
        # increment every time when `ray.shutdown` is called.
        self._session_index = 0
        # Functions to run to process the values returned by ray.get. Each
        # postprocessor must take two arguments ("object_ids", and "values").
        self._post_get_hooks = []

    @property
    def connected(self):
        # The node is attached during connect(); None means not started.
        return self.node is not None

    @property
    def node_ip_address(self):
        self.check_connected()
        return self.node.node_ip_address

    @property
    def load_code_from_local(self):
        self.check_connected()
        return self.node.load_code_from_local

    @property
    def use_pickle(self):
        self.check_connected()
        return self.node.use_pickle

    @property
    def current_job_id(self):
        # core_worker only exists after the worker has connected.
        if hasattr(self, "core_worker"):
            return self.core_worker.get_current_job_id()
        return JobID.nil()

    @property
    def actor_id(self):
        if hasattr(self, "core_worker"):
            return self.core_worker.get_actor_id()
        return ActorID.nil()

    @property
    def current_task_id(self):
        return self.core_worker.get_current_task_id()

    @property
    def current_session_and_job(self):
        """Get the current session index and job id as pair."""
        assert isinstance(self._session_index, int)
        assert isinstance(self.current_job_id, ray.JobID)
        return self._session_index, self.current_job_id

    def mark_actor_init_failed(self, error):
        """Called to mark this actor as failed during initialization."""
        self.actor_init_error = error

    def reraise_actor_init_error(self):
        """Raises any previous actor initialization error."""
        if self.actor_init_error is not None:
            raise self.actor_init_error

    def get_serialization_context(self, job_id=None):
        """Get the SerializationContext of the job this worker is processing.

        Args:
            job_id: The ID of the job that indicates which job to get
                the serialization context for.

        Returns:
            The serialization context of the given job.
        """
        # This function needs to be protected by a lock, because it will be
        # called by `register_class_for_serialization`, as well as the import
        # thread, from different threads. Also, this function will
        # recursively call itself, so we use an RLock (see __init__).
        if job_id is None:
            job_id = self.current_job_id
        with self.lock:
            if job_id not in self.serialization_context_map:
                self.serialization_context_map[
                    job_id] = serialization.SerializationContext(self)
                self.serialization_context_map[job_id].initialize()
            return self.serialization_context_map[job_id]

    def check_connected(self):
        """Check if the worker is connected.

        Raises:
            RayConnectionError: If the worker is not connected.
        """
        if not self.connected:
            raise RayConnectionError("Ray has not been started yet. You can "
                                     "start Ray with 'ray.init()'.")

    def set_mode(self, mode):
        """Set the mode of the worker.

        The mode SCRIPT_MODE should be used if this Worker is a driver that is
        being run as a Python script or interactively in a shell. It will
        print information about task failures.

        The mode WORKER_MODE should be used if this Worker is not a driver. It
        will not print information about tasks.

        The mode LOCAL_MODE should be used if this Worker is a driver and if
        you want to run the driver in a manner equivalent to serial Python for
        debugging purposes. It will not send remote function calls to the
        scheduler and will instead execute them in a blocking fashion.

        Args:
            mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
        """
        self.mode = mode

    def put_object(self, value, object_id=None, pin_object=True):
        """Put value in the local object store with object id `objectid`.

        This assumes that the value for `objectid` has not yet been placed in
        the local object store. If the plasma store is full, the worker will
        automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
        retry will delay for an exponentially doubling amount of time,
        starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
        will be raised.

        Args:
            value: The value to put in the object store.
            object_id (object_id.ObjectID): The object ID of the value to be
                put. If None, one will be generated.
            pin_object: If set, the object will be pinned at the raylet.

        Returns:
            object_id.ObjectID: The object ID the object was put under.

        Raises:
            ray.exceptions.ObjectStoreFullError: This is raised if the attempt
                to store the object fails because the object store is full
                even after multiple retries.
        """
        # Make sure that the value is not an object ID.
        if isinstance(value, ObjectID):
            raise TypeError(
                "Calling 'put' on an ray.ObjectID is not allowed "
                "(similarly, returning an ray.ObjectID from a remote "
                "function is not allowed). If you really want to "
                "do this, you can wrap the ray.ObjectID in a list and "
                "call 'put' on it (or return it).")
        serialized_value = self.get_serialization_context().serialize(value)
        return self.core_worker.put_serialized_object(
            serialized_value, object_id=object_id, pin_object=pin_object)

    def deserialize_objects(self,
                            data_metadata_pairs,
                            object_ids,
                            error_timeout=10):
        """Deserialize raw (data, metadata) pairs into Python values."""
        context = self.get_serialization_context()
        return context.deserialize_objects(data_metadata_pairs, object_ids,
                                           error_timeout)

    def get_objects(self, object_ids, timeout=None):
        """Get the values in the object store associated with the IDs.

        Return the values from the local object store for object_ids. This
        will block until all the values for object_ids have been written to
        the local object store.

        Args:
            object_ids (List[object_id.ObjectID]): A list of the object IDs
                whose values should be retrieved.
            timeout (float): The maximum amount of time in seconds to wait
                before returning.

        Raises:
            Exception if running in LOCAL_MODE and any of the object IDs do
            not exist in the emulated object store.
        """
        # Make sure that the values are object IDs.
        for object_id in object_ids:
            if not isinstance(object_id, ObjectID):
                raise TypeError(
                    "Attempting to call `get` on the value {}, "
                    "which is not an ray.ObjectID.".format(object_id))
        if self.mode == LOCAL_MODE:
            return self.local_mode_manager.get_objects(object_ids)
        # -1 signals "wait indefinitely" to the core worker.
        timeout_ms = int(timeout * 1000) if timeout else -1
        data_metadata_pairs = self.core_worker.get_objects(
            object_ids, self.current_task_id, timeout_ms)
        return self.deserialize_objects(data_metadata_pairs, object_ids)

    def run_function_on_all_workers(self, function,
                                    run_on_other_drivers=False):
        """Run arbitrary code on all of the workers.

        This function will first be run on the driver, and then it will be
        exported to all of the workers to be run. It will also be run on any
        new workers that register later. If ray.init has not been called yet,
        then cache the function and export it later.

        Args:
            function (Callable): The function to run on all of the workers.
                It takes only one argument, a worker info dict. If it returns
                anything, its return values will not be used.
            run_on_other_drivers: The boolean that indicates whether we want
                to run this function on other drivers. One case is we may
                need to share objects across drivers.
        """
        # If ray.init has not been called yet, then cache the function and
        # export it when connect is called. Otherwise, run the function on
        # all workers.
        if self.mode is None:
            self.cached_functions_to_run.append(function)
            return

        # Attempt to pickle the function before we need it. This could
        # fail, and it is more convenient if the failure happens before we
        # actually run the function locally.
        pickled_function = pickle.dumps(function)
        function_to_run_id = hashlib.sha1(pickled_function).digest()
        key = b"FunctionsToRun:" + function_to_run_id
        # First run the function on the driver.
        # We always run the task locally.
        function({"worker": self})
        # Check if the function has already been put into redis.
        # NOTE(review): self.redis_client is not assigned in __init__;
        # presumably it is attached during connect() -- confirm.
        function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
        if not function_exported:
            # In this case, the function has already been exported, so
            # we don't need to export it again.
            return
        check_oversized_pickle(pickled_function, function.__name__,
                               "function", self)
        # Run the function on all workers.
        self.redis_client.hmset(
            key, {
                "job_id": self.current_job_id.binary(),
                "function_id": function_to_run_id,
                "function": pickled_function,
                "run_on_other_drivers": str(run_on_other_drivers)
            })
        self.redis_client.rpush("Exports", key)
        # TODO(rkn): If the worker fails after it calls setnx and before it
        # successfully completes the hmset and rpush, then the program will
        # most likely hang. This could be fixed by making these three
        # operations into a transaction (or by implementing a custom
        # command that does all three things).

    def _get_arguments_for_execution(self, function_name, serialized_args):
        """Retrieve the arguments for the remote function.

        This retrieves the values for the arguments to the remote function
        that were passed in as object IDs. Arguments that were passed by
        value are not changed. This is called by the worker that is executing
        the remote function.

        Args:
            function_name (str): The name of the remote function whose
                arguments are being retrieved.
            serialized_args (List): The arguments to the function. These are
                either strings representing serialized objects passed by
                value or they are ray.ObjectIDs.

        Returns:
            The retrieved arguments in addition to the arguments that were
            passed by value.

        Raises:
            RayError: This exception is raised if a task that
                created one of the arguments failed.
        """
        arguments = [None] * len(serialized_args)
        object_ids = []
        object_indices = []
        for (i, arg) in enumerate(serialized_args):
            if isinstance(arg, ObjectID):
                object_ids.append(arg)
                object_indices.append(i)
            else:
                # pass the argument by value
                arguments[i] = arg

        # Get the objects from the local object store.
        if len(object_ids) > 0:
            values = self.get_objects(object_ids)
            for i, value in enumerate(values):
                if isinstance(value, RayError):
                    # The task that produced this argument failed; propagate.
                    raise value
                else:
                    arguments[object_indices[i]] = value
        return ray.signature.recover_args(arguments)

    def main_loop(self):
        """The main loop a worker runs to receive and execute tasks."""

        def sigterm_handler(signum, frame):
            shutdown(True)
            sys.exit(1)

        signal.signal(signal.SIGTERM, sigterm_handler)
        self.core_worker.run_task_loop()
        sys.exit(0)
def get_gpu_ids():
    """Get the IDs of the GPUs that are available to the worker.

    If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
    started up, then the IDs returned by this method will be a subset of the
    IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
    [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.

    Returns:
        A list of GPU IDs.
    """
    if _mode() == LOCAL_MODE:
        raise Exception("ray.get_gpu_ids() currently does not work in LOCAL "
                        "MODE.")

    gpu_resources = global_worker.core_worker.resource_ids().get("GPU", [])
    assigned_ids = [resource_id for resource_id, _ in gpu_resources]
    # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
    # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should
    # be returned) by mapping logical IDs back to the original device IDs.
    original = global_worker.original_gpu_ids
    if original is not None:
        assigned_ids = [original[gpu_id] for gpu_id in assigned_ids]
    return assigned_ids
def get_resource_ids():
    """Get the IDs of the resources that are available to the worker.

    Returns:
        A dictionary mapping the name of a resource to a list of pairs, where
        each pair consists of the ID of a resource and the fraction of that
        resource reserved for this worker.
    """
    mode = _mode()
    if mode == LOCAL_MODE:
        raise Exception(
            "ray.get_resource_ids() currently does not work in LOCAL "
            "MODE.")
    return global_worker.core_worker.resource_ids()
def get_webui_url():
    """Get the URL to access the web UI.

    Note that the URL does not specify which node the web UI is on.

    Returns:
        The URL of the web UI as a string.
    """
    node = _global_node
    if node is None:
        raise Exception("Ray has not been initialized/connected.")
    return node.webui_url
# The process-wide singleton Worker instance (see Worker's class docstring).
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
# Set by ray.init(); None until Ray has been initialized in this process.
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
class RayConnectionError(Exception):
    """Raised when an operation needs Ray but ray.init() has not been run.

    See Worker.check_connected, which raises this when the worker has no
    attached node.
    """
def print_failed_task(task_status):
    """Print information about failed tasks.

    Args:
        task_status (Dict): A dictionary containing the name, operationid, and
            error message for a failed task.
    """
    # Emitted through the module-level "ray.worker" logger; the literal's
    # layout is exactly what appears in the driver's log output.
    logger.error("""
      Error: Task failed
       Function Name: {}
       Task ID: {}
       Error Message: \n{}
    """.format(task_status["function_name"], task_status["operationid"],
               task_status["error_message"]))
def init(address=None,
         redis_address=None,
         num_cpus=None,
         num_gpus=None,
         memory=None,
         object_store_memory=None,
         resources=None,
         driver_object_store_memory=None,
         redis_max_memory=None,
         log_to_driver=True,
         node_ip_address=ray_constants.NODE_DEFAULT_IP,
         object_id_seed=None,
         local_mode=False,
         redirect_worker_output=None,
         redirect_output=None,
         ignore_reinit_error=False,
         num_redis_shards=None,
         redis_max_clients=None,
         redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
         plasma_directory=None,
         huge_pages=False,
         include_webui=None,
         webui_host="localhost",
         job_id=None,
         configure_logging=True,
         logging_level=logging.INFO,
         logging_format=ray_constants.LOGGER_FORMAT,
         plasma_store_socket_name=None,
         raylet_socket_name=None,
         temp_dir=None,
         load_code_from_local=False,
         use_pickle=ray.cloudpickle.FAST_CLOUDPICKLE_USED,
         _internal_config=None):
    """Connect to an existing Ray cluster or start one and connect to it.
    This method handles two cases. Either a Ray cluster already exists and we
    just attach this driver to it, or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.
    To start Ray and all of the relevant processes, use this as follows:
    .. code-block:: python
        ray.init()
    To connect to an existing Ray cluster, use this as follows (substituting
    in the appropriate address):
    .. code-block:: python
        ray.init(address="123.45.67.89:6379")
    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis, a
            raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits.
        redis_address (str): Deprecated; same as address.
        num_cpus (int): Number of cpus the user wishes all raylets to
            be configured with.
        num_gpus (int): Number of gpus the user wishes all raylets to
            be configured with.
        resources: A dictionary mapping the name of a resource to the quantity
            of that resource available.
        memory: The amount of memory (in bytes) that is available for use by
            workers requesting memory resources. By default, this is autoset
            based on available system memory.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is autoset based on available
            system memory, subject to a 20GB cap.
        redis_max_memory: The max amount of memory (in bytes) to allow each
            redis shard to use. Once the limit is exceeded, redis will start
            LRU eviction of entries. This only applies to the sharded redis
            tables (task, object, and profile tables). By default, this is
            autoset based on available system memory, subject to a 10GB cap.
        log_to_driver (bool): If true, then output from all of the worker
            processes on all nodes will be directed to the driver.
        node_ip_address (str): The IP address of the node that we are on.
        object_id_seed (int): Used to seed the deterministic generation of
            object IDs. The same value can be used across multiple runs of the
            same driver in order to generate the object IDs in a consistent
            manner. However, the same ID should not be used for different
            drivers.
        local_mode (bool): True if the code should be executed serially
            without Ray. This is useful for debugging.
        driver_object_store_memory (int): Limit the amount of memory the driver
            can use in the object store for creating objects. By default, this
            is autoset based on available system memory, subject to a 20GB cap.
        ignore_reinit_error: True if we should suppress errors from calling
            ray.init() a second time.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_webui: Boolean flag indicating whether to start the web
            UI, which displays the status of the Ray cluster. If this argument
            is None, then the UI will be started if the relevant dependencies
            are present.
        webui_host: The host to bind the web UI server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        job_id: The ID of this job.
        configure_logging: True if allow the logging configuration here.
            Otherwise, the users may want to configure it by their own.
        logging_level: Logging level, default will be logging.INFO.
        logging_format: Logging format, default contains a timestamp,
            filename, line number, and message. See ray_constants.py.
        plasma_store_socket_name (str): If provided, it will specify the socket
            name used by the plasma store.
        raylet_socket_name (str): If provided, it will specify the socket path
            used by the raylet process.
        temp_dir (str): If provided, it will specify the root temporary
            directory for the Ray process.
        load_code_from_local: Whether code should be loaded from a local module
            or from the GCS.
        use_pickle: Whether data objects should be serialized with cloudpickle.
        _internal_config (str): JSON configuration for overriding
            RayConfig defaults. For testing purposes ONLY.
    Returns:
        Address information about the started processes.
    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # The legacy `redis_address` keyword is rejected outright; callers must
    # use `address` instead.
    if redis_address is not None:
        raise DeprecationWarning("The redis_address argument is deprecated. "
                                 "Please use address instead.")
    # Normalize `address` into a canonical "host:port" Redis address.
    # (redis_address is always None here because of the check above, so only
    # the `address` branch of the condition can fire.)
    if redis_address is not None or address is not None:
        redis_address, _, _ = services.validate_redis_address(
            address, redis_address)
    if configure_logging:
        setup_logger(logging_level, logging_format)
    # LOCAL_MODE executes everything serially in this process; SCRIPT_MODE is
    # the normal driver mode.
    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE
    if "OMP_NUM_THREADS" in os.environ:
        logger.warning("OMP_NUM_THREADS={} is set, this may impact "
                       "object transfer performance.".format(
                           os.environ["OMP_NUM_THREADS"]))
    if setproctitle is None:
        logger.warning(
            "WARNING: Not updating worker name since `setproctitle` is not "
            "installed. Install this with `pip install setproctitle` "
            "(or ray[debug]) to enable monitoring of worker processes.")
    # Guard against double-initialization of the singleton worker.
    if global_worker.connected:
        if ignore_reinit_error:
            logger.error("Calling ray.init() again after it has already been "
                         "called.")
            return
        else:
            raise Exception("Perhaps you called ray.init twice by accident? "
                            "This error can be suppressed by passing in "
                            "'ignore_reinit_error=True' or by calling "
                            "'ray.shutdown()' prior to 'ray.init()'.")
    # Convert hostnames to numerical IP address.
    if node_ip_address is not None:
        node_ip_address = services.address_to_ip(node_ip_address)
    global _global_node
    if driver_mode == LOCAL_MODE:
        # If starting Ray in LOCAL_MODE, don't start any other processes.
        _global_node = ray.node.LocalNode()
    elif redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            object_id_seed=object_id_seed,
            local_mode=local_mode,
            driver_mode=driver_mode,
            redirect_worker_output=redirect_worker_output,
            redirect_output=redirect_output,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=num_redis_shards,
            redis_max_clients=redis_max_clients,
            redis_password=redis_password,
            plasma_directory=plasma_directory,
            huge_pages=huge_pages,
            include_webui=include_webui,
            webui_host=webui_host,
            memory=memory,
            object_store_memory=object_store_memory,
            redis_max_memory=redis_max_memory,
            plasma_store_socket_name=plasma_store_socket_name,
            raylet_socket_name=raylet_socket_name,
            temp_dir=temp_dir,
            load_code_from_local=load_code_from_local,
            use_pickle=use_pickle,
            _internal_config=_internal_config,
        )
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster. Reject any
        # cluster-level configuration, which only makes sense when this
        # process is the one starting the cluster.
        if num_cpus is not None or num_gpus is not None:
            raise Exception("When connecting to an existing cluster, num_cpus "
                            "and num_gpus must not be provided.")
        if resources is not None:
            raise Exception("When connecting to an existing cluster, "
                            "resources must not be provided.")
        if num_redis_shards is not None:
            raise Exception("When connecting to an existing cluster, "
                            "num_redis_shards must not be provided.")
        if redis_max_clients is not None:
            raise Exception("When connecting to an existing cluster, "
                            "redis_max_clients must not be provided.")
        if memory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "memory must not be provided.")
        if object_store_memory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "object_store_memory must not be provided.")
        if redis_max_memory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "redis_max_memory must not be provided.")
        if plasma_directory is not None:
            raise Exception("When connecting to an existing cluster, "
                            "plasma_directory must not be provided.")
        if huge_pages:
            raise Exception("When connecting to an existing cluster, "
                            "huge_pages must not be provided.")
        if temp_dir is not None:
            raise Exception("When connecting to an existing cluster, "
                            "temp_dir must not be provided.")
        if plasma_store_socket_name is not None:
            raise Exception("When connecting to an existing cluster, "
                            "plasma_store_socket_name must not be provided.")
        if raylet_socket_name is not None:
            raise Exception("When connecting to an existing cluster, "
                            "raylet_socket_name must not be provided.")
        if _internal_config is not None:
            raise Exception("When connecting to an existing cluster, "
                            "_internal_config must not be provided.")
        # In this case, we only need to connect the node.
        ray_params = ray.parameter.RayParams(
            node_ip_address=node_ip_address,
            redis_address=redis_address,
            redis_password=redis_password,
            object_id_seed=object_id_seed,
            temp_dir=temp_dir,
            load_code_from_local=load_code_from_local,
            use_pickle=use_pickle)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)
    # Attach this driver process to the node that was just created or located.
    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=driver_object_store_memory,
        job_id=job_id,
        internal_config=json.loads(_internal_config)
        if _internal_config else {})
    # Run any registered post-init callbacks.
    for hook in _post_init_hooks:
        hook()
    return _global_node.address_info
# Functions to run as callback after a successful ray init. Each hook is
# called with no arguments at the end of init().
_post_init_hooks = []
def shutdown(exiting_interpreter=False):
    """Disconnect the worker, and terminate processes started by ray.init().
    This will automatically run at the end when a Python process that uses Ray
    exits. It is ok to run this twice in a row. The primary use case for this
    function is to cleanup state between tests.
    Note that this will clear any remote function definitions, actor
    definitions, and existing actors, so if you wish to use any previously
    defined remote functions or actors after calling ray.shutdown(), then you
    need to redefine them. If they were defined in an imported module, then you
    will need to reload the module.
    Args:
        exiting_interpreter (bool): True if this is called by the atexit hook
            and false otherwise. If we are exiting the interpreter, we will
            wait a little while to print any extra error messages.
    """
    if exiting_interpreter and global_worker.mode == SCRIPT_MODE:
        # This is a duration to sleep before shutting down everything in order
        # to make sure that log messages finish printing.
        time.sleep(0.5)
    # Detach the worker and stop the background threads first.
    disconnect(exiting_interpreter)
    # We need to destruct the core worker here because after this function,
    # we will tear down any processes spawned by ray.init() and the background
    # IO thread in the core worker doesn't currently handle that gracefully.
    if hasattr(global_worker, "core_worker"):
        del global_worker.core_worker
    # Disconnect global state from GCS.
    ray.state.state.disconnect()
    # Shut down the Ray processes.
    global _global_node
    if _global_node is not None:
        _global_node.kill_all_processes(check_alive=False, allow_graceful=True)
        _global_node = None
    # TODO(rkn): Instead of manually resetting some of the worker fields, we
    # should simply set "global_worker" to equal "None" or something like that.
    # The worker object is reused across init()/shutdown() cycles, so reset
    # the pieces of state a later init() checks.
    global_worker.set_mode(None)
    global_worker._post_get_hooks = []
# Automatically run shutdown(exiting_interpreter=True) when the interpreter
# exits so Ray processes get cleaned up.
atexit.register(shutdown, True)
def sigterm_handler(signum, frame):
    # Convert SIGTERM into a normal interpreter exit so that the atexit
    # handler registered above still runs.
    sys.exit(signal.SIGTERM)
try:
    signal.signal(signal.SIGTERM, sigterm_handler)
except ValueError:
    # signal.signal raises ValueError when not called from the main thread.
    logger.warning("Failed to set SIGTERM handler, processes might"
                   "not be cleaned up properly on exit.")
    # NOTE(review): the warning above is missing a space between "might" and
    # "not" due to the implicit string concatenation.
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
    """Excepthook that records a driver's fatal traceback in Redis before
    delegating to the interpreter's original excepthook."""
    if global_worker.mode == SCRIPT_MODE:
        # Only drivers publish their traceback to Redis.
        formatted_tb = "".join(traceback.format_tb(tb))
        driver_key = b"Drivers:" + global_worker.worker_id
        try:
            global_worker.redis_client.hmset(driver_key,
                                             {"exception": formatted_tb})
        except (ConnectionRefusedError, redis.exceptions.ConnectionError):
            logger.warning("Could not push exception to redis.")
    # Always fall through to the default behavior.
    normal_excepthook(type, value, tb)
# Install the hook defined above for the lifetime of this process.
sys.excepthook = custom_excepthook
# The last time we raised a TaskError in this process. We use this value to
# suppress redundant error messages pushed from the workers.
last_task_error_raise_time = 0
# The max amount of seconds to wait before printing out an uncaught error.
UNCAUGHT_ERROR_GRACE_PERIOD = 5
def print_logs(redis_client, threads_stopped):
    """Prints log messages from workers on all of the nodes.
    Args:
        redis_client: A client to the primary Redis shard.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    subscriber = redis_client.pubsub(ignore_subscribe_messages=True)
    subscriber.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
    local_ip = services.get_node_ip_address()

    def pid_color(record):
        # Raylet output is colored differently from worker output.
        if record["pid"] == "raylet":
            return colorama.Fore.YELLOW
        return colorama.Fore.CYAN

    # A long run of messages with no idle gap in between suggests that the
    # driver cannot keep up with the incoming log volume.
    consecutive_messages = 0
    try:
        while not threads_stopped.is_set():
            msg = subscriber.get_message()
            if msg is None:
                consecutive_messages = 0
                threads_stopped.wait(timeout=0.01)
                continue
            consecutive_messages += 1
            record = json.loads(ray.utils.decode(msg["data"]))
            if record["ip"] == local_ip:
                for line in record["lines"]:
                    print("{}{}(pid={}){} {}".format(
                        colorama.Style.DIM, pid_color(record), record["pid"],
                        colorama.Style.RESET_ALL, line))
            else:
                for line in record["lines"]:
                    print("{}{}(pid={}, ip={}){} {}".format(
                        colorama.Style.DIM, pid_color(record), record["pid"],
                        record["ip"], colorama.Style.RESET_ALL, line))
            if consecutive_messages > 0 and consecutive_messages % 100 == 0:
                logger.warning(
                    "The driver may not be able to keep up with the "
                    "stdout/stderr of the workers. To avoid forwarding logs "
                    "to the driver, use 'ray.init(log_to_driver=False)'.")
    except (OSError, redis.exceptions.ConnectionError) as e:
        logger.error("print_logs: {}".format(e))
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        subscriber.close()
def print_error_messages_raylet(task_error_queue, threads_stopped):
    """Prints message received in the given output queue.
    This checks periodically if any un-raised errors occured in the background.
    Args:
        task_error_queue (queue.Queue): A queue used to receive errors from the
            thread that listens to Redis.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    while not threads_stopped.is_set():
        try:
            error, received_at = task_error_queue.get(block=False)
        except queue.Empty:
            # Nothing queued yet; nap briefly and poll again.
            threads_stopped.wait(timeout=0.01)
            continue
        # Hold the message for a grace period so that a TaskError raised in
        # the driver has a chance to supersede it.
        deadline = received_at + UNCAUGHT_ERROR_GRACE_PERIOD
        while time.time() < deadline:
            threads_stopped.wait(timeout=1)
            if threads_stopped.is_set():
                break
        if (received_at <
                last_task_error_raise_time + UNCAUGHT_ERROR_GRACE_PERIOD):
            logger.debug("Suppressing error from worker: {}".format(error))
        else:
            logger.error(
                "Possible unhandled error from worker: {}".format(error))
def listen_error_messages_raylet(worker, task_error_queue, threads_stopped):
    """Listen to error messages in the background on the driver.
    This runs in a separate thread on the driver and pushes (error, time)
    tuples to the output queue.
    Args:
        worker: The worker class that this thread belongs to.
        task_error_queue (queue.Queue): A queue used to communicate with the
            thread that prints the errors found by this thread.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(
        ignore_subscribe_messages=True)
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.
    # Really we should just subscribe to the errors for this specific job.
    # However, currently all errors seem to be published on the same channel.
    error_pubsub_channel = str(
        ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB")).encode("ascii")
    worker.error_message_pubsub_client.subscribe(error_pubsub_channel)
    # worker.error_message_pubsub_client.psubscribe("*")
    try:
        # Get the errors that occurred before the call to subscribe.
        error_messages = ray.errors()
        for error_message in error_messages:
            logger.error(error_message)
        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                # No message yet; sleep briefly without blocking shutdown.
                threads_stopped.wait(timeout=0.01)
                continue
            gcs_entry = ray.gcs_utils.GcsEntry.FromString(msg["data"])
            assert len(gcs_entry.entries) == 1
            error_data = ray.gcs_utils.ErrorTableData.FromString(
                gcs_entry.entries[0])
            job_id = error_data.job_id
            # Only surface errors for this driver's job, or job-agnostic
            # errors published under the nil job ID.
            if job_id not in [
                    worker.current_job_id.binary(),
                    JobID.nil().binary()
            ]:
                continue
            error_message = error_data.error_message
            if (error_data.type == ray_constants.TASK_PUSH_ERROR):
                # Delay it a bit to see if we can suppress it
                task_error_queue.put((error_message, time.time()))
            else:
                logger.warning(error_message)
    except (OSError, redis.exceptions.ConnectionError) as e:
        logger.error("listen_error_messages_raylet: {}".format(e))
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        worker.error_message_pubsub_client.close()
def is_initialized():
    """Check if ray.init has been called yet.
    Returns:
        True if ray.init has already been called and false otherwise.
    """
    # The singleton worker's connected flag tracks init()/shutdown() state.
    worker = ray.worker.global_worker
    return worker.connected
def connect(node,
            mode=WORKER_MODE,
            log_to_driver=False,
            worker=global_worker,
            driver_object_store_memory=None,
            job_id=None,
            internal_config=None):
    """Connect this worker to the raylet, to Plasma, and to Redis.
    Args:
        node (ray.node.Node): The node to connect.
        mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
            LOCAL_MODE.
        log_to_driver (bool): If true, then output from all of the worker
            processes on all nodes will be directed to the driver.
        worker: The ray.Worker instance.
        driver_object_store_memory: Limit the amount of memory the driver can
            use in the object store when creating objects.
        job_id: The ID of job. If it's None, then we will generate one.
        internal_config: Dictionary of (str,str) containing internal config
            options to override the defaults.
    """
    # Do some basic checking to make sure we didn't call ray.init twice.
    error_message = "Perhaps you called ray.init twice by accident?"
    assert not worker.connected, error_message
    assert worker.cached_functions_to_run is not None, error_message
    # Enable nice stack traces on SIGSEGV etc.
    try:
        if not faulthandler.is_enabled():
            faulthandler.enable(all_threads=False)
    except io.UnsupportedOperation:
        pass  # ignore
    ray._raylet.set_internal_config(internal_config)
    if mode is not LOCAL_MODE:
        # Create a Redis client to primary.
        # The Redis client can safely be shared between threads. However,
        # that is not true of Redis pubsub clients. See the documentation at
        # https://github.com/andymccurdy/redis-py#thread-safety.
        worker.redis_client = node.create_redis_client()
    # Initialize some fields.
    if mode is WORKER_MODE:
        # We should not specify the job_id if it's `WORKER_MODE`.
        assert job_id is None
        job_id = JobID.nil()
        # TODO(qwang): Rename this to `worker_id_str` or type to `WorkerID`
        worker.worker_id = _random_string()
        if setproctitle:
            setproctitle.setproctitle("ray::IDLE")
    elif mode is LOCAL_MODE:
        if job_id is None:
            job_id = JobID.from_int(random.randint(1, 65535))
        worker.worker_id = ray.utils.compute_driver_id_from_job(
            job_id).binary()
    else:
        # This is the code path of driver mode.
        if job_id is None:
            # TODO(qwang): use `GcsClient::GenerateJobId()` here.
            job_id = JobID.from_int(
                int(worker.redis_client.incr("JobCounter")))
        # When tasks are executed on remote workers in the context of multiple
        # drivers, the current job ID is used to keep track of which job is
        # responsible for the task so that error messages will be propagated to
        # the correct driver.
        worker.worker_id = ray.utils.compute_driver_id_from_job(
            job_id).binary()
    if not isinstance(job_id, JobID):
        raise TypeError("The type of given job id must be JobID.")
    # All workers start out as non-actors. A worker can be turned into an actor
    # after it is created.
    worker.node = node
    worker.set_mode(mode)
    # If running Ray in LOCAL_MODE, there is no need to create call
    # create_worker or to start the worker service.
    if mode == LOCAL_MODE:
        worker.local_mode_manager = LocalModeManager()
        return
    # For driver's check that the version information matches the version
    # information that the Ray cluster was started with.
    try:
        ray.services.check_version_info(worker.redis_client)
    except Exception as e:
        if mode == SCRIPT_MODE:
            raise e
        elif mode == WORKER_MODE:
            traceback_str = traceback.format_exc()
            ray.utils.push_error_to_driver_through_redis(
                worker.redis_client,
                ray_constants.VERSION_MISMATCH_PUSH_ERROR,
                traceback_str,
                job_id=None)
    # NOTE(review): presumably guards worker state shared with the background
    # threads started below — confirm at the lock's use sites.
    worker.lock = threading.RLock()
    # Create an object for interfacing with the global state.
    ray.state.state._initialize_global_state(
        node.redis_address, redis_password=node.redis_password)
    # Register the worker with Redis.
    if mode == SCRIPT_MODE:
        # The concept of a driver is the same as the concept of a "job".
        # Register the driver/job with Redis here.
        import __main__ as main
        driver_info = {
            "node_ip_address": node.node_ip_address,
            "driver_id": worker.worker_id,
            "start_time": time.time(),
            "plasma_store_socket": node.plasma_store_socket_name,
            "raylet_socket": node.raylet_socket_name,
            "name": (main.__file__
                     if hasattr(main, "__file__") else "INTERACTIVE MODE")
        }
        worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info)
    elif mode == WORKER_MODE:
        # Register the worker with Redis.
        worker_dict = {
            "node_ip_address": node.node_ip_address,
            "plasma_store_socket": node.plasma_store_socket_name,
        }
        # Check the RedirectOutput key in Redis and based on its value redirect
        # worker output and error to their own files.
        # This key is set in services.py when Redis is started.
        redirect_worker_output_val = worker.redis_client.get("RedirectOutput")
        if (redirect_worker_output_val is not None
                and int(redirect_worker_output_val) == 1):
            log_stdout_file, log_stderr_file = (
                node.new_worker_redirected_log_file(worker.worker_id))
            # Redirect stdout/stderr at the file descriptor level. If we simply
            # set sys.stdout and sys.stderr, then logging from C++ can fail to
            # be redirected.
            os.dup2(log_stdout_file.fileno(), sys.stdout.fileno())
            os.dup2(log_stderr_file.fileno(), sys.stderr.fileno())
            # We also manually set sys.stdout and sys.stderr because that seems
            # to have an affect on the output buffering. Without doing this,
            # stdout and stderr are heavily buffered resulting in seemingly
            # lost logging statements.
            sys.stdout = log_stdout_file
            sys.stderr = log_stderr_file
            # This should always be the first message to appear in the worker's
            # stdout and stderr log files. The string "Ray worker pid:" is
            # parsed in the log monitor process.
            print("Ray worker pid: {}".format(os.getpid()))
            print("Ray worker pid: {}".format(os.getpid()), file=sys.stderr)
            sys.stdout.flush()
            sys.stderr.flush()
            worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name)
            worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name)
        worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict)
    else:
        raise ValueError("Invalid worker mode. Expected DRIVER or WORKER.")
    # Build the GCS client options from the primary Redis address and start
    # the core worker, which owns the plasma/raylet connections.
    redis_address, redis_port = node.redis_address.split(":")
    gcs_options = ray._raylet.GcsClientOptions(
        redis_address,
        int(redis_port),
        node.redis_password,
    )
    worker.core_worker = ray._raylet.CoreWorker(
        (mode == SCRIPT_MODE),
        node.plasma_store_socket_name,
        node.raylet_socket_name,
        job_id,
        gcs_options,
        node.get_logs_dir_path(),
        node.node_ip_address,
        node.node_manager_port,
    )
    worker.raylet_client = ray._raylet.RayletClient(worker.core_worker)
    if driver_object_store_memory is not None:
        worker.core_worker.set_object_store_client_options(
            "ray_driver_{}".format(os.getpid()), driver_object_store_memory)
    # Put something in the plasma store so that subsequent plasma store
    # accesses will be faster. Currently the first access is always slow, and
    # we don't want the user to experience this.
    temporary_object_id = ray.ObjectID.from_random()
    worker.put_object(1, object_id=temporary_object_id)
    ray.internal.free([temporary_object_id])
    # Start the import thread
    worker.import_thread = import_thread.ImportThread(worker, mode,
                                                      worker.threads_stopped)
    worker.import_thread.start()
    # If this is a driver running in SCRIPT_MODE, start a thread to print error
    # messages asynchronously in the background. Ideally the scheduler would
    # push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shutdown the driver's worker service, so we are
    # temporarily using this implementation which constantly queries the
    # scheduler for new error messages.
    if mode == SCRIPT_MODE:
        q = queue.Queue()
        worker.listener_thread = threading.Thread(
            target=listen_error_messages_raylet,
            name="ray_listen_error_messages",
            args=(worker, q, worker.threads_stopped))
        worker.printer_thread = threading.Thread(
            target=print_error_messages_raylet,
            name="ray_print_error_messages",
            args=(q, worker.threads_stopped))
        worker.listener_thread.daemon = True
        worker.listener_thread.start()
        worker.printer_thread.daemon = True
        worker.printer_thread.start()
        if log_to_driver:
            worker.logger_thread = threading.Thread(
                target=print_logs,
                name="ray_print_logs",
                args=(worker.redis_client, worker.threads_stopped))
            worker.logger_thread.daemon = True
            worker.logger_thread.start()
    if mode == SCRIPT_MODE:
        # Add the directory containing the script that is running to the Python
        # paths of the workers. Also add the current directory. Note that this
        # assumes that the directory structures on the machines in the clusters
        # are the same.
        script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
        current_directory = os.path.abspath(os.path.curdir)
        worker.run_function_on_all_workers(
            lambda worker_info: sys.path.insert(1, script_directory))
        worker.run_function_on_all_workers(
            lambda worker_info: sys.path.insert(1, current_directory))
        # TODO(rkn): Here we first export functions to run, then remote
        # functions. The order matters. For example, one of the functions to
        # run may set the Python path, which is needed to import a module used
        # to define a remote function. We may want to change the order to
        # simply be the order in which the exports were defined on the driver.
        # In addition, we will need to retain the ability to decide what the
        # first few exports are (mostly to set the Python path). Additionally,
        # note that the first exports to be defined on the driver will be the
        # ones defined in separate modules that are imported by the driver.
        # Export cached functions_to_run.
        for function in worker.cached_functions_to_run:
            worker.run_function_on_all_workers(function)
    # Clear the cache so a later connect() knows the exports were flushed.
    worker.cached_functions_to_run = None
def disconnect(exiting_interpreter=False):
    """Disconnect this worker from the raylet and object store."""
    # Reset the list of cached remote functions and actors so that if more
    # remote functions or actors are defined and then connect is called again,
    # the remote functions will be exported. This is mostly relevant for the
    # tests.
    worker = global_worker
    if worker.connected:
        # Shutdown all of the threads that we've started. TODO(rkn): This
        # should be handled cleanly in the worker object's destructor and not
        # in this disconnect method.
        worker.threads_stopped.set()
        if hasattr(worker, "import_thread"):
            worker.import_thread.join_import_thread()
        if hasattr(worker, "listener_thread"):
            worker.listener_thread.join()
        if hasattr(worker, "printer_thread"):
            worker.printer_thread.join()
        if hasattr(worker, "logger_thread"):
            worker.logger_thread.join()
        # Clear the stop flag so the worker can be connected again later.
        worker.threads_stopped.clear()
        worker._session_index += 1
    worker.node = None  # Disconnect the worker from the node.
    worker.cached_functions_to_run = []
    worker.serialization_context_map.clear()
@contextmanager
def _changeproctitle(title, next_title):
    """Temporarily set the process title to ``title``.

    On exit — whether the managed block completes normally or raises — the
    title is switched to ``next_title``. This is a no-op when the optional
    ``setproctitle`` package is not installed.

    Args:
        title (str): Process title to display while the block runs.
        next_title (str): Process title to install when the block exits.
    """
    if setproctitle:
        setproctitle.setproctitle(title)
    try:
        yield
    finally:
        # Restore the follow-up title even if the managed block raises, so a
        # failing task does not leave the worker permanently mislabeled.
        if setproctitle:
            setproctitle.setproctitle(next_title)
def register_custom_serializer(cls,
                               serializer=None,
                               deserializer=None,
                               use_pickle=False,
                               use_dict=False,
                               local=None,
                               job_id=None,
                               class_id=None):
    """Registers custom functions for efficient object serialization.

    The serializer and deserializer are used when transferring objects of
    `cls` across processes and nodes. This can be significantly faster than
    the Ray default fallbacks. Wraps `register_custom_serializer` underneath.
    `use_pickle` tells Ray to automatically use cloudpickle for serialization,
    and `use_dict` automatically uses `cls.__dict__`.

    Exactly one of the following may be supplied:
    1. serializer and deserializer
    2. `use_pickle`
    3. `use_dict`

    Args:
        cls (type): The class that ray should use this custom serializer for.
        serializer: The custom serializer that takes in a cls instance and
            outputs a serialized representation. use_pickle and use_dict
            must be False if provided.
        deserializer: The custom deserializer that takes in a serialized
            representation of the cls and outputs a cls instance. use_pickle
            and use_dict must be False if provided.
        use_pickle (bool): If true, objects of this class will be
            serialized using pickle. Must be False if use_dict is true.
        use_dict (bool): If true, objects of this class be serialized turning
            their __dict__ fields into a dictionary. Must be False if
            use_pickle is true.
        local: Deprecated.
        job_id: Deprecated.
        class_id (str): Unique ID of the class. Autogenerated if None.

    Raises:
        DeprecationWarning: If the removed `job_id` or `local` arguments are
            supplied.
    """
    # Reject the removed keyword arguments up front.
    if job_id:
        raise DeprecationWarning(
            "`job_id` is no longer a valid parameter and will be removed in "
            "future versions of Ray. If this breaks your application, "
            "see `SerializationContext.register_custom_serializer`.")
    if local:
        raise DeprecationWarning(
            "`local` is no longer a valid parameter and will be removed in "
            "future versions of Ray. If this breaks your application, "
            "see `SerializationContext.register_custom_serializer`.")
    # Delegate the actual registration to the worker's serialization context.
    serialization_context = global_worker.get_serialization_context()
    serialization_context.register_custom_serializer(
        cls,
        use_pickle=use_pickle,
        use_dict=use_dict,
        serializer=serializer,
        deserializer=deserializer,
        class_id=class_id)
def show_in_webui(message):
    """Publish a status string for this task or actor to the Ray dashboard.

    Useful for surfacing the progress of a long-running computation.

    Args:
        message (str): Text to show in the dashboard.
    """
    current_worker = global_worker
    current_worker.check_connected()
    encoded = message.encode()
    current_worker.core_worker.set_webui_display(encoded)
def get(object_ids, timeout=None):
    """Get a remote object or a list of remote objects from the object store.

    This method blocks until the object corresponding to the object ID is
    available in the local object store. If this object is not in the local
    object store, it will be shipped from an object store that has it (once the
    object has been created). If object_ids is a list, then the objects
    corresponding to each object in the list will be returned.

    Args:
        object_ids: Object ID of the object to get or a list of object IDs to
            get.
        timeout (Optional[float]): The maximum amount of time in seconds to
            wait before returning.

    Returns:
        A Python object or a list of Python objects.

    Raises:
        RayTimeoutError: A RayTimeoutError is raised if a timeout is set and
            the get takes longer than timeout to return.
        Exception: An exception is raised if the task that created the object
            or that created one of the objects raised an exception.
    """
    worker = global_worker
    worker.check_connected()
    # Blocking inside an async actor would stall its event loop, so it is
    # rejected outright.
    if hasattr(
            worker,
            "core_worker") and worker.core_worker.current_actor_is_asyncio():
        raise RayError("Using blocking ray.get inside async actor. "
                       "This blocks the event loop. Please "
                       "use `await` on object id with asyncio.gather.")
    with profiling.profile("ray.get"):
        # Normalize a single ObjectID into a one-element list; remember the
        # original form so the result can be unwrapped before returning.
        is_individual_id = isinstance(object_ids, ray.ObjectID)
        if is_individual_id:
            object_ids = [object_ids]
        if not isinstance(object_ids, list):
            raise ValueError("'object_ids' must either be an object ID "
                             "or a list of object IDs.")
        global last_task_error_raise_time
        # TODO(ujvl): Consider how to allow user to retrieve the ready objects.
        values = worker.get_objects(object_ids, timeout=timeout)
        for i, value in enumerate(values):
            # A stored RayError means the producing task failed; the first
            # such failure is re-raised here and the raise time is recorded.
            if isinstance(value, RayError):
                last_task_error_raise_time = time.time()
                if isinstance(value, ray.exceptions.UnreconstructableError):
                    worker.core_worker.dump_object_store_memory_usage()
                if isinstance(value, RayTaskError):
                    # Surface the underlying application exception type.
                    raise value.as_instanceof_cause()
                else:
                    raise value
        # Run post processors.
        for post_processor in worker._post_get_hooks:
            values = post_processor(object_ids, values)
        if is_individual_id:
            values = values[0]
        return values
def put(value, weakref=False):
    """Store an object in the object store and return its ID.

    Unless ``weakref`` is set, the stored object is pinned and will not be
    evicted while a reference to the returned ID exists. The pinning applies
    only to the ID returned here, not to object IDs in general.

    Args:
        value: The Python object to be stored.
        weakref: If set, allows the object to be evicted while a reference
            to the returned ID exists. Useful when putting many objects that
            may not be needed later.

    Returns:
        The object ID assigned to this value.
    """
    worker = global_worker
    worker.check_connected()
    with profiling.profile("ray.put"):
        # Local mode stores the value in-process instead of the object store.
        if worker.mode == LOCAL_MODE:
            return worker.local_mode_manager.put_object(value)
        try:
            return worker.put_object(value, pin_object=not weakref)
        except ObjectStoreFullError:
            logger.info(
                "Put failed since the value was either too large or the "
                "store was full of pinned objects. If you are putting "
                "and holding references to a lot of object ids, consider "
                "ray.put(value, weakref=True) to allow object data to "
                "be evicted early.")
            raise
def wait(object_ids, num_returns=1, timeout=None):
    """Return a list of IDs that are ready and a list of IDs that are not.

    .. warning::

        The **timeout** argument used to be in **milliseconds** (up through
        ``ray==0.6.1``) and now it is in **seconds**.

    If timeout is set, the function returns either when the requested number of
    IDs are ready or when the timeout is reached, whichever occurs first. If it
    is not set, the function simply waits until that number of objects is ready
    and returns that exact number of object IDs.

    This method returns two lists. The first list consists of object IDs that
    correspond to objects that are available in the object store. The second
    list corresponds to the rest of the object IDs (which may or may not be
    ready).

    Ordering of the input list of object IDs is preserved. That is, if A
    precedes B in the input list, and both are in the ready list, then A will
    precede B in the ready list. This also holds true if A and B are both in
    the remaining list.

    Args:
        object_ids (List[ObjectID]): List of object IDs for objects that may or
            may not be ready. Note that these IDs must be unique.
        num_returns (int): The number of object IDs that should be returned.
        timeout (float): The maximum amount of time in seconds to wait before
            returning.

    Returns:
        A list of object IDs that are ready and a list of the remaining object
        IDs.

    Raises:
        TypeError: If ``object_ids`` is not a list of ObjectIDs.
        ValueError: If ``timeout`` is negative, the IDs are not unique, or
            ``num_returns`` is out of range.
    """
    worker = global_worker

    # Blocking in an async actor would stall its event loop.
    if hasattr(
            worker,
            "core_worker") and worker.core_worker.current_actor_is_asyncio():
        raise RayError("Using blocking ray.wait inside async method. "
                       "This blocks the event loop. Please use `await` "
                       "on object id with asyncio.wait. ")
    if isinstance(object_ids, ObjectID):
        raise TypeError(
            "wait() expected a list of ray.ObjectID, got a single ray.ObjectID"
        )
    if not isinstance(object_ids, list):
        raise TypeError(
            "wait() expected a list of ray.ObjectID, got {}".format(
                type(object_ids)))
    # Heuristic nudge for callers still passing millisecond timeouts.
    if isinstance(timeout, int) and timeout != 0:
        logger.warning("The 'timeout' argument now requires seconds instead "
                       "of milliseconds. This message can be suppressed by "
                       "passing in a float.")
    if timeout is not None and timeout < 0:
        raise ValueError("The 'timeout' argument must be nonnegative. "
                         "Received {}".format(timeout))
    for object_id in object_ids:
        if not isinstance(object_id, ObjectID):
            raise TypeError("wait() expected a list of ray.ObjectID, "
                            "got list containing {}".format(type(object_id)))

    worker.check_connected()
    # TODO(swang): Check main thread.
    with profiling.profile("ray.wait"):
        # When Ray is run in LOCAL_MODE, all functions are run immediately,
        # so all objects in object_id are ready.
        if worker.mode == LOCAL_MODE:
            return object_ids[:num_returns], object_ids[num_returns:]

        # TODO(rkn): This is a temporary workaround for
        # https://github.com/ray-project/ray/issues/997. However, it should be
        # fixed in Arrow instead of here.
        if len(object_ids) == 0:
            return [], []

        # Argument errors are raised as ValueError (not bare Exception) so
        # callers can catch them specifically; ValueError subclasses
        # Exception, so existing broad handlers keep working.
        if len(object_ids) != len(set(object_ids)):
            raise ValueError("Wait requires a list of unique object IDs.")
        if num_returns <= 0:
            raise ValueError(
                "Invalid number of objects to return %d." % num_returns)
        if num_returns > len(object_ids):
            raise ValueError("num_returns cannot be greater than the number "
                             "of objects provided to ray.wait.")

        # "No timeout" becomes an effectively-infinite wait; the core worker
        # API takes milliseconds.
        timeout = timeout if timeout is not None else 10**6
        timeout_milliseconds = int(timeout * 1000)
        ready_ids, remaining_ids = worker.core_worker.wait(
            object_ids,
            num_returns,
            timeout_milliseconds,
            worker.current_task_id,
        )
        return ready_ids, remaining_ids
def _mode(worker=global_worker):
    """Thin indirection over ``worker.mode``.

    The remote decorator calls ``_mode()`` rather than reading
    ``worker.mode`` directly so that serializing a remote function never
    tries to serialize the worker object, which cannot be serialized.
    """
    return worker.mode
def get_global_worker():
    """Return the process-wide ``global_worker`` singleton."""
    return global_worker
def make_decorator(num_return_vals=None,
                   num_cpus=None,
                   num_gpus=None,
                   memory=None,
                   object_store_memory=None,
                   resources=None,
                   max_calls=None,
                   max_retries=None,
                   max_reconstructions=None,
                   worker=None):
    """Build the decorator that ``@ray.remote`` applies to a function/class.

    The returned decorator turns a plain function into a RemoteFunction and
    a class into an actor class, validating that option keywords match the
    kind of target being decorated.
    """

    def decorator(target):
        if inspect.isfunction(target) or is_cython(target):
            # Actor-only options are rejected for plain functions.
            if max_reconstructions is not None:
                raise Exception("The keyword 'max_reconstructions' is not "
                                "allowed for remote functions.")
            return ray.remote_function.RemoteFunction(
                target, num_cpus, num_gpus, memory, object_store_memory,
                resources, num_return_vals, max_calls, max_retries)

        if inspect.isclass(target):
            # Function-only options are rejected for actor classes.
            if num_return_vals is not None:
                raise Exception("The keyword 'num_return_vals' is not allowed "
                                "for actors.")
            if max_calls is not None:
                raise Exception("The keyword 'max_calls' is not allowed for "
                                "actors.")
            return worker.make_actor(target, num_cpus, num_gpus, memory,
                                     object_store_memory, resources,
                                     max_reconstructions)

        raise Exception("The @ray.remote decorator must be applied to "
                        "either a function or to a class.")

    return decorator
def remote(*args, **kwargs):
    """Define a remote function or an actor class.

    This can be used with no arguments to define a remote function or actor as
    follows:

    .. code-block:: python

        @ray.remote
        def f():
            return 1

        @ray.remote
        class Foo:
            def method(self):
                return 1

    It can also be used with specific keyword arguments:

    * **num_return_vals:** This is only for *remote functions*. It specifies
      the number of object IDs returned by the remote function invocation.
    * **num_cpus:** The quantity of CPU cores to reserve for this task or for
      the lifetime of the actor.
    * **num_gpus:** The quantity of GPUs to reserve for this task or for the
      lifetime of the actor.
    * **resources:** The quantity of various custom resources to reserve for
      this task or for the lifetime of the actor. This is a dictionary mapping
      strings (resource names) to numbers.
    * **max_calls:** Only for *remote functions*. This specifies the maximum
      number of times that a given worker can execute the given remote function
      before it must exit (this can be used to address memory leaks in
      third-party libraries or to reclaim resources that cannot easily be
      released, e.g., GPU memory that was acquired by TensorFlow). By
      default this is infinite.
    * **max_reconstructions**: Only for *actors*. This specifies the maximum
      number of times that the actor should be reconstructed when it dies
      unexpectedly. The minimum valid value is 0 (default), which indicates
      that the actor doesn't need to be reconstructed. And the maximum valid
      value is ray.ray_constants.INFINITE_RECONSTRUCTION.
    * **max_retries**: Only for *remote functions*. This specifies the maximum
      number of times that the remote function should be rerun when the worker
      process executing it crashes unexpectedly. The minimum valid value is 0,
      the default is 4 (default), and the maximum valid value is
      ray.ray_constants.INFINITE_RECONSTRUCTION.

    This can be done as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_return_vals=2)
        def f():
            return 1, 2

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1

    Remote task and actor objects returned by @ray.remote can also be
    dynamically modified with the same arguments as above using
    ``.options()`` as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_return_vals=2)
        def f():
            return 1, 2
        g = f.options(num_gpus=2, max_calls=None)

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1
        Bar = Foo.options(num_cpus=1, resources=None)

    Running remote actors will be terminated when the actor handle to them
    in Python is deleted, which will cause them to complete any outstanding
    work and then shut down. If you want to kill them immediately, you can
    also call ``actor_handle.__ray_kill__()``.
    """
    worker = get_global_worker()

    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_decorator(worker=worker)(args[0])

    # Parse the keyword arguments from the decorator. The error message now
    # also mentions 'max_retries', which has always been accepted below.
    error_string = ("The @ray.remote decorator must be applied either "
                    "with no arguments and no parentheses, for example "
                    "'@ray.remote', or it must be applied using some of "
                    "the arguments 'num_return_vals', 'num_cpus', 'num_gpus', "
                    "'memory', 'object_store_memory', 'resources', "
                    "'max_calls', 'max_retries', or 'max_reconstructions', "
                    "like '@ray.remote(num_return_vals=2, "
                    "resources={\"CustomResource\": 1})'.")
    assert len(args) == 0 and len(kwargs) > 0, error_string
    for key in kwargs:
        assert key in [
            "num_return_vals",
            "num_cpus",
            "num_gpus",
            "memory",
            "object_store_memory",
            "resources",
            "max_calls",
            "max_reconstructions",
            "max_retries",
        ], error_string

    # Absent keys uniformly default to None.
    num_cpus = kwargs.get("num_cpus")
    num_gpus = kwargs.get("num_gpus")
    resources = kwargs.get("resources")
    if not isinstance(resources, dict) and resources is not None:
        # TypeError (a subclass of Exception) is the accurate category here.
        raise TypeError("The 'resources' keyword argument must be a "
                        "dictionary, but received type {}.".format(
                            type(resources)))
    if resources is not None:
        assert "CPU" not in resources, "Use the 'num_cpus' argument."
        assert "GPU" not in resources, "Use the 'num_gpus' argument."

    # Handle other arguments.
    num_return_vals = kwargs.get("num_return_vals")
    max_calls = kwargs.get("max_calls")
    max_reconstructions = kwargs.get("max_reconstructions")
    memory = kwargs.get("memory")
    object_store_memory = kwargs.get("object_store_memory")
    max_retries = kwargs.get("max_retries")

    return make_decorator(
        num_return_vals=num_return_vals,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        memory=memory,
        object_store_memory=object_store_memory,
        resources=resources,
        max_calls=max_calls,
        max_reconstructions=max_reconstructions,
        max_retries=max_retries,
        worker=worker)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/workers/default_worker.py
|
Python
|
import argparse
import json
import ray
import ray.actor
import ray.node
import ray.ray_constants as ray_constants
import ray.utils
from ray.parameter import RayParams
# Command-line interface for the default worker process; the raylet starts
# this script and passes the addresses/sockets the worker should attach to.
parser = argparse.ArgumentParser(
    description=("Parse addresses for the worker "
                 "to connect to."))
parser.add_argument(
    "--node-ip-address",
    required=True,
    type=str,
    help="the ip address of the worker's node")
parser.add_argument(
    "--node-manager-port",
    required=True,
    type=int,
    help="the port of the worker's node")
parser.add_argument(
    "--redis-address",
    required=True,
    type=str,
    help="the address to use for Redis")
parser.add_argument(
    "--redis-password",
    required=False,
    type=str,
    default=None,
    help="the password to use for Redis")
parser.add_argument(
    "--object-store-name",
    required=True,
    type=str,
    help="the object store's name")
parser.add_argument(
    "--raylet-name", required=False, type=str, help="the raylet's name")
parser.add_argument(
    "--logging-level",
    required=False,
    type=str,
    default=ray_constants.LOGGER_LEVEL,
    choices=ray_constants.LOGGER_LEVEL_CHOICES,
    help=ray_constants.LOGGER_LEVEL_HELP)
parser.add_argument(
    "--logging-format",
    required=False,
    type=str,
    default=ray_constants.LOGGER_FORMAT,
    help=ray_constants.LOGGER_FORMAT_HELP)
parser.add_argument(
    "--config-list",
    required=False,
    type=str,
    default=None,
    help="Override internal config options for the worker process.")
parser.add_argument(
    "--temp-dir",
    required=False,
    type=str,
    default=None,
    help="Specify the path of the temporary directory use by Ray process.")
parser.add_argument(
    "--load-code-from-local",
    default=False,
    action="store_true",
    help="True if code is loaded from local files, as opposed to the GCS.")
parser.add_argument(
    "--use-pickle",
    default=False,
    action="store_true",
    help="True if cloudpickle should be used for serialization.")
if __name__ == "__main__":
    args = parser.parse_args()
    ray.utils.setup_logger(args.logging_level, args.logging_format)

    # --config-list is a flat comma-separated sequence of alternating
    # key,value entries; fold it into a dict.
    # NOTE(review): an odd number of entries would raise IndexError below --
    # presumably the launcher always passes complete pairs; confirm.
    internal_config = {}
    if args.config_list is not None:
        config_list = args.config_list.split(",")
        if len(config_list) > 1:
            i = 0
            while i < len(config_list):
                internal_config[config_list[i]] = config_list[i + 1]
                i += 2

    # Bundle everything the worker needs to attach to an existing cluster.
    ray_params = RayParams(
        node_ip_address=args.node_ip_address,
        node_manager_port=args.node_manager_port,
        redis_address=args.redis_address,
        redis_password=args.redis_password,
        plasma_store_socket_name=args.object_store_name,
        raylet_socket_name=args.raylet_name,
        temp_dir=args.temp_dir,
        load_code_from_local=args.load_code_from_local,
        use_pickle=args.use_pickle,
        _internal_config=json.dumps(internal_config),
    )

    # connect_only: attach to an already-running node rather than starting
    # new Ray processes.
    node = ray.node.Node(
        ray_params,
        head=False,
        shutdown_at_exit=False,
        spawn_reaper=False,
        connect_only=True)
    ray.worker._global_node = node
    ray.worker.connect(
        node, mode=ray.WORKER_MODE, internal_config=internal_config)

    # Block forever processing tasks assigned by the raylet.
    ray.worker.global_worker.main_loop()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/setup.py
|
Python
|
from itertools import chain
import os
import re
import shutil
import subprocess
import sys
from setuptools import setup, find_packages, Distribution
import setuptools.command.build_ext as _build_ext
# Ideally, we could include these files by putting them in a
# MANIFEST.in or using the package_data argument to setup, but the
# MANIFEST.in gets applied at the very beginning when setup.py runs
# before these files have been created, so we have to move the files
# manually.

# NOTE: The lists below must be kept in sync with ray/BUILD.bazel.

# Compiled binaries and shared libraries produced by the Bazel build that
# must ship inside the wheel.
ray_files = [
    "ray/core/src/ray/thirdparty/redis/src/redis-server",
    "ray/core/src/ray/gcs/redis_module/libray_redis_module.so",
    "ray/core/src/plasma/plasma_store_server",
    "ray/_raylet.so",
    "ray/core/src/ray/raylet/raylet_monitor",
    "ray/core/src/ray/raylet/raylet",
    "ray/dashboard/dashboard.py",
    "ray/streaming/_streaming.so",
]

build_java = os.getenv("RAY_INSTALL_JAVA") == "1"
if build_java:
    ray_files.append("ray/jars/ray_dist.jar")

# These are the directories where automatically generated Python protobuf
# bindings are created.
generated_python_directories = [
    "ray/core/generated",
    "ray/streaming/generated",
]

# Files that are copied on a best-effort basis (a failed copy is not fatal).
optional_ray_files = []

ray_autoscaler_files = [
    "ray/autoscaler/aws/example-full.yaml",
    "ray/autoscaler/gcp/example-full.yaml",
    "ray/autoscaler/local/example-full.yaml",
    "ray/autoscaler/kubernetes/example-full.yaml",
    "ray/autoscaler/kubernetes/kubectl-rsync.sh",
]

ray_project_files = [
    "ray/projects/schema.json", "ray/projects/templates/cluster_template.yaml",
    "ray/projects/templates/project_template.yaml",
    "ray/projects/templates/requirements.txt"
]

ray_dashboard_files = [
    os.path.join(dirpath, filename)
    for dirpath, dirnames, filenames in os.walk("ray/dashboard/client/build")
    for filename in filenames
]

optional_ray_files += ray_autoscaler_files
optional_ray_files += ray_project_files
optional_ray_files += ray_dashboard_files

if "RAY_USE_NEW_GCS" in os.environ and os.environ["RAY_USE_NEW_GCS"] == "on":
    ray_files += [
        "ray/core/src/credis/build/src/libmember.so",
        "ray/core/src/credis/build/src/libmaster.so",
        "ray/core/src/credis/redis/src/redis-server"
    ]

# Optional dependency sets installable via `pip install ray[<name>]`.
extras = {
    "debug": ["psutil", "setproctitle", "py-spy >= 0.2.0"],
    "dashboard": ["aiohttp", "google", "grpcio", "psutil", "setproctitle"],
    "serve": ["uvicorn", "pygments", "werkzeug", "flask", "pandas", "blist"],
    "tune": ["tabulate", "tensorboardX"],
}

extras["rllib"] = extras["tune"] + [
    "pyyaml",
    "gym[atari]",
    "opencv-python-headless",
    "lz4",
    "scipy",
]

# "all" is the deduplicated union of every extras set.
extras["all"] = list(set(chain.from_iterable(extras.values())))
class build_ext(_build_ext.build_ext):
    """Custom build step: runs the Bazel build via build.sh, then copies the
    produced binaries, protobuf bindings, and bundled pyarrow/pickle5 files
    into the build tree."""

    def run(self):
        # Note: We are passing in sys.executable so that we use the same
        # version of Python to build pyarrow inside the build.sh script. Note
        # that certain flags will not be passed along such as --user or sudo.
        # TODO(rkn): Fix this.
        command = ["../build.sh", "-p", sys.executable]
        if build_java:
            # Also build binaries for Java if the above env variable exists.
            command += ["-l", "python,java"]
        subprocess.check_call(command)

        # We also need to install pyarrow along with Ray, so make sure that the
        # relevant non-Python pyarrow files get copied.
        pyarrow_files = []
        for (root, dirs, filenames) in os.walk("./ray/pyarrow_files/pyarrow"):
            for name in filenames:
                pyarrow_files.append(os.path.join(root, name))

        # We also need to install pickle5 along with Ray, so make sure that the
        # relevant non-Python pickle5 files get copied.
        pickle5_files = []
        for (root, dirs, filenames) in os.walk("./ray/pickle5_files/pickle5"):
            for name in filenames:
                pickle5_files.append(os.path.join(root, name))

        files_to_include = ray_files + pyarrow_files + pickle5_files

        # Copy over the autogenerated protobuf Python bindings.
        for directory in generated_python_directories:
            for filename in os.listdir(directory):
                if filename[-3:] == ".py":
                    files_to_include.append(os.path.join(directory, filename))

        for filename in files_to_include:
            self.move_file(filename)

        # Try to copy over the optional files.
        for filename in optional_ray_files:
            try:
                self.move_file(filename)
            except Exception:
                print("Failed to copy optional file {}. This is ok."
                      .format(filename))

    def move_file(self, filename):
        """Copy *filename* into the build_lib tree, creating directories as
        needed; existing destinations are left untouched."""
        # TODO(rkn): This feels very brittle. It may not handle all cases. See
        # https://github.com/apache/arrow/blob/master/python/setup.py for an
        # example.
        source = filename
        destination = os.path.join(self.build_lib, filename)
        # Create the target directory if it doesn't already exist.
        parent_directory = os.path.dirname(destination)
        if not os.path.exists(parent_directory):
            os.makedirs(parent_directory)
        if not os.path.exists(destination):
            print("Copying {} to {}.".format(source, destination))
            shutil.copy(source, destination, follow_symlinks=True)
class BinaryDistribution(Distribution):
    """Distribution that always reports extension modules, forcing
    setuptools/wheel to build a platform-specific (non-pure) wheel even
    though no ext_modules are declared in setup()."""

    def has_ext_modules(self):
        return True
def find_version(*filepath):
    """Extract the ``__version__`` string from the given file.

    Args:
        *filepath: Path components, joined relative to this script's
            directory.

    Returns:
        The version string.

    Raises:
        RuntimeError: If no ``__version__ = "..."`` assignment is found.
    """
    here = os.path.abspath(os.path.dirname(__file__))
    with open(os.path.join(here, *filepath)) as fp:
        contents = fp.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Core runtime dependencies installed with the ray package
# (install_requires below).
requires = [
    "numpy >= 1.16",
    "filelock",
    "jsonschema",
    "funcsigs",
    "click",
    "colorama",
    "packaging",
    "pytest",
    "pyyaml",
    "redis>=3.3.2",
    # NOTE: Don't upgrade the version of six! Doing so causes installation
    # problems. See https://github.com/ray-project/ray/issues/4169.
    "six >= 1.0.0",
    # Backport of the faulthandler module for very old interpreters.
    "faulthandler;python_version<'3.3'",
    "protobuf >= 3.8.0",
    "cloudpickle",
]
# Package metadata and entry points for the `ray`, `rllib`, and `tune`
# console scripts.
setup(
    name="ray",
    version=find_version("ray", "__init__.py"),
    author="Ray Team",
    author_email="ray-dev@googlegroups.com",
    description=("A system for parallel and distributed Python that unifies "
                 "the ML ecosystem."),
    long_description=open("../README.rst").read(),
    url="https://github.com/ray-project/ray",
    keywords=("ray distributed parallel machine-learning "
              "reinforcement-learning deep-learning python"),
    packages=find_packages(),
    cmdclass={"build_ext": build_ext},
    # The BinaryDistribution argument triggers build_ext.
    distclass=BinaryDistribution,
    install_requires=requires,
    setup_requires=["cython >= 0.29"],
    extras_require=extras,
    entry_points={
        "console_scripts": [
            "ray=ray.scripts.scripts:main",
            "rllib=ray.rllib.scripts:cli [rllib]", "tune=ray.tune.scripts:cli"
        ]
    },
    include_package_data=True,
    zip_safe=False,
    license="Apache 2.0")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/__init__.py
|
Python
|
import logging
# Note: do not introduce unnecessary library dependencies here, e.g. gym.
# This file is imported from the tune module in order to register RLlib agents.
from ray.rllib.env.base_env import BaseEnv
from ray.rllib.env.external_env import ExternalEnv
from ray.rllib.env.multi_agent_env import MultiAgentEnv
from ray.rllib.env.vector_env import VectorEnv
from ray.rllib.evaluation.policy_graph import PolicyGraph
from ray.rllib.evaluation.rollout_worker import RolloutWorker
from ray.rllib.evaluation.tf_policy_graph import TFPolicyGraph
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.tune.registry import register_trainable
def _setup_logger():
logger = logging.getLogger("ray.rllib")
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter(
"%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s"
))
logger.addHandler(handler)
logger.propagate = False
def _register_all():
    """Register every built-in and contrib RLlib algorithm with Tune."""
    from ray.rllib.agents.trainer import Trainer, with_common_config
    from ray.rllib.agents.registry import ALGORITHMS, get_agent_class
    from ray.rllib.contrib.registry import CONTRIBUTED_ALGORITHMS

    all_keys = (list(ALGORITHMS.keys()) + list(CONTRIBUTED_ALGORITHMS.keys())
                + ["__fake", "__sigmoid_fake_data", "__parameter_tuning"])
    for trainable_key in all_keys:
        register_trainable(trainable_key, get_agent_class(trainable_key))

    def _see_contrib(name):
        """Returns dummy agent class warning algo is in contrib/."""

        class _SeeContrib(Trainer):
            _name = "SeeContrib"
            _default_config = with_common_config({})

            def _setup(self, config):
                raise NameError(
                    "Please run `contrib/{}` instead.".format(name))

        return _SeeContrib

    # Also register the names minus the "contrib/" prefix so that using a
    # bare alias produces a helpful error message.
    for contrib_key in list(CONTRIBUTED_ALGORITHMS.keys()):
        assert contrib_key.startswith("contrib/")
        alias = contrib_key.split("/", 1)[1]
        register_trainable(alias, _see_contrib(alias))
# Configure RLlib logging and register all built-in trainables with Tune as
# a side effect of importing this package.
_setup_logger()

_register_all()

# NOTE(review): "PolicyEvaluator" is exported here but is not imported in
# this module's visible imports -- presumably injected elsewhere; verify.
__all__ = [
    "Policy",
    "PolicyGraph",
    "TFPolicy",
    "TFPolicyGraph",
    "RolloutWorker",
    "PolicyEvaluator",
    "SampleBatch",
    "BaseEnv",
    "MultiAgentEnv",
    "VectorEnv",
    "ExternalEnv",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/__init__.py
|
Python
|
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.agents.agent import Agent

# Public API of ray.rllib.agents. NOTE(review): "Agent" appears to be the
# legacy name for "Trainer" -- confirm against ray.rllib.agents.agent.
__all__ = ["Agent", "Trainer", "with_common_config"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/a3c/__init__.py
|
Python
|
from ray.rllib.agents.a3c.a3c import A3CTrainer, DEFAULT_CONFIG
from ray.rllib.agents.a3c.a2c import A2CTrainer
from ray.rllib.utils import renamed_agent

# Backwards-compatible *Agent aliases for the renamed *Trainer classes.
A2CAgent = renamed_agent(A2CTrainer)
A3CAgent = renamed_agent(A3CTrainer)

__all__ = [
    "A2CAgent", "A3CAgent", "A2CTrainer", "A3CTrainer", "DEFAULT_CONFIG"
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/a3c/a2c.py
|
Python
|
from ray.rllib.agents.a3c.a3c import DEFAULT_CONFIG as A3C_CONFIG, \
validate_config, get_policy_class
from ray.rllib.optimizers import SyncSamplesOptimizer, MicrobatchOptimizer
from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.utils import merge_dicts
# A2C overrides of the A3C defaults: synchronous sampling with a larger
# rollout fragment and a longer minimum iteration time.
A2C_DEFAULT_CONFIG = merge_dicts(
    A3C_CONFIG,
    {
        "sample_batch_size": 20,
        "min_iter_time_s": 10,
        "sample_async": False,

        # A2C supports microbatching, in which we accumulate gradients over
        # batch of this size until the train batch size is reached. This allows
        # training with batch sizes much larger than can fit in GPU memory.
        # To enable, set this to a value less than the train batch size.
        "microbatch_size": None,
    },
)
def choose_policy_optimizer(workers, config):
    """Select A2C's policy optimizer.

    Uses the gradient-accumulating MicrobatchOptimizer when
    ``microbatch_size`` is set, otherwise plain synchronous sampling.
    """
    microbatch_size = config["microbatch_size"]
    train_batch_size = config["train_batch_size"]
    if not microbatch_size:
        return SyncSamplesOptimizer(
            workers, train_batch_size=train_batch_size)
    return MicrobatchOptimizer(
        workers,
        train_batch_size=train_batch_size,
        microbatch_size=microbatch_size)
# A2C: the synchronous variant of A3C (optionally microbatched via
# choose_policy_optimizer above).
A2CTrainer = build_trainer(
    name="A2C",
    default_config=A2C_DEFAULT_CONFIG,
    default_policy=A3CTFPolicy,
    get_policy_class=get_policy_class,
    make_policy_optimizer=choose_policy_optimizer,
    validate_config=validate_config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/a3c/a3c.py
|
Python
|
from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.optimizers import AsyncGradientsOptimizer
# A3C trainer defaults, merged over the common trainer config.
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # Size of rollout batch
    "sample_batch_size": 10,
    # GAE(gamma) parameter
    "lambda": 1.0,
    # Max global norm for each gradient calculated by worker
    "grad_clip": 40.0,
    # Learning rate
    "lr": 0.0001,
    # Learning rate schedule
    "lr_schedule": None,
    # Value Function Loss coefficient
    "vf_loss_coeff": 0.5,
    # Entropy coefficient
    "entropy_coeff": 0.01,
    # Min time per iteration
    "min_iter_time_s": 5,
    # Workers sample async. Note that this increases the effective
    # sample_batch_size by up to 5x due to async buffering of batches.
    "sample_async": True,
})
# __sphinx_doc_end__
# yapf: enable
def get_policy_class(config):
    """Return the torch policy when ``use_pytorch`` is set, else the TF one.

    The torch import is deferred so torch is only required when selected.
    """
    if not config["use_pytorch"]:
        return A3CTFPolicy
    from ray.rllib.agents.a3c.a3c_torch_policy import A3CTorchPolicy
    return A3CTorchPolicy
def validate_config(config):
    """Sanity-check an A3C config dict.

    Raises:
        DeprecationWarning: If ``entropy_coeff`` is negative (kept as-is for
            backward compatibility with callers catching this type).
        ValueError: If ``sample_async`` is combined with ``use_pytorch``
            (multithreaded sampling is unsafe with pytorch).
    """
    if config["entropy_coeff"] < 0:
        raise DeprecationWarning("entropy_coeff must be >= 0")
    if config["sample_async"] and config["use_pytorch"]:
        # Fixed grammar of the error message ("can be lead to" -> "can lead
        # to"); exception type is unchanged.
        raise ValueError(
            "The sample_async option is not supported with use_pytorch: "
            "Multithreading can lead to crashes if used with pytorch.")
def make_async_optimizer(workers, config):
    """Build the asynchronous-gradients optimizer used by A3C."""
    optimizer_kwargs = config["optimizer"]
    return AsyncGradientsOptimizer(workers, **optimizer_kwargs)
# A3C: rollout workers compute gradients asynchronously and ship them to
# the learner (see make_async_optimizer above).
A3CTrainer = build_trainer(
    name="A3C",
    default_config=DEFAULT_CONFIG,
    default_policy=A3CTFPolicy,
    get_policy_class=get_policy_class,
    validate_config=validate_config,
    make_policy_optimizer=make_async_optimizer)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/a3c/a3c_tf_policy.py
|
Python
|
"""Note: Keep in sync with changes to VTraceTFPolicy."""
import ray
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.tf_policy import LearningRateSchedule
from ray.rllib.utils.tf_ops import make_tf_callable
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class A3CLoss:
    """Combined A3C objective: policy gradient + weighted value loss,
    minus an entropy bonus."""

    def __init__(self,
                 action_dist,
                 actions,
                 advantages,
                 v_target,
                 vf,
                 vf_loss_coeff=0.5,
                 entropy_coeff=0.01):
        log_prob = action_dist.logp(actions)

        # Policy-gradient term: maximize advantage-weighted log-likelihood.
        self.pi_loss = -tf.reduce_sum(log_prob * advantages)

        # Value term: half the summed squared error against the targets.
        vf_delta = vf - v_target
        self.vf_loss = 0.5 * tf.reduce_sum(tf.square(vf_delta))

        # Entropy bonus (subtracted below) encourages exploration.
        self.entropy = tf.reduce_sum(action_dist.entropy())

        self.total_loss = (self.pi_loss + vf_loss_coeff * self.vf_loss -
                           entropy_coeff * self.entropy)
def actor_critic_loss(policy, model, dist_class, train_batch):
    """Construct the A3C loss on ``policy`` and return its total."""
    model_out, _ = model.from_batch(train_batch)
    dist = dist_class(model_out, model)
    policy.loss = A3CLoss(
        dist,
        train_batch[SampleBatch.ACTIONS],
        train_batch[Postprocessing.ADVANTAGES],
        train_batch[Postprocessing.VALUE_TARGETS],
        model.value_function(),
        vf_loss_coeff=policy.config["vf_loss_coeff"],
        entropy_coeff=policy.config["entropy_coeff"])
    return policy.loss.total_loss
def postprocess_advantages(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Add GAE advantages and value targets to a trajectory batch.

    Unless the episode terminated, the value of the final state is
    bootstrapped from the policy's value function.
    """
    if sample_batch[SampleBatch.DONES][-1]:
        # Terminal state: nothing left to bootstrap.
        last_r = 0.0
    else:
        # Bootstrap from the value estimate of the last observation,
        # feeding in the final RNN state (batch-of-one per tensor).
        last_state = [
            [sample_batch["state_out_{}".format(i)][-1]]
            for i in range(policy.num_state_tensors())
        ]
        last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
                               sample_batch[SampleBatch.ACTIONS][-1],
                               sample_batch[SampleBatch.REWARDS][-1],
                               *last_state)
    return compute_advantages(sample_batch, last_r, policy.config["gamma"],
                              policy.config["lambda"])
def add_value_function_fetch(policy):
    """Also fetch the value-function output with each computed action."""
    vf_preds = policy.model.value_function()
    return {SampleBatch.VF_PREDS: vf_preds}
class ValueNetworkMixin:
    """Adds a ``_value`` helper that evaluates the model's value function
    for a single transition (used to bootstrap truncated rollouts in
    postprocess_advantages)."""

    def __init__(self):
        # Wrapped with make_tf_callable so it can be called with plain
        # (non-tensor) inputs from the sample batch.
        @make_tf_callable(self.get_session())
        def value(ob, prev_action, prev_reward, *state):
            # Batch-of-one forward pass; the final argument is seq_lens=[1].
            model_out, _ = self.model({
                SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
                SampleBatch.PREV_ACTIONS: tf.convert_to_tensor([prev_action]),
                SampleBatch.PREV_REWARDS: tf.convert_to_tensor([prev_reward]),
                "is_training": tf.convert_to_tensor(False),
            }, [tf.convert_to_tensor([s]) for s in state],
                tf.convert_to_tensor([1]))
            # Unbatch: return the scalar value estimate.
            return self.model.value_function()[0]

        self._value = value
def stats(policy, train_batch):
    """Learner statistics reported for A3C after the loss is built."""
    trainable_vars = list(policy.model.trainable_variables())
    return {
        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
        "policy_loss": policy.loss.pi_loss,
        "policy_entropy": policy.loss.entropy,
        "var_gnorm": tf.global_norm(trainable_vars),
        "vf_loss": policy.loss.vf_loss,
    }
def grad_stats(policy, train_batch, grads):
    """Gradient statistics reported alongside each update."""
    vf_ev = explained_variance(train_batch[Postprocessing.VALUE_TARGETS],
                               policy.model.value_function())
    return {
        "grad_gnorm": tf.global_norm(grads),
        "vf_explained_var": vf_ev,
    }
def clip_gradients(policy, optimizer, loss):
    """Compute gradients and clip them by global norm (`grad_clip`)."""
    trainable = policy.model.trainable_variables()
    grads = [g for g, _ in optimizer.compute_gradients(loss, trainable)]
    clipped, _ = tf.clip_by_global_norm(grads, policy.config["grad_clip"])
    return list(zip(clipped, trainable))
def setup_mixins(policy, obs_space, action_space, config):
    """Initialize the mixin helpers before the loss graph is built."""
    ValueNetworkMixin.__init__(policy)
    # LR schedule reads "lr" and the optional "lr_schedule" config keys.
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
# Assemble the TF A3C policy from the functional building blocks above.
A3CTFPolicy = build_tf_policy(
    name="A3CTFPolicy",
    get_default_config=lambda: ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG,
    loss_fn=actor_critic_loss,
    stats_fn=stats,
    grad_stats_fn=grad_stats,
    gradients_fn=clip_gradients,
    postprocess_fn=postprocess_advantages,
    extra_action_fetches_fn=add_value_function_fetch,
    before_loss_init=setup_mixins,
    mixins=[ValueNetworkMixin, LearningRateSchedule])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/a3c/a3c_torch_policy.py
|
Python
|
import ray
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.framework import try_import_torch
torch, nn = try_import_torch()
F = nn.functional
def actor_critic_loss(policy, model, dist_class, train_batch):
    """A3C loss: policy gradient + weighted value error - entropy bonus."""
    logits, _ = model.from_batch(train_batch)
    values = model.value_function()
    dist = dist_class(logits, model)
    log_probs = dist.logp(train_batch[SampleBatch.ACTIONS])
    # Stash the individual terms on the policy so the stats fn can report
    # them after the update.
    policy.entropy = dist.entropy().mean()
    policy.pi_err = -train_batch[Postprocessing.ADVANTAGES].dot(
        log_probs.reshape(-1))
    policy.value_err = F.mse_loss(values.reshape(-1),
                                  train_batch[Postprocessing.VALUE_TARGETS])
    return (policy.pi_err +
            policy.config["vf_loss_coeff"] * policy.value_err -
            policy.config["entropy_coeff"] * policy.entropy)
def loss_and_entropy_stats(policy, train_batch):
    """Scalar training stats pulled off the policy after the loss pass."""
    stats = {}
    stats["policy_entropy"] = policy.entropy.item()
    stats["policy_loss"] = policy.pi_err.item()
    stats["vf_loss"] = policy.value_err.item()
    return stats
def add_advantages(policy,
                   sample_batch,
                   other_agent_batches=None,
                   episode=None):
    """Postprocess a rollout with GAE advantages (torch version)."""
    if sample_batch[SampleBatch.DONES][-1]:
        # Terminal state: bootstrap value is zero.
        last_r = 0.0
    else:
        # Truncated rollout: bootstrap from V(s) of the last observation.
        last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1])
    return compute_advantages(sample_batch, last_r, policy.config["gamma"],
                              policy.config["lambda"])
def model_value_predictions(policy, input_dict, state_batches, model,
                            action_dist):
    """Record V(s) alongside each action so it lands in the sample batch."""
    preds = model.value_function().cpu().numpy()
    return {SampleBatch.VF_PREDS: preds}
def apply_grad_clipping(policy):
    """Clip gradients in place by global norm, if `grad_clip` is set."""
    if not policy.config["grad_clip"]:
        return {}
    total_norm = nn.utils.clip_grad_norm_(policy.model.parameters(),
                                          policy.config["grad_clip"])
    return {"grad_gnorm": total_norm}
def torch_optimizer(policy, config):
    """Create the Adam optimizer used by the torch A3C policy."""
    params = policy.model.parameters()
    return torch.optim.Adam(params, lr=config["lr"])
class ValueNetworkMixin:
    """Mixin providing `_value(obs)` -> scalar V(s) for bootstrapping."""

    def _value(self, obs):
        # Run a batch-of-one forward pass on the policy's device, then
        # read the value head off the model.
        obs_tensor = torch.from_numpy(obs).float().unsqueeze(0).to(self.device)
        _ = self.model({"obs": obs_tensor}, [], [1])
        value_out = self.model.value_function()
        return value_out.detach().cpu().numpy().squeeze()
# Assemble the torch A3C policy from the functional building blocks above.
A3CTorchPolicy = build_torch_policy(
    name="A3CTorchPolicy",
    get_default_config=lambda: ray.rllib.agents.a3c.a3c.DEFAULT_CONFIG,
    loss_fn=actor_critic_loss,
    stats_fn=loss_and_entropy_stats,
    postprocess_fn=add_advantages,
    extra_action_out_fn=model_value_predictions,
    extra_grad_process_fn=apply_grad_clipping,
    optimizer_fn=torch_optimizer,
    mixins=[ValueNetworkMixin])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/agent.py
|
Python
|
from ray.rllib.agents.trainer import Trainer
from ray.rllib.utils import renamed_agent
# Backwards-compatible alias: "Agent" was renamed to "Trainer".
Agent = renamed_agent(Trainer)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ars/__init__.py
|
Python
|
from ray.rllib.agents.ars.ars import (ARSTrainer, DEFAULT_CONFIG)
from ray.rllib.utils import renamed_agent
# Backwards-compatible alias for the pre-"Trainer" naming scheme.
ARSAgent = renamed_agent(ARSTrainer)
__all__ = ["ARSAgent", "ARSTrainer", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ars/ars.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter and from
# https://github.com/modestyachts/ARS
from collections import namedtuple
import logging
import numpy as np
import time
import ray
from ray.rllib.agents import Trainer, with_common_config
from ray.rllib.agents.ars import optimizers
from ray.rllib.agents.ars import policies
from ray.rllib.agents.ars import utils
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.annotations import override
from ray.rllib.utils.memory import ray_get_and_free
from ray.rllib.utils import FilterManager
logger = logging.getLogger(__name__)
# Per-worker rollout results: parallel lists of noise-table indices, the
# (positive, negative) perturbation returns / sign-returns / lengths, plus
# unperturbed evaluation returns and lengths.
Result = namedtuple("Result", [
    "noise_indices", "noisy_returns", "sign_noisy_returns", "noisy_lengths",
    "eval_returns", "eval_lengths"
])
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    "noise_stdev": 0.02,  # std deviation of parameter noise
    "num_rollouts": 32,  # number of perturbs to try
    "rollouts_used": 32,  # number of perturbs to keep in gradient estimate
    "num_workers": 2,  # number of parallel rollout actors
    "sgd_stepsize": 0.01,  # sgd step-size
    "observation_filter": "MeanStdFilter",  # observation normalization
    "noise_size": 250000000,  # float32 entries in the shared noise table
    "eval_prob": 0.03,  # probability of evaluating the parameter rewards
    "report_length": 10,  # how many of the last rewards we average over
    "offset": 0,  # constant subtracted from each per-step reward
})
# __sphinx_doc_end__
# yapf: enable
@ray.remote
def create_shared_noise(count):
    """Create a large array of noise to be shared by all workers."""
    # Fixed seed so every caller builds an identical table; workers then
    # only need to exchange indices into it, not the noise itself.
    seed = 123
    noise = np.random.RandomState(seed).randn(count).astype(np.float32)
    return noise
class SharedNoiseTable:
    """Read-only view over a shared float32 noise array."""

    def __init__(self, noise):
        self.noise = noise
        assert self.noise.dtype == np.float32

    def get(self, i, dim):
        """Slice of `dim` noise values starting at index `i`."""
        return self.noise[i:i + dim]

    def sample_index(self, dim):
        """Uniformly sample a valid start index for a `dim`-sized slice."""
        return np.random.randint(0, len(self.noise) - dim + 1)

    def get_delta(self, dim):
        """Sample a (start_index, noise_slice) pair of length `dim`."""
        start = self.sample_index(dim)
        return start, self.get(start, dim)
@ray.remote
class Worker:
    """Ray actor that performs (perturbed) rollouts for ARS."""

    def __init__(self, config, env_creator, noise, min_task_runtime=0.2):
        # NOTE(review): min_task_runtime is stored but never read in this
        # class — presumably kept for API parity with the ES worker.
        self.min_task_runtime = min_task_runtime
        self.config = config
        # Local read-only view over the shared noise table.
        self.noise = SharedNoiseTable(noise)
        self.env = env_creator(config["env_config"])
        from ray.rllib import models
        self.preprocessor = models.ModelCatalog.get_preprocessor(self.env)
        # Each worker gets its own single-threaded TF session.
        self.sess = utils.make_session(single_threaded=True)
        self.policy = policies.GenericPolicy(
            self.sess, self.env.action_space, self.env.observation_space,
            self.preprocessor, config["observation_filter"], config["model"])

    @property
    def filters(self):
        # Expose the observation filter under the default policy id so
        # FilterManager can synchronize it like any other rollout worker.
        return {DEFAULT_POLICY_ID: self.policy.get_filter()}

    def sync_filters(self, new_filters):
        """Overwrite local filter state with the driver's."""
        for k in self.filters:
            self.filters[k].sync(new_filters[k])

    def get_filters(self, flush_after=False):
        """Return serializable filters; optionally clear local buffers."""
        return_filters = {}
        for k, f in self.filters.items():
            return_filters[k] = f.as_serializable()
            if flush_after:
                f.clear_buffer()
        return return_filters

    def rollout(self, timestep_limit, add_noise=False):
        """Run one episode with the current policy weights."""
        rollout_rewards, rollout_length = policies.rollout(
            self.policy,
            self.env,
            timestep_limit=timestep_limit,
            add_noise=add_noise,
            offset=self.config["offset"])
        return rollout_rewards, rollout_length

    def do_rollouts(self, params, timestep_limit=None):
        """Collect evaluation and/or antithetic perturbation rollouts.

        Loops until at least one perturbed (+delta / -delta) pair has been
        collected; evaluation-only iterations do not terminate the loop.
        """
        # Set the network weights.
        self.policy.set_weights(params)
        noise_indices, returns, sign_returns, lengths = [], [], [], []
        eval_returns, eval_lengths = [], []
        # Perform some rollouts with noise.
        while (len(noise_indices) == 0):
            if np.random.uniform() < self.config["eval_prob"]:
                # Do an evaluation run with no perturbation.
                self.policy.set_weights(params)
                rewards, length = self.rollout(timestep_limit, add_noise=False)
                eval_returns.append(rewards.sum())
                eval_lengths.append(length)
            else:
                # Do a regular run with parameter perturbations.
                noise_index = self.noise.sample_index(self.policy.num_params)
                perturbation = self.config["noise_stdev"] * self.noise.get(
                    noise_index, self.policy.num_params)
                # These two sampling steps could be done in parallel on
                # different actors letting us update twice as frequently.
                self.policy.set_weights(params + perturbation)
                rewards_pos, lengths_pos = self.rollout(timestep_limit)
                self.policy.set_weights(params - perturbation)
                rewards_neg, lengths_neg = self.rollout(timestep_limit)
                noise_indices.append(noise_index)
                returns.append([rewards_pos.sum(), rewards_neg.sum()])
                sign_returns.append(
                    [np.sign(rewards_pos).sum(),
                     np.sign(rewards_neg).sum()])
                lengths.append([lengths_pos, lengths_neg])
        return Result(
            noise_indices=noise_indices,
            noisy_returns=returns,
            sign_noisy_returns=sign_returns,
            noisy_lengths=lengths,
            eval_returns=eval_returns,
            eval_lengths=eval_lengths)
class ARSTrainer(Trainer):
    """Large-scale implementation of Augmented Random Search in Ray."""
    _name = "ARS"
    _default_config = DEFAULT_CONFIG

    @override(Trainer)
    def _init(self, config, env_creator):
        # PyTorch check.
        if config["use_pytorch"]:
            raise ValueError(
                "ARS does not support PyTorch yet! Use tf instead."
            )
        env = env_creator(config["env_config"])
        from ray.rllib import models
        preprocessor = models.ModelCatalog.get_preprocessor(env)
        # Driver-side policy mirrors the one each worker builds.
        self.sess = utils.make_session(single_threaded=False)
        self.policy = policies.GenericPolicy(
            self.sess, env.action_space, env.observation_space, preprocessor,
            config["observation_filter"], config["model"])
        self.optimizer = optimizers.SGD(self.policy, config["sgd_stepsize"])
        self.rollouts_used = config["rollouts_used"]
        self.num_rollouts = config["num_rollouts"]
        self.report_length = config["report_length"]
        # Create the shared noise table.
        logger.info("Creating shared noise table.")
        noise_id = create_shared_noise.remote(config["noise_size"])
        self.noise = SharedNoiseTable(ray.get(noise_id))
        # Create the actors.
        logger.info("Creating actors.")
        self.workers = [
            Worker.remote(config, env_creator, noise_id)
            for _ in range(config["num_workers"])
        ]
        self.episodes_so_far = 0
        self.reward_list = []
        self.tstart = time.time()

    @override(Trainer)
    def _train(self):
        """One ARS iteration: collect rollouts, estimate gradient, step."""
        config = self.config
        theta = self.policy.get_weights()
        assert theta.dtype == np.float32
        # Put the current policy weights in the object store.
        theta_id = ray.put(theta)
        # Use the actors to do rollouts, note that we pass in the ID of the
        # policy weights.
        results, num_episodes, num_timesteps = self._collect_results(
            theta_id, config["num_rollouts"])
        all_noise_indices = []
        all_training_returns = []
        all_training_lengths = []
        all_eval_returns = []
        all_eval_lengths = []
        # Loop over the results.
        for result in results:
            all_eval_returns += result.eval_returns
            all_eval_lengths += result.eval_lengths
            all_noise_indices += result.noise_indices
            all_training_returns += result.noisy_returns
            all_training_lengths += result.noisy_lengths
        assert len(all_eval_returns) == len(all_eval_lengths)
        assert (len(all_noise_indices) == len(all_training_returns) ==
                len(all_training_lengths))
        self.episodes_so_far += num_episodes
        # Assemble the results.
        eval_returns = np.array(all_eval_returns)
        eval_lengths = np.array(all_eval_lengths)
        noise_indices = np.array(all_noise_indices)
        noisy_returns = np.array(all_training_returns)
        noisy_lengths = np.array(all_training_lengths)
        # keep only the best returns
        # select top performing directions if rollouts_used < num_rollouts
        max_rewards = np.max(noisy_returns, axis=1)
        if self.rollouts_used > self.num_rollouts:
            self.rollouts_used = self.num_rollouts
        percentile = 100 * (1 - (self.rollouts_used / self.num_rollouts))
        idx = np.arange(max_rewards.size)[
            max_rewards >= np.percentile(max_rewards, percentile)]
        noise_idx = noise_indices[idx]
        noisy_returns = noisy_returns[idx, :]
        # Compute and take a step.
        # Gradient estimate: weighted sum of the noise directions with the
        # antithetic return differences (r+ - r-) as weights.
        g, count = utils.batched_weighted_sum(
            noisy_returns[:, 0] - noisy_returns[:, 1],
            (self.noise.get(index, self.policy.num_params)
             for index in noise_idx),
            batch_size=min(500, noisy_returns[:, 0].size))
        g /= noise_idx.size
        # scale the returns by their standard deviation
        if not np.isclose(np.std(noisy_returns), 0.0):
            g /= np.std(noisy_returns)
        assert (g.shape == (self.policy.num_params, )
                and g.dtype == np.float32)
        # Compute the new weights theta.
        theta, update_ratio = self.optimizer.update(-g)
        # Set the new weights in the local copy of the policy.
        self.policy.set_weights(theta)
        # update the reward list
        if len(all_eval_returns) > 0:
            self.reward_list.append(eval_returns.mean())
        # Now sync the filters
        FilterManager.synchronize({
            DEFAULT_POLICY_ID: self.policy.get_filter()
        }, self.workers)
        info = {
            "weights_norm": np.square(theta).sum(),
            "weights_std": np.std(theta),
            "grad_norm": np.square(g).sum(),
            "update_ratio": update_ratio,
            "episodes_this_iter": noisy_lengths.size,
            "episodes_so_far": self.episodes_so_far,
        }
        result = dict(
            episode_reward_mean=np.mean(
                self.reward_list[-self.report_length:]),
            episode_len_mean=eval_lengths.mean(),
            timesteps_this_iter=noisy_lengths.sum(),
            info=info)
        return result

    @override(Trainer)
    def _stop(self):
        # workaround for https://github.com/ray-project/ray/issues/1516
        for w in self.workers:
            w.__ray_terminate__.remote()

    @override(Trainer)
    def compute_action(self, observation):
        """Compute an action with the driver-side policy (updates filter)."""
        return self.policy.compute(observation, update=True)[0]

    def _collect_results(self, theta_id, min_episodes):
        """Fan rollout tasks out to the workers until enough episodes.

        Returns (results, num_episodes, num_timesteps).
        """
        num_episodes, num_timesteps = 0, 0
        results = []
        while num_episodes < min_episodes:
            logger.debug(
                "Collected {} episodes {} timesteps so far this iter".format(
                    num_episodes, num_timesteps))
            rollout_ids = [
                worker.do_rollouts.remote(theta_id) for worker in self.workers
            ]
            # Get the results of the rollouts.
            for result in ray_get_and_free(rollout_ids):
                results.append(result)
                # Update the number of episodes and the number of timesteps
                # keeping in mind that result.noisy_lengths is a list of lists,
                # where the inner lists have length 2.
                num_episodes += sum(len(pair) for pair in result.noisy_lengths)
                num_timesteps += sum(
                    sum(pair) for pair in result.noisy_lengths)
        return results, num_episodes, num_timesteps

    def __getstate__(self):
        # Checkpoint state: weights, observation filter, episode counter.
        return {
            "weights": self.policy.get_weights(),
            "filter": self.policy.get_filter(),
            "episodes_so_far": self.episodes_so_far,
        }

    def __setstate__(self, state):
        self.episodes_so_far = state["episodes_so_far"]
        self.policy.set_weights(state["weights"])
        self.policy.set_filter(state["filter"])
        # Push the restored filter out to the workers.
        FilterManager.synchronize({
            DEFAULT_POLICY_ID: self.policy.get_filter()
        }, self.workers)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ars/optimizers.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
import numpy as np
class Optimizer:
    """Base class: turns a flat gradient estimate into a weight update."""

    def __init__(self, policy):
        self.policy = policy
        self.dim = policy.num_params
        # Update-step counter; subclasses (e.g. Adam) use it for bias
        # correction.
        self.t = 0

    def update(self, globalg):
        """Apply one step; return (new_theta, ||step|| / ||theta||)."""
        self.t += 1
        delta = self._compute_step(globalg)
        theta = self.policy.get_weights()
        ratio = np.linalg.norm(delta) / np.linalg.norm(theta)
        return theta + delta, ratio

    def _compute_step(self, globalg):
        # Subclasses must turn the gradient into an additive step.
        raise NotImplementedError
class SGD(Optimizer):
    """Stochastic gradient descent with optional classical momentum."""

    def __init__(self, policy, stepsize, momentum=0.0):
        Optimizer.__init__(self, policy)
        self.v = np.zeros(self.dim, dtype=np.float32)  # velocity buffer
        self.stepsize = stepsize
        self.momentum = momentum

    def _compute_step(self, globalg):
        # Exponential moving average of the gradient, scaled by -stepsize.
        self.v = self.momentum * self.v + (1. - self.momentum) * globalg
        return -self.stepsize * self.v
class Adam(Optimizer):
    """Adam optimizer (Kingma & Ba, 2015) over flat parameter vectors."""

    def __init__(self, policy, stepsize, beta1=0.9, beta2=0.999,
                 epsilon=1e-08):
        Optimizer.__init__(self, policy)
        self.stepsize = stepsize
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.m = np.zeros(self.dim, dtype=np.float32)  # 1st-moment estimate
        self.v = np.zeros(self.dim, dtype=np.float32)  # 2nd-moment estimate

    def _compute_step(self, globalg):
        # Bias-corrected step size for update step self.t (incremented by
        # Optimizer.update before this is called).
        lr = self.stepsize * (np.sqrt(1 - self.beta2**self.t) /
                              (1 - self.beta1**self.t))
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        return -lr * self.m / (np.sqrt(self.v) + self.epsilon)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ars/policies.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
import gym
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.rllib.evaluation.sampler import _unbatch_tuple_actions
from ray.rllib.utils.filter import get_filter
from ray.rllib.models import ModelCatalog
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def rollout(policy, env, timestep_limit=None, add_noise=False, offset=0):
    """Run one episode and return (rewards_array, episode_length).

    Parameters
    ----------
    policy: tf object
        policy from which to draw actions
    env: GymEnv
        environment from which to draw rewards, done, and next state
    timestep_limit: int, optional
        steps after which to end the rollout; clipped to the env's own
        `max_episode_steps`
    add_noise: bool, optional
        indicates whether exploratory action noise should be added
    offset: int, optional
        value whose absolute value is subtracted from each reward (e.g.
        survival bonus from humanoid)
    """
    env_limit = env.spec.max_episode_steps
    limit = (env_limit
             if timestep_limit is None else min(timestep_limit, env_limit))
    rewards = []
    steps = 0
    obs = env.reset()
    # Fall back to a large cap if the limit is falsy (None/0).
    for _ in range(limit or 999999):
        action = policy.compute(obs, add_noise=add_noise, update=True)[0]
        obs, reward, done, _ = env.step(action)
        rewards.append(reward - np.abs(offset))
        steps += 1
        if done:
            break
    return np.array(rewards, dtype=np.float32), steps
class GenericPolicy:
    """Deterministic TF policy used by ARS (flat-weight get/set API)."""

    def __init__(self,
                 sess,
                 action_space,
                 obs_space,
                 preprocessor,
                 observation_filter,
                 model_config,
                 action_noise_std=0.0):
        self.sess = sess
        self.action_space = action_space
        self.action_noise_std = action_noise_std
        self.preprocessor = preprocessor
        # Running observation normalizer (e.g. MeanStdFilter).
        self.observation_filter = get_filter(observation_filter,
                                             self.preprocessor.shape)
        self.inputs = tf.placeholder(tf.float32,
                                     [None] + list(self.preprocessor.shape))
        # Policy network.
        dist_class, dist_dim = ModelCatalog.get_action_dist(
            action_space, model_config, dist_type="deterministic")
        model = ModelCatalog.get_model({
            "obs": self.inputs
        }, obs_space, action_space, dist_dim, model_config)
        dist = dist_class(model.outputs, model)
        self.sampler = dist.sample()
        # Flat-weight accessor over all model variables, used by the ARS
        # optimizer (set_weights/get_weights below).
        self.variables = ray.experimental.tf_utils.TensorFlowVariables(
            model.outputs, self.sess)
        self.num_params = sum(
            np.prod(variable.shape.as_list())
            for _, variable in self.variables.variables.items())
        self.sess.run(tf.global_variables_initializer())

    def compute(self, observation, add_noise=False, update=True):
        """Compute an action; `update` also updates the obs filter stats."""
        observation = self.preprocessor.transform(observation)
        observation = self.observation_filter(observation[None], update=update)
        action = self.sess.run(
            self.sampler, feed_dict={self.inputs: observation})
        action = _unbatch_tuple_actions(action)
        # Gaussian action noise only makes sense for continuous spaces.
        if add_noise and isinstance(self.action_space, gym.spaces.Box):
            action += np.random.randn(*action.shape) * self.action_noise_std
        return action

    def set_weights(self, x):
        # Load a flat float32 vector into all model variables.
        self.variables.set_flat(x)

    def set_filter(self, obs_filter):
        self.observation_filter = obs_filter

    def get_filter(self):
        return self.observation_filter

    def get_weights(self):
        # Return all model variables as one flat vector.
        return self.variables.get_flat()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ars/utils.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
import numpy as np
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def compute_ranks(x):
    """Returns ranks in [0, len(x))

    Note: This is different from scipy.stats.rankdata, which returns ranks in
    [1, len(x)].
    """
    assert x.ndim == 1
    order = x.argsort()
    ranks = np.empty(len(x), dtype=int)
    ranks[order] = np.arange(len(x))
    return ranks
def compute_centered_ranks(x):
    """Rank-transform x into the range [-0.5, 0.5], preserving its shape."""
    flat_ranks = compute_ranks(x.ravel()).astype(np.float32)
    centered = flat_ranks / (x.size - 1) - 0.5
    return centered.reshape(x.shape)
def make_session(single_threaded):
    """Create a TF session, optionally pinned to a single CPU thread."""
    if single_threaded:
        return tf.Session(
            config=tf.ConfigProto(
                inter_op_parallelism_threads=1,
                intra_op_parallelism_threads=1))
    return tf.Session()
def itergroups(items, group_size):
    """Yield tuples of at most `group_size` consecutive items."""
    assert group_size >= 1
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == group_size:
            yield tuple(batch)
            batch.clear()
    # Flush the (shorter) final group, if any.
    if batch:
        yield tuple(batch)
def batched_weighted_sum(weights, vecs, batch_size):
    """Compute sum_i weights[i] * vecs[i] in batches of `batch_size`.

    Returns (total, number_of_items_summed).
    """
    total = 0
    count = 0
    batches = zip(itergroups(weights, batch_size),
                  itergroups(vecs, batch_size))
    for batch_weights, batch_vecs in batches:
        assert len(batch_weights) == len(batch_vecs) <= batch_size
        total += np.dot(
            np.asarray(batch_weights, dtype=np.float32),
            np.asarray(batch_vecs, dtype=np.float32))
        count += len(batch_weights)
    return total, count
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ddpg/__init__.py
|
Python
|
from ray.rllib.agents.ddpg.apex import ApexDDPGTrainer
from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, DEFAULT_CONFIG
from ray.rllib.agents.ddpg.td3 import TD3Trainer
from ray.rllib.utils import renamed_agent
# Backwards-compatible aliases for the pre-"Trainer" naming scheme.
ApexDDPGAgent = renamed_agent(ApexDDPGTrainer)
DDPGAgent = renamed_agent(DDPGTrainer)
__all__ = [
    "DDPGAgent", "ApexDDPGAgent", "DDPGTrainer", "ApexDDPGTrainer",
    "TD3Trainer", "DEFAULT_CONFIG"
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ddpg/apex.py
|
Python
|
from ray.rllib.agents.dqn.apex import APEX_TRAINER_PROPERTIES
from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, \
DEFAULT_CONFIG as DDPG_CONFIG
from ray.rllib.utils import merge_dicts
# Ape-X DDPG = DDPG + distributed prioritized replay: many workers,
# sharded replay buffers, worker-side prioritization.
APEX_DDPG_DEFAULT_CONFIG = merge_dicts(
    DDPG_CONFIG,  # see also the options in ddpg.py, which are also supported
    {
        "optimizer": merge_dicts(
            DDPG_CONFIG["optimizer"], {
                "max_weight_sync_delay": 400,
                "num_replay_buffer_shards": 4,
                "debug": False
            }),
        "n_step": 3,
        "num_gpus": 0,
        "num_workers": 32,
        "buffer_size": 2000000,
        "learning_starts": 50000,
        "train_batch_size": 512,
        "sample_batch_size": 50,
        "target_network_update_freq": 500000,
        "timesteps_per_iteration": 25000,
        "per_worker_exploration": True,
        "worker_side_prioritization": True,
        "min_iter_time_s": 30,
    },
)
# Reuse DQN's Ape-X execution plan on top of the DDPG trainer.
ApexDDPGTrainer = DDPGTrainer.with_updates(
    name="APEX_DDPG",
    default_config=APEX_DDPG_DEFAULT_CONFIG,
    **APEX_TRAINER_PROPERTIES)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ddpg/ddpg.py
|
Python
|
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer, \
update_worker_explorations
from ray.rllib.agents.ddpg.ddpg_policy import DDPGTFPolicy
from ray.rllib.utils.schedules import ConstantSchedule, LinearSchedule
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# === Twin Delayed DDPG (TD3) and Soft Actor-Critic (SAC) tricks ===
# TD3: https://spinningup.openai.com/en/latest/algorithms/td3.html
# In addition to settings below, you can use "exploration_noise_type" and
# "exploration_gauss_act_noise" to get IID Gaussian exploration noise
# instead of OU exploration noise.
# twin Q-net
"twin_q": False,
# delayed policy update
"policy_delay": 1,
# target policy smoothing
# (this also replaces OU exploration noise with IID Gaussian exploration
# noise, for now)
"smooth_target_policy": False,
# gaussian stddev of target action noise for smoothing
"target_noise": 0.2,
# target noise limit (bound)
"target_noise_clip": 0.5,
# === Evaluation ===
# Evaluate with epsilon=0 every `evaluation_interval` training iterations.
# The evaluation stats will be reported under the "evaluation" metric key.
# Note that evaluation is currently not parallelized, and that for Ape-X
# metrics are already only reported for the lowest epsilon workers.
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_num_episodes": 10,
# === Model ===
# Apply a state preprocessor with spec given by the "model" config option
# (like other RL algorithms). This is mostly useful if you have a weird
# observation shape, like an image. Disabled by default.
"use_state_preprocessor": False,
# Postprocess the policy network model output with these hidden layers. If
# use_state_preprocessor is False, then these will be the *only* hidden
# layers in the network.
"actor_hiddens": [400, 300],
# Hidden layers activation of the postprocessing stage of the policy
# network
"actor_hidden_activation": "relu",
# Postprocess the critic network model output with these hidden layers;
# again, if use_state_preprocessor is True, then the state will be
# preprocessed by the model specified with the "model" config option first.
"critic_hiddens": [400, 300],
# Hidden layers activation of the postprocessing state of the critic.
"critic_hidden_activation": "relu",
# N-step Q learning
"n_step": 1,
# === Exploration ===
# Turns on annealing schedule for exploration noise. Exploration is
# annealed from 1.0 to exploration_final_eps over schedule_max_timesteps
# scaled by exploration_fraction. Original DDPG and TD3 papers do not
# anneal noise, so this is False by default.
"exploration_should_anneal": False,
# Max num timesteps for annealing schedules.
"schedule_max_timesteps": 100000,
# Number of env steps to optimize for before returning
"timesteps_per_iteration": 1000,
# Fraction of entire training period over which the exploration rate is
# annealed
"exploration_fraction": 0.1,
# Final scaling multiplier for action noise (initial is 1.0)
"exploration_final_scale": 0.02,
# valid values: "ou" (time-correlated, like original DDPG paper),
# "gaussian" (IID, like TD3 paper)
"exploration_noise_type": "ou",
# OU-noise scale; this can be used to scale down magnitude of OU noise
# before adding to actions (requires "exploration_noise_type" to be "ou")
"exploration_ou_noise_scale": 0.1,
# theta for OU
"exploration_ou_theta": 0.15,
# sigma for OU
"exploration_ou_sigma": 0.2,
# gaussian stddev of act noise for exploration (requires
# "exploration_noise_type" to be "gaussian")
"exploration_gaussian_sigma": 0.1,
# If True parameter space noise will be used for exploration
# See https://blog.openai.com/better-exploration-with-parameter-noise/
"parameter_noise": False,
# Until this many timesteps have elapsed, the agent's policy will be
# ignored & it will instead take uniform random actions. Can be used in
# conjunction with learning_starts (which controls when the first
# optimization step happens) to decrease dependence of exploration &
# optimization on initial policy parameters. Note that this will be
# disabled when the action noise scale is set to 0 (e.g during evaluation).
"pure_exploration_steps": 1000,
# Extra configuration that disables exploration.
"evaluation_config": {
"exploration_fraction": 0,
"exploration_final_eps": 0,
},
# === Replay buffer ===
# Size of the replay buffer. Note that if async_updates is set, then
# each worker will have a replay buffer of this size.
"buffer_size": 50000,
# If True prioritized replay buffer will be used.
"prioritized_replay": True,
# Alpha parameter for prioritized replay buffer.
"prioritized_replay_alpha": 0.6,
# Beta parameter for sampling from prioritized replay buffer.
"prioritized_replay_beta": 0.4,
# Fraction of entire training period over which the beta parameter is
# annealed
"beta_annealing_fraction": 0.2,
# Final value of beta
"final_prioritized_replay_beta": 0.4,
# Epsilon to add to the TD errors when updating priorities.
"prioritized_replay_eps": 1e-6,
# Whether to LZ4 compress observations
"compress_observations": False,
# === Optimization ===
# Learning rate for the critic (Q-function) optimizer.
"critic_lr": 1e-3,
# Learning rate for the actor (policy) optimizer.
"actor_lr": 1e-3,
# Update the target network every `target_network_update_freq` steps.
"target_network_update_freq": 0,
# Update the target by \tau * policy + (1-\tau) * target_policy
"tau": 0.002,
# If True, use huber loss instead of squared loss for critic network
# Conventionally, no need to clip gradients if using a huber loss
"use_huber": False,
# Threshold of a huber loss
"huber_threshold": 1.0,
# Weights for L2 regularization
"l2_reg": 1e-6,
# If not None, clip gradients during optimization at this value
"grad_norm_clipping": None,
# How many steps of the model to sample before learning starts.
"learning_starts": 1500,
# Update the replay buffer with this many samples at once. Note that this
# setting applies per-worker if num_workers > 1.
"sample_batch_size": 1,
# Size of a batched sampled from replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": 256,
# === Parallelism ===
# Number of workers for collecting samples with. This only makes sense
# to increase if your environment is particularly slow to sample, or if
# you're using the Async or Ape-X optimizers.
"num_workers": 0,
# Whether to use a distribution of epsilons across workers for exploration.
"per_worker_exploration": False,
# Whether to compute priorities on workers.
"worker_side_prioritization": False,
# Prevent iterations from going lower than this time span
"min_iter_time_s": 1,
})
# __sphinx_doc_end__
# yapf: enable
def make_exploration_schedule(config, worker_index):
    """Build the per-worker action-noise scale schedule.

    Modification of DQN's schedule to take into account
    `exploration_ou_noise_scale`.
    """
    if config["per_worker_exploration"]:
        assert config["num_workers"] > 1, "This requires multiple workers"
        if worker_index < 0:
            # local ev should have zero exploration so that eval rollouts
            # run properly
            return ConstantSchedule(0.0)
        # FIXME: what do magic constants mean? (0.4, 7)
        max_index = float(config["num_workers"] - 1)
        exponent = 1 + worker_index / max_index * 7
        return ConstantSchedule(0.4**exponent)
    if config["exploration_should_anneal"]:
        return LinearSchedule(
            schedule_timesteps=int(config["exploration_fraction"] *
                                   config["schedule_max_timesteps"]),
            initial_p=1.0,
            final_p=config["exploration_final_scale"])
    # *always* add exploration noise
    return ConstantSchedule(1.0)
def setup_ddpg_exploration(trainer):
    """Attach exploration schedules for the local and remote workers."""
    # Index -1 denotes the local (evaluation) worker.
    trainer.exploration0 = make_exploration_schedule(trainer.config, -1)
    trainer.explorations = [
        make_exploration_schedule(trainer.config, idx)
        for idx in range(trainer.config["num_workers"])
    ]
def add_pure_exploration_phase(trainer):
    """Toggle uniform-random action mode until `pure_exploration_steps`.

    Called before each train step; once the sampled-timestep counter
    passes the threshold, workers switch back to policy actions.
    """
    global_timestep = trainer.optimizer.num_steps_sampled
    pure_expl_steps = trainer.config["pure_exploration_steps"]
    if pure_expl_steps:
        # tell workers whether they should do pure exploration
        only_explore = global_timestep < pure_expl_steps
        trainer.workers.local_worker().foreach_trainable_policy(
            lambda p, _: p.set_pure_exploration_phase(only_explore))
        for e in trainer.workers.remote_workers():
            e.foreach_trainable_policy.remote(
                lambda p, _: p.set_pure_exploration_phase(only_explore))
    # Also advance the regular exploration-noise schedules.
    update_worker_explorations(trainer)
def validate_config(config):
    """Reject configurations DDPG cannot run (no PyTorch support yet)."""
    if config["use_pytorch"]:
        raise ValueError("DDPG does not support PyTorch yet! Use tf instead.")
# DDPG reuses the generic off-policy (replay-based) trainer loop, adding
# its own exploration setup and a pure-exploration warmup phase.
DDPGTrainer = GenericOffPolicyTrainer.with_updates(
    name="DDPG",
    default_config=DEFAULT_CONFIG,
    default_policy=DDPGTFPolicy,
    validate_config=validate_config,
    before_init=setup_ddpg_exploration,
    before_train_step=add_pure_exploration_phase)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ddpg/ddpg_policy.py
|
Python
|
from gym.spaces import Box
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.rllib.agents.dqn.dqn_policy import _postprocess_dqn
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.tf_ops import huber_loss, minimize_and_clip, scope_vars
tf = try_import_tf()
# TF variable-scope names for the actor/critic networks and their targets.
ACTION_SCOPE = "action"
POLICY_SCOPE = "policy"
POLICY_TARGET_SCOPE = "target_policy"
Q_SCOPE = "critic"
Q_TARGET_SCOPE = "target_critic"
TWIN_Q_SCOPE = "twin_critic"
TWIN_Q_TARGET_SCOPE = "twin_target_critic"
# Importance sampling weights for prioritized replay
PRIO_WEIGHTS = "weights"
class DDPGPostprocessing:
    """Implements n-step learning and param noise adjustments."""
    @override(Policy)
    def postprocess_trajectory(self,
                               sample_batch,
                               other_agent_batches=None,
                               episode=None):
        # When parameter-space noise is enabled, adapt its sigma so the
        # induced *action-space* perturbation tracks the configured OU sigma
        # (scaled by the current noise scale).
        if self.config["parameter_noise"]:
            # adjust the sigma of parameter space noise
            states, noisy_actions = [
                list(x) for x in sample_batch.columns(
                    [SampleBatch.CUR_OBS, SampleBatch.ACTIONS])
            ]
            # Strip the weight noise, then re-evaluate the same states to
            # obtain the noise-free ("clean") actions for comparison.
            self.sess.run(self.remove_noise_op)
            clean_actions = self.sess.run(
                self.output_actions,
                feed_dict={
                    self.cur_observations: states,
                    self.stochastic: False,
                    self.noise_scale: .0,
                    self.pure_exploration_phase: False,
                })
            # RMS distance between noisy and clean actions = effective
            # exploration magnitude caused by the parameter noise.
            distance_in_action_space = np.sqrt(
                np.mean(np.square(clean_actions - noisy_actions)))
            self.pi_distance = distance_in_action_space
            if distance_in_action_space < \
                self.config["exploration_ou_sigma"] * self.cur_noise_scale:
                # multiplying the sampled OU noise by noise scale is
                # equivalent to multiplying the sigma of OU by noise scale
                self.parameter_noise_sigma_val *= 1.01
            else:
                self.parameter_noise_sigma_val /= 1.01
            self.parameter_noise_sigma.load(
                self.parameter_noise_sigma_val, session=self.sess)
        # n-step folding / replay-related postprocessing is shared with DQN.
        return _postprocess_dqn(self, sample_batch)
class DDPGTFPolicy(DDPGPostprocessing, TFPolicy):
    def __init__(self, observation_space, action_space, config):
        """Build the full DDPG/TD3 TF graph for this policy.

        Constructs actor & critic networks plus their target copies, the
        exploration-noise action sampler, actor/critic losses, and the
        (soft) target-update op, then initializes all variables and does a
        hard initial target sync.

        Args:
            observation_space (gym.Space): Observation space of the env.
            action_space (gym.spaces.Box): 1-D continuous action space;
                anything else raises UnsupportedSpaceException.
            config (dict): DDPG config, merged over DEFAULT_CONFIG.
        """
        config = dict(ray.rllib.agents.ddpg.ddpg.DEFAULT_CONFIG, **config)
        if not isinstance(action_space, Box):
            raise UnsupportedSpaceException(
                "Action space {} is not supported for DDPG.".format(
                    action_space))
        if len(action_space.shape) > 1:
            raise UnsupportedSpaceException(
                "Action space has multiple dimensions "
                "{}. ".format(action_space.shape) +
                "Consider reshaping this into a single dimension, "
                "using a Tuple action space, or the multi-agent API.")
        self.config = config
        # Current exploration state (mutated via set_epsilon /
        # set_pure_exploration_phase and fed into the graph per action).
        self.cur_noise_scale = 1.0
        self.cur_pure_exploration_phase = False
        self.dim_actions = action_space.shape[0]
        self.low_action = action_space.low
        self.high_action = action_space.high
        # create global step for counting the number of update operations
        self.global_step = tf.train.get_or_create_global_step()
        # use separate optimizers for actor & critic
        self._actor_optimizer = tf.train.AdamOptimizer(
            learning_rate=self.config["actor_lr"])
        self._critic_optimizer = tf.train.AdamOptimizer(
            learning_rate=self.config["critic_lr"])
        # Action inputs
        self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
        self.noise_scale = tf.placeholder(tf.float32, (), name="noise_scale")
        self.pure_exploration_phase = tf.placeholder(
            tf.bool, (), name="pure_exploration_phase")
        self.cur_observations = tf.placeholder(
            tf.float32,
            shape=(None, ) + observation_space.shape,
            name="cur_obs")
        # Actor network for action computation (rollouts).
        with tf.variable_scope(POLICY_SCOPE) as scope:
            policy_out, self.policy_model = self._build_policy_network(
                self.cur_observations, observation_space, action_space)
            self.policy_vars = scope_vars(scope.name)
        # Noise vars for P network except for layer normalization vars
        if self.config["parameter_noise"]:
            self._build_parameter_noise([
                var for var in self.policy_vars if "LayerNorm" not in var.name
            ])
        # Action outputs
        with tf.variable_scope(ACTION_SCOPE):
            self.output_actions = self._add_exploration_noise(
                policy_out, self.stochastic, self.noise_scale,
                self.pure_exploration_phase, action_space)
        if self.config["smooth_target_policy"]:
            self.reset_noise_op = tf.no_op()
        else:
            # Zero out the persistent OU state created by the noise sampler.
            with tf.variable_scope(ACTION_SCOPE, reuse=True):
                exploration_sample = tf.get_variable(name="ornstein_uhlenbeck")
                self.reset_noise_op = tf.assign(exploration_sample,
                                                self.dim_actions * [.0])
        # Replay inputs
        self.obs_t = tf.placeholder(
            tf.float32,
            shape=(None, ) + observation_space.shape,
            name="observation")
        self.act_t = tf.placeholder(
            tf.float32, shape=(None, ) + action_space.shape, name="action")
        self.rew_t = tf.placeholder(tf.float32, [None], name="reward")
        self.obs_tp1 = tf.placeholder(
            tf.float32, shape=(None, ) + observation_space.shape)
        self.done_mask = tf.placeholder(tf.float32, [None], name="done")
        self.importance_weights = tf.placeholder(
            tf.float32, [None], name="weight")
        # policy network evaluation (reuses the rollout actor's variables)
        with tf.variable_scope(POLICY_SCOPE, reuse=True) as scope:
            prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
            self.policy_t, _ = self._build_policy_network(
                self.obs_t, observation_space, action_space)
            policy_batchnorm_update_ops = list(
                set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) -
                prev_update_ops)
        # target policy network evaluation
        with tf.variable_scope(POLICY_TARGET_SCOPE) as scope:
            policy_tp1, _ = self._build_policy_network(
                self.obs_tp1, observation_space, action_space)
            target_policy_vars = scope_vars(scope.name)
        # Action outputs
        with tf.variable_scope(ACTION_SCOPE, reuse=True):
            if config["smooth_target_policy"]:
                # TD3 target-policy smoothing: clipped Gaussian noise on the
                # target action, re-clipped into the valid action box.
                target_noise_clip = self.config["target_noise_clip"]
                clipped_normal_sample = tf.clip_by_value(
                    tf.random_normal(
                        tf.shape(policy_tp1),
                        stddev=self.config["target_noise"]),
                    -target_noise_clip, target_noise_clip)
                policy_tp1_smoothed = tf.clip_by_value(
                    policy_tp1 + clipped_normal_sample,
                    action_space.low * tf.ones_like(policy_tp1),
                    action_space.high * tf.ones_like(policy_tp1))
            else:
                # no smoothing, just use deterministic actions
                policy_tp1_smoothed = policy_tp1
        # q network evaluation
        prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
        with tf.variable_scope(Q_SCOPE) as scope:
            # Q-values for given actions & observations in given current state
            q_t, self.q_model = self._build_q_network(
                self.obs_t, observation_space, action_space, self.act_t)
            self.q_func_vars = scope_vars(scope.name)
        self.stats = {
            "mean_q": tf.reduce_mean(q_t),
            "max_q": tf.reduce_max(q_t),
            "min_q": tf.reduce_min(q_t),
        }
        with tf.variable_scope(Q_SCOPE, reuse=True):
            # Q-values for current policy (no noise) in given current state
            q_t_det_policy, _ = self._build_q_network(
                self.obs_t, observation_space, action_space, self.policy_t)
        if self.config["twin_q"]:
            # Second critic for TD3-style clipped double-Q learning.
            with tf.variable_scope(TWIN_Q_SCOPE) as scope:
                twin_q_t, self.twin_q_model = self._build_q_network(
                    self.obs_t, observation_space, action_space, self.act_t)
                self.twin_q_func_vars = scope_vars(scope.name)
        q_batchnorm_update_ops = list(
            set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) - prev_update_ops)
        # target q network evaluation
        with tf.variable_scope(Q_TARGET_SCOPE) as scope:
            q_tp1, _ = self._build_q_network(self.obs_tp1, observation_space,
                                             action_space, policy_tp1_smoothed)
            target_q_func_vars = scope_vars(scope.name)
        if self.config["twin_q"]:
            with tf.variable_scope(TWIN_Q_TARGET_SCOPE) as scope:
                twin_q_tp1, _ = self._build_q_network(
                    self.obs_tp1, observation_space, action_space,
                    policy_tp1_smoothed)
                twin_target_q_func_vars = scope_vars(scope.name)
        # Actor/critic losses + per-sample TD errors (used for PER).
        if self.config["twin_q"]:
            self.critic_loss, self.actor_loss, self.td_error \
                = self._build_actor_critic_loss(
                    q_t, q_tp1, q_t_det_policy, twin_q_t=twin_q_t,
                    twin_q_tp1=twin_q_tp1)
        else:
            self.critic_loss, self.actor_loss, self.td_error \
                = self._build_actor_critic_loss(
                    q_t, q_tp1, q_t_det_policy)
        # Optional L2 weight decay on non-bias variables.
        if config["l2_reg"] is not None:
            for var in self.policy_vars:
                if "bias" not in var.name:
                    self.actor_loss += (config["l2_reg"] * tf.nn.l2_loss(var))
            for var in self.q_func_vars:
                if "bias" not in var.name:
                    self.critic_loss += (config["l2_reg"] * tf.nn.l2_loss(var))
            if self.config["twin_q"]:
                for var in self.twin_q_func_vars:
                    if "bias" not in var.name:
                        self.critic_loss += (
                            config["l2_reg"] * tf.nn.l2_loss(var))
        # update_target_fn will be called periodically to copy Q network to
        # target Q network. Variables are paired by sorted name; tau is fed
        # at run time (1.0 = hard copy, <1 = Polyak averaging).
        self.tau_value = config.get("tau")
        self.tau = tf.placeholder(tf.float32, (), name="tau")
        update_target_expr = []
        for var, var_target in zip(
                sorted(self.q_func_vars, key=lambda v: v.name),
                sorted(target_q_func_vars, key=lambda v: v.name)):
            update_target_expr.append(
                var_target.assign(self.tau * var +
                                  (1.0 - self.tau) * var_target))
        if self.config["twin_q"]:
            for var, var_target in zip(
                    sorted(self.twin_q_func_vars, key=lambda v: v.name),
                    sorted(twin_target_q_func_vars, key=lambda v: v.name)):
                update_target_expr.append(
                    var_target.assign(self.tau * var +
                                      (1.0 - self.tau) * var_target))
        for var, var_target in zip(
                sorted(self.policy_vars, key=lambda v: v.name),
                sorted(target_policy_vars, key=lambda v: v.name)):
            update_target_expr.append(
                var_target.assign(self.tau * var +
                                  (1.0 - self.tau) * var_target))
        self.update_target_expr = tf.group(*update_target_expr)
        self.sess = tf.get_default_session()
        # (name, placeholder) pairs defining the loss input batch columns.
        self.loss_inputs = [
            (SampleBatch.CUR_OBS, self.obs_t),
            (SampleBatch.ACTIONS, self.act_t),
            (SampleBatch.REWARDS, self.rew_t),
            (SampleBatch.NEXT_OBS, self.obs_tp1),
            (SampleBatch.DONES, self.done_mask),
            (PRIO_WEIGHTS, self.importance_weights),
        ]
        input_dict = dict(self.loss_inputs)
        if self.config["use_state_preprocessor"]:
            # Model self-supervised losses
            self.actor_loss = self.policy_model.custom_loss(
                self.actor_loss, input_dict)
            self.critic_loss = self.q_model.custom_loss(
                self.critic_loss, input_dict)
            if self.config["twin_q"]:
                self.critic_loss = self.twin_q_model.custom_loss(
                    self.critic_loss, input_dict)
        TFPolicy.__init__(
            self,
            observation_space,
            action_space,
            self.config,
            self.sess,
            obs_input=self.cur_observations,
            action_sampler=self.output_actions,
            loss=self.actor_loss + self.critic_loss,
            loss_inputs=self.loss_inputs,
            update_ops=q_batchnorm_update_ops + policy_batchnorm_update_ops)
        self.sess.run(tf.global_variables_initializer())
        # Note that this encompasses both the policy and Q-value networks and
        # their corresponding target networks
        self.variables = ray.experimental.tf_utils.TensorFlowVariables(
            tf.group(q_t_det_policy, q_tp1), self.sess)
        # Hard initial update
        self.update_target(tau=1.0)
    @override(TFPolicy)
    def optimizer(self):
        # The single-optimizer hook is unused: this policy manages two
        # separate Adam optimizers (actor and critic) itself.
        return None
@override(TFPolicy)
def build_apply_op(self, optimizer, grads_and_vars):
# for policy gradient, update policy net one time v.s.
# update critic net `policy_delay` time(s)
should_apply_actor_opt = tf.equal(
tf.mod(self.global_step, self.config["policy_delay"]), 0)
def make_apply_op():
return self._actor_optimizer.apply_gradients(
self._actor_grads_and_vars)
actor_op = tf.cond(
should_apply_actor_opt,
true_fn=make_apply_op,
false_fn=lambda: tf.no_op())
critic_op = self._critic_optimizer.apply_gradients(
self._critic_grads_and_vars)
# increment global step & apply ops
with tf.control_dependencies([tf.assign_add(self.global_step, 1)]):
return tf.group(actor_op, critic_op)
@override(TFPolicy)
def gradients(self, optimizer, loss):
if self.config["grad_norm_clipping"] is not None:
actor_grads_and_vars = minimize_and_clip(
self._actor_optimizer,
self.actor_loss,
var_list=self.policy_vars,
clip_val=self.config["grad_norm_clipping"])
critic_grads_and_vars = minimize_and_clip(
self._critic_optimizer,
self.critic_loss,
var_list=self.q_func_vars + self.twin_q_func_vars
if self.config["twin_q"] else self.q_func_vars,
clip_val=self.config["grad_norm_clipping"])
else:
actor_grads_and_vars = self._actor_optimizer.compute_gradients(
self.actor_loss, var_list=self.policy_vars)
if self.config["twin_q"]:
critic_vars = self.q_func_vars + self.twin_q_func_vars
else:
critic_vars = self.q_func_vars
critic_grads_and_vars = self._critic_optimizer.compute_gradients(
self.critic_loss, var_list=critic_vars)
# save these for later use in build_apply_op
self._actor_grads_and_vars = [(g, v) for (g, v) in actor_grads_and_vars
if g is not None]
self._critic_grads_and_vars = [(g, v)
for (g, v) in critic_grads_and_vars
if g is not None]
grads_and_vars = self._actor_grads_and_vars \
+ self._critic_grads_and_vars
return grads_and_vars
@override(TFPolicy)
def extra_compute_action_feed_dict(self):
return {
# FIXME: what about turning off exploration? Isn't that a good
# idea?
self.stochastic: True,
self.noise_scale: self.cur_noise_scale,
self.pure_exploration_phase: self.cur_pure_exploration_phase,
}
@override(TFPolicy)
def extra_compute_grad_fetches(self):
return {
"td_error": self.td_error,
LEARNER_STATS_KEY: self.stats,
}
@override(TFPolicy)
def get_weights(self):
return self.variables.get_weights()
    @override(TFPolicy)
    def set_weights(self, weights):
        # Restores values previously captured via get_weights().
        self.variables.set_weights(weights)
@override(Policy)
def get_state(self):
return [
TFPolicy.get_state(self), self.cur_noise_scale,
self.cur_pure_exploration_phase
]
@override(Policy)
def set_state(self, state):
TFPolicy.set_state(self, state[0])
self.set_epsilon(state[1])
self.set_pure_exploration_phase(state[2])
def _build_q_network(self, obs, obs_space, action_space, actions):
if self.config["use_state_preprocessor"]:
q_model = ModelCatalog.get_model({
"obs": obs,
"is_training": self._get_is_training_placeholder(),
}, obs_space, action_space, 1, self.config["model"])
q_out = tf.concat([q_model.last_layer, actions], axis=1)
else:
q_model = None
q_out = tf.concat([obs, actions], axis=1)
activation = getattr(tf.nn, self.config["critic_hidden_activation"])
for hidden in self.config["critic_hiddens"]:
q_out = tf.layers.dense(q_out, units=hidden, activation=activation)
q_values = tf.layers.dense(q_out, units=1, activation=None)
return q_values, q_model
def _build_policy_network(self, obs, obs_space, action_space):
if self.config["use_state_preprocessor"]:
model = ModelCatalog.get_model({
"obs": obs,
"is_training": self._get_is_training_placeholder(),
}, obs_space, action_space, 1, self.config["model"])
action_out = model.last_layer
else:
model = None
action_out = obs
activation = getattr(tf.nn, self.config["actor_hidden_activation"])
for hidden in self.config["actor_hiddens"]:
if self.config["parameter_noise"]:
import tensorflow.contrib.layers as layers
action_out = layers.fully_connected(
action_out,
num_outputs=hidden,
activation_fn=activation,
normalizer_fn=layers.layer_norm)
else:
action_out = tf.layers.dense(
action_out, units=hidden, activation=activation)
action_out = tf.layers.dense(
action_out, units=self.dim_actions, activation=None)
# Use sigmoid to scale to [0,1], but also double magnitude of input to
# emulate behaviour of tanh activation used in DDPG and TD3 papers.
sigmoid_out = tf.nn.sigmoid(2 * action_out)
# Rescale to actual env policy scale
# (shape of sigmoid_out is [batch_size, dim_actions], so we reshape to
# get same dims)
action_range = (action_space.high - action_space.low)[None]
low_action = action_space.low[None]
actions = action_range * sigmoid_out + low_action
return actions, model
    def _add_exploration_noise(self, deterministic_actions,
                               should_be_stochastic, noise_scale,
                               enable_pure_exploration, action_space):
        """Wrap the deterministic actor output with exploration noise.

        Depending on the (placeholder) flags at run time, the returned tensor
        yields uniform-random actions (pure exploration), OU- or
        Gaussian-noised actions (normal exploration), or the unmodified
        deterministic actions.
        """
        noise_type = self.config["exploration_noise_type"]
        action_low = action_space.low
        action_high = action_space.high
        action_range = action_space.high - action_low
        def compute_stochastic_actions():
            def make_noisy_actions():
                # shape of deterministic_actions is [None, dim_action]
                if noise_type == "gaussian":
                    # add IID Gaussian noise for exploration, TD3-style
                    normal_sample = noise_scale * tf.random_normal(
                        tf.shape(deterministic_actions),
                        stddev=self.config["exploration_gaussian_sigma"])
                    stochastic_actions = tf.clip_by_value(
                        deterministic_actions + normal_sample,
                        action_low * tf.ones_like(deterministic_actions),
                        action_high * tf.ones_like(deterministic_actions))
                elif noise_type == "ou":
                    # add OU noise for exploration, DDPG-style
                    zero_acts = action_low.size * [.0]
                    # Persistent OU state; zeroed again via reset_noise_op.
                    exploration_sample = tf.get_variable(
                        name="ornstein_uhlenbeck",
                        dtype=tf.float32,
                        initializer=zero_acts,
                        trainable=False)
                    normal_sample = tf.random_normal(
                        shape=[action_low.size], mean=0.0, stddev=1.0)
                    # One mean-reverting OU step: theta pulls the state back
                    # toward zero, sigma injects fresh Gaussian noise.
                    ou_new = self.config["exploration_ou_theta"] \
                        * -exploration_sample \
                        + self.config["exploration_ou_sigma"] * normal_sample
                    exploration_value = tf.assign_add(exploration_sample,
                                                      ou_new)
                    base_scale = self.config["exploration_ou_noise_scale"]
                    noise = noise_scale * base_scale \
                        * exploration_value * action_range
                    stochastic_actions = tf.clip_by_value(
                        deterministic_actions + noise,
                        action_low * tf.ones_like(deterministic_actions),
                        action_high * tf.ones_like(deterministic_actions))
                else:
                    raise ValueError(
                        "Unknown noise type '%s' (try 'ou' or 'gaussian')" %
                        noise_type)
                return stochastic_actions
            def make_uniform_random_actions():
                # pure random exploration option
                uniform_random_actions = tf.random_uniform(
                    tf.shape(deterministic_actions))
                # rescale uniform random actions according to action range
                tf_range = tf.constant(action_range[None], dtype="float32")
                tf_low = tf.constant(action_low[None], dtype="float32")
                uniform_random_actions = uniform_random_actions * tf_range \
                    + tf_low
                return uniform_random_actions
            stochastic_actions = tf.cond(
                # need to condition on noise_scale > 0 because zeroing
                # noise_scale is how a worker signals no noise should be used
                # (this is ugly and should be fixed by adding an "eval_mode"
                # config flag or something)
                tf.logical_and(enable_pure_exploration, noise_scale > 0),
                true_fn=make_uniform_random_actions,
                false_fn=make_noisy_actions)
            return stochastic_actions
        # With parameter-space noise, the actor output is already perturbed,
        # so action-space noise is disabled entirely.
        enable_stochastic = tf.logical_and(should_be_stochastic,
                                           not self.config["parameter_noise"])
        actions = tf.cond(enable_stochastic, compute_stochastic_actions,
                          lambda: deterministic_actions)
        return actions
def _build_actor_critic_loss(self,
q_t,
q_tp1,
q_t_det_policy,
twin_q_t=None,
twin_q_tp1=None):
twin_q = self.config["twin_q"]
gamma = self.config["gamma"]
n_step = self.config["n_step"]
use_huber = self.config["use_huber"]
huber_threshold = self.config["huber_threshold"]
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if twin_q:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 = tf.minimum(q_tp1, twin_q_tp1)
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (1.0 - self.done_mask) * q_tp1_best
# compute RHS of bellman equation
q_t_selected_target = tf.stop_gradient(
self.rew_t + gamma**n_step * q_tp1_best_masked)
# compute the error (potentially clipped)
if twin_q:
td_error = q_t_selected - q_t_selected_target
twin_td_error = twin_q_t_selected - q_t_selected_target
td_error = td_error + twin_td_error
if use_huber:
errors = huber_loss(td_error, huber_threshold) \
+ huber_loss(twin_td_error, huber_threshold)
else:
errors = 0.5 * tf.square(td_error) + 0.5 * tf.square(
twin_td_error)
else:
td_error = q_t_selected - q_t_selected_target
if use_huber:
errors = huber_loss(td_error, huber_threshold)
else:
errors = 0.5 * tf.square(td_error)
critic_loss = tf.reduce_mean(self.importance_weights * errors)
actor_loss = -tf.reduce_mean(q_t_det_policy)
return critic_loss, actor_loss, td_error
    def _build_parameter_noise(self, pnet_params):
        """Create per-weight noise variables plus add/remove/re-sample ops.

        Args:
            pnet_params: actor-network variables to perturb (the caller
                excludes LayerNorm variables).
        """
        self.parameter_noise_sigma_val = self.config["exploration_ou_sigma"]
        # Adaptive stddev of the weight noise; adjusted each trajectory in
        # postprocess_trajectory() and pushed into the graph via .load().
        self.parameter_noise_sigma = tf.get_variable(
            initializer=tf.constant_initializer(
                self.parameter_noise_sigma_val),
            name="parameter_noise_sigma",
            shape=(),
            trainable=False,
            dtype=tf.float32)
        self.parameter_noise = list()
        # No need to add any noise on LayerNorm parameters
        for var in pnet_params:
            noise_var = tf.get_variable(
                name=var.name.split(":")[0] + "_noise",
                shape=var.shape,
                initializer=tf.constant_initializer(.0),
                trainable=False)
            self.parameter_noise.append(noise_var)
        # remove_noise_op subtracts the currently stored noise from the
        # weights, restoring the clean parameters.
        remove_noise_ops = list()
        for var, var_noise in zip(pnet_params, self.parameter_noise):
            remove_noise_ops.append(tf.assign_add(var, -var_noise))
        self.remove_noise_op = tf.group(*tuple(remove_noise_ops))
        # add_noise_op first re-samples fresh noise (enforced by the control
        # dependency below), then adds it onto the weights in place.
        generate_noise_ops = list()
        for var_noise in self.parameter_noise:
            generate_noise_ops.append(
                tf.assign(
                    var_noise,
                    tf.random_normal(
                        shape=var_noise.shape,
                        stddev=self.parameter_noise_sigma)))
        with tf.control_dependencies(generate_noise_ops):
            add_noise_ops = list()
            for var, var_noise in zip(pnet_params, self.parameter_noise):
                add_noise_ops.append(tf.assign_add(var, var_noise))
            self.add_noise_op = tf.group(*tuple(add_noise_ops))
        # Action-space distance between noisy and clean policies; filled in
        # by postprocess_trajectory().
        self.pi_distance = None
def compute_td_error(self, obs_t, act_t, rew_t, obs_tp1, done_mask,
importance_weights):
td_err = self.sess.run(
self.td_error,
feed_dict={
self.obs_t: [np.array(ob) for ob in obs_t],
self.act_t: act_t,
self.rew_t: rew_t,
self.obs_tp1: [np.array(ob) for ob in obs_tp1],
self.done_mask: done_mask,
self.importance_weights: importance_weights
})
return td_err
    def reset_noise(self, sess):
        # Re-zero the persistent OU exploration state (no-op op when target
        # smoothing is enabled -- see __init__).
        sess.run(self.reset_noise_op)
    def add_parameter_noise(self):
        # Perturb the actor weights in place; postprocess_trajectory()
        # removes the noise again and adapts its sigma.
        if self.config["parameter_noise"]:
            self.sess.run(self.add_noise_op)
# support both hard and soft sync
def update_target(self, tau=None):
tau = tau or self.tau_value
return self.sess.run(
self.update_target_expr, feed_dict={self.tau: tau})
    def set_epsilon(self, epsilon):
        # set_epsilon is called by optimizer to anneal exploration as
        # necessary, and to turn it off during evaluation. The "epsilon" part
        # is a carry-over from DQN, which uses epsilon-greedy exploration
        # rather than adding action noise to the output of a policy network.
        # Here it scales the additive action noise (see
        # extra_compute_action_feed_dict).
        self.cur_noise_scale = epsilon
    def set_pure_exploration_phase(self, pure_exploration_phase):
        # Toggles uniform-random acting; consumed by the action sampler via
        # extra_compute_action_feed_dict().
        self.cur_pure_exploration_phase = pure_exploration_phase
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ddpg/noop_model.py
|
Python
|
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class NoopModel(TFModelV2):
    """Identity model: returns the flattened observation unchanged.

    This is the model used if use_state_preprocessor=False."""
    @override(TFModelV2)
    def forward(self, input_dict, state, seq_lens):
        flat_obs = tf.cast(input_dict["obs_flat"], tf.float32)
        return flat_obs, state
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ddpg/td3.py
|
Python
|
"""A more stable successor to TD3.
By default, this uses a near-identical configuration to that reported in the
TD3 paper.
"""
from ray.rllib.agents.ddpg.ddpg import DDPGTrainer, \
DEFAULT_CONFIG as DDPG_CONFIG
from ray.rllib.utils import merge_dicts
# TD3 config = DDPG config with the TD3-paper defaults layered on top.
TD3_DEFAULT_CONFIG = merge_dicts(
    DDPG_CONFIG,
    {
        # largest changes: twin Q functions, delayed policy updates, and target
        # smoothing
        "twin_q": True,
        "policy_delay": 2,
        "smooth_target_policy": True,
        "target_noise": 0.2,
        "target_noise_clip": 0.5,
        # other changes & things we want to keep fixed: IID Gaussian
        # exploration noise, larger actor learning rate, no l2 regularisation,
        # no Huber loss, etc.
        "exploration_should_anneal": False,
        "exploration_noise_type": "gaussian",
        "exploration_gaussian_sigma": 0.1,
        "learning_starts": 10000,
        "pure_exploration_steps": 10000,
        "actor_hiddens": [400, 300],
        "critic_hiddens": [400, 300],
        "n_step": 1,
        "gamma": 0.99,
        "actor_lr": 1e-3,
        "critic_lr": 1e-3,
        "l2_reg": 0.0,
        # soft target updates with small tau, synced on every train step
        "tau": 5e-3,
        "train_batch_size": 100,
        "use_huber": False,
        "target_network_update_freq": 0,
        # single-process rollouts, uniform (non-prioritized) replay
        "num_workers": 0,
        "num_gpus_per_worker": 0,
        "per_worker_exploration": False,
        "worker_side_prioritization": False,
        "buffer_size": 1000000,
        "prioritized_replay": False,
        "clip_rewards": False,
        "use_state_preprocessor": False,
    },
)
# TD3 reuses the DDPG trainer unchanged; only the default config differs.
TD3Trainer = DDPGTrainer.with_updates(
    name="TD3", default_config=TD3_DEFAULT_CONFIG)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/dqn/__init__.py
|
Python
|
from ray.rllib.agents.dqn.apex import ApexTrainer
from ray.rllib.agents.dqn.dqn import DQNTrainer, SimpleQTrainer, DEFAULT_CONFIG
from ray.rllib.utils import renamed_agent
# Backwards-compatible aliases for the pre-"Trainer" naming scheme.
DQNAgent = renamed_agent(DQNTrainer)
ApexAgent = renamed_agent(ApexTrainer)
# Public API of this package.
__all__ = [
    "DQNAgent", "ApexAgent", "ApexTrainer", "DQNTrainer", "DEFAULT_CONFIG",
    "SimpleQTrainer"
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/dqn/apex.py
|
Python
|
from ray.rllib.agents.dqn.dqn import DQNTrainer, DEFAULT_CONFIG as DQN_CONFIG
from ray.rllib.optimizers import AsyncReplayOptimizer
from ray.rllib.utils import merge_dicts
# Ape-X overrides of the base DQN config: many remote rollout workers feed a
# sharded, prioritized replay buffer consumed by a single GPU learner.
# yapf: disable
# __sphinx_doc_begin__
APEX_DEFAULT_CONFIG = merge_dicts(
    DQN_CONFIG,  # see also the options in dqn.py, which are also supported
    {
        "optimizer": merge_dicts(
            DQN_CONFIG["optimizer"], {
                "max_weight_sync_delay": 400,
                "num_replay_buffer_shards": 4,
                "debug": False
            }),
        "n_step": 3,
        "num_gpus": 1,
        "num_workers": 32,
        "buffer_size": 2000000,
        "learning_starts": 50000,
        "train_batch_size": 512,
        "sample_batch_size": 50,
        "target_network_update_freq": 500000,
        "timesteps_per_iteration": 25000,
        "per_worker_exploration": True,
        "worker_side_prioritization": True,
        "min_iter_time_s": 30,
    },
)
# __sphinx_doc_end__
# yapf: enable
def defer_make_workers(trainer, env_creator, policy, config):
    """Create the worker set with zero remote workers for now.

    Hack to workaround https://github.com/ray-project/ray/issues/2541:
    the remote workers are added later, after the optimizer exists (see
    make_async_optimizer).
    """
    num_remote = 0
    return trainer._make_workers(env_creator, policy, config, num_remote)
def make_async_optimizer(workers, config):
    """Build the Ape-X AsyncReplayOptimizer and attach the remote workers.

    Expects a worker set without remote workers (see defer_make_workers);
    they are created here and handed to the optimizer afterwards.
    """
    assert len(workers.remote_workers()) == 0
    # Forward top-level replay-prioritization settings into the optimizer
    # kwargs on top of the "optimizer" sub-config.
    extra_config = config["optimizer"].copy()
    for key in ("prioritized_replay", "prioritized_replay_alpha",
                "prioritized_replay_beta", "prioritized_replay_eps"):
        if key in config:
            extra_config[key] = config[key]
    opt = AsyncReplayOptimizer(
        workers,
        learning_starts=config["learning_starts"],
        buffer_size=config["buffer_size"],
        train_batch_size=config["train_batch_size"],
        sample_batch_size=config["sample_batch_size"],
        **extra_config)
    # Only now create the remote workers and register them.
    workers.add_workers(config["num_workers"])
    opt._set_workers(workers.remote_workers())
    return opt
def update_target_based_on_num_steps_trained(trainer, fetches):
    """Sync target networks on a trained-steps schedule.

    Ape-X updates based on the number of steps *trained*, not sampled
    (vanilla DQN uses sampled steps).
    """
    steps_trained = trainer.optimizer.num_steps_trained
    last_sync = trainer.state["last_target_update_ts"]
    if steps_trained - last_sync > trainer.config[
            "target_network_update_freq"]:
        trainer.workers.local_worker().foreach_trainable_policy(
            lambda p, _: p.update_target())
        trainer.state["last_target_update_ts"] = steps_trained
        trainer.state["num_target_updates"] += 1
# Trainer-template overrides that give the base DQN trainer its Ape-X
# behavior: deferred worker creation, the async replay optimizer, and
# trained-steps-based target syncing.
APEX_TRAINER_PROPERTIES = {
    "make_workers": defer_make_workers,
    "make_policy_optimizer": make_async_optimizer,
    "after_optimizer_step": update_target_based_on_num_steps_trained,
}
ApexTrainer = DQNTrainer.with_updates(
    name="APEX", default_config=APEX_DEFAULT_CONFIG, **APEX_TRAINER_PROPERTIES)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/dqn/distributional_q_model.py
|
Python
|
import numpy as np
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class DistributionalQModel(TFModelV2):
    """Extension of standard TFModel to provide distributional Q values.

    It also supports options for noisy nets and parameter space noise.

    Data flow:
        obs -> forward() -> model_out
        model_out -> get_q_value_distributions() -> Q(s, a) atoms
        model_out -> get_state_value() -> V(s)

    Note that this class by itself is not a valid model unless you
    implement forward() in a subclass."""

    def __init__(self,
                 obs_space,
                 action_space,
                 num_outputs,
                 model_config,
                 name,
                 q_hiddens=(256, ),
                 dueling=False,
                 num_atoms=1,
                 use_noisy=False,
                 v_min=-10.0,
                 v_max=10.0,
                 sigma0=0.5,
                 parameter_noise=False):
        """Initialize variables of this model.

        Extra model kwargs:
            q_hiddens (list): defines size of hidden layers for the q head.
                These will be used to postprocess the model output for the
                purposes of computing Q values.
            dueling (bool): whether to build the state value head for DDQN
            num_atoms (int): if >1, enables distributional DQN
            use_noisy (bool): use noisy nets
            v_min (float): min value support for distributional DQN
            v_max (float): max value support for distributional DQN
            sigma0 (float): initial value of noisy nets
            parameter_noise (bool): enable layer norm for param noise

        Note that the core layers for forward() are not defined here, this
        only defines the layers for the Q head. Those layers for forward()
        should be defined in subclasses of DistributionalQModel.
        """
        super(DistributionalQModel, self).__init__(
            obs_space, action_space, num_outputs, model_config, name)

        # setup the Q head output (i.e., model for get_q_values)
        self.model_out = tf.keras.layers.Input(
            shape=(num_outputs, ), name="model_out")

        def build_action_value(model_out):
            # Q/advantage head: model_out -> (action scores [+ atoms]).
            if q_hiddens:
                action_out = model_out
                for i in range(len(q_hiddens)):
                    if use_noisy:
                        action_out = self._noisy_layer(
                            "hidden_%d" % i, action_out, q_hiddens[i], sigma0)
                    elif parameter_noise:
                        # BUG FIX: the old call passed the tf.contrib.layers
                        # kwargs `activation_fn`/`normalizer_fn` to
                        # tf.keras.layers.Dense, which does not accept them,
                        # raising TypeError whenever parameter_noise=True.
                        # Emulate the contrib semantics instead:
                        # linear -> layer norm -> activation.
                        action_out = tf.keras.layers.Dense(
                            units=q_hiddens[i], activation=None)(action_out)
                        action_out = tf.keras.layers.LayerNormalization()(
                            action_out)
                        action_out = tf.nn.relu(action_out)
                    else:
                        action_out = tf.keras.layers.Dense(
                            units=q_hiddens[i],
                            activation=tf.nn.relu,
                            name="hidden_%d" % i)(action_out)
            else:
                # Avoid postprocessing the outputs. This enables custom models
                # to be used for parametric action DQN.
                action_out = model_out
            if use_noisy:
                action_scores = self._noisy_layer(
                    "output",
                    action_out,
                    self.action_space.n * num_atoms,
                    sigma0,
                    non_linear=False)
            elif q_hiddens:
                action_scores = tf.keras.layers.Dense(
                    units=self.action_space.n * num_atoms,
                    activation=None)(action_out)
            else:
                action_scores = model_out
            if num_atoms > 1:
                # Distributional Q-learning uses a discrete support z
                # to represent the action value distribution
                z = tf.range(num_atoms, dtype=tf.float32)
                z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
                support_logits_per_action = tf.reshape(
                    tensor=action_scores,
                    shape=(-1, self.action_space.n, num_atoms))
                support_prob_per_action = tf.nn.softmax(
                    logits=support_logits_per_action)
                # Expected Q-value = sum over atoms of z * p(z | s, a).
                action_scores = tf.reduce_sum(
                    input_tensor=z * support_prob_per_action, axis=-1)
                logits = support_logits_per_action
                dist = support_prob_per_action
                return [
                    action_scores, z, support_logits_per_action, logits, dist
                ]
            else:
                logits = tf.expand_dims(tf.ones_like(action_scores), -1)
                dist = tf.expand_dims(tf.ones_like(action_scores), -1)
                return [action_scores, logits, dist]

        def build_state_score(model_out):
            # Dueling state-value head: model_out -> V(s) (per atom).
            state_out = model_out
            for i in range(len(q_hiddens)):
                if use_noisy:
                    state_out = self._noisy_layer("dueling_hidden_%d" % i,
                                                  state_out, q_hiddens[i],
                                                  sigma0)
                elif parameter_noise:
                    # BUG FIX: same invalid-kwarg problem as in
                    # build_action_value (see comment there).
                    state_out = tf.keras.layers.Dense(
                        units=q_hiddens[i], activation=None)(state_out)
                    state_out = tf.keras.layers.LayerNormalization()(
                        state_out)
                    state_out = tf.nn.relu(state_out)
                else:
                    state_out = tf.keras.layers.Dense(
                        units=q_hiddens[i], activation=tf.nn.relu)(state_out)
            if use_noisy:
                state_score = self._noisy_layer(
                    "dueling_output",
                    state_out,
                    num_atoms,
                    sigma0,
                    non_linear=False)
            else:
                state_score = tf.keras.layers.Dense(
                    units=num_atoms, activation=None)(state_out)
            return state_score

        if tf.executing_eagerly():
            from tensorflow.python.ops import variable_scope
            # Have to use a variable store to reuse variables in eager mode
            store = variable_scope.EagerVariableStore()
            # Save the scope objects, since in eager we will execute this
            # path repeatedly and there is no guarantee it will always be run
            # in the same original scope.
            with tf.variable_scope(name + "/action_value") as action_scope:
                pass
            with tf.variable_scope(name + "/state_value") as state_scope:
                pass

            def build_action_value_in_scope(model_out):
                with store.as_default():
                    with tf.variable_scope(action_scope, reuse=tf.AUTO_REUSE):
                        return build_action_value(model_out)

            def build_state_score_in_scope(model_out):
                with store.as_default():
                    with tf.variable_scope(state_scope, reuse=tf.AUTO_REUSE):
                        return build_state_score(model_out)
        else:

            def build_action_value_in_scope(model_out):
                with tf.variable_scope(
                        name + "/action_value", reuse=tf.AUTO_REUSE):
                    return build_action_value(model_out)

            def build_state_score_in_scope(model_out):
                with tf.variable_scope(
                        name + "/state_value", reuse=tf.AUTO_REUSE):
                    return build_state_score(model_out)

        q_out = build_action_value_in_scope(self.model_out)
        self.q_value_head = tf.keras.Model(self.model_out, q_out)
        self.register_variables(self.q_value_head.variables)

        if dueling:
            state_out = build_state_score_in_scope(self.model_out)
            self.state_value_head = tf.keras.Model(self.model_out, state_out)
            self.register_variables(self.state_value_head.variables)
    def get_q_value_distributions(self, model_out):
        """Returns distributional values for Q(s, a) given a state embedding.

        Override this in your custom model to customize the Q output head.

        Arguments:
            model_out (Tensor): embedding from the model layers

        Returns:
            (action_scores, logits, dist) if num_atoms == 1, otherwise
            (action_scores, z, support_logits_per_action, logits, dist)
        """
        return self.q_value_head(model_out)
    def get_state_value(self, model_out):
        """Returns the state value prediction for the given state embedding.

        Only valid when the model was built with ``dueling=True``, since
        ``state_value_head`` is not created otherwise.
        """
        return self.state_value_head(model_out)
def _noisy_layer(self,
                 prefix,
                 action_in,
                 out_size,
                 sigma0,
                 non_linear=True):
    r"""
    a common dense layer: y = w^{T}x + b
    a noisy layer: y = (w + \epsilon_w*\sigma_w)^{T}x +
        (b+\epsilon_b*\sigma_b)
    where \epsilon are random variables sampled from factorized normal
    distributions and \sigma are trainable variables which are expected to
    vanish along the training procedure
    """
    in_size = int(action_in.shape[1])

    # Factorized noise: one random vector per side, combined via outer
    # product, instead of a full [in_size, out_size] noise matrix.
    epsilon_in = tf.random_normal(shape=[in_size])
    epsilon_out = tf.random_normal(shape=[out_size])
    epsilon_in = self._f_epsilon(epsilon_in)
    epsilon_out = self._f_epsilon(epsilon_out)
    epsilon_w = tf.matmul(
        a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0))
    epsilon_b = epsilon_out

    sigma_w = tf.get_variable(
        name=prefix + "_sigma_w",
        shape=[in_size, out_size],
        dtype=tf.float32,
        initializer=tf.random_uniform_initializer(
            minval=-1.0 / np.sqrt(float(in_size)),
            maxval=1.0 / np.sqrt(float(in_size))))
    # TF noise generation can be unreliable on GPU
    # If generating the noise on the CPU,
    # lowering sigma0 to 0.1 may be helpful
    sigma_b = tf.get_variable(
        name=prefix + "_sigma_b",
        shape=[out_size],
        dtype=tf.float32,  # 0.5~GPU, 0.1~CPU
        initializer=tf.constant_initializer(
            sigma0 / np.sqrt(float(in_size))))

    w = tf.get_variable(
        name=prefix + "_fc_w",
        shape=[in_size, out_size],
        dtype=tf.float32,
        initializer=tf.initializers.GlorotUniform())
    b = tf.get_variable(
        name=prefix + "_fc_b",
        shape=[out_size],
        dtype=tf.float32,
        initializer=tf.zeros_initializer())

    # y = (w + sigma_w * eps_w)^T x + (b + sigma_b * eps_b)
    action_activation = tf.nn.xw_plus_b(action_in, w + sigma_w * epsilon_w,
                                        b + sigma_b * epsilon_b)

    if not non_linear:
        return action_activation
    return tf.nn.relu(action_activation)
def _f_epsilon(self, x):
    """Signed-sqrt transform f(x) = sign(x) * sqrt(|x|), applied to the
    raw Gaussian samples when building the factorized noise."""
    magnitude = tf.sqrt(tf.abs(x))
    direction = tf.sign(x)
    return direction * magnitude
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/dqn/dqn.py
|
Python
|
import logging
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.agents.dqn.dqn_policy import DQNTFPolicy
from ray.rllib.agents.dqn.simple_q_policy import SimpleQPolicy
from ray.rllib.optimizers import SyncReplayOptimizer
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.schedules import ConstantSchedule, LinearSchedule
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # === Model ===
    # Number of atoms for representing the distribution of return. When
    # this is greater than 1, distributional Q-learning is used.
    # the discrete supports are bounded by v_min and v_max
    "num_atoms": 1,
    "v_min": -10.0,
    "v_max": 10.0,
    # Whether to use noisy network
    "noisy": False,
    # control the initial value of noisy nets
    "sigma0": 0.5,
    # Whether to use dueling dqn
    "dueling": True,
    # Whether to use double dqn
    "double_q": True,
    # Postprocess model outputs with these hidden layers to compute the
    # state and action values. See also the model config in catalog.py.
    "hiddens": [256],
    # N-step Q learning
    "n_step": 1,

    # === Exploration ===
    # Max num timesteps for annealing schedules. Exploration is annealed from
    # 1.0 to exploration_fraction over this number of timesteps scaled by
    # exploration_fraction
    "schedule_max_timesteps": 100000,
    # Minimum env steps to optimize for per train call. This value does
    # not affect learning, only the length of iterations.
    "timesteps_per_iteration": 1000,
    # Fraction of entire training period over which the exploration rate is
    # annealed
    "exploration_fraction": 0.1,
    # Final value of random action probability
    "exploration_final_eps": 0.02,
    # Update the target network every `target_network_update_freq` steps.
    "target_network_update_freq": 500,
    # Use softmax for sampling actions. Required for off policy estimation.
    "soft_q": False,
    # Softmax temperature. Q values are divided by this value prior to softmax.
    # Softmax approaches argmax as the temperature drops to zero.
    "softmax_temp": 1.0,
    # If True parameter space noise will be used for exploration
    # See https://blog.openai.com/better-exploration-with-parameter-noise/
    "parameter_noise": False,
    # Extra configuration that disables exploration.
    "evaluation_config": {
        "exploration_fraction": 0,
        "exploration_final_eps": 0,
    },

    # === Replay buffer ===
    # Size of the replay buffer. Note that if async_updates is set, then
    # each worker will have a replay buffer of this size.
    "buffer_size": 50000,
    # If True prioritized replay buffer will be used.
    "prioritized_replay": True,
    # Alpha parameter for prioritized replay buffer.
    "prioritized_replay_alpha": 0.6,
    # Beta parameter for sampling from prioritized replay buffer.
    "prioritized_replay_beta": 0.4,
    # Fraction of entire training period over which the beta parameter is
    # annealed
    "beta_annealing_fraction": 0.2,
    # Final value of beta
    "final_prioritized_replay_beta": 0.4,
    # Epsilon to add to the TD errors when updating priorities.
    "prioritized_replay_eps": 1e-6,
    # Whether to LZ4 compress observations
    "compress_observations": True,

    # === Optimization ===
    # Learning rate for adam optimizer
    "lr": 5e-4,
    # Learning rate schedule
    "lr_schedule": None,
    # Adam epsilon hyper parameter
    "adam_epsilon": 1e-8,
    # If not None, clip gradients during optimization at this value
    "grad_norm_clipping": 40,
    # How many steps of the model to sample before learning starts.
    "learning_starts": 1000,
    # Update the replay buffer with this many samples at once. Note that
    # this setting applies per-worker if num_workers > 1.
    "sample_batch_size": 4,
    # Size of a batched sampled from replay buffer for training. Note that
    # if async_updates is set, then each worker returns gradients for a
    # batch of this size.
    "train_batch_size": 32,

    # === Parallelism ===
    # Number of workers for collecting samples with. This only makes sense
    # to increase if your environment is particularly slow to sample, or if
    # you're using the Async or Ape-X optimizers.
    "num_workers": 0,
    # Whether to use a distribution of epsilons across workers for exploration.
    "per_worker_exploration": False,
    # Whether to compute priorities on workers.
    "worker_side_prioritization": False,
    # Prevent iterations from going lower than this time span
    "min_iter_time_s": 1,
})
# __sphinx_doc_end__
# yapf: enable
def make_optimizer(workers, config):
    """Builds the SyncReplayOptimizer driving DQN-style training.

    Args:
        workers: the trainer's worker set to sample from / train on.
        config: trainer config dict (DEFAULT_CONFIG-style keys).

    Returns:
        SyncReplayOptimizer wired with the replay/prioritization settings;
        any extra kwargs come from config["optimizer"].
    """
    return SyncReplayOptimizer(
        workers,
        learning_starts=config["learning_starts"],
        buffer_size=config["buffer_size"],
        prioritized_replay=config["prioritized_replay"],
        prioritized_replay_alpha=config["prioritized_replay_alpha"],
        prioritized_replay_beta=config["prioritized_replay_beta"],
        schedule_max_timesteps=config["schedule_max_timesteps"],
        beta_annealing_fraction=config["beta_annealing_fraction"],
        final_prioritized_replay_beta=config["final_prioritized_replay_beta"],
        prioritized_replay_eps=config["prioritized_replay_eps"],
        train_batch_size=config["train_batch_size"],
        sample_batch_size=config["sample_batch_size"],
        **config["optimizer"])
def check_config_and_setup_param_noise(config):
    """Update the config based on settings.

    Rewrites sample_batch_size to take into account n_step truncation, and also
    adds the necessary callbacks to support parameter space noise exploration.
    """
    # PyTorch check.
    if config["use_pytorch"]:
        raise ValueError("DQN does not support PyTorch yet! Use tf instead.")

    # Update effective batch size to include n-step
    adjusted_batch_size = max(config["sample_batch_size"],
                              config.get("n_step", 1))
    config["sample_batch_size"] = adjusted_batch_size

    if config.get("parameter_noise", False):
        if config["batch_mode"] != "complete_episodes":
            raise ValueError("Exploration with parameter space noise requires "
                             "batch_mode to be complete_episodes.")
        if config.get("noisy", False):
            raise ValueError(
                "Exploration with parameter space noise and noisy network "
                "cannot be used at the same time.")

        # Wrap (rather than replace) any user-provided episode callbacks so
        # both the noise handling and the user hook run.
        if config["callbacks"]["on_episode_start"]:
            start_callback = config["callbacks"]["on_episode_start"]
        else:
            start_callback = None

        def on_episode_start(info):
            # as a callback function to sample and pose parameter space
            # noise on the parameters of network
            policies = info["policy"]
            for pi in policies.values():
                pi.add_parameter_noise()
            if start_callback:
                start_callback(info)

        config["callbacks"]["on_episode_start"] = on_episode_start

        if config["callbacks"]["on_episode_end"]:
            end_callback = config["callbacks"]["on_episode_end"]
        else:
            end_callback = None

        def on_episode_end(info):
            # as a callback function to monitor the distance
            # between noisy policy and original policy
            policies = info["policy"]
            episode = info["episode"]
            model = policies[DEFAULT_POLICY_ID].model
            if hasattr(model, "pi_distance"):
                episode.custom_metrics["policy_distance"] = model.pi_distance
            if end_callback:
                end_callback(info)

        config["callbacks"]["on_episode_end"] = on_episode_end
def get_initial_state(config):
    """Initial mutable trainer state, tracked across train() iterations."""
    initial_state = {}
    # Sampled-timestep at which the target net was last synced.
    initial_state["last_target_update_ts"] = 0
    # Running count of target-network syncs (reported in metrics).
    initial_state["num_target_updates"] = 0
    return initial_state
def make_exploration_schedule(config, worker_index):
    """Returns the epsilon schedule for one worker.

    worker_index is -1 for the local (evaluation) worker, >= 0 for remotes.
    """
    # Use either a different `eps` per worker, or a linear schedule.
    if config["per_worker_exploration"]:
        assert config["num_workers"] > 1, \
            "This requires multiple workers"
        if worker_index >= 0:
            # Exploration constants from the Ape-X paper
            # (higher worker index => larger exponent => smaller epsilon).
            exponent = (
                1 + worker_index / float(config["num_workers"] - 1) * 7)
            return ConstantSchedule(0.4**exponent)
        else:
            # local ev should have zero exploration so that eval rollouts
            # run properly
            return ConstantSchedule(0.0)
    return LinearSchedule(
        schedule_timesteps=int(
            config["exploration_fraction"] * config["schedule_max_timesteps"]),
        initial_p=1.0,
        final_p=config["exploration_final_eps"])
def setup_exploration(trainer):
    """Attaches the per-worker epsilon schedules to the trainer."""
    # Schedule for the local (evaluation) worker.
    trainer.exploration0 = make_exploration_schedule(trainer.config, -1)
    # One schedule per remote worker index.
    schedules = []
    for worker_idx in range(trainer.config["num_workers"]):
        schedules.append(
            make_exploration_schedule(trainer.config, worker_idx))
    trainer.explorations = schedules
def update_worker_explorations(trainer):
    """Pushes the current epsilon values out to all workers.

    Evaluates each worker's exploration schedule at the global sampled
    timestep, sets epsilon on the local worker and (via a remote call) on
    each remote worker, and records the values for metrics reporting.

    Side effects on `trainer`:
        train_start_timestep: timestep at the start of this train step.
        cur_exp_vals: [local_eps, worker_0_eps, ...] for metrics.
    """
    global_timestep = trainer.optimizer.num_steps_sampled
    exp_vals = [trainer.exploration0.value(global_timestep)]
    trainer.workers.local_worker().foreach_trainable_policy(
        lambda p, _: p.set_epsilon(exp_vals[0]))
    for i, e in enumerate(trainer.workers.remote_workers()):
        exp_val = trainer.explorations[i].value(global_timestep)
        # Bind exp_val as a default argument: a plain closure would
        # late-bind and could apply the *last* worker's epsilon to every
        # worker if the lambda is evaluated after the loop has advanced.
        e.foreach_trainable_policy.remote(
            lambda p, _, eps=exp_val: p.set_epsilon(eps))
        exp_vals.append(exp_val)
    trainer.train_start_timestep = global_timestep
    trainer.cur_exp_vals = exp_vals
def add_trainer_metrics(trainer, result):
    """Augments a train() result dict with exploration/target-update stats."""
    global_timestep = trainer.optimizer.num_steps_sampled
    # Optimizer stats are merged last so they win on key collisions, same
    # as the dict(literal, **stats) form would.
    info = {
        "min_exploration": min(trainer.cur_exp_vals),
        "max_exploration": max(trainer.cur_exp_vals),
        "num_target_updates": trainer.state["num_target_updates"],
    }
    info.update(trainer.optimizer.stats())
    result.update(
        timesteps_this_iter=global_timestep - trainer.train_start_timestep,
        info=info)
def update_target_if_needed(trainer, fetches):
    """Syncs the target network every `target_network_update_freq` sampled
    timesteps (called after each optimizer step; `fetches` is unused)."""
    global_timestep = trainer.optimizer.num_steps_sampled
    if global_timestep - trainer.state["last_target_update_ts"] > \
            trainer.config["target_network_update_freq"]:
        trainer.workers.local_worker().foreach_trainable_policy(
            lambda p, _: p.update_target())
        trainer.state["last_target_update_ts"] = global_timestep
        trainer.state["num_target_updates"] += 1
def collect_metrics(trainer):
    """Collects rollout metrics; with per-worker exploration, only from the
    last third of workers (which have the lowest epsilons, per the Ape-X
    schedule in make_exploration_schedule)."""
    if trainer.config["per_worker_exploration"]:
        # Only collect metrics from the third of workers with lowest eps
        result = trainer.collect_metrics(
            selected_workers=trainer.workers.remote_workers()[
                -len(trainer.workers.remote_workers()) // 3:])
    else:
        result = trainer.collect_metrics()
    return result
def disable_exploration(trainer):
    """Zeroes epsilon on the evaluation worker before an evaluation run."""
    trainer.evaluation_workers.local_worker().foreach_trainable_policy(
        lambda p, _: p.set_epsilon(0))
# Generic off-policy trainer template: wires the hooks above (replay
# optimizer, epsilon schedules, target-net syncing, metrics) around a
# pluggable policy.
GenericOffPolicyTrainer = build_trainer(
    name="GenericOffPolicyAlgorithm",
    default_policy=None,
    default_config=DEFAULT_CONFIG,
    validate_config=check_config_and_setup_param_noise,
    get_initial_state=get_initial_state,
    make_policy_optimizer=make_optimizer,
    before_init=setup_exploration,
    before_train_step=update_worker_explorations,
    after_optimizer_step=update_target_if_needed,
    after_train_result=add_trainer_metrics,
    collect_metrics_fn=collect_metrics,
    before_evaluate_fn=disable_exploration)

# Full DQN (dueling/double/distributional/noisy, per config flags).
DQNTrainer = GenericOffPolicyTrainer.with_updates(
    name="DQN", default_policy=DQNTFPolicy, default_config=DEFAULT_CONFIG)

# Same loop with the un-optimized baseline policy.
SimpleQTrainer = DQNTrainer.with_updates(default_policy=SimpleQPolicy)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/dqn/dqn_policy.py
|
Python
|
from gym.spaces import Discrete
import numpy as np
from scipy.stats import entropy
import ray
from ray.rllib.agents.dqn.distributional_q_model import DistributionalQModel
from ray.rllib.agents.dqn.simple_q_policy import ExplorationStateMixin, \
TargetNetworkMixin
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models import ModelCatalog
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.policy.tf_policy import LearningRateSchedule
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils.tf_ops import huber_loss, reduce_mean_ignore_inf, \
minimize_and_clip
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.tf_ops import make_tf_callable
tf = try_import_tf()
Q_SCOPE = "q_func"
Q_TARGET_SCOPE = "target_q_func"
# Importance sampling weights for prioritized replay
PRIO_WEIGHTS = "weights"
class QLoss:
    """TD loss for DQN.

    Distributional (C51-style cross-entropy) when num_atoms > 1, otherwise
    standard huber TD loss.

    Attributes:
        td_error: per-sample error, consumed by prioritized replay.
        loss: scalar importance-weighted mean loss.
        stats: dict of scalar diagnostics.
    """

    def __init__(self,
                 q_t_selected,
                 q_logits_t_selected,
                 q_tp1_best,
                 q_dist_tp1_best,
                 importance_weights,
                 rewards,
                 done_mask,
                 gamma=0.99,
                 n_step=1,
                 num_atoms=1,
                 v_min=-10.0,
                 v_max=10.0):
        if num_atoms > 1:
            # Distributional Q-learning which corresponds to an entropy loss
            # Fixed atom support z_i, evenly spaced in [v_min, v_max].
            z = tf.range(num_atoms, dtype=tf.float32)
            z = v_min + z * (v_max - v_min) / float(num_atoms - 1)

            # (batch_size, 1) * (1, num_atoms) = (batch_size, num_atoms)
            # Bellman-backed-up support: r + gamma^n * (1 - done) * z.
            r_tau = tf.expand_dims(
                rewards, -1) + gamma**n_step * tf.expand_dims(
                    1.0 - done_mask, -1) * tf.expand_dims(z, 0)
            r_tau = tf.clip_by_value(r_tau, v_min, v_max)
            # Fractional atom index of each backed-up support point.
            b = (r_tau - v_min) / ((v_max - v_min) / float(num_atoms - 1))
            lb = tf.floor(b)
            ub = tf.ceil(b)

            # indispensable judgement which is missed in most implementations
            # when b happens to be an integer, lb == ub, so pr_j(s', a*) will
            # be discarded because (ub-b) == (b-lb) == 0
            floor_equal_ceil = tf.to_float(tf.less(ub - lb, 0.5))

            l_project = tf.one_hot(
                tf.cast(lb, dtype=tf.int32),
                num_atoms)  # (batch_size, num_atoms, num_atoms)
            u_project = tf.one_hot(
                tf.cast(ub, dtype=tf.int32),
                num_atoms)  # (batch_size, num_atoms, num_atoms)
            # Split each probability mass between its two neighboring atoms.
            ml_delta = q_dist_tp1_best * (ub - b + floor_equal_ceil)
            mu_delta = q_dist_tp1_best * (b - lb)
            ml_delta = tf.reduce_sum(
                l_project * tf.expand_dims(ml_delta, -1), axis=1)
            mu_delta = tf.reduce_sum(
                u_project * tf.expand_dims(mu_delta, -1), axis=1)
            m = ml_delta + mu_delta

            # Rainbow paper claims that using this cross entropy loss for
            # priority is robust and insensitive to `prioritized_replay_alpha`
            self.td_error = tf.nn.softmax_cross_entropy_with_logits(
                labels=m, logits=q_logits_t_selected)
            self.loss = tf.reduce_mean(self.td_error * importance_weights)
            self.stats = {
                # TODO: better Q stats for dist dqn
                "mean_td_error": tf.reduce_mean(self.td_error),
            }
        else:
            # Zero out Q(s', a*) for terminal transitions.
            q_tp1_best_masked = (1.0 - done_mask) * q_tp1_best

            # compute RHS of bellman equation
            q_t_selected_target = rewards + gamma**n_step * q_tp1_best_masked

            # compute the error (potentially clipped)
            self.td_error = (
                q_t_selected - tf.stop_gradient(q_t_selected_target))
            self.loss = tf.reduce_mean(
                tf.cast(importance_weights, tf.float32) * huber_loss(
                    self.td_error))
            self.stats = {
                "mean_q": tf.reduce_mean(q_t_selected),
                "min_q": tf.reduce_min(q_t_selected),
                "max_q": tf.reduce_max(q_t_selected),
                "mean_td_error": tf.reduce_mean(self.td_error),
            }
class QValuePolicy:
    """Turns Q-values into sampled actions (softmax or epsilon-greedy).

    Sets `self.action` and `self.action_prob`; the latter is None for the
    epsilon-greedy path.
    """

    def __init__(self, q_values, observations, num_actions, cur_epsilon,
                 softmax, softmax_temp, model_config):
        if softmax:
            # Boltzmann exploration: sample from softmax(Q / temperature).
            action_dist = Categorical(q_values / softmax_temp)
            self.action = action_dist.sample()
            self.action_prob = tf.exp(action_dist.sampled_action_logp())
            return

        deterministic_actions = tf.argmax(q_values, axis=1)
        batch_size = tf.shape(observations)[0]

        # Special case masked out actions (q_value ~= -inf) so that we don't
        # even consider them for exploration.
        random_valid_action_logits = tf.where(
            tf.equal(q_values, tf.float32.min),
            tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
        random_actions = tf.squeeze(
            tf.multinomial(random_valid_action_logits, 1), axis=1)

        # Per-sample coin flip: act randomly with probability cur_epsilon.
        chose_random = tf.random_uniform(
            tf.stack([batch_size]), minval=0, maxval=1,
            dtype=tf.float32) < cur_epsilon

        self.action = tf.where(chose_random, random_actions,
                               deterministic_actions)
        self.action_prob = None
class ComputeTDErrorMixin:
    """Adds compute_td_error(), used for worker-side replay prioritization."""

    def __init__(self):
        @make_tf_callable(self.get_session(), dynamic_shape=True)
        def compute_td_error(obs_t, act_t, rew_t, obs_tp1, done_mask,
                             importance_weights):
            # Do forward pass on loss to update td error attribute
            build_q_losses(
                self, self.model, None, {
                    SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
                    SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
                    SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
                    SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
                    SampleBatch.DONES: tf.convert_to_tensor(done_mask),
                    PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
                })
            # build_q_losses stores the per-sample error on self.q_loss.
            return self.q_loss.td_error

        self.compute_td_error = compute_td_error
def postprocess_trajectory(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Adapts the parameter-noise sigma (if enabled), then applies the
    n-step / priority postprocessing via _postprocess_dqn."""
    if policy.config["parameter_noise"]:
        # adjust the sigma of parameter space noise
        states = [list(x) for x in sample_batch.columns(["obs"])][0]

        # Action distribution with the current (noisy) weights.
        noisy_action_distribution = policy.get_session().run(
            policy.action_probs, feed_dict={policy.cur_observations: states})
        # Strip the noise and re-evaluate to get the clean distribution.
        policy.get_session().run(policy.remove_noise_op)
        clean_action_distribution = policy.get_session().run(
            policy.action_probs, feed_dict={policy.cur_observations: states})
        # Mean KL between clean and noisy policies over the batch.
        distance_in_action_space = np.mean(
            entropy(clean_action_distribution.T, noisy_action_distribution.T))
        policy.pi_distance = distance_in_action_space
        if (distance_in_action_space <
                -np.log(1 - policy.cur_epsilon_value +
                        policy.cur_epsilon_value / policy.num_actions)):
            # Distance below the epsilon-greedy-equivalent threshold:
            # increase the noise scale; otherwise decrease it.
            policy.parameter_noise_sigma_val *= 1.01
        else:
            policy.parameter_noise_sigma_val /= 1.01
        policy.parameter_noise_sigma.load(
            policy.parameter_noise_sigma_val, session=policy.get_session())

    return _postprocess_dqn(policy, sample_batch)
def build_q_model(policy, obs_space, action_space, config):
    """Builds the Q network and the target Q network for DQN.

    Both networks share an identical architecture and kwargs; only their
    name/scope differs (Q_SCOPE vs. Q_TARGET_SCOPE). The construction is
    factored into one helper so the two 15-argument calls cannot drift.

    Args:
        policy: the policy being built; q_model / target_q_model are
            attached to it as a side effect.
        obs_space: observation space.
        action_space: must be Discrete.
        config: trainer config dict.

    Returns:
        The main Q model (also stored as policy.q_model).

    Raises:
        UnsupportedSpaceException: for non-Discrete action spaces.
    """
    if not isinstance(action_space, Discrete):
        raise UnsupportedSpaceException(
            "Action space {} is not supported for DQN.".format(action_space))

    if config["hiddens"]:
        # try to infer the last layer size, otherwise fall back to 256
        num_outputs = ([256] + config["model"]["fcnet_hiddens"])[-1]
        config["model"]["no_final_linear"] = True
    else:
        num_outputs = action_space.n

    def _build(scope_name):
        # Single source of truth for the shared model/target-model kwargs.
        return ModelCatalog.get_model_v2(
            obs_space,
            action_space,
            num_outputs,
            config["model"],
            framework="tf",
            model_interface=DistributionalQModel,
            name=scope_name,
            num_atoms=config["num_atoms"],
            q_hiddens=config["hiddens"],
            dueling=config["dueling"],
            use_noisy=config["noisy"],
            v_min=config["v_min"],
            v_max=config["v_max"],
            sigma0=config["sigma0"],
            parameter_noise=config["parameter_noise"])

    policy.q_model = _build(Q_SCOPE)
    policy.target_q_model = _build(Q_TARGET_SCOPE)

    return policy.q_model
def build_q_networks(policy, q_model, input_dict, obs_space, action_space,
                     config):
    """Builds the action-sampling head on top of the Q model.

    Returns (actions, action_logp) for the policy template's
    action_sampler_fn hook; also attaches q_values / q_func_vars (and the
    parameter-noise machinery, if enabled) to the policy.
    """
    # Action Q network
    q_values, q_logits, q_dist = _compute_q_values(
        policy, q_model, input_dict[SampleBatch.CUR_OBS], obs_space,
        action_space)
    policy.q_values = q_values
    policy.q_func_vars = q_model.variables()

    # Noise vars for Q network except for layer normalization vars
    if config["parameter_noise"]:
        _build_parameter_noise(
            policy,
            [var for var in policy.q_func_vars if "LayerNorm" not in var.name])
        policy.action_probs = tf.nn.softmax(policy.q_values)

    # Action outputs
    qvp = QValuePolicy(q_values, input_dict[SampleBatch.CUR_OBS],
                       action_space.n, policy.cur_epsilon, config["soft_q"],
                       config["softmax_temp"], config["model"])
    policy.output_actions, policy.action_prob = qvp.action, qvp.action_prob

    actions = policy.output_actions
    # The template expects a log-prob; epsilon-greedy yields None.
    action_prob = (tf.log(policy.action_prob)
                   if policy.action_prob is not None else None)
    return actions, action_prob
def _build_parameter_noise(policy, pnet_params):
    """Creates noise variables plus add/remove ops for parameter-space noise.

    Attaches to `policy`: parameter_noise_sigma (TF scalar var),
    parameter_noise (one noise var per weight), add_noise_op,
    remove_noise_op, and pi_distance (diagnostic, set later by
    postprocess_trajectory).
    """
    policy.parameter_noise_sigma_val = 1.0
    policy.parameter_noise_sigma = tf.get_variable(
        initializer=tf.constant_initializer(policy.parameter_noise_sigma_val),
        name="parameter_noise_sigma",
        shape=(),
        trainable=False,
        dtype=tf.float32)
    policy.parameter_noise = list()
    # No need to add any noise on LayerNorm parameters
    for var in pnet_params:
        noise_var = tf.get_variable(
            name=var.name.split(":")[0] + "_noise",
            shape=var.shape,
            initializer=tf.constant_initializer(.0),
            trainable=False)
        policy.parameter_noise.append(noise_var)
    remove_noise_ops = list()
    for var, var_noise in zip(pnet_params, policy.parameter_noise):
        # Subtracting the stored noise restores the clean weights.
        remove_noise_ops.append(tf.assign_add(var, -var_noise))
    policy.remove_noise_op = tf.group(*tuple(remove_noise_ops))
    generate_noise_ops = list()
    for var_noise in policy.parameter_noise:
        generate_noise_ops.append(
            tf.assign(
                var_noise,
                tf.random_normal(
                    shape=var_noise.shape,
                    stddev=policy.parameter_noise_sigma)))
    # add_noise_op resamples the noise first, then adds it to the weights.
    with tf.control_dependencies(generate_noise_ops):
        add_noise_ops = list()
        for var, var_noise in zip(pnet_params, policy.parameter_noise):
            add_noise_ops.append(tf.assign_add(var, var_noise))
        policy.add_noise_op = tf.group(*tuple(add_noise_ops))
    policy.pi_distance = None
def build_q_losses(policy, model, _, train_batch):
    """Builds the (double-)DQN loss over a train batch.

    Stores the QLoss object on policy.q_loss (td_error is read elsewhere
    for prioritized replay) and returns the scalar loss tensor.
    """
    config = policy.config
    # q network evaluation
    q_t, q_logits_t, q_dist_t = _compute_q_values(
        policy, policy.q_model, train_batch[SampleBatch.CUR_OBS],
        policy.observation_space, policy.action_space)

    # target q network evalution
    q_tp1, q_logits_tp1, q_dist_tp1 = _compute_q_values(
        policy, policy.target_q_model, train_batch[SampleBatch.NEXT_OBS],
        policy.observation_space, policy.action_space)
    policy.target_q_func_vars = policy.target_q_model.variables()

    # q scores for actions which we know were selected in the given state.
    one_hot_selection = tf.one_hot(
        tf.cast(train_batch[SampleBatch.ACTIONS], tf.int32),
        policy.action_space.n)
    q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
    q_logits_t_selected = tf.reduce_sum(
        q_logits_t * tf.expand_dims(one_hot_selection, -1), 1)

    # compute estimate of best possible value starting from state at t + 1
    if config["double_q"]:
        # Double DQN: argmax over the online net, value from the target net.
        q_tp1_using_online_net, q_logits_tp1_using_online_net, \
            q_dist_tp1_using_online_net = _compute_q_values(
                policy, policy.q_model,
                train_batch[SampleBatch.NEXT_OBS],
                policy.observation_space, policy.action_space)
        q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
        q_tp1_best_one_hot_selection = tf.one_hot(q_tp1_best_using_online_net,
                                                  policy.action_space.n)
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1)
    else:
        # Vanilla DQN: both argmax and value from the target net.
        q_tp1_best_one_hot_selection = tf.one_hot(
            tf.argmax(q_tp1, 1), policy.action_space.n)
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1)

    policy.q_loss = QLoss(
        q_t_selected, q_logits_t_selected, q_tp1_best, q_dist_tp1_best,
        train_batch[PRIO_WEIGHTS], train_batch[SampleBatch.REWARDS],
        tf.cast(train_batch[SampleBatch.DONES],
                tf.float32), config["gamma"], config["n_step"],
        config["num_atoms"], config["v_min"], config["v_max"])

    return policy.q_loss.loss
def adam_optimizer(policy, config):
    """Adam with the schedule-controlled LR and the configured epsilon."""
    return tf.train.AdamOptimizer(
        learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"])
def clip_gradients(policy, optimizer, loss):
    """Computes gradients over the Q-net vars, norm-clipped if configured."""
    if policy.config["grad_norm_clipping"] is not None:
        grads_and_vars = minimize_and_clip(
            optimizer,
            loss,
            var_list=policy.q_func_vars,
            clip_val=policy.config["grad_norm_clipping"])
    else:
        grads_and_vars = optimizer.compute_gradients(
            loss, var_list=policy.q_func_vars)
    # Drop vars that have no gradient w.r.t. the loss.
    grads_and_vars = [(g, v) for (g, v) in grads_and_vars if g is not None]
    return grads_and_vars
def build_q_stats(policy, batch):
    """Training stats: current learning rate plus the QLoss diagnostics."""
    return dict({
        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
    }, **policy.q_loss.stats)
def setup_early_mixins(policy, obs_space, action_space, config):
    """Init mixins needed before model construction (LR + epsilon state)."""
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
    ExplorationStateMixin.__init__(policy, obs_space, action_space, config)
def setup_mid_mixins(policy, obs_space, action_space, config):
    """Init after the model is built but before the loss (needs the model)."""
    ComputeTDErrorMixin.__init__(policy)
def setup_late_mixins(policy, obs_space, action_space, config):
    """Init after loss construction (needs both Q and target var lists)."""
    TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
def _compute_q_values(policy, model, obs, obs_space, action_space):
    """Runs the model on `obs` and returns (value, logits, dist).

    Applies the dueling aggregation (state value plus mean-centered
    advantages) when enabled; handles both the scalar and the
    distributional (num_atoms > 1) Q heads.
    """
    config = policy.config
    model_out, state = model({
        "obs": obs,
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)

    if config["num_atoms"] > 1:
        (action_scores, z, support_logits_per_action, logits,
         dist) = model.get_q_value_distributions(model_out)
    else:
        (action_scores, logits,
         dist) = model.get_q_value_distributions(model_out)

    if config["dueling"]:
        state_score = model.get_state_value(model_out)
        if config["num_atoms"] > 1:
            # Center the per-atom advantage logits, add the state value,
            # then re-normalize with a softmax over atoms.
            support_logits_per_action_mean = tf.reduce_mean(
                support_logits_per_action, 1)
            support_logits_per_action_centered = (
                support_logits_per_action - tf.expand_dims(
                    support_logits_per_action_mean, 1))
            support_logits_per_action = tf.expand_dims(
                state_score, 1) + support_logits_per_action_centered
            support_prob_per_action = tf.nn.softmax(
                logits=support_logits_per_action)
            value = tf.reduce_sum(
                input_tensor=z * support_prob_per_action, axis=-1)
            logits = support_logits_per_action
            dist = support_prob_per_action
        else:
            # Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); the mean skips
            # -inf entries used for masked-out actions.
            action_scores_mean = reduce_mean_ignore_inf(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(
                action_scores_mean, 1)
            value = state_score + action_scores_centered
    else:
        value = action_scores

    return value, logits, dist
def _adjust_nstep(n_step, gamma, obs, actions, rewards, new_obs, dones):
"""Rewrites the given trajectory fragments to encode n-step rewards.
reward[i] = (
reward[i] * gamma**0 +
reward[i+1] * gamma**1 +
... +
reward[i+n_step-1] * gamma**(n_step-1))
The ith new_obs is also adjusted to point to the (i+n_step-1)'th new obs.
At the end of the trajectory, n is truncated to fit in the traj length.
"""
assert not any(dones[:-1]), "Unexpected done in middle of trajectory"
traj_length = len(rewards)
for i in range(traj_length):
for j in range(1, n_step):
if i + j < traj_length:
new_obs[i] = new_obs[i + j]
dones[i] = dones[i + j]
rewards[i] += gamma**j * rewards[i + j]
def _postprocess_dqn(policy, batch):
    """Applies n-step reward folding and, optionally, worker-side priorities."""
    # N-step Q adjustments
    if policy.config["n_step"] > 1:
        _adjust_nstep(policy.config["n_step"], policy.config["gamma"],
                      batch[SampleBatch.CUR_OBS], batch[SampleBatch.ACTIONS],
                      batch[SampleBatch.REWARDS], batch[SampleBatch.NEXT_OBS],
                      batch[SampleBatch.DONES])

    # Default to uniform importance weights when none were provided.
    if PRIO_WEIGHTS not in batch:
        batch[PRIO_WEIGHTS] = np.ones_like(batch[SampleBatch.REWARDS])

    # Prioritize on the worker side
    if batch.count > 0 and policy.config["worker_side_prioritization"]:
        td_errors = policy.compute_td_error(
            batch[SampleBatch.CUR_OBS], batch[SampleBatch.ACTIONS],
            batch[SampleBatch.REWARDS], batch[SampleBatch.NEXT_OBS],
            batch[SampleBatch.DONES], batch[PRIO_WEIGHTS])
        new_priorities = (
            np.abs(td_errors) + policy.config["prioritized_replay_eps"])
        batch.data[PRIO_WEIGHTS] = new_priorities
    return batch
# Assembles the DQN TF policy from the hooks defined above.
DQNTFPolicy = build_tf_policy(
    name="DQNTFPolicy",
    get_default_config=lambda: ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,
    make_model=build_q_model,
    action_sampler_fn=build_q_networks,
    loss_fn=build_q_losses,
    stats_fn=build_q_stats,
    postprocess_fn=postprocess_trajectory,
    optimizer_fn=adam_optimizer,
    gradients_fn=clip_gradients,
    # Expose per-action Q values in action fetches (debugging/evaluation).
    extra_action_fetches_fn=lambda policy: {"q_values": policy.q_values},
    # td_error is fetched each learn step for prioritized-replay updates.
    extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error},
    before_init=setup_early_mixins,
    before_loss_init=setup_mid_mixins,
    after_init=setup_late_mixins,
    obs_include_prev_action_reward=False,
    mixins=[
        ExplorationStateMixin,
        TargetNetworkMixin,
        ComputeTDErrorMixin,
        LearningRateSchedule,
    ])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/dqn/simple_q_model.py
|
Python
|
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
class SimpleQModel(TFModelV2):
    """Extension of standard TFModel to provide Q values.

    Data flow:
        obs -> forward() -> model_out
        model_out -> get_q_values() -> Q(s, a)

    Note that this class by itself is not a valid model unless you
    implement forward() in a subclass."""

    def __init__(self,
                 obs_space,
                 action_space,
                 num_outputs,
                 model_config,
                 name,
                 q_hiddens=(256, )):
        """Initialize variables of this model.

        Extra model kwargs:
            q_hiddens (list): sizes of the hidden layers of the Q head,
                used to postprocess the model output into Q values.

        Note that the core layers for forward() are not defined here, this
        only defines the layers for the Q head. Those layers for forward()
        should be defined in subclasses of SimpleQModel.
        """
        super(SimpleQModel, self).__init__(obs_space, action_space,
                                           num_outputs, model_config, name)

        # Symbolic input of the Q head; fed with forward()'s output.
        self.model_out = tf.keras.layers.Input(
            shape=(num_outputs, ), name="model_out")

        if q_hiddens:
            # Stack of relu hidden layers, then a linear per-action output.
            hidden = self.model_out
            for idx, width in enumerate(q_hiddens):
                dense = tf.keras.layers.Dense(
                    width,
                    name="q_hidden_{}".format(idx),
                    activation=tf.nn.relu)
                hidden = dense(hidden)
            q_out = tf.keras.layers.Dense(
                action_space.n, activation=None, name="q_out")(hidden)
        else:
            # No hidden layers configured: use the model output directly.
            q_out = self.model_out

        self.q_value_head = tf.keras.Model(self.model_out, q_out)
        self.register_variables(self.q_value_head.variables)

    def get_q_values(self, model_out):
        """Returns Q(s, a) given a feature tensor for the state.

        Override this in your custom model to customize the Q output head.

        Arguments:
            model_out (Tensor): embedding from the model layers

        Returns:
            action scores Q(s, a) for each action, shape [None, action_space.n]
        """
        return self.q_value_head(model_out)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/dqn/simple_q_policy.py
|
Python
|
"""Basic example of a DQN policy without any optimizations."""
from gym.spaces import Discrete
import logging
import ray
from ray.rllib.agents.dqn.simple_q_model import SimpleQModel
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.tf_ops import huber_loss, make_tf_callable
tf = try_import_tf()
logger = logging.getLogger(__name__)
Q_SCOPE = "q_func"
Q_TARGET_SCOPE = "target_q_func"
class ExplorationStateMixin:
    """Holds epsilon-greedy exploration state: a non-trainable TF variable
    plus a Python-side mirror (cur_epsilon_value) kept in sync with it."""

    def __init__(self, obs_space, action_space, config):
        # Python value, should always be same as the TF variable
        self.cur_epsilon_value = 1.0
        self.cur_epsilon = tf.get_variable(
            initializer=tf.constant_initializer(self.cur_epsilon_value),
            name="eps",
            shape=(),
            trainable=False,
            dtype=tf.float32)

    def add_parameter_noise(self):
        """Resamples and applies parameter-space noise, if enabled."""
        if self.config["parameter_noise"]:
            # Consistency fix: use the public get_session() accessor, like
            # set_epsilon() below does; `self.sess` is not an attribute
            # defined by this mixin or referenced anywhere else here.
            self.get_session().run(self.add_noise_op)

    def set_epsilon(self, epsilon):
        """Updates both the Python mirror and the TF variable."""
        self.cur_epsilon_value = epsilon
        self.cur_epsilon.load(
            self.cur_epsilon_value, session=self.get_session())

    @override(Policy)
    def get_state(self):
        # Append epsilon to the base TFPolicy state so it survives restore.
        return [TFPolicy.get_state(self), self.cur_epsilon_value]

    @override(Policy)
    def set_state(self, state):
        TFPolicy.set_state(self, state[0])
        self.set_epsilon(state[1])
class TargetNetworkMixin:
    """Adds update_target(), which copies Q-net vars into the target net."""

    def __init__(self, obs_space, action_space, config):
        @make_tf_callable(self.get_session())
        def do_update():
            # update_target_fn will be called periodically to copy Q network to
            # target Q network
            update_target_expr = []
            assert len(self.q_func_vars) == len(self.target_q_func_vars), \
                (self.q_func_vars, self.target_q_func_vars)
            for var, var_target in zip(self.q_func_vars,
                                       self.target_q_func_vars):
                update_target_expr.append(var_target.assign(var))
                logger.debug("Update target op {}".format(var_target))
            return tf.group(*update_target_expr)

        self.update_target = do_update

    @override(TFPolicy)
    def variables(self):
        # Expose both networks' vars for checkpointing/sync.
        return self.q_func_vars + self.target_q_func_vars
def build_q_models(policy, obs_space, action_space, config):
    """Build the Q model and its target twin, storing both on the policy.

    Returns the (non-target) Q model, as required by the make_model hook.
    Raises UnsupportedSpaceException for non-Discrete action spaces.
    """
    if not isinstance(action_space, Discrete):
        raise UnsupportedSpaceException(
            "Action space {} is not supported for DQN.".format(action_space))

    if config["hiddens"]:
        # A separate Q head is attached on top of a 256-wide embedding, so
        # suppress the base model's own final linear layer.
        num_outputs = 256
        config["model"]["no_final_linear"] = True
    else:
        num_outputs = action_space.n

    def _make_model(scope_name):
        # Online and target networks share the architecture; only the
        # variable scope name differs.
        return ModelCatalog.get_model_v2(
            obs_space,
            action_space,
            num_outputs,
            config["model"],
            framework="tf",
            name=scope_name,
            model_interface=SimpleQModel,
            q_hiddens=config["hiddens"])

    policy.q_model = _make_model(Q_SCOPE)
    policy.target_q_model = _make_model(Q_TARGET_SCOPE)
    return policy.q_model
def build_action_sampler(policy, q_model, input_dict, obs_space, action_space,
                         config):
    """Build the epsilon-greedy action sampling graph.

    Returns (sampled_actions, action_logp); logp is None because the
    epsilon-greedy sampler is not an explicit parametric distribution.
    """
    # Action Q network
    q_values = _compute_q_values(policy, q_model,
                                 input_dict[SampleBatch.CUR_OBS], obs_space,
                                 action_space)
    # Expose for extra_action_fetches_fn and save vars for the target-net
    # update in TargetNetworkMixin.
    policy.q_values = q_values
    policy.q_func_vars = q_model.variables()
    # Action outputs
    deterministic_actions = tf.argmax(q_values, axis=1)
    batch_size = tf.shape(input_dict[SampleBatch.CUR_OBS])[0]
    # Special case masked out actions (q_value ~= -inf) so that we don't
    # even consider them for exploration.
    random_valid_action_logits = tf.where(
        tf.equal(q_values, tf.float32.min),
        tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
    random_actions = tf.squeeze(
        tf.multinomial(random_valid_action_logits, 1), axis=1)
    # With probability epsilon take the uniform-random (valid) action
    # instead of the greedy one.
    chose_random = tf.random_uniform(
        tf.stack([batch_size]), minval=0, maxval=1,
        dtype=tf.float32) < policy.cur_epsilon
    stochastic_actions = tf.where(chose_random, random_actions,
                                  deterministic_actions)
    action_logp = None
    return stochastic_actions, action_logp
def build_q_losses(policy, model, dist_class, train_batch):
    """Build the 1-step TD (Q-learning) loss.

    Huber loss over the TD error between Q(s, a) and
    r + gamma * max_a' Q_target(s', a').
    """
    # q network evaluation
    q_t = _compute_q_values(policy, policy.q_model,
                            train_batch[SampleBatch.CUR_OBS],
                            policy.observation_space, policy.action_space)
    # target q network evalution
    q_tp1 = _compute_q_values(policy, policy.target_q_model,
                              train_batch[SampleBatch.NEXT_OBS],
                              policy.observation_space, policy.action_space)
    # Saved here so TargetNetworkMixin can build its copy op.
    policy.target_q_func_vars = policy.target_q_model.variables()
    # q scores for actions which we know were selected in the given state.
    one_hot_selection = tf.one_hot(
        tf.cast(train_batch[SampleBatch.ACTIONS], tf.int32),
        policy.action_space.n)
    q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
    # compute estimate of best possible value starting from state at t + 1
    dones = tf.cast(train_batch[SampleBatch.DONES], tf.float32)
    q_tp1_best_one_hot_selection = tf.one_hot(
        tf.argmax(q_tp1, 1), policy.action_space.n)
    q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
    # Terminal transitions contribute no bootstrapped future value.
    q_tp1_best_masked = (1.0 - dones) * q_tp1_best
    # compute RHS of bellman equation
    q_t_selected_target = (train_batch[SampleBatch.REWARDS] +
                           policy.config["gamma"] * q_tp1_best_masked)
    # compute the error (potentially clipped); stop_gradient treats the
    # bootstrap target as a constant.
    td_error = q_t_selected - tf.stop_gradient(q_t_selected_target)
    loss = tf.reduce_mean(huber_loss(td_error))
    # save TD error as an attribute for outside access
    policy.td_error = td_error
    return loss
def _compute_q_values(policy, model, obs, obs_space, action_space):
    """Forward `obs` through `model` and return the Q-value head output."""
    model_out, _ = model({
        "obs": obs,
        "is_training": policy._get_is_training_placeholder(),
    }, [], None)
    return model.get_q_values(model_out)
def setup_early_mixins(policy, obs_space, action_space, config):
    """Init mixins that must exist before the model/loss graph is built."""
    ExplorationStateMixin.__init__(policy, obs_space, action_space, config)
def setup_late_mixins(policy, obs_space, action_space, config):
    """Init mixins that need the built graph (the target-net copy op reads
    q_func_vars / target_q_func_vars, set during loss construction)."""
    TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
# Assemble the policy from the building blocks above.
SimpleQPolicy = build_tf_policy(
    name="SimpleQPolicy",
    # Shares DQN's default config.
    get_default_config=lambda: ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG,
    make_model=build_q_models,
    action_sampler_fn=build_action_sampler,
    loss_fn=build_q_losses,
    # Expose Q-values and TD error for debugging / external consumers.
    extra_action_fetches_fn=lambda policy: {"q_values": policy.q_values},
    extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
    before_init=setup_early_mixins,
    after_init=setup_late_mixins,
    obs_include_prev_action_reward=False,
    mixins=[
        ExplorationStateMixin,
        TargetNetworkMixin,
    ])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/es/__init__.py
|
Python
|
from ray.rllib.agents.es.es import (ESTrainer, DEFAULT_CONFIG)
from ray.rllib.utils import renamed_agent

# Backwards-compatible alias for the old "*Agent" naming scheme.
ESAgent = renamed_agent(ESTrainer)

__all__ = ["ESAgent", "ESTrainer", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/es/es.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
from collections import namedtuple
import logging
import numpy as np
import time
import ray
from ray.rllib.agents import Trainer, with_common_config
from ray.rllib.agents.es import optimizers
from ray.rllib.agents.es import policies
from ray.rllib.agents.es import utils
from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID
from ray.rllib.utils.annotations import override
from ray.rllib.utils.memory import ray_get_and_free
from ray.rllib.utils import FilterManager
logger = logging.getLogger(__name__)
# One batch of rollout results reported by a Worker. The noisy_* fields hold
# antithetic [+perturbation, -perturbation] pairs; the eval_* fields come from
# occasional unperturbed evaluation episodes.
Result = namedtuple("Result", [
    "noise_indices", "noisy_returns", "sign_noisy_returns", "noisy_lengths",
    "eval_returns", "eval_lengths"
])
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # Coefficient of the L2 weight-decay term added to the gradient.
    "l2_coeff": 0.005,
    # Std dev of the parameter perturbations drawn from the noise table.
    "noise_stdev": 0.02,
    # Minimum number of episodes to collect per training iteration.
    "episodes_per_batch": 1000,
    # Minimum number of env timesteps to collect per training iteration.
    "train_batch_size": 10000,
    # Probability that a worker runs an unperturbed evaluation rollout.
    "eval_prob": 0.003,
    # How returns are processed before the update; only "centered_rank"
    # is implemented.
    "return_proc_mode": "centered_rank",
    # Number of remote rollout workers.
    "num_workers": 10,
    # Adam step size for the weight update.
    "stepsize": 0.01,
    # Observation filter applied in front of the policy network.
    "observation_filter": "MeanStdFilter",
    # Number of float32 entries in the shared noise table (~1GB at 250M).
    "noise_size": 250000000,
    # Number of most-recent eval means averaged into episode_reward_mean.
    "report_length": 10,
})
# __sphinx_doc_end__
# yapf: enable
@ray.remote
def create_shared_noise(count):
    """Create a large array of noise to be shared by all workers."""
    # Fixed seed: every invocation produces the identical table, so workers
    # can address perturbations purely by (index, dim).
    rng = np.random.RandomState(123)
    return rng.randn(count).astype(np.float32)
class SharedNoiseTable:
    """Read-only view over a flat float32 noise array.

    Workers index into one shared table instead of generating their own
    noise, so a perturbation is fully described by (start index, dim).
    """

    def __init__(self, noise):
        self.noise = noise
        assert self.noise.dtype == np.float32

    def get(self, i, dim):
        """Return the `dim`-long noise slice starting at offset `i`."""
        return self.noise[i:i + dim]

    def sample_index(self, dim):
        """Sample a uniformly random valid start offset for length `dim`."""
        return np.random.randint(0, len(self.noise) - dim + 1)
@ray.remote
class Worker:
    """Remote actor that performs (perturbed) ES rollouts."""

    def __init__(self,
                 config,
                 policy_params,
                 env_creator,
                 noise,
                 min_task_runtime=0.2):
        # min_task_runtime: keep sampling until at least this many seconds
        # have elapsed, to amortize per-task overhead.
        self.min_task_runtime = min_task_runtime
        self.config = config
        self.policy_params = policy_params
        self.noise = SharedNoiseTable(noise)
        self.env = env_creator(config["env_config"])
        from ray.rllib import models
        self.preprocessor = models.ModelCatalog.get_preprocessor(
            self.env, config["model"])
        self.sess = utils.make_session(single_threaded=True)
        self.policy = policies.GenericPolicy(
            self.sess, self.env.action_space, self.env.observation_space,
            self.preprocessor, config["observation_filter"], config["model"],
            **policy_params)

    @property
    def filters(self):
        # Single-policy setup: expose the observation filter under the
        # default policy id.
        return {DEFAULT_POLICY_ID: self.policy.get_filter()}

    def sync_filters(self, new_filters):
        """Overwrite local filter state with the driver's filters."""
        for k in self.filters:
            self.filters[k].sync(new_filters[k])

    def get_filters(self, flush_after=False):
        """Return serializable filters; optionally clear local buffers."""
        return_filters = {}
        for k, f in self.filters.items():
            return_filters[k] = f.as_serializable()
            if flush_after:
                f.clear_buffer()
        return return_filters

    def rollout(self, timestep_limit, add_noise=True):
        """Run one episode with the policy's current weights."""
        rollout_rewards, rollout_length = policies.rollout(
            self.policy,
            self.env,
            timestep_limit=timestep_limit,
            add_noise=add_noise)
        return rollout_rewards, rollout_length

    def do_rollouts(self, params, timestep_limit=None):
        """Collect antithetic perturbed rollouts plus occasional evals.

        Returns a Result whose noisy_* entries are [+perturbation,
        -perturbation] pairs and whose eval_* entries come from
        unperturbed episodes.
        """
        # Set the network weights.
        self.policy.set_weights(params)
        noise_indices, returns, sign_returns, lengths = [], [], [], []
        eval_returns, eval_lengths = [], []
        # Perform some rollouts with noise.
        task_tstart = time.time()
        while (len(noise_indices) == 0
               or time.time() - task_tstart < self.min_task_runtime):
            if np.random.uniform() < self.config["eval_prob"]:
                # Do an evaluation run with no perturbation.
                self.policy.set_weights(params)
                rewards, length = self.rollout(timestep_limit, add_noise=False)
                eval_returns.append(rewards.sum())
                eval_lengths.append(length)
            else:
                # Do a regular run with parameter perturbations.
                noise_index = self.noise.sample_index(self.policy.num_params)
                perturbation = self.config["noise_stdev"] * self.noise.get(
                    noise_index, self.policy.num_params)
                # These two sampling steps could be done in parallel on
                # different actors letting us update twice as frequently.
                self.policy.set_weights(params + perturbation)
                rewards_pos, lengths_pos = self.rollout(timestep_limit)
                self.policy.set_weights(params - perturbation)
                rewards_neg, lengths_neg = self.rollout(timestep_limit)
                noise_indices.append(noise_index)
                returns.append([rewards_pos.sum(), rewards_neg.sum()])
                sign_returns.append(
                    [np.sign(rewards_pos).sum(),
                     np.sign(rewards_neg).sum()])
                lengths.append([lengths_pos, lengths_neg])
        return Result(
            noise_indices=noise_indices,
            noisy_returns=returns,
            sign_noisy_returns=sign_returns,
            noisy_lengths=lengths,
            eval_returns=eval_returns,
            eval_lengths=eval_lengths)
class ESTrainer(Trainer):
    """Large-scale implementation of Evolution Strategies in Ray."""

    _name = "ES"
    _default_config = DEFAULT_CONFIG

    @override(Trainer)
    def _init(self, config, env_creator):
        """Create local policy/optimizer, shared noise table and workers."""
        # PyTorch check.
        if config["use_pytorch"]:
            raise ValueError(
                "ES does not support PyTorch yet! Use tf instead."
            )

        policy_params = {"action_noise_std": 0.01}

        env = env_creator(config["env_config"])
        from ray.rllib import models
        preprocessor = models.ModelCatalog.get_preprocessor(env)

        self.sess = utils.make_session(single_threaded=False)
        self.policy = policies.GenericPolicy(
            self.sess, env.action_space, env.observation_space, preprocessor,
            config["observation_filter"], config["model"], **policy_params)
        self.optimizer = optimizers.Adam(self.policy, config["stepsize"])
        self.report_length = config["report_length"]

        # Create the shared noise table.
        logger.info("Creating shared noise table.")
        noise_id = create_shared_noise.remote(config["noise_size"])
        self.noise = SharedNoiseTable(ray.get(noise_id))

        # Create the actors.
        logger.info("Creating actors.")
        self._workers = [
            Worker.remote(config, policy_params, env_creator, noise_id)
            for _ in range(config["num_workers"])
        ]

        self.episodes_so_far = 0
        self.reward_list = []
        self.tstart = time.time()

    @override(Trainer)
    def _train(self):
        """One ES iteration: rollouts -> rank-based gradient -> Adam step."""
        config = self.config

        theta = self.policy.get_weights()
        assert theta.dtype == np.float32

        # Put the current policy weights in the object store.
        theta_id = ray.put(theta)
        # Use the actors to do rollouts, note that we pass in the ID of the
        # policy weights.
        results, num_episodes, num_timesteps = self._collect_results(
            theta_id, config["episodes_per_batch"], config["train_batch_size"])

        all_noise_indices = []
        all_training_returns = []
        all_training_lengths = []
        all_eval_returns = []
        all_eval_lengths = []

        # Loop over the results.
        for result in results:
            all_eval_returns += result.eval_returns
            all_eval_lengths += result.eval_lengths
            all_noise_indices += result.noise_indices
            all_training_returns += result.noisy_returns
            all_training_lengths += result.noisy_lengths

        assert len(all_eval_returns) == len(all_eval_lengths)
        assert (len(all_noise_indices) == len(all_training_returns) ==
                len(all_training_lengths))

        self.episodes_so_far += num_episodes

        # Assemble the results.
        eval_returns = np.array(all_eval_returns)
        eval_lengths = np.array(all_eval_lengths)
        noise_indices = np.array(all_noise_indices)
        noisy_returns = np.array(all_training_returns)
        noisy_lengths = np.array(all_training_lengths)

        # Process the returns. Rank transformation makes the update invariant
        # to the scale of the raw returns.
        if config["return_proc_mode"] == "centered_rank":
            proc_noisy_returns = utils.compute_centered_ranks(noisy_returns)
        else:
            raise NotImplementedError(config["return_proc_mode"])

        # Compute and take a step. Antithetic estimate: weight each noise
        # vector by the difference of its +/- perturbation returns.
        g, count = utils.batched_weighted_sum(
            proc_noisy_returns[:, 0] - proc_noisy_returns[:, 1],
            (self.noise.get(index, self.policy.num_params)
             for index in noise_indices),
            batch_size=500)
        g /= noisy_returns.size
        assert (g.shape == (self.policy.num_params, ) and g.dtype == np.float32
                and count == len(noise_indices))
        # Compute the new weights theta. -g because the optimizer descends;
        # the l2_coeff term acts as weight decay.
        theta, update_ratio = self.optimizer.update(-g +
                                                    config["l2_coeff"] * theta)
        # Set the new weights in the local copy of the policy.
        self.policy.set_weights(theta)
        # Store the rewards
        if len(all_eval_returns) > 0:
            self.reward_list.append(np.mean(eval_returns))

        # Now sync the filters
        FilterManager.synchronize({
            DEFAULT_POLICY_ID: self.policy.get_filter()
        }, self._workers)

        info = {
            "weights_norm": np.square(theta).sum(),
            "grad_norm": np.square(g).sum(),
            "update_ratio": update_ratio,
            "episodes_this_iter": noisy_lengths.size,
            "episodes_so_far": self.episodes_so_far,
        }

        reward_mean = np.mean(self.reward_list[-self.report_length:])
        result = dict(
            episode_reward_mean=reward_mean,
            episode_len_mean=eval_lengths.mean(),
            timesteps_this_iter=noisy_lengths.sum(),
            info=info)

        return result

    @override(Trainer)
    def compute_action(self, observation):
        """Noise-free action for a single observation (no filter update)."""
        return self.policy.compute(observation, update=False)[0]

    @override(Trainer)
    def _stop(self):
        # workaround for https://github.com/ray-project/ray/issues/1516
        for w in self._workers:
            w.__ray_terminate__.remote()

    def _collect_results(self, theta_id, min_episodes, min_timesteps):
        """Launch rollout rounds until both collection minimums are met."""
        num_episodes, num_timesteps = 0, 0
        results = []
        while num_episodes < min_episodes or num_timesteps < min_timesteps:
            logger.info(
                "Collected {} episodes {} timesteps so far this iter".format(
                    num_episodes, num_timesteps))
            rollout_ids = [
                worker.do_rollouts.remote(theta_id) for worker in self._workers
            ]
            # Get the results of the rollouts.
            for result in ray_get_and_free(rollout_ids):
                results.append(result)
                # Update the number of episodes and the number of timesteps
                # keeping in mind that result.noisy_lengths is a list of lists,
                # where the inner lists have length 2.
                num_episodes += sum(len(pair) for pair in result.noisy_lengths)
                num_timesteps += sum(
                    sum(pair) for pair in result.noisy_lengths)
        return results, num_episodes, num_timesteps

    def __getstate__(self):
        # Checkpoint only the weights, filter and episode counter.
        return {
            "weights": self.policy.get_weights(),
            "filter": self.policy.get_filter(),
            "episodes_so_far": self.episodes_so_far,
        }

    def __setstate__(self, state):
        self.episodes_so_far = state["episodes_so_far"]
        self.policy.set_weights(state["weights"])
        self.policy.set_filter(state["filter"])
        # Push the restored filter out to all rollout workers.
        FilterManager.synchronize({
            DEFAULT_POLICY_ID: self.policy.get_filter()
        }, self._workers)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/es/optimizers.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
import numpy as np
class Optimizer:
    """Base class for ES weight optimizers.

    Subclasses implement `_compute_step()`; `update()` applies the step to
    the policy's current flat weight vector and reports the relative step
    size.
    """

    def __init__(self, pi):
        self.pi = pi
        self.dim = pi.num_params
        self.t = 0

    def update(self, globalg):
        """Return (new_theta, update_ratio) for the gradient `globalg`."""
        self.t += 1
        theta = self.pi.get_weights()
        step = self._compute_step(globalg)
        # Ratio of step magnitude to current weight magnitude.
        ratio = np.linalg.norm(step) / np.linalg.norm(theta)
        return theta + step, ratio

    def _compute_step(self, globalg):
        raise NotImplementedError
class SGD(Optimizer):
    """Plain momentum SGD on the flat parameter vector."""

    def __init__(self, pi, stepsize, momentum=0.9):
        Optimizer.__init__(self, pi)
        self.stepsize = stepsize
        self.momentum = momentum
        # Exponentially averaged gradient (velocity).
        self.v = np.zeros(self.dim, dtype=np.float32)

    def _compute_step(self, globalg):
        # Blend the new gradient into the velocity, then descend along it.
        self.v = self.momentum * self.v + (1. - self.momentum) * globalg
        return -self.stepsize * self.v
class Adam(Optimizer):
    """Adam optimizer over the flat parameter vector."""

    def __init__(self, pi, stepsize, beta1=0.9, beta2=0.999, epsilon=1e-08):
        Optimizer.__init__(self, pi)
        self.stepsize = stepsize
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        # First- and second-moment running estimates.
        self.m = np.zeros(self.dim, dtype=np.float32)
        self.v = np.zeros(self.dim, dtype=np.float32)

    def _compute_step(self, globalg):
        # Bias-corrected effective step size at iteration t.
        lr_t = self.stepsize * (np.sqrt(1 - self.beta2**self.t) /
                                (1 - self.beta1**self.t))
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        return -lr_t * self.m / (np.sqrt(self.v) + self.epsilon)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/es/policies.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
import gym
import numpy as np
import ray
import ray.experimental.tf_utils
from ray.rllib.evaluation.sampler import _unbatch_tuple_actions
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.filter import get_filter
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def rollout(policy, env, timestep_limit=None, add_noise=False):
    """Do a rollout.

    If add_noise is True, the rollout will take noisy actions with
    noise drawn from that stream. Otherwise, no action noise will be added.

    Args:
        policy: Object exposing compute(obs, add_noise=...) returning a
            batch of actions; the first entry is applied.
        env: gym-style environment (reset / step).
        timestep_limit (int): Optional hard cap on episode length; combined
            with the env's own max_episode_steps when both are set.
        add_noise (bool): Whether the policy adds action noise.

    Returns:
        Tuple of (np.float32 array of per-step rewards, episode length).
    """
    # The env may have no spec (custom envs) or an unlimited spec
    # (max_episode_steps=None); treat both as "no env-imposed limit"
    # instead of crashing on attribute access or min(int, None).
    env_timestep_limit = getattr(
        getattr(env, "spec", None), "max_episode_steps", None)
    if timestep_limit is None:
        timestep_limit = env_timestep_limit
    elif env_timestep_limit is not None:
        timestep_limit = min(timestep_limit, env_timestep_limit)
    rews = []
    t = 0
    observation = env.reset()
    # A limit of None (or 0) falls back to a large default.
    for _ in range(timestep_limit or 999999):
        ac = policy.compute(observation, add_noise=add_noise)[0]
        observation, rew, done, _ = env.step(ac)
        rews.append(rew)
        t += 1
        if done:
            break
    rews = np.array(rews, dtype=np.float32)
    return rews, t
class GenericPolicy:
    """TF policy used by ES: deterministic model output + optional noise."""

    def __init__(self, sess, action_space, obs_space, preprocessor,
                 observation_filter, model_options, action_noise_std):
        self.sess = sess
        self.action_space = action_space
        self.action_noise_std = action_noise_std
        self.preprocessor = preprocessor
        self.observation_filter = get_filter(observation_filter,
                                             self.preprocessor.shape)
        self.inputs = tf.placeholder(tf.float32,
                                     [None] + list(self.preprocessor.shape))

        # Policy network.
        dist_class, dist_dim = ModelCatalog.get_action_dist(
            self.action_space, model_options, dist_type="deterministic")
        model = ModelCatalog.get_model({
            "obs": self.inputs
        }, obs_space, action_space, dist_dim, model_options)
        dist = dist_class(model.outputs, model)
        self.sampler = dist.sample()

        # Flat get/set access over all of the model's variables.
        self.variables = ray.experimental.tf_utils.TensorFlowVariables(
            model.outputs, self.sess)

        # Total number of scalar parameters (length of the flat vector).
        self.num_params = sum(
            np.prod(variable.shape.as_list())
            for _, variable in self.variables.variables.items())
        self.sess.run(tf.global_variables_initializer())

    def compute(self, observation, add_noise=False, update=True):
        """Compute a (batched) action for a single raw observation.

        Args:
            observation: Raw (unpreprocessed) observation.
            add_noise (bool): Add Gaussian action noise (Box spaces only).
            update (bool): Whether to update the observation filter stats.
        """
        observation = self.preprocessor.transform(observation)
        observation = self.observation_filter(observation[None], update=update)
        action = self.sess.run(
            self.sampler, feed_dict={self.inputs: observation})
        action = _unbatch_tuple_actions(action)
        if add_noise and isinstance(self.action_space, gym.spaces.Box):
            action += np.random.randn(*action.shape) * self.action_noise_std
        return action

    def set_weights(self, x):
        """Set all model weights from a flat vector."""
        self.variables.set_flat(x)

    def get_weights(self):
        """Return all model weights as a flat vector."""
        return self.variables.get_flat()

    def get_filter(self):
        return self.observation_filter

    def set_filter(self, observation_filter):
        self.observation_filter = observation_filter
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/es/utils.py
|
Python
|
# Code in this file is copied and adapted from
# https://github.com/openai/evolution-strategies-starter.
import numpy as np
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def compute_ranks(x):
    """Returns ranks in [0, len(x))

    Note: This is different from scipy.stats.rankdata, which returns ranks in
    [1, len(x)].
    """
    assert x.ndim == 1
    ranks = np.empty(len(x), dtype=int)
    # Scatter 0..n-1 into rank positions via fancy indexing on argsort.
    ranks[x.argsort()] = np.arange(len(x))
    return ranks


def compute_centered_ranks(x):
    """Map the values of x to centered ranks spread over [-0.5, 0.5].

    Rank-transforming the returns makes the ES update invariant to their
    scale. Works on arrays of any shape (ranks are computed over the
    flattened array).
    """
    y = compute_ranks(x.ravel()).reshape(x.shape).astype(np.float32)
    # max(..., 1) guards the single-element case, which would otherwise
    # divide by zero and produce NaN.
    y /= max(x.size - 1, 1)
    y -= 0.5
    return y
def make_session(single_threaded):
    """Create a TF session, optionally restricted to a single CPU thread."""
    if not single_threaded:
        return tf.Session()
    # Limit both inter- and intra-op parallelism so the worker uses one core.
    return tf.Session(
        config=tf.ConfigProto(
            inter_op_parallelism_threads=1, intra_op_parallelism_threads=1))
def itergroups(items, group_size):
    """Yield successive tuples of up to `group_size` items from `items`.

    The final tuple may be shorter when len(items) is not a multiple of
    group_size.
    """
    assert group_size >= 1
    group = []
    for item in items:
        group.append(item)
        if len(group) == group_size:
            yield tuple(group)
            group = []
    if group:
        yield tuple(group)
def batched_weighted_sum(weights, vecs, batch_size):
    """Compute sum(w_i * v_i) over paired weights/vecs in fixed-size batches.

    Batching keeps the temporary arrays small when `vecs` is a generator of
    large vectors. Returns (accumulated dot product, number of items summed).
    """
    total = 0
    num_items_summed = 0
    batches = zip(
        itergroups(weights, batch_size), itergroups(vecs, batch_size))
    for batch_weights, batch_vecs in batches:
        assert len(batch_weights) == len(batch_vecs) <= batch_size
        w_arr = np.asarray(batch_weights, dtype=np.float32)
        v_arr = np.asarray(batch_vecs, dtype=np.float32)
        total += np.dot(w_arr, v_arr)
        num_items_summed += len(batch_weights)
    return total, num_items_summed
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/impala/__init__.py
|
Python
|
from ray.rllib.agents.impala.impala import ImpalaTrainer, DEFAULT_CONFIG
from ray.rllib.utils import renamed_agent

# Backwards-compatible alias for the old "*Agent" naming scheme.
ImpalaAgent = renamed_agent(ImpalaTrainer)

__all__ = ["ImpalaAgent", "ImpalaTrainer", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/impala/impala.py
|
Python
|
from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
from ray.rllib.agents.impala.vtrace_policy import VTraceTFPolicy
from ray.rllib.agents.trainer import Trainer, with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.optimizers import AsyncSamplesOptimizer
from ray.rllib.optimizers.aso_tree_aggregator import TreeAggregator
from ray.rllib.utils.annotations import override
from ray.tune.trainable import Trainable
from ray.tune.resources import Resources
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # V-trace params (see vtrace.py).
    "vtrace": True,
    # Importance-weight clipping thresholds (rho / pg-rho, see vtrace.py).
    "vtrace_clip_rho_threshold": 1.0,
    "vtrace_clip_pg_rho_threshold": 1.0,
    # System params.
    #
    # == Overview of data flow in IMPALA ==
    # 1. Policy evaluation in parallel across `num_workers` actors produces
    #    batches of size `sample_batch_size * num_envs_per_worker`.
    # 2. If enabled, the replay buffer stores and produces batches of size
    #    `sample_batch_size * num_envs_per_worker`.
    # 3. If enabled, the minibatch ring buffer stores and replays batches of
    #    size `train_batch_size` up to `num_sgd_iter` times per batch.
    # 4. The learner thread executes data parallel SGD across `num_gpus` GPUs
    #    on batches of size `train_batch_size`.
    #
    "sample_batch_size": 50,
    "train_batch_size": 500,
    # minimum wall-clock time per training iteration
    "min_iter_time_s": 10,
    "num_workers": 2,
    # number of GPUs the learner should use.
    "num_gpus": 1,
    # set >1 to load data into GPUs in parallel. Increases GPU memory usage
    # proportionally with the number of buffers.
    "num_data_loader_buffers": 1,
    # how many train batches should be retained for minibatching. This conf
    # only has an effect if `num_sgd_iter > 1`.
    "minibatch_buffer_size": 1,
    # number of passes to make over each train batch
    "num_sgd_iter": 1,
    # set >0 to enable experience replay. Saved samples will be replayed with
    # a p:1 proportion to new data samples.
    "replay_proportion": 0.0,
    # number of sample batches to store for replay. The number of transitions
    # saved total will be (replay_buffer_num_slots * sample_batch_size).
    "replay_buffer_num_slots": 0,
    # max queue size for train batches feeding into the learner
    "learner_queue_size": 16,
    # wait for train batches to be available in minibatch buffer queue
    # this many seconds. This may need to be increased e.g. when training
    # with a slow environment
    "learner_queue_timeout": 300,
    # level of queuing for sampling.
    "max_sample_requests_in_flight_per_worker": 2,
    # max number of workers to broadcast one set of weights to
    "broadcast_interval": 1,
    # use intermediate actors for multi-level aggregation. This can make sense
    # if ingesting >2GB/s of samples, or if the data requires decompression.
    "num_aggregation_workers": 0,
    # Learning params.
    "grad_clip": 40.0,
    # either "adam" or "rmsprop"
    "opt_type": "adam",
    "lr": 0.0005,
    "lr_schedule": None,
    # NOTE(review): decay/momentum/epsilon appear to apply only when
    # opt_type == "rmsprop" -- confirm against vtrace_policy.
    "decay": 0.99,
    "momentum": 0.0,
    "epsilon": 0.1,
    # balancing the three losses
    "vf_loss_coeff": 0.5,
    "entropy_coeff": 0.01,
    "entropy_coeff_schedule": None,
    # use fake (infinite speed) sampler for testing
    "_fake_sampler": False,
})
# __sphinx_doc_end__
# yapf: enable
def choose_policy(config):
    """Select the policy class: V-trace when enabled, plain A3C otherwise."""
    return VTraceTFPolicy if config["vtrace"] else A3CTFPolicy
def validate_config(config):
    """Reject config combinations that IMPALA does not support."""
    if config["use_pytorch"]:
        # Only the TF execution path is implemented.
        raise ValueError(
            "IMPALA does not support PyTorch yet! Use tf instead."
        )
    if config["entropy_coeff"] < 0:
        # Entropy must act as a bonus, never a penalty.
        raise DeprecationWarning("entropy_coeff must be >= 0")
def defer_make_workers(trainer, env_creator, policy, config):
    """Create a WorkerSet with 0 remote workers for now.

    The real rollout workers are added later, in
    make_aggregators_and_optimizer, after any aggregation actors have been
    pre-created for placement preference.
    """
    # Defer worker creation to after the optimizer has been created.
    return trainer._make_workers(env_creator, policy, config, 0)
def make_aggregators_and_optimizer(workers, config):
    """Create the AsyncSamplesOptimizer (and optional tree aggregators)."""
    if config["num_aggregation_workers"] > 0:
        # Create co-located aggregator actors first for placement pref
        aggregators = TreeAggregator.precreate_aggregators(
            config["num_aggregation_workers"])
    else:
        aggregators = None
    # Rollout worker creation was deferred (see defer_make_workers) so the
    # aggregators above could be placed first.
    workers.add_workers(config["num_workers"])

    optimizer = AsyncSamplesOptimizer(
        workers,
        lr=config["lr"],
        num_gpus=config["num_gpus"],
        sample_batch_size=config["sample_batch_size"],
        train_batch_size=config["train_batch_size"],
        replay_buffer_num_slots=config["replay_buffer_num_slots"],
        replay_proportion=config["replay_proportion"],
        num_data_loader_buffers=config["num_data_loader_buffers"],
        max_sample_requests_in_flight_per_worker=config[
            "max_sample_requests_in_flight_per_worker"],
        broadcast_interval=config["broadcast_interval"],
        num_sgd_iter=config["num_sgd_iter"],
        minibatch_buffer_size=config["minibatch_buffer_size"],
        num_aggregation_workers=config["num_aggregation_workers"],
        learner_queue_size=config["learner_queue_size"],
        learner_queue_timeout=config["learner_queue_timeout"],
        **config["optimizer"])

    if aggregators:
        # Assign the pre-created aggregators to the optimizer
        optimizer.aggregator.init(aggregators)
    return optimizer
class OverrideDefaultResourceRequest:
    """Mixin that sizes the Tune resource request from the IMPALA config,
    accounting for aggregation workers in addition to rollout workers."""

    @classmethod
    @override(Trainable)
    def default_resource_request(cls, config):
        cf = dict(cls._default_config, **config)
        Trainer._validate_config(cf)
        return Resources(
            # Driver (learner) resources.
            cpu=cf["num_cpus_for_driver"],
            gpu=cf["num_gpus"],
            memory=cf["memory"],
            object_store_memory=cf["object_store_memory"],
            # Rollout workers plus aggregation workers.
            extra_cpu=cf["num_cpus_per_worker"] * cf["num_workers"] +
            cf["num_aggregation_workers"],
            extra_gpu=cf["num_gpus_per_worker"] * cf["num_workers"],
            extra_memory=cf["memory_per_worker"] * cf["num_workers"],
            extra_object_store_memory=cf["object_store_memory_per_worker"] *
            cf["num_workers"])
# IMPALA trainer assembled from the building blocks above. Defaults to the
# V-trace policy; choose_policy swaps in plain A3C when vtrace=False.
ImpalaTrainer = build_trainer(
    name="IMPALA",
    default_config=DEFAULT_CONFIG,
    default_policy=VTraceTFPolicy,
    validate_config=validate_config,
    get_policy_class=choose_policy,
    make_workers=defer_make_workers,
    make_policy_optimizer=make_aggregators_and_optimizer,
    mixins=[OverrideDefaultResourceRequest])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/impala/vtrace.py
|
Python
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to compute V-trace off-policy actor critic targets.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
See https://arxiv.org/abs/1802.01561 for the full paper.
In addition to the original paper's code, changes have been made
to support MultiDiscrete action spaces. behaviour_policy_logits,
target_policy_logits and actions parameters in the entry point
multi_from_logits method accepts lists of tensors instead of just
tensors.
"""
import collections
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
# Outputs of the *_from_logits entry points: v-trace value targets,
# policy-gradient advantages, and the log importance ratios / action
# log-probs used to compute them.
VTraceFromLogitsReturns = collections.namedtuple("VTraceFromLogitsReturns", [
    "vs", "pg_advantages", "log_rhos", "behaviour_action_log_probs",
    "target_action_log_probs"
])

# Minimal result pair: value targets and policy-gradient advantages.
VTraceReturns = collections.namedtuple("VTraceReturns", "vs pg_advantages")
def log_probs_from_logits_and_actions(policy_logits,
                                      actions,
                                      dist_class=Categorical,
                                      model=None):
    """Single-action-space convenience wrapper around the multi version."""
    per_space = multi_log_probs_from_logits_and_actions(
        [policy_logits], [actions], dist_class, model)
    return per_space[0]
def multi_log_probs_from_logits_and_actions(policy_logits, actions, dist_class,
                                            model):
    """Computes action log-probs from policy logits and actions.

    In the notation used throughout documentation and comments, T refers to the
    time dimension ranging from 0 to T-1. B refers to the batch size and
    ACTION_SPACE refers to the list of numbers each representing a number of
    actions.

    Args:
      policy_logits: A list with length of ACTION_SPACE of float32
        tensors of shapes
        [T, B, ACTION_SPACE[0]],
        ...,
        [T, B, ACTION_SPACE[-1]]
        with un-normalized log-probabilities parameterizing a softmax policy.
      actions: A list with length of ACTION_SPACE of
        tensors of shapes
        [T, B, ...],
        ...,
        [T, B, ...]
        with actions.
      dist_class: Python class of the action distribution
      model: backing ModelV2 instance (passed through to dist_class)

    Returns:
      A list with length of ACTION_SPACE of float32
        tensors of shapes
        [T, B],
        ...,
        [T, B]
        corresponding to the sampling log probability
        of the chosen action w.r.t. the policy.
    """
    log_probs = []
    for i in range(len(policy_logits)):
        p_shape = tf.shape(policy_logits[i])
        a_shape = tf.shape(actions[i])
        # Flatten [T, B, ...] -> [T * B, ...] so the distribution can be
        # built over a single batch dimension.
        policy_logits_flat = tf.reshape(policy_logits[i],
                                        tf.concat([[-1], p_shape[2:]], axis=0))
        actions_flat = tf.reshape(actions[i],
                                  tf.concat([[-1], a_shape[2:]], axis=0))
        # Compute the log-probs, then restore the [T, B] leading dims.
        log_probs.append(
            tf.reshape(
                dist_class(policy_logits_flat, model).logp(actions_flat),
                a_shape[:2]))

    return log_probs
def from_logits(behaviour_policy_logits,
                target_policy_logits,
                actions,
                discounts,
                rewards,
                values,
                bootstrap_value,
                dist_class=Categorical,
                model=None,
                clip_rho_threshold=1.0,
                clip_pg_rho_threshold=1.0,
                name="vtrace_from_logits"):
    """multi_from_logits wrapper used only for tests"""
    # Wrap the single action space in a one-element list, as expected by
    # the multi-action-space entry point.
    res = multi_from_logits(
        [behaviour_policy_logits], [target_policy_logits], [actions],
        discounts,
        rewards,
        values,
        bootstrap_value,
        dist_class,
        model,
        clip_rho_threshold=clip_rho_threshold,
        clip_pg_rho_threshold=clip_pg_rho_threshold,
        name=name)

    return VTraceFromLogitsReturns(
        vs=res.vs,
        pg_advantages=res.pg_advantages,
        log_rhos=res.log_rhos,
        # Squeeze out the action-space list dimension added by the wrapper.
        behaviour_action_log_probs=tf.squeeze(
            res.behaviour_action_log_probs, axis=0),
        target_action_log_probs=tf.squeeze(
            res.target_action_log_probs, axis=0),
    )
def multi_from_logits(behaviour_policy_logits,
                      target_policy_logits,
                      actions,
                      discounts,
                      rewards,
                      values,
                      bootstrap_value,
                      dist_class,
                      model,
                      behaviour_action_log_probs=None,
                      clip_rho_threshold=1.0,
                      clip_pg_rho_threshold=1.0,
                      name="vtrace_from_logits"):
    r"""V-trace for softmax policies.

    Calculates V-trace actor critic targets for softmax polices as described
    in "IMPALA: Scalable Distributed Deep-RL with Importance Weighted
    Actor-Learner Architectures" by Espeholt, Soyer, Munos et al.

    Target policy refers to the policy we are interested in improving and
    behaviour policy refers to the policy that generated the given
    rewards and actions.

    In the notation used throughout documentation and comments, T refers to
    the time dimension ranging from 0 to T-1. B refers to the batch size and
    ACTION_SPACE refers to the list of numbers each representing a number of
    actions.

    Args:
      behaviour_policy_logits: A list with length of ACTION_SPACE of float32
        tensors of shapes [T, B, ACTION_SPACE[0]], ..., [T, B,
        ACTION_SPACE[-1]] with un-normalized log-probabilities parameterizing
        the softmax behaviour policy.
      target_policy_logits: A list with length of ACTION_SPACE of float32
        tensors of shapes [T, B, ACTION_SPACE[0]], ..., [T, B,
        ACTION_SPACE[-1]] with un-normalized log-probabilities parameterizing
        the softmax target policy.
      actions: A list with length of ACTION_SPACE of tensors of shapes
        [T, B, ...], ..., [T, B, ...] with actions sampled from the
        behaviour policy.
      discounts: A float32 tensor of shape [T, B] with the discount
        encountered when following the behaviour policy.
      rewards: A float32 tensor of shape [T, B] with the rewards generated by
        following the behaviour policy.
      values: A float32 tensor of shape [T, B] with the value function
        estimates wrt. the target policy.
      bootstrap_value: A float32 of shape [B] with the value function
        estimate at time T.
      dist_class: action distribution class for the logits.
      model: backing ModelV2 instance.
      behaviour_action_log_probs: precalculated values of the behaviour
        actions; only used for single-component action spaces (see below).
      clip_rho_threshold: A scalar float32 tensor with the clipping threshold
        for importance weights (rho) when calculating the baseline targets
        (vs). rho^bar in the paper.
      clip_pg_rho_threshold: A scalar float32 tensor with the clipping
        threshold on rho_s in
        \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
      name: The name scope that all V-trace operations will be created in.

    Returns:
      A `VTraceFromLogitsReturns` namedtuple with the following fields:
        vs: A float32 tensor of shape [T, B]. Can be used as target to train
          a baseline (V(x_t) - vs_t)^2.
        pg_advantages: A float 32 tensor of shape [T, B]. Can be used as an
          estimate of the advantage in the calculation of policy gradients.
        log_rhos: A float32 tensor of shape [T, B] containing the log
          importance sampling weights (log rhos).
        behaviour_action_log_probs: A float32 tensor of shape [T, B]
          containing behaviour policy action log probabilities
          (log \mu(a_t)).
        target_action_log_probs: A float32 tensor of shape [T, B] containing
          target policy action probabilities (log \pi(a_t)).
    """
    # NOTE: the caller's logits lists are converted *in place* to float32
    # tensors; the input lists are mutated.
    for i in range(len(behaviour_policy_logits)):
        behaviour_policy_logits[i] = tf.convert_to_tensor(
            behaviour_policy_logits[i], dtype=tf.float32)
        target_policy_logits[i] = tf.convert_to_tensor(
            target_policy_logits[i], dtype=tf.float32)
        # Make sure tensor ranks are as expected.
        # The rest will be checked by from_action_log_probs.
        behaviour_policy_logits[i].shape.assert_has_rank(3)
        target_policy_logits[i].shape.assert_has_rank(3)
    with tf.name_scope(
            name,
            values=[
                behaviour_policy_logits, target_policy_logits, actions,
                discounts, rewards, values, bootstrap_value
            ]):
        target_action_log_probs = multi_log_probs_from_logits_and_actions(
            target_policy_logits, actions, dist_class, model)
        if (len(behaviour_policy_logits) > 1
                or behaviour_action_log_probs is None):
            # can't use precalculated values, recompute them. Note that
            # recomputing won't work well for autoregressive action dists
            # which may have variables not captured by 'logits'
            behaviour_action_log_probs = (
                multi_log_probs_from_logits_and_actions(
                    behaviour_policy_logits, actions, dist_class, model))
        # log_rhos = log pi(a|x) - log mu(a|x), summed over action components.
        log_rhos = get_log_rhos(target_action_log_probs,
                                behaviour_action_log_probs)
        vtrace_returns = from_importance_weights(
            log_rhos=log_rhos,
            discounts=discounts,
            rewards=rewards,
            values=values,
            bootstrap_value=bootstrap_value,
            clip_rho_threshold=clip_rho_threshold,
            clip_pg_rho_threshold=clip_pg_rho_threshold)
        return VTraceFromLogitsReturns(
            log_rhos=log_rhos,
            behaviour_action_log_probs=behaviour_action_log_probs,
            target_action_log_probs=target_action_log_probs,
            **vtrace_returns._asdict())
def from_importance_weights(log_rhos,
                            discounts,
                            rewards,
                            values,
                            bootstrap_value,
                            clip_rho_threshold=1.0,
                            clip_pg_rho_threshold=1.0,
                            name="vtrace_from_importance_weights"):
    r"""V-trace from log importance weights.

    Calculates V-trace actor critic targets as described in "IMPALA:
    Scalable Distributed Deep-RL with Importance Weighted Actor-Learner
    Architectures" by Espeholt, Soyer, Munos et al.

    In the notation used throughout documentation and comments, T refers to
    the time dimension ranging from 0 to T-1. B refers to the batch size.
    This code also supports the case where all tensors have the same number
    of additional dimensions, e.g., `rewards` is [T, B, C], `values` is
    [T, B, C], `bootstrap_value` is [B, C].

    Args:
      log_rhos: A float32 tensor of shape [T, B] representing the log
        importance sampling weights, i.e.
        log(target_policy(a) / behaviour_policy(a)). V-trace performs
        operations on rhos in log-space for numerical stability.
      discounts: A float32 tensor of shape [T, B] with discounts encountered
        when following the behaviour policy.
      rewards: A float32 tensor of shape [T, B] containing rewards generated
        by following the behaviour policy.
      values: A float32 tensor of shape [T, B] with the value function
        estimates wrt. the target policy.
      bootstrap_value: A float32 of shape [B] with the value function
        estimate at time T.
      clip_rho_threshold: A scalar float32 tensor with the clipping threshold
        for importance weights (rho) when calculating the baseline targets
        (vs). rho^bar in the paper. If None, no clipping is applied.
      clip_pg_rho_threshold: A scalar float32 tensor with the clipping
        threshold on rho_s in
        \rho_s \delta log \pi(a|x) (r + \gamma v_{s+1} - V(x_s)).
        If None, no clipping is applied.
      name: The name scope that all V-trace operations will be created in.

    Returns:
      A VTraceReturns namedtuple (vs, pg_advantages) where:
        vs: A float32 tensor of shape [T, B]. Can be used as target to
          train a baseline (V(x_t) - vs_t)^2.
        pg_advantages: A float32 tensor of shape [T, B]. Can be used as the
          advantage in the calculation of policy gradients.
    """
    log_rhos = tf.convert_to_tensor(log_rhos, dtype=tf.float32)
    discounts = tf.convert_to_tensor(discounts, dtype=tf.float32)
    rewards = tf.convert_to_tensor(rewards, dtype=tf.float32)
    values = tf.convert_to_tensor(values, dtype=tf.float32)
    bootstrap_value = tf.convert_to_tensor(bootstrap_value, dtype=tf.float32)
    if clip_rho_threshold is not None:
        clip_rho_threshold = tf.convert_to_tensor(
            clip_rho_threshold, dtype=tf.float32)
    if clip_pg_rho_threshold is not None:
        clip_pg_rho_threshold = tf.convert_to_tensor(
            clip_pg_rho_threshold, dtype=tf.float32)
    # Make sure tensor ranks are consistent.
    rho_rank = log_rhos.shape.ndims  # Usually 2.
    values.shape.assert_has_rank(rho_rank)
    bootstrap_value.shape.assert_has_rank(rho_rank - 1)
    discounts.shape.assert_has_rank(rho_rank)
    rewards.shape.assert_has_rank(rho_rank)
    if clip_rho_threshold is not None:
        clip_rho_threshold.shape.assert_has_rank(0)
    if clip_pg_rho_threshold is not None:
        clip_pg_rho_threshold.shape.assert_has_rank(0)
    with tf.name_scope(
            name,
            values=[log_rhos, discounts, rewards, values, bootstrap_value]):
        rhos = tf.exp(log_rhos)
        if clip_rho_threshold is not None:
            # rho^bar clipping (Eq. 1 in the IMPALA paper), plus summaries
            # tracking how often/how much clipping occurs.
            clipped_rhos = tf.minimum(
                clip_rho_threshold, rhos, name="clipped_rhos")
            tf.summary.histogram("clipped_rhos_1000", tf.minimum(1000.0, rhos))
            tf.summary.scalar(
                "num_of_clipped_rhos",
                tf.reduce_sum(
                    tf.cast(
                        tf.equal(clipped_rhos, clip_rho_threshold), tf.int32)))
            tf.summary.scalar("size_of_clipped_rhos", tf.size(clipped_rhos))
        else:
            clipped_rhos = rhos
        # c_s truncation level is fixed at 1.0 (c^bar in the paper).
        cs = tf.minimum(1.0, rhos, name="cs")
        # Append bootstrapped value to get [v1, ..., v_t+1]
        values_t_plus_1 = tf.concat(
            [values[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
        # Temporal-difference terms, weighted by the clipped rhos.
        deltas = clipped_rhos * (
            rewards + discounts * values_t_plus_1 - values)
        # All sequences are reversed, computation starts from the back.
        sequences = (
            tf.reverse(discounts, axis=[0]),
            tf.reverse(cs, axis=[0]),
            tf.reverse(deltas, axis=[0]),
        )

        # V-trace vs are calculated through a scan from the back to the
        # beginning of the given trajectory.
        def scanfunc(acc, sequence_item):
            # One step of the backward recursion:
            # acc_s = delta_s + gamma_s * c_s * acc_{s+1}.
            discount_t, c_t, delta_t = sequence_item
            return delta_t + discount_t * c_t * acc

        initial_values = tf.zeros_like(bootstrap_value)
        # back_prop=False: the targets are treated as constants (see also the
        # stop_gradient calls on the returned tensors below).
        vs_minus_v_xs = tf.scan(
            fn=scanfunc,
            elems=sequences,
            initializer=initial_values,
            parallel_iterations=1,
            back_prop=False,
            name="scan")
        # Reverse the results back to original order.
        vs_minus_v_xs = tf.reverse(vs_minus_v_xs, [0], name="vs_minus_v_xs")
        # Add V(x_s) to get v_s.
        vs = tf.add(vs_minus_v_xs, values, name="vs")
        # Advantage for policy gradient.
        vs_t_plus_1 = tf.concat(
            [vs[1:], tf.expand_dims(bootstrap_value, 0)], axis=0)
        if clip_pg_rho_threshold is not None:
            clipped_pg_rhos = tf.minimum(
                clip_pg_rho_threshold, rhos, name="clipped_pg_rhos")
        else:
            clipped_pg_rhos = rhos
        pg_advantages = (
            clipped_pg_rhos * (rewards + discounts * vs_t_plus_1 - values))
        # Make sure no gradients backpropagated through the returned values.
        return VTraceReturns(
            vs=tf.stop_gradient(vs),
            pg_advantages=tf.stop_gradient(pg_advantages))
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
    """Compute log importance ratios (log rhos) for V-trace.

    Stacks the per-action-component log-probs of the target and behaviour
    policies and sums their difference over the component axis, yielding
    log(pi(a|x) / mu(a|x)) per timestep and batch element.
    """
    stacked_target = tf.stack(target_action_log_probs)
    stacked_behaviour = tf.stack(behaviour_action_log_probs)
    return tf.reduce_sum(stacked_target - stacked_behaviour, axis=0)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/impala/vtrace_policy.py
|
Python
|
"""Adapted from A3CTFPolicy to add V-trace.
Keep in sync with changes to A3CTFPolicy and VtraceSurrogatePolicy."""
import numpy as np
import logging
import gym
import ray
from ray.rllib.agents.impala import vtrace
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.tf_policy import LearningRateSchedule, \
EntropyCoeffSchedule, ACTION_LOGP
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.utils import try_import_tf
tf = try_import_tf()

logger = logging.getLogger(__name__)

# Sample-batch key under which the behaviour policy's raw logits are stored
# (populated by add_behaviour_logits below, read by build_vtrace_loss).
BEHAVIOUR_LOGITS = "behaviour_logits"
class VTraceLoss:
    """IMPALA surrogate loss: policy-gradient + value + entropy terms,
    with V-trace importance-weighted targets."""

    def __init__(self,
                 actions,
                 actions_logp,
                 actions_entropy,
                 dones,
                 behaviour_action_logp,
                 behaviour_logits,
                 target_logits,
                 discount,
                 rewards,
                 values,
                 bootstrap_value,
                 dist_class,
                 model,
                 valid_mask,
                 config,
                 vf_loss_coeff=0.5,
                 entropy_coeff=0.01,
                 clip_rho_threshold=1.0,
                 clip_pg_rho_threshold=1.0):
        """Policy gradient loss with vtrace importance weighting.

        VTraceLoss takes tensors of shape [T, B, ...], where `B` is the
        batch_size. The reason we need to know `B` is for V-trace to properly
        handle episode cut boundaries.

        Args:
            actions: An int|float32 tensor of shape [T, B, ACTION_SPACE].
            actions_logp: A float32 tensor of shape [T, B].
            actions_entropy: A float32 tensor of shape [T, B].
            dones: A bool tensor of shape [T, B].
            behaviour_action_logp: Tensor of shape [T, B].
            behaviour_logits: A list with length of ACTION_SPACE of float32
                tensors of shapes
                [T, B, ACTION_SPACE[0]],
                ...,
                [T, B, ACTION_SPACE[-1]]
            target_logits: A list with length of ACTION_SPACE of float32
                tensors of shapes
                [T, B, ACTION_SPACE[0]],
                ...,
                [T, B, ACTION_SPACE[-1]]
            discount: A float32 scalar.
            rewards: A float32 tensor of shape [T, B].
            values: A float32 tensor of shape [T, B].
            bootstrap_value: A float32 tensor of shape [B].
            dist_class: action distribution class for logits.
            model: backing ModelV2 instance (passed through to vtrace).
            valid_mask: A bool tensor of valid RNN input elements (#2992).
            config: Trainer config dict.
            vf_loss_coeff: Weight of the value-function loss term.
            entropy_coeff: Weight of the entropy bonus (subtracted).
            clip_rho_threshold: rho clipping threshold for the vs targets.
            clip_pg_rho_threshold: rho clipping threshold for the PG
                advantages.
        """
        # Compute vtrace on the CPU for better perf.
        with tf.device("/cpu:0"):
            self.vtrace_returns = vtrace.multi_from_logits(
                behaviour_action_log_probs=behaviour_action_logp,
                behaviour_policy_logits=behaviour_logits,
                target_policy_logits=target_logits,
                actions=tf.unstack(actions, axis=2),
                discounts=tf.to_float(~dones) * discount,
                rewards=rewards,
                values=values,
                bootstrap_value=bootstrap_value,
                dist_class=dist_class,
                model=model,
                clip_rho_threshold=tf.cast(clip_rho_threshold, tf.float32),
                clip_pg_rho_threshold=tf.cast(clip_pg_rho_threshold,
                                              tf.float32))
        # V-trace value targets; also read by the stats function.
        self.value_targets = self.vtrace_returns.vs
        # The policy gradients loss
        self.pi_loss = -tf.reduce_sum(
            tf.boolean_mask(actions_logp * self.vtrace_returns.pg_advantages,
                            valid_mask))
        # The baseline loss
        delta = tf.boolean_mask(values - self.vtrace_returns.vs, valid_mask)
        self.vf_loss = 0.5 * tf.reduce_sum(tf.square(delta))
        # The entropy loss
        self.entropy = tf.reduce_sum(
            tf.boolean_mask(actions_entropy, valid_mask))
        # The summed weighted loss
        self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff -
                           self.entropy * entropy_coeff)
def _make_time_major(policy, seq_lens, tensor, drop_last=False):
    """Swaps batch and trajectory axis.

    Arguments:
        policy: Policy reference
        seq_lens: Sequence lengths if recurrent or None
        tensor: A tensor or list of tensors to reshape.
        drop_last: A bool indicating whether to drop the last
        trajectory item.

    Returns:
        res: A tensor with swapped axes or a list of tensors with
        swapped axes.
    """
    # Lists are handled element-wise by recursion.
    if isinstance(tensor, list):
        return [
            _make_time_major(policy, seq_lens, t, drop_last) for t in tensor
        ]
    if policy.is_recurrent():
        B = tf.shape(seq_lens)[0]
        T = tf.shape(tensor)[0] // B
    else:
        # Important: chop the tensor into batches at known episode cut
        # boundaries. TODO(ekl) this is kind of a hack
        T = policy.config["sample_batch_size"]
        B = tf.shape(tensor)[0] // T
    # [B*T, ...] -> [B, T, ...]; trailing dims are preserved dynamically.
    rs = tf.reshape(tensor, tf.concat([[B, T], tf.shape(tensor)[1:]], axis=0))
    # swap B and T axes; the permutation needs the *static* rank, hence
    # int(tf.shape(tensor).shape[0]) rather than the dynamic shape.
    res = tf.transpose(
        rs, [1, 0] + list(range(2, 1 + int(tf.shape(tensor).shape[0]))))
    if drop_last:
        return res[:-1]
    return res
def build_vtrace_loss(policy, model, dist_class, train_batch):
    """Construct the V-trace (IMPALA) loss from a train batch.

    Reshapes the flat [B*T] batch tensors into time-major [T, B, ...] form,
    splits flat logits into per-action-component tensors, and builds a
    VTraceLoss stored on `policy.loss`. Returns its total_loss tensor.
    """
    model_out, _ = model.from_batch(train_batch)
    action_dist = dist_class(model_out, model)
    # Determine how to unpack logits for the given action space.
    if isinstance(policy.action_space, gym.spaces.Discrete):
        is_multidiscrete = False
        output_hidden_shape = [policy.action_space.n]
    elif isinstance(policy.action_space,
                    gym.spaces.multi_discrete.MultiDiscrete):
        is_multidiscrete = True
        output_hidden_shape = policy.action_space.nvec.astype(np.int32)
    else:
        is_multidiscrete = False
        output_hidden_shape = 1

    def make_time_major(*args, **kw):
        # Partial application of _make_time_major for this batch.
        return _make_time_major(policy, train_batch.get("seq_lens"), *args,
                                **kw)

    actions = train_batch[SampleBatch.ACTIONS]
    dones = train_batch[SampleBatch.DONES]
    rewards = train_batch[SampleBatch.REWARDS]
    behaviour_action_logp = train_batch[ACTION_LOGP]
    behaviour_logits = train_batch[BEHAVIOUR_LOGITS]
    # One logits tensor per action component.
    unpacked_behaviour_logits = tf.split(
        behaviour_logits, output_hidden_shape, axis=1)
    unpacked_outputs = tf.split(model_out, output_hidden_shape, axis=1)
    values = model.value_function()
    if policy.is_recurrent():
        # Mask out RNN padding beyond each sequence's true length.
        max_seq_len = tf.reduce_max(train_batch["seq_lens"]) - 1
        mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len)
        mask = tf.reshape(mask, [-1])
    else:
        mask = tf.ones_like(rewards)
    # Prepare actions for loss
    loss_actions = actions if is_multidiscrete else tf.expand_dims(
        actions, axis=1)
    # Inputs are reshaped from [B * T] => [T - 1, B] for V-trace calc.
    policy.loss = VTraceLoss(
        actions=make_time_major(loss_actions, drop_last=True),
        actions_logp=make_time_major(
            action_dist.logp(actions), drop_last=True),
        actions_entropy=make_time_major(
            action_dist.multi_entropy(), drop_last=True),
        dones=make_time_major(dones, drop_last=True),
        behaviour_action_logp=make_time_major(
            behaviour_action_logp, drop_last=True),
        behaviour_logits=make_time_major(
            unpacked_behaviour_logits, drop_last=True),
        target_logits=make_time_major(unpacked_outputs, drop_last=True),
        discount=policy.config["gamma"],
        rewards=make_time_major(rewards, drop_last=True),
        values=make_time_major(values, drop_last=True),
        bootstrap_value=make_time_major(values)[-1],
        dist_class=Categorical if is_multidiscrete else dist_class,
        model=model,
        valid_mask=make_time_major(mask, drop_last=True),
        config=policy.config,
        vf_loss_coeff=policy.config["vf_loss_coeff"],
        entropy_coeff=policy.entropy_coeff,
        clip_rho_threshold=policy.config["vtrace_clip_rho_threshold"],
        clip_pg_rho_threshold=policy.config["vtrace_clip_pg_rho_threshold"])
    return policy.loss.total_loss
def stats(policy, train_batch):
    """Learner statistics reported for the V-trace policy."""
    batched_values = _make_time_major(
        policy,
        train_batch.get("seq_lens"),
        policy.model.value_function(),
        drop_last=policy.config["vtrace"])
    flat_targets = tf.reshape(policy.loss.value_targets, [-1])
    flat_values = tf.reshape(batched_values, [-1])
    return {
        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
        "policy_loss": policy.loss.pi_loss,
        "entropy": policy.loss.entropy,
        "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64),
        "var_gnorm": tf.global_norm(policy.model.trainable_variables()),
        "vf_loss": policy.loss.vf_loss,
        "vf_explained_var": explained_variance(flat_targets, flat_values),
    }
def grad_stats(policy, train_batch, grads):
    """Report the global norm of the (clipped) gradients."""
    gnorm = tf.global_norm(grads)
    return {"grad_gnorm": gnorm}
def postprocess_trajectory(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Strip NEXT_OBS from the batch before shipping it to the learner.

    V-trace never reads next observations, so dropping them saves bandwidth.
    """
    sample_batch.data.pop(SampleBatch.NEXT_OBS)
    return sample_batch
def add_behaviour_logits(policy):
    """Extra action fetch: record the model's raw logits in the batch."""
    last_out = policy.model.last_output()
    return {BEHAVIOUR_LOGITS: last_out}
def validate_config(policy, obs_space, action_space, config):
    """Check that V-trace, when enabled, uses the required batch mode."""
    if not config["vtrace"]:
        return
    assert config["batch_mode"] == "truncate_episodes", \
        "Must use `truncate_episodes` batch mode with V-trace."
def choose_optimizer(policy, config):
    """Select the TF optimizer: Adam when opt_type == "adam", else RMSProp."""
    lr = policy.cur_lr
    if policy.config["opt_type"] != "adam":
        return tf.train.RMSPropOptimizer(lr, config["decay"],
                                         config["momentum"],
                                         config["epsilon"])
    return tf.train.AdamOptimizer(lr)
def clip_gradients(policy, optimizer, loss):
    """Compute gradients and clip them by global norm (config["grad_clip"]).

    Also stashes the clipped gradient list on `policy.grads`, which the
    grad_stats function reports via tf.global_norm.
    """
    trainable = policy.model.trainable_variables()
    grads_and_vars = optimizer.compute_gradients(loss, trainable)
    raw_grads = [pair[0] for pair in grads_and_vars]
    policy.grads, _ = tf.clip_by_global_norm(raw_grads,
                                             policy.config["grad_clip"])
    return list(zip(policy.grads, trainable))
def setup_mixins(policy, obs_space, action_space, config):
    """Initialize the LR- and entropy-coefficient-schedule mixins.

    Registered as before_loss_init, so `policy.cur_lr` and
    `policy.entropy_coeff` exist by the time the loss/stats functions run.
    """
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
                                  config["entropy_coeff_schedule"])
# Assemble the V-trace (IMPALA) TF policy from the functions defined above.
VTraceTFPolicy = build_tf_policy(
    name="VTraceTFPolicy",
    get_default_config=lambda: ray.rllib.agents.impala.impala.DEFAULT_CONFIG,
    loss_fn=build_vtrace_loss,
    stats_fn=stats,
    grad_stats_fn=grad_stats,
    postprocess_fn=postprocess_trajectory,
    optimizer_fn=choose_optimizer,
    gradients_fn=clip_gradients,
    extra_action_fetches_fn=add_behaviour_logits,
    before_init=validate_config,
    before_loss_init=setup_mixins,
    mixins=[LearningRateSchedule, EntropyCoeffSchedule],
    get_batch_divisibility_req=lambda p: p.config["sample_batch_size"])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/impala/vtrace_test.py
|
Python
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for V-trace.
For details and theory see:
"IMPALA: Scalable Distributed Deep-RL with
Importance Weighted Actor-Learner Architectures"
by Espeholt, Soyer, Munos et al.
"""
from absl.testing import parameterized
import numpy as np
import vtrace
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def _shaped_arange(*shape):
"""Runs np.arange, converts to float and reshapes."""
return np.arange(np.prod(shape), dtype=np.float32).reshape(*shape)
def _softmax(logits):
"""Applies softmax non-linearity on inputs."""
return np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True)
def _ground_truth_calculation(discounts, log_rhos, rewards, values,
                              bootstrap_value, clip_rho_threshold,
                              clip_pg_rho_threshold):
    """Calculates the ground truth for V-trace in Python/Numpy.

    Returns a `vtrace.VTraceReturns` (vs, pg_advantages) computed directly
    from the definition in the IMPALA paper, for comparison against the
    TF implementation.
    """
    vs = []
    seq_len = len(discounts)
    rhos = np.exp(log_rhos)
    cs = np.minimum(rhos, 1.0)
    clipped_rhos = rhos
    if clip_rho_threshold:
        clipped_rhos = np.minimum(rhos, clip_rho_threshold)
    clipped_pg_rhos = rhos
    if clip_pg_rho_threshold:
        clipped_pg_rhos = np.minimum(rhos, clip_pg_rho_threshold)
    # This is a very inefficient way to calculate the V-trace ground truth.
    # We calculate it this way because it is close to the mathematical
    # notation of V-trace.
    # v_s = V(x_s)
    #       + \sum^{T-1}_{t=s} \gamma^{t-s}
    #         * \prod_{i=s}^{t-1} c_i
    #         * \rho_t (r_t + \gamma V(x_{t+1}) - V(x_t))
    # Note that when we take the product over c_i, we write `s:t` as the
    # notation of the paper is inclusive of the `t-1`, but Python is
    # exclusive.
    # Also note that np.prod([]) == 1.
    values_t_plus_1 = np.concatenate(
        [values, bootstrap_value[None, :]], axis=0)
    for s in range(seq_len):
        v_s = np.copy(values[s])  # Very important copy.
        # Accumulate the discounted, c-weighted TD corrections for t >= s.
        for t in range(s, seq_len):
            v_s += (np.prod(discounts[s:t], axis=0) * np.prod(cs[s:t], axis=0)
                    * clipped_rhos[t] * (rewards[t] + discounts[t] *
                                         values_t_plus_1[t + 1] - values[t]))
        vs.append(v_s)
    vs = np.stack(vs, axis=0)
    pg_advantages = (clipped_pg_rhos * (rewards + discounts * np.concatenate(
        [vs[1:], bootstrap_value[None, :]], axis=0) - values))
    return vtrace.VTraceReturns(vs=vs, pg_advantages=pg_advantages)
class LogProbsFromLogitsAndActionsTest(tf.test.TestCase,
                                       parameterized.TestCase):
    """Unit test for vtrace.log_probs_from_logits_and_actions."""

    @parameterized.named_parameters(("Batch1", 1), ("Batch2", 2))
    def test_log_probs_from_logits_and_actions(self, batch_size):
        """Tests log_probs_from_logits_and_actions."""
        seq_len = 7
        num_actions = 3
        policy_logits = _shaped_arange(seq_len, batch_size, num_actions) + 10
        # NOTE(review): randint's `high` is exclusive, so the last action
        # (num_actions - 1) is never sampled here — presumably harmless for
        # this test, but worth confirming.
        actions = np.random.randint(
            0, num_actions - 1, size=(seq_len, batch_size), dtype=np.int32)
        action_log_probs_tensor = vtrace.log_probs_from_logits_and_actions(
            policy_logits, actions)
        # Ground Truth
        # Using broadcasting to create a mask that indexes action logits
        action_index_mask = actions[..., None] == np.arange(num_actions)

        def index_with_mask(array, mask):
            return array[mask].reshape(*array.shape[:-1])

        # Note: Normally log(softmax) is not a good idea because it's not
        # numerically stable. However, in this test we have well-behaved
        # values.
        ground_truth_v = index_with_mask(
            np.log(_softmax(policy_logits)), action_index_mask)
        with self.test_session() as session:
            self.assertAllClose(ground_truth_v,
                                session.run(action_log_probs_tensor))
class VtraceTest(tf.test.TestCase, parameterized.TestCase):
    """End-to-end checks of the vtrace ops against the numpy ground truth."""

    @parameterized.named_parameters(("Batch1", 1), ("Batch5", 5))
    def test_vtrace(self, batch_size):
        """Tests V-trace against ground truth data calculated in python."""
        seq_len = 5
        # Create log_rhos such that rho will span from near-zero to above the
        # clipping thresholds. In particular, calculate log_rhos in
        # [-2.5, 2.5),
        # so that rho is in approx [0.08, 12.2).
        log_rhos = _shaped_arange(seq_len, batch_size) / (batch_size * seq_len)
        log_rhos = 5 * (log_rhos - 0.5)  # [0.0, 1.0) -> [-2.5, 2.5).
        values = {
            "log_rhos": log_rhos,
            # T, B where B_i: [0.9 / (i+1)] * T
            "discounts": np.array([[0.9 / (b + 1) for b in range(batch_size)]
                                   for _ in range(seq_len)]),
            "rewards": _shaped_arange(seq_len, batch_size),
            "values": _shaped_arange(seq_len, batch_size) / batch_size,
            "bootstrap_value": _shaped_arange(batch_size) + 1.0,
            "clip_rho_threshold": 3.7,
            "clip_pg_rho_threshold": 2.2,
        }
        output = vtrace.from_importance_weights(**values)
        with self.test_session() as session:
            output_v = session.run(output)
        ground_truth_v = _ground_truth_calculation(**values)
        # VTraceReturns is a namedtuple, so zip compares field by field.
        for a, b in zip(ground_truth_v, output_v):
            self.assertAllClose(a, b)

    @parameterized.named_parameters(("Batch1", 1), ("Batch2", 2))
    def test_vtrace_from_logits(self, batch_size):
        """Tests V-trace calculated from logits."""
        seq_len = 5
        num_actions = 3
        clip_rho_threshold = None  # No clipping.
        clip_pg_rho_threshold = None  # No clipping.
        # Intentionally leaving shapes unspecified to test if V-trace can
        # deal with that.
        placeholders = {
            # T, B, NUM_ACTIONS
            "behaviour_policy_logits": tf.placeholder(
                dtype=tf.float32, shape=[None, None, None]),
            # T, B, NUM_ACTIONS
            "target_policy_logits": tf.placeholder(
                dtype=tf.float32, shape=[None, None, None]),
            "actions": tf.placeholder(dtype=tf.int32, shape=[None, None]),
            "discounts": tf.placeholder(dtype=tf.float32, shape=[None, None]),
            "rewards": tf.placeholder(dtype=tf.float32, shape=[None, None]),
            "values": tf.placeholder(dtype=tf.float32, shape=[None, None]),
            "bootstrap_value": tf.placeholder(dtype=tf.float32, shape=[None]),
        }
        from_logits_output = vtrace.from_logits(
            clip_rho_threshold=clip_rho_threshold,
            clip_pg_rho_threshold=clip_pg_rho_threshold,
            **placeholders)
        target_log_probs = vtrace.log_probs_from_logits_and_actions(
            placeholders["target_policy_logits"], placeholders["actions"])
        behaviour_log_probs = vtrace.log_probs_from_logits_and_actions(
            placeholders["behaviour_policy_logits"], placeholders["actions"])
        log_rhos = target_log_probs - behaviour_log_probs
        ground_truth = (log_rhos, behaviour_log_probs, target_log_probs)
        values = {
            "behaviour_policy_logits": _shaped_arange(seq_len, batch_size,
                                                      num_actions),
            "target_policy_logits": _shaped_arange(seq_len, batch_size,
                                                   num_actions),
            # NOTE(review): randint's `high` is exclusive, so the last
            # action index is never sampled — presumably harmless here.
            "actions": np.random.randint(
                0, num_actions - 1, size=(seq_len, batch_size)),
            "discounts": np.array(  # T, B where B_i: [0.9 / (i+1)] * T
                [[0.9 / (b + 1) for b in range(batch_size)]
                 for _ in range(seq_len)]),
            "rewards": _shaped_arange(seq_len, batch_size),
            "values": _shaped_arange(seq_len, batch_size) / batch_size,
            "bootstrap_value": _shaped_arange(batch_size) + 1.0,  # B
        }
        feed_dict = {placeholders[k]: v for k, v in values.items()}
        with self.test_session() as session:
            from_logits_output_v = session.run(
                from_logits_output, feed_dict=feed_dict)
            (ground_truth_log_rhos, ground_truth_behaviour_action_log_probs,
             ground_truth_target_action_log_probs) = session.run(
                 ground_truth, feed_dict=feed_dict)
        # Calculate V-trace using the ground truth logits.
        from_iw = vtrace.from_importance_weights(
            log_rhos=ground_truth_log_rhos,
            discounts=values["discounts"],
            rewards=values["rewards"],
            values=values["values"],
            bootstrap_value=values["bootstrap_value"],
            clip_rho_threshold=clip_rho_threshold,
            clip_pg_rho_threshold=clip_pg_rho_threshold)
        with self.test_session() as session:
            from_iw_v = session.run(from_iw)
        self.assertAllClose(from_iw_v.vs, from_logits_output_v.vs)
        self.assertAllClose(from_iw_v.pg_advantages,
                            from_logits_output_v.pg_advantages)
        self.assertAllClose(ground_truth_behaviour_action_log_probs,
                            from_logits_output_v.behaviour_action_log_probs)
        self.assertAllClose(ground_truth_target_action_log_probs,
                            from_logits_output_v.target_action_log_probs)
        self.assertAllClose(ground_truth_log_rhos,
                            from_logits_output_v.log_rhos)

    def test_higher_rank_inputs_for_importance_weights(self):
        """Checks support for additional dimensions in inputs."""
        placeholders = {
            "log_rhos": tf.placeholder(
                dtype=tf.float32, shape=[None, None, 1]),
            "discounts": tf.placeholder(
                dtype=tf.float32, shape=[None, None, 1]),
            "rewards": tf.placeholder(
                dtype=tf.float32, shape=[None, None, 42]),
            "values": tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
            "bootstrap_value": tf.placeholder(
                dtype=tf.float32, shape=[None, 42])
        }
        output = vtrace.from_importance_weights(**placeholders)
        self.assertEqual(output.vs.shape.as_list()[-1], 42)

    def test_inconsistent_rank_inputs_for_importance_weights(self):
        """Test one of many possible errors in shape of inputs."""
        placeholders = {
            "log_rhos": tf.placeholder(
                dtype=tf.float32, shape=[None, None, 1]),
            "discounts": tf.placeholder(
                dtype=tf.float32, shape=[None, None, 1]),
            "rewards": tf.placeholder(
                dtype=tf.float32, shape=[None, None, 42]),
            "values": tf.placeholder(dtype=tf.float32, shape=[None, None, 42]),
            # Should be [None, 42].
            "bootstrap_value": tf.placeholder(dtype=tf.float32, shape=[None])
        }
        with self.assertRaisesRegexp(ValueError, "must have rank 2"):
            vtrace.from_importance_weights(**placeholders)
if __name__ == "__main__":
    # Run the tf.test-based test cases defined above.
    tf.test.main()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/marwil/__init__.py
|
Python
|
from ray.rllib.agents.marwil.marwil import MARWILTrainer, DEFAULT_CONFIG

# Public API of the MARWIL agent package.
__all__ = ["MARWILTrainer", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/marwil/marwil.py
|
Python
|
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.agents.marwil.marwil_policy import MARWILPolicy
from ray.rllib.optimizers import SyncBatchReplayOptimizer
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # You should override this to point to an offline dataset (see agent.py).
    "input": "sampler",
    # Use importance sampling estimators for reward
    "input_evaluation": ["is", "wis"],
    # Scaling of advantages in the exponential re-weighting term
    # exp(beta * advantage); with beta = 0, MARWIL reduces to plain
    # imitation learning.
    "beta": 1.0,
    # Balancing value estimation loss and policy optimization loss
    "vf_coeff": 1.0,
    # Whether to calculate cumulative rewards
    "postprocess_inputs": True,
    # Whether to rollout "complete_episodes" or "truncate_episodes"
    "batch_mode": "complete_episodes",
    # Learning rate for adam optimizer
    "lr": 1e-4,
    # Number of timesteps collected for each SGD round
    "train_batch_size": 2000,
    # Number of steps max to keep in the batch replay buffer
    "replay_buffer_size": 100000,
    # Number of steps to read before learning starts
    "learning_starts": 0,
    # === Parallelism ===
    "num_workers": 0,
})
# __sphinx_doc_end__
# yapf: enable
def make_optimizer(workers, config):
    """Build the synchronous batch-replay optimizer MARWIL trains with."""
    replay_kwargs = {
        "learning_starts": config["learning_starts"],
        "buffer_size": config["replay_buffer_size"],
        "train_batch_size": config["train_batch_size"],
    }
    return SyncBatchReplayOptimizer(workers, **replay_kwargs)
def validate_config(config):
    """Reject configs requesting the (unimplemented) PyTorch backend.

    Args:
        config (dict): Trainer config; only "use_pytorch" is read.

    Raises:
        ValueError: If "use_pytorch" is truthy — MARWIL is TF-only.
    """
    # PyTorch check. Bug fix: the original message said "DDPG" (copy/paste
    # error from another trainer); this validator belongs to MARWIL.
    if config["use_pytorch"]:
        raise ValueError("MARWIL does not support PyTorch yet! Use tf instead.")
# MARWIL trainer: monotonic advantage re-weighted imitation learning,
# trained via a synchronous batch-replay optimizer.
MARWILTrainer = build_trainer(
    name="MARWIL",
    default_config=DEFAULT_CONFIG,
    default_policy=MARWILPolicy,
    validate_config=validate_config,
    make_policy_optimizer=make_optimizer
)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/marwil/marwil_policy.py
|
Python
|
import ray
from ray.rllib.models import ModelCatalog
from ray.rllib.evaluation.postprocessing import compute_advantages, \
Postprocessing
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.evaluation.metrics import LEARNER_STATS_KEY
from ray.rllib.utils.annotations import override
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.utils import try_import_tf
from ray.rllib.utils.tf_ops import scope_vars
tf = try_import_tf()

# TF variable-scope names for the separate policy and value networks.
POLICY_SCOPE = "p_func"
VALUE_SCOPE = "v_func"
class ValueLoss:
    """Mean squared error between value predictions and observed returns."""

    def __init__(self, state_values, cumulative_rewards):
        value_error = state_values - cumulative_rewards
        self.loss = 0.5 * tf.reduce_mean(tf.square(value_error))
class ReweightedImitationLoss:
    """MARWIL policy loss: log-likelihood of the taken actions, weighted by
    exp(beta * normalized advantage)."""

    def __init__(self, state_values, cumulative_rewards, logits, actions,
                 action_space, beta, model):
        # Running estimate used to normalize advantages; initialized high so
        # early exp() weights stay small.
        ma_adv_norm = tf.get_variable(
            name="moving_average_of_advantage_norm",
            dtype=tf.float32,
            initializer=100.0,
            trainable=False)
        # advantage estimation
        adv = cumulative_rewards - state_values
        # update averaged advantage norm (slow EMA of E[adv^2], rate 1e-6)
        update_adv_norm = tf.assign_add(
            ref=ma_adv_norm,
            value=1e-6 * (tf.reduce_mean(tf.square(adv)) - ma_adv_norm))
        # exponentially weighted advantages; the control dependency ensures
        # the moving average updates whenever the loss is evaluated
        with tf.control_dependencies([update_adv_norm]):
            exp_advs = tf.exp(
                beta * tf.divide(adv, 1e-8 + tf.sqrt(ma_adv_norm)))
        # log\pi_\theta(a|s)
        dist_class, _ = ModelCatalog.get_action_dist(action_space, {})
        action_dist = dist_class(logits, model)
        logprobs = action_dist.logp(actions)
        # Gradients flow only through logprobs; the weights are constants.
        self.loss = -1.0 * tf.reduce_mean(
            tf.stop_gradient(exp_advs) * logprobs)
class MARWILPostprocessing:
    """Adds the advantages field to the trajectory."""

    @override(Policy)
    def postprocess_trajectory(self,
                               sample_batch,
                               other_agent_batches=None,
                               episode=None):
        # MARWIL requires batches holding complete episodes, so the final
        # done flag must be set; otherwise we cannot compute full returns.
        if not sample_batch["dones"][-1]:
            raise NotImplementedError(
                "last done mask in a batch should be True. "
                "For now, we only support reading experience batches produced "
                "with batch_mode='complete_episodes'.",
                len(sample_batch[SampleBatch.DONES]),
                sample_batch[SampleBatch.DONES][-1])
        # Episode finished: no bootstrap value needed.
        last_r = 0.0
        return compute_advantages(
            sample_batch, last_r, gamma=self.config["gamma"], use_gae=False)
class MARWILPolicy(MARWILPostprocessing, TFPolicy):
    """TF1 graph-mode policy for MARWIL.

    Builds the policy/value networks, the advantage-weighted imitation loss
    plus value loss, and wires everything into the TFPolicy base class.
    """

    def __init__(self, observation_space, action_space, config):
        # NOTE(review): defaults are merged from the DQN config, not a
        # MARWIL-specific one — presumably for shared keys; confirm this is
        # intentional rather than a copy/paste artifact.
        config = dict(ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG, **config)
        self.config = config

        dist_class, logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"])

        # Action inputs (observation placeholder + prev action/reward).
        self.obs_t = tf.placeholder(
            tf.float32, shape=(None, ) + observation_space.shape)
        prev_actions_ph = ModelCatalog.get_action_placeholder(action_space)
        prev_rewards_ph = tf.placeholder(
            tf.float32, [None], name="prev_reward")

        with tf.variable_scope(POLICY_SCOPE) as scope:
            self.model = ModelCatalog.get_model({
                "obs": self.obs_t,
                "prev_actions": prev_actions_ph,
                "prev_rewards": prev_rewards_ph,
                "is_training": self._get_is_training_placeholder(),
            }, observation_space, action_space, logit_dim,
                self.config["model"])
            logits = self.model.outputs
            self.p_func_vars = scope_vars(scope.name)

        # Action outputs
        action_dist = dist_class(logits, self.model)
        self.output_actions = action_dist.sample()

        # Training inputs (actions taken and cumulative episode returns).
        self.act_t = ModelCatalog.get_action_placeholder(action_space)
        self.cum_rew_t = tf.placeholder(tf.float32, [None], name="reward")

        # v network evaluation
        with tf.variable_scope(VALUE_SCOPE) as scope:
            state_values = self.model.value_function()
            self.v_func_vars = scope_vars(scope.name)

        self.v_loss = self._build_value_loss(state_values, self.cum_rew_t)
        self.p_loss = self._build_policy_loss(state_values, self.cum_rew_t,
                                              logits, self.act_t,
                                              action_space)

        # which kind of objective to optimize: weighted sum of both losses
        objective = (
            self.p_loss.loss + self.config["vf_coeff"] * self.v_loss.loss)
        self.explained_variance = tf.reduce_mean(
            explained_variance(self.cum_rew_t, state_values))

        # initialize TFPolicy
        self.sess = tf.get_default_session()
        self.loss_inputs = [
            (SampleBatch.CUR_OBS, self.obs_t),
            (SampleBatch.ACTIONS, self.act_t),
            (Postprocessing.ADVANTAGES, self.cum_rew_t),
        ]
        TFPolicy.__init__(
            self,
            observation_space,
            action_space,
            self.config,
            self.sess,
            obs_input=self.obs_t,
            action_sampler=self.output_actions,
            action_logp=action_dist.sampled_action_logp(),
            loss=objective,
            model=self.model,
            loss_inputs=self.loss_inputs,
            state_inputs=self.model.state_in,
            state_outputs=self.model.state_out,
            prev_action_input=prev_actions_ph,
            prev_reward_input=prev_rewards_ph)
        self.sess.run(tf.global_variables_initializer())

        # Fetched alongside gradients for learner stats reporting.
        self.stats_fetches = {
            "total_loss": objective,
            "vf_explained_var": self.explained_variance,
            "policy_loss": self.p_loss.loss,
            "vf_loss": self.v_loss.loss
        }

    def _build_value_loss(self, state_values, cum_rwds):
        # MSE between V(s) and empirical returns.
        return ValueLoss(state_values, cum_rwds)

    def _build_policy_loss(self, state_values, cum_rwds, logits, actions,
                           action_space):
        # Advantage-weighted imitation term; beta controls the weighting.
        return ReweightedImitationLoss(state_values, cum_rwds, logits,
                                       actions, action_space,
                                       self.config["beta"], self.model)

    @override(TFPolicy)
    def extra_compute_grad_fetches(self):
        # Report the stats dict under the standard learner-stats key.
        return {LEARNER_STATS_KEY: self.stats_fetches}

    @override(Policy)
    def get_initial_state(self):
        # Initial RNN state from the model (empty if not recurrent).
        return self.model.state_init
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/mock.py
|
Python
|
import os
import pickle
import numpy as np
from ray.tune import result as tune_result
from ray.rllib.agents.trainer import Trainer, with_common_config
class _MockTrainer(Trainer):
    """Mock trainer for use in tests"""

    _name = "MockTrainer"
    _default_config = with_common_config({
        "mock_error": False,
        "persistent_error": False,
        "test_variable": 1,
        "num_workers": 0,
        "user_checkpoint_freq": 0,
        "object_store_memory_per_worker": 0,
        "object_store_memory": 0,
    })

    @classmethod
    def default_resource_request(cls, config):
        # The mock needs no resources at all.
        return None

    def _init(self, config, env_creator):
        self.info = None
        self.restored = False

    def _train(self):
        # Optionally fail on the second iteration (once, or persistently) to
        # exercise failure-recovery code paths.
        should_fail = (
            self.config["mock_error"] and self.iteration == 1
            and (self.config["persistent_error"] or not self.restored))
        if should_fail:
            raise Exception("mock error")
        result = {
            "episode_reward_mean": 10,
            "episode_len_mean": 10,
            "timesteps_this_iter": 10,
            "info": {},
        }
        freq = self.config["user_checkpoint_freq"]
        if freq > 0 and self.iteration > 0 and self.iteration % freq == 0:
            result[tune_result.SHOULD_CHECKPOINT] = True
        return result

    def _save(self, checkpoint_dir):
        path = os.path.join(checkpoint_dir, "mock_agent.pkl")
        with open(path, "wb") as f:
            pickle.dump(self.info, f)
        return path

    def _restore(self, checkpoint_path):
        with open(checkpoint_path, "rb") as f:
            self.info = pickle.load(f)
        self.restored = True

    def _register_if_needed(self, env_object):
        pass

    def set_info(self, info):
        self.info = info
        return info

    def get_info(self):
        return self.info
class _SigmoidFakeData(_MockTrainer):
    """Trainer that returns sigmoid learning curves.
    This can be helpful for evaluating early stopping algorithms."""

    _name = "SigmoidFakeData"
    _default_config = with_common_config({
        "width": 100,
        "height": 100,
        "offset": 0,
        "iter_time": 10,
        "iter_timesteps": 1,
        "num_workers": 0,
        "object_store_memory_per_worker": 0,
        "object_store_memory": 0,
    })

    def _train(self):
        # Reward is a shifted, width-scaled tanh of the iteration count,
        # scaled to the configured height.
        step = max(0, self.iteration - self.config["offset"])
        value = self.config["height"] * np.tanh(
            float(step) / self.config["width"])
        return dict(
            episode_reward_mean=value,
            episode_len_mean=value,
            timesteps_this_iter=self.config["iter_timesteps"],
            time_this_iter_s=self.config["iter_time"],
            info={})
class _ParameterTuningTrainer(_MockTrainer):
    """Mock trainer whose reward grows linearly with the iteration count."""

    _name = "ParameterTuningTrainer"
    _default_config = with_common_config({
        "reward_amt": 10,
        "dummy_param": 10,
        "dummy_param2": 15,
        "iter_time": 10,
        "iter_timesteps": 1,
        "num_workers": 0,
        "object_store_memory_per_worker": 0,
        "object_store_memory": 0,
    })

    def _train(self):
        amt = self.config["reward_amt"]
        return {
            "episode_reward_mean": amt * self.iteration,
            "episode_len_mean": amt,
            "timesteps_this_iter": self.config["iter_timesteps"],
            "time_this_iter_s": self.config["iter_time"],
            "info": {},
        }
def _agent_import_failed(trace):
    """Returns dummy agent class for if PyTorch etc. is not installed."""

    class _AgentImportFailed(Trainer):
        _name = "AgentImportFailed"
        _default_config = with_common_config({})

        def _setup(self, config):
            # Defer the failure: re-raise the captured import traceback only
            # when the agent is actually instantiated/used.
            raise ImportError(trace)

    return _AgentImportFailed
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/pg/__init__.py
|
Python
|
from ray.rllib.agents.pg.pg import PGTrainer, DEFAULT_CONFIG
from ray.rllib.agents.pg.pg_tf_policy import pg_tf_loss, \
post_process_advantages
from ray.rllib.agents.pg.pg_torch_policy import pg_torch_loss
__all__ = ["PGTrainer", "pg_tf_loss", "pg_torch_loss",
"post_process_advantages", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/pg/pg.py
|
Python
|
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.agents.pg.pg_tf_policy import PGTFPolicy
# yapf: disable
# __sphinx_doc_begin__
# Default hyperparameters for vanilla Policy Gradient, layered on top of the
# common trainer config.
DEFAULT_CONFIG = with_common_config({
    # No remote workers by default.
    "num_workers": 0,
    # Learning rate.
    "lr": 0.0004,
})
# __sphinx_doc_end__
# yapf: enable
def get_policy_class(config):
    """Select the torch or TF PG policy class based on ``use_pytorch``."""
    if not config["use_pytorch"]:
        return PGTFPolicy
    # Imported lazily so torch is only required when actually requested.
    from ray.rllib.agents.pg.pg_torch_policy import PGTorchPolicy
    return PGTorchPolicy
# Vanilla Policy Gradient trainer, built from the generic trainer template;
# the policy class is swapped per-framework via get_policy_class.
PGTrainer = build_trainer(
    name="PG",
    default_config=DEFAULT_CONFIG,
    default_policy=PGTFPolicy,
    get_policy_class=get_policy_class)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/pg/pg_tf_policy.py
|
Python
|
import ray
from ray.rllib.evaluation.postprocessing import Postprocessing, \
compute_advantages
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
def post_process_advantages(policy, sample_batch, other_agent_batches=None,
                            episode=None):
    """This adds the "advantages" column to the sample train_batch."""
    # last_r=0.0 and use_gae=False: plain discounted returns, no GAE.
    return compute_advantages(
        sample_batch, 0.0, policy.config["gamma"], use_gae=False)
def pg_tf_loss(policy, model, dist_class, train_batch):
    """The basic policy gradients loss: -E[log pi(a|s) * A]."""
    logits, _ = model.from_batch(train_batch)
    dist = dist_class(logits, model)
    log_probs = dist.logp(train_batch[SampleBatch.ACTIONS])
    advantages = train_batch[Postprocessing.ADVANTAGES]
    return -tf.reduce_mean(log_probs * advantages)
# TF policy for vanilla PG: default config pulled from pg.py, plain
# discounted-return postprocessing and the policy-gradient loss above.
PGTFPolicy = build_tf_policy(
    name="PGTFPolicy",
    get_default_config=lambda: ray.rllib.agents.pg.pg.DEFAULT_CONFIG,
    postprocess_fn=post_process_advantages,
    loss_fn=pg_tf_loss)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/pg/pg_torch_policy.py
|
Python
|
import ray
from ray.rllib.agents.pg.pg_tf_policy import post_process_advantages
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.framework import try_import_torch
torch, _ = try_import_torch()
def pg_torch_loss(policy, model, dist_class, train_batch):
    """The basic policy gradients loss: -E[log pi(a|s) * A]."""
    logits, _ = model.from_batch(train_batch)
    dist = dist_class(logits, model)
    log_probs = dist.logp(train_batch[SampleBatch.ACTIONS])
    # Save the error in the policy object so the stats function can report it.
    policy.pi_err = -torch.mean(
        log_probs * train_batch[Postprocessing.ADVANTAGES])
    return policy.pi_err
def pg_loss_stats(policy, train_batch):
    """Report the policy loss recorded by the loss function."""
    loss_value = policy.pi_err.item()
    return {"policy_loss": loss_value}
# Torch policy for vanilla PG: shares the TF postprocessing function and
# default config; loss/stats are the torch variants above.
PGTorchPolicy = build_torch_policy(
    name="PGTorchPolicy",
    get_default_config=lambda: ray.rllib.agents.pg.pg.DEFAULT_CONFIG,
    loss_fn=pg_torch_loss,
    stats_fn=pg_loss_stats,
    postprocess_fn=post_process_advantages)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/pg/tests/test_pg.py
|
Python
|
import numpy as np
import unittest
import ray
import ray.rllib.agents.pg as pg
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import check, fc
class TestPG(unittest.TestCase):
    """Tests PG trainer compilation (tf + torch) and the exact loss math."""

    @classmethod
    def setUpClass(cls):
        # Start ray once for the whole class. The original called ray.init()
        # directly in the class body, which runs at *import* time and never
        # shuts ray down.
        ray.init()

    @classmethod
    def tearDownClass(cls):
        ray.shutdown()

    def test_pg_compilation(self):
        """Test whether a PGTrainer can be built with both frameworks."""
        config = pg.DEFAULT_CONFIG.copy()
        config["num_workers"] = 0  # Run locally.
        # tf.
        trainer = pg.PGTrainer(config=config, env="CartPole-v0")
        num_iterations = 2
        for i in range(num_iterations):
            trainer.train()
        # Torch.
        config["use_pytorch"] = True
        trainer = pg.PGTrainer(config=config, env="CartPole-v0")
        for i in range(num_iterations):
            trainer.train()

    def test_pg_loss_functions(self):
        """Tests the PG loss function math."""
        config = pg.DEFAULT_CONFIG.copy()
        config["num_workers"] = 0  # Run locally.
        config["eager"] = True
        config["gamma"] = 0.99
        config["model"]["fcnet_hiddens"] = [10]
        config["model"]["fcnet_activation"] = "linear"

        # Fake CartPole episode of n timesteps.
        train_batch = {
            SampleBatch.CUR_OBS: np.array([
                [0.1, 0.2, 0.3, 0.4],
                [0.5, 0.6, 0.7, 0.8],
                [0.9, 1.0, 1.1, 1.2]
            ]),
            SampleBatch.ACTIONS: np.array([0, 1, 1]),
            SampleBatch.REWARDS: np.array([1.0, 1.0, 1.0]),
            SampleBatch.DONES: np.array([False, False, True])
        }

        # tf.
        trainer = pg.PGTrainer(config=config, env="CartPole-v0")
        policy = trainer.get_policy()
        # Renamed from `vars` to avoid shadowing the builtin.
        model_vars = policy.model.trainable_variables()

        # Post-process (calculate simple (non-GAE) advantages) and attach to
        # train_batch dict.
        # A = [0.99^2 * 1.0 + 0.99 * 1.0 + 1.0, 0.99 * 1.0 + 1.0, 1.0] =
        # [2.9701, 1.99, 1.0]
        train_batch = pg.post_process_advantages(policy, train_batch)
        # Check Advantage values.
        check(train_batch[Postprocessing.ADVANTAGES], [2.9701, 1.99, 1.0])

        # Actual loss results.
        results = pg.pg_tf_loss(
            policy, policy.model, dist_class=Categorical,
            train_batch=train_batch
        )

        # Calculate expected results by running the 2-layer linear net by
        # hand on the raw weights.
        expected_logits = fc(
            fc(
                train_batch[SampleBatch.CUR_OBS],
                model_vars[0].numpy(), model_vars[1].numpy()
            ),
            model_vars[2].numpy(), model_vars[3].numpy()
        )
        expected_logp = Categorical(expected_logits, policy.model).logp(
            train_batch[SampleBatch.ACTIONS]
        )
        expected_loss = -np.mean(
            expected_logp * train_batch[Postprocessing.ADVANTAGES]
        )
        check(results.numpy(), expected_loss, decimals=4)

        # Torch.
        config["use_pytorch"] = True
        trainer = pg.PGTrainer(config=config, env="CartPole-v0")
        policy = trainer.get_policy()
        train_batch = policy._lazy_tensor_dict(train_batch)
        results = pg.pg_torch_loss(
            policy, policy.model, dist_class=TorchCategorical,
            train_batch=train_batch
        )
        expected_logits = policy.model._last_output
        expected_logp = TorchCategorical(expected_logits, policy.model).logp(
            train_batch[SampleBatch.ACTIONS]
        )
        expected_loss = -np.mean(
            expected_logp.detach().numpy() *
            train_batch[Postprocessing.ADVANTAGES].numpy()
        )
        check(results.detach().numpy(), expected_loss, decimals=4)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ppo/__init__.py
|
Python
|
from ray.rllib.agents.ppo.ppo import PPOTrainer, DEFAULT_CONFIG
from ray.rllib.agents.ppo.appo import APPOTrainer
from ray.rllib.utils import renamed_agent
# Backwards-compatible alias: "PPOAgent" was the pre-rename class name.
PPOAgent = renamed_agent(PPOTrainer)
__all__ = ["PPOAgent", "APPOTrainer", "PPOTrainer", "DEFAULT_CONFIG"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ppo/appo.py
|
Python
|
from ray.rllib.agents.ppo.appo_policy import AsyncPPOTFPolicy
from ray.rllib.agents.trainer import with_base_config
from ray.rllib.agents.ppo.ppo import update_kl
from ray.rllib.agents import impala
# yapf: disable
# __sphinx_doc_begin__
# APPO defaults: IMPALA's config with PPO-surrogate-specific options added.
DEFAULT_CONFIG = with_base_config(impala.DEFAULT_CONFIG, {
    # Whether to use V-trace weighted advantages. If false, PPO GAE advantages
    # will be used instead.
    "vtrace": False,

    # == These two options only apply if vtrace: False ==
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # GAE(lambda) parameter
    "lambda": 1.0,

    # == PPO surrogate loss options ==
    "clip_param": 0.4,

    # == PPO KL Loss options ==
    "use_kl_loss": False,
    "kl_coeff": 1.0,
    "kl_target": 0.01,

    # == IMPALA optimizer params (see documentation in impala.py) ==
    "sample_batch_size": 50,
    "train_batch_size": 500,
    "min_iter_time_s": 10,
    "num_workers": 2,
    "num_gpus": 0,
    "num_data_loader_buffers": 1,
    "minibatch_buffer_size": 1,
    "num_sgd_iter": 1,
    "replay_proportion": 0.0,
    "replay_buffer_num_slots": 100,
    "learner_queue_size": 16,
    "learner_queue_timeout": 300,
    "max_sample_requests_in_flight_per_worker": 2,
    "broadcast_interval": 1,
    "grad_clip": 40.0,
    "opt_type": "adam",
    "lr": 0.0005,
    "lr_schedule": None,
    "decay": 0.99,
    "momentum": 0.0,
    "epsilon": 0.1,
    "vf_loss_coeff": 0.5,
    "entropy_coeff": 0.01,
    "entropy_coeff_schedule": None,
})
# __sphinx_doc_end__
# yapf: enable
def update_target_and_kl(trainer, fetches):
    """Sync the target net (and optionally the KL coeff) every N learner steps."""
    learner = trainer.optimizer.learner
    if learner.num_steps >= trainer.target_update_frequency:
        # Reset the counter and copy the live weights into the target net.
        learner.num_steps = 0
        trainer.workers.local_worker().foreach_trainable_policy(
            lambda p, _: p.update_target())
        # Also update KL Coeff
        if trainer.config["use_kl_loss"]:
            update_kl(trainer, learner.stats)
def initialize_target(trainer):
    """Sync the target network once after init and set the update period."""
    local_worker = trainer.workers.local_worker()
    local_worker.foreach_trainable_policy(lambda p, _: p.update_target())
    # Target updates happen once per full train pass over the minibatch
    # buffer (num_sgd_iter * minibatch_buffer_size learner steps).
    trainer.target_update_frequency = (
        trainer.config["num_sgd_iter"] *
        trainer.config["minibatch_buffer_size"])
# APPO = IMPALA trainer with the async-PPO policy, target-network init after
# startup, and target/KL updates hooked after each optimizer step.
APPOTrainer = impala.ImpalaTrainer.with_updates(
    name="APPO",
    default_config=DEFAULT_CONFIG,
    default_policy=AsyncPPOTFPolicy,
    get_policy_class=lambda _: AsyncPPOTFPolicy,
    after_init=initialize_target,
    after_optimizer_step=update_target_and_kl)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ppo/appo_policy.py
|
Python
|
"""Adapted from VTraceTFPolicy to use the PPO surrogate loss.
Keep in sync with changes to VTraceTFPolicy."""
import numpy as np
import logging
import gym
from ray.rllib.agents.impala import vtrace
from ray.rllib.agents.impala.vtrace_policy import _make_time_major, \
BEHAVIOUR_LOGITS, clip_gradients, validate_config, choose_optimizer
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.utils import try_import_tf
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.policy.tf_policy import LearningRateSchedule, TFPolicy
from ray.rllib.agents.ppo.ppo_policy import KLCoeffMixin, ValueNetworkMixin
from ray.rllib.models import ModelCatalog
from ray.rllib.utils.annotations import override
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.utils.tf_ops import make_tf_callable
tf = try_import_tf()
POLICY_SCOPE = "func"
TARGET_POLICY_SCOPE = "target_func"
logger = logging.getLogger(__name__)
class PPOSurrogateLoss:
    """Loss used when V-trace is disabled.

    Arguments:
        prev_actions_logp: A float32 tensor of shape [T, B].
        actions_logp: A float32 tensor of shape [T, B].
        action_kl: A float32 tensor of shape [T, B].
        actions_entropy: A float32 tensor of shape [T, B].
        values: A float32 tensor of shape [T, B].
        valid_mask: A bool tensor of valid RNN input elements (#2992).
        advantages: A float32 tensor of shape [T, B].
        value_targets: A float32 tensor of shape [T, B].
        vf_loss_coeff (float): Coefficient of the value function loss.
        entropy_coeff (float): Coefficient of the entropy regularizer.
        clip_param (float): Clip parameter.
        cur_kl_coeff (float): Coefficient for KL loss.
        use_kl_loss (bool): If true, use KL loss.
    """

    def __init__(self,
                 prev_actions_logp,
                 actions_logp,
                 action_kl,
                 actions_entropy,
                 values,
                 valid_mask,
                 advantages,
                 value_targets,
                 vf_loss_coeff=0.5,
                 entropy_coeff=0.01,
                 clip_param=0.3,
                 cur_kl_coeff=None,
                 use_kl_loss=False):
        def reduce_mean_valid(t):
            # Mean over real timesteps only (masks out RNN padding).
            return tf.reduce_mean(tf.boolean_mask(t, valid_mask))

        # Standard PPO clipped-surrogate objective.
        logp_ratio = tf.exp(actions_logp - prev_actions_logp)

        surrogate_loss = tf.minimum(
            advantages * logp_ratio,
            advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
                                          1 + clip_param))

        self.mean_kl = reduce_mean_valid(action_kl)
        self.pi_loss = -reduce_mean_valid(surrogate_loss)

        # The baseline loss
        delta = values - value_targets
        self.value_targets = value_targets
        self.vf_loss = 0.5 * reduce_mean_valid(tf.square(delta))

        # The entropy loss
        self.entropy = reduce_mean_valid(actions_entropy)

        # The summed weighted loss
        self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff -
                           self.entropy * entropy_coeff)

        # Optional additional KL Loss
        if use_kl_loss:
            self.total_loss += cur_kl_coeff * self.mean_kl
class VTraceSurrogateLoss:
    """PPO surrogate loss with V-trace corrected advantages (vtrace=True)."""

    def __init__(self,
                 actions,
                 prev_actions_logp,
                 actions_logp,
                 old_policy_actions_logp,
                 action_kl,
                 actions_entropy,
                 dones,
                 behaviour_logits,
                 old_policy_behaviour_logits,
                 target_logits,
                 discount,
                 rewards,
                 values,
                 bootstrap_value,
                 dist_class,
                 model,
                 valid_mask,
                 vf_loss_coeff=0.5,
                 entropy_coeff=0.01,
                 clip_rho_threshold=1.0,
                 clip_pg_rho_threshold=1.0,
                 clip_param=0.3,
                 cur_kl_coeff=None,
                 use_kl_loss=False):
        """APPO Loss, with IS modifications and V-trace for Advantage Estimation

        VTraceLoss takes tensors of shape [T, B, ...], where `B` is the
        batch_size. The reason we need to know `B` is for V-trace to properly
        handle episode cut boundaries.

        Arguments:
            actions: An int|float32 tensor of shape [T, B, logit_dim].
            prev_actions_logp: A float32 tensor of shape [T, B].
            actions_logp: A float32 tensor of shape [T, B].
            old_policy_actions_logp: A float32 tensor of shape [T, B].
            action_kl: A float32 tensor of shape [T, B].
            actions_entropy: A float32 tensor of shape [T, B].
            dones: A bool tensor of shape [T, B].
            behaviour_logits: A float32 tensor of shape [T, B, logit_dim].
            old_policy_behaviour_logits: A float32 tensor of shape
                [T, B, logit_dim].
            target_logits: A float32 tensor of shape [T, B, logit_dim].
            discount: A float32 scalar.
            rewards: A float32 tensor of shape [T, B].
            values: A float32 tensor of shape [T, B].
            bootstrap_value: A float32 tensor of shape [B].
            dist_class: action distribution class for logits.
            model: backing ModelV2 instance
            valid_mask: A bool tensor of valid RNN input elements (#2992).
            vf_loss_coeff (float): Coefficient of the value function loss.
            entropy_coeff (float): Coefficient of the entropy regularizer.
            clip_param (float): Clip parameter.
            cur_kl_coeff (float): Coefficient for KL loss.
            use_kl_loss (bool): If true, use KL loss.
        """

        def reduce_mean_valid(t):
            # Mean over real timesteps only (masks out RNN padding).
            return tf.reduce_mean(tf.boolean_mask(t, valid_mask))

        # Compute vtrace on the CPU for better perf.
        with tf.device("/cpu:0"):
            self.vtrace_returns = vtrace.multi_from_logits(
                behaviour_policy_logits=behaviour_logits,
                target_policy_logits=old_policy_behaviour_logits,
                actions=tf.unstack(actions, axis=2),
                discounts=tf.to_float(~dones) * discount,
                rewards=rewards,
                values=values,
                bootstrap_value=bootstrap_value,
                dist_class=dist_class,
                model=model,
                clip_rho_threshold=tf.cast(clip_rho_threshold, tf.float32),
                clip_pg_rho_threshold=tf.cast(clip_pg_rho_threshold,
                                              tf.float32))

        # Importance-sampling ratio between the rollout ("prev") policy and
        # the old (target-network) policy, clipped to [0, 2] for stability.
        self.is_ratio = tf.clip_by_value(
            tf.exp(prev_actions_logp - old_policy_actions_logp), 0.0, 2.0)
        logp_ratio = self.is_ratio * tf.exp(actions_logp - prev_actions_logp)

        advantages = self.vtrace_returns.pg_advantages
        surrogate_loss = tf.minimum(
            advantages * logp_ratio,
            advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
                                          1 + clip_param))

        self.mean_kl = reduce_mean_valid(action_kl)
        self.pi_loss = -reduce_mean_valid(surrogate_loss)

        # The baseline loss (against the V-trace corrected value targets)
        delta = values - self.vtrace_returns.vs
        self.value_targets = self.vtrace_returns.vs
        self.vf_loss = 0.5 * reduce_mean_valid(tf.square(delta))

        # The entropy loss
        self.entropy = reduce_mean_valid(actions_entropy)

        # The summed weighted loss
        self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff -
                           self.entropy * entropy_coeff)

        # Optional additional KL Loss
        if use_kl_loss:
            self.total_loss += cur_kl_coeff * self.mean_kl
def build_appo_model(policy, obs_space, action_space, config):
    """Create the policy model and a second copy used as the target network."""
    _, logit_dim = ModelCatalog.get_action_dist(action_space, config["model"])

    policy.model = ModelCatalog.get_model_v2(
        obs_space,
        action_space,
        logit_dim,
        config["model"],
        name=POLICY_SCOPE,
        framework="tf")

    # Same architecture, separate variables, under the target scope.
    policy.target_model = ModelCatalog.get_model_v2(
        obs_space,
        action_space,
        logit_dim,
        config["model"],
        name=TARGET_POLICY_SCOPE,
        framework="tf")

    return policy.model
def build_appo_surrogate_loss(policy, model, dist_class, train_batch):
    """Build the APPO loss: V-trace surrogate or plain PPO surrogate."""
    model_out, _ = model.from_batch(train_batch)
    action_dist = dist_class(model_out, model)

    # Determine the per-component action layout (MultiDiscrete splits the
    # logits into one block per sub-action).
    if isinstance(policy.action_space, gym.spaces.Discrete):
        is_multidiscrete = False
        output_hidden_shape = [policy.action_space.n]
    elif isinstance(policy.action_space,
                    gym.spaces.multi_discrete.MultiDiscrete):
        is_multidiscrete = True
        output_hidden_shape = policy.action_space.nvec.astype(np.int32)
    else:
        is_multidiscrete = False
        output_hidden_shape = 1

    def make_time_major(*args, **kw):
        # Reshape sample-major [B*T, ...] tensors into time-major [T, B, ...].
        return _make_time_major(policy, train_batch.get("seq_lens"), *args,
                                **kw)

    actions = train_batch[SampleBatch.ACTIONS]
    dones = train_batch[SampleBatch.DONES]
    rewards = train_batch[SampleBatch.REWARDS]
    behaviour_logits = train_batch[BEHAVIOUR_LOGITS]

    # The frozen target network's logits define the "old policy".
    target_model_out, _ = policy.target_model.from_batch(train_batch)
    old_policy_behaviour_logits = tf.stop_gradient(target_model_out)

    unpacked_behaviour_logits = tf.split(
        behaviour_logits, output_hidden_shape, axis=1)
    unpacked_old_policy_behaviour_logits = tf.split(
        old_policy_behaviour_logits, output_hidden_shape, axis=1)
    unpacked_outputs = tf.split(model_out, output_hidden_shape, axis=1)
    old_policy_action_dist = dist_class(old_policy_behaviour_logits, model)
    prev_action_dist = dist_class(behaviour_logits, policy.model)
    values = policy.model.value_function()

    # Cached for the target-network update op (TargetNetworkMixin).
    policy.model_vars = policy.model.variables()
    policy.target_model_vars = policy.target_model.variables()

    if policy.is_recurrent():
        # Mask out RNN padding beyond each sequence's true length.
        max_seq_len = tf.reduce_max(train_batch["seq_lens"]) - 1
        mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len)
        mask = tf.reshape(mask, [-1])
    else:
        mask = tf.ones_like(rewards)

    if policy.config["vtrace"]:
        logger.debug("Using V-Trace surrogate loss (vtrace=True)")

        # Prepare actions for loss
        loss_actions = actions if is_multidiscrete else tf.expand_dims(
            actions, axis=1)

        # Prepare KL for Loss
        mean_kl = make_time_major(
            old_policy_action_dist.multi_kl(action_dist), drop_last=True)

        policy.loss = VTraceSurrogateLoss(
            actions=make_time_major(loss_actions, drop_last=True),
            prev_actions_logp=make_time_major(
                prev_action_dist.logp(actions), drop_last=True),
            actions_logp=make_time_major(
                action_dist.logp(actions), drop_last=True),
            old_policy_actions_logp=make_time_major(
                old_policy_action_dist.logp(actions), drop_last=True),
            action_kl=tf.reduce_mean(mean_kl, axis=0)
            if is_multidiscrete else mean_kl,
            actions_entropy=make_time_major(
                action_dist.multi_entropy(), drop_last=True),
            dones=make_time_major(dones, drop_last=True),
            behaviour_logits=make_time_major(
                unpacked_behaviour_logits, drop_last=True),
            old_policy_behaviour_logits=make_time_major(
                unpacked_old_policy_behaviour_logits, drop_last=True),
            target_logits=make_time_major(unpacked_outputs, drop_last=True),
            discount=policy.config["gamma"],
            rewards=make_time_major(rewards, drop_last=True),
            values=make_time_major(values, drop_last=True),
            bootstrap_value=make_time_major(values)[-1],
            dist_class=Categorical if is_multidiscrete else dist_class,
            model=policy.model,
            valid_mask=make_time_major(mask, drop_last=True),
            vf_loss_coeff=policy.config["vf_loss_coeff"],
            entropy_coeff=policy.config["entropy_coeff"],
            clip_rho_threshold=policy.config["vtrace_clip_rho_threshold"],
            clip_pg_rho_threshold=policy.config[
                "vtrace_clip_pg_rho_threshold"],
            clip_param=policy.config["clip_param"],
            cur_kl_coeff=policy.kl_coeff,
            use_kl_loss=policy.config["use_kl_loss"])
    else:
        logger.debug("Using PPO surrogate loss (vtrace=False)")

        # Prepare KL for Loss
        mean_kl = make_time_major(prev_action_dist.multi_kl(action_dist))

        policy.loss = PPOSurrogateLoss(
            prev_actions_logp=make_time_major(prev_action_dist.logp(actions)),
            actions_logp=make_time_major(action_dist.logp(actions)),
            action_kl=tf.reduce_mean(mean_kl, axis=0)
            if is_multidiscrete else mean_kl,
            actions_entropy=make_time_major(action_dist.multi_entropy()),
            values=make_time_major(values),
            valid_mask=make_time_major(mask),
            advantages=make_time_major(train_batch[Postprocessing.ADVANTAGES]),
            value_targets=make_time_major(
                train_batch[Postprocessing.VALUE_TARGETS]),
            vf_loss_coeff=policy.config["vf_loss_coeff"],
            entropy_coeff=policy.config["entropy_coeff"],
            clip_param=policy.config["clip_param"],
            cur_kl_coeff=policy.kl_coeff,
            use_kl_loss=policy.config["use_kl_loss"])

    return policy.loss.total_loss
def stats(policy, train_batch):
    """Learner stats reported for APPO."""
    values_batched = _make_time_major(
        policy,
        train_batch.get("seq_lens"),
        policy.model.value_function(),
        drop_last=policy.config["vtrace"])

    stats_dict = {
        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
        "policy_loss": policy.loss.pi_loss,
        "entropy": policy.loss.entropy,
        "var_gnorm": tf.global_norm(policy.model.trainable_variables()),
        "vf_loss": policy.loss.vf_loss,
        "vf_explained_var": explained_variance(
            tf.reshape(policy.loss.value_targets, [-1]),
            tf.reshape(values_batched, [-1])),
    }

    if policy.config["vtrace"]:
        # Importance-sampling ratio diagnostics (only defined under V-trace).
        is_stat_mean, is_stat_var = tf.nn.moments(policy.loss.is_ratio, [0, 1])
        stats_dict.update({"mean_IS": is_stat_mean})
        stats_dict.update({"var_IS": is_stat_var})

    if policy.config["use_kl_loss"]:
        stats_dict.update({"kl": policy.loss.mean_kl})
        stats_dict.update({"KL_Coeff": policy.kl_coeff})

    return stats_dict
def postprocess_trajectory(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Compute GAE advantages when V-trace is off; always drop unused new_obs."""
    if policy.config["vtrace"]:
        batch = sample_batch
    else:
        if sample_batch["dones"][-1]:
            # Episode terminated: no bootstrap value needed.
            last_r = 0.0
        else:
            # Truncated rollout: bootstrap from the value estimate at the
            # cut point, feeding any RNN state from the end of the batch.
            state = [
                [sample_batch["state_out_{}".format(i)][-1]]
                for i in range(policy.num_state_tensors())
            ]
            last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
                                   sample_batch[SampleBatch.ACTIONS][-1],
                                   sample_batch[SampleBatch.REWARDS][-1],
                                   *state)
        batch = compute_advantages(
            sample_batch,
            last_r,
            policy.config["gamma"],
            policy.config["lambda"],
            use_gae=policy.config["use_gae"])
    del batch.data["new_obs"]  # not used, so save some bandwidth
    return batch
def add_values_and_logits(policy):
    """Extra action fetches: behaviour logits, plus V preds when not V-trace."""
    fetches = {BEHAVIOUR_LOGITS: policy.model.last_output()}
    if not policy.config["vtrace"]:
        fetches[SampleBatch.VF_PREDS] = policy.model.value_function()
    return fetches
class TargetNetworkMixin:
    def __init__(self, obs_space, action_space, config):
        """Target Network is updated by the master learner every
        trainer.update_target_frequency steps. All worker batches
        are importance sampled w.r. to the target network to ensure
        a more stable pi_old in PPO.
        """

        @make_tf_callable(self.get_session())
        def do_update():
            # Copy each live model variable into its target counterpart;
            # relies on model_vars/target_model_vars set during loss build.
            assign_ops = []
            assert len(self.model_vars) == len(self.target_model_vars)
            for var, var_target in zip(self.model_vars,
                                       self.target_model_vars):
                assign_ops.append(var_target.assign(var))
            return tf.group(*assign_ops)

        self.update_target = do_update

    @override(TFPolicy)
    def variables(self):
        # Expose both live and target variables for checkpointing/syncing.
        return self.model_vars + self.target_model_vars
def setup_mixins(policy, obs_space, action_space, config):
    # Initialize the mixins that must exist before the loss is built.
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
    KLCoeffMixin.__init__(policy, config)
    ValueNetworkMixin.__init__(policy, obs_space, action_space, config)
def setup_late_mixins(policy, obs_space, action_space, config):
    # Runs after init: the target-update op needs the models to exist first.
    TargetNetworkMixin.__init__(policy, obs_space, action_space, config)
# APPO policy: IMPALA-style async training with a PPO surrogate loss.
AsyncPPOTFPolicy = build_tf_policy(
    name="AsyncPPOTFPolicy",
    make_model=build_appo_model,
    loss_fn=build_appo_surrogate_loss,
    stats_fn=stats,
    postprocess_fn=postprocess_trajectory,
    optimizer_fn=choose_optimizer,
    gradients_fn=clip_gradients,
    extra_action_fetches_fn=add_values_and_logits,
    before_init=validate_config,
    before_loss_init=setup_mixins,
    after_init=setup_late_mixins,
    mixins=[
        LearningRateSchedule, KLCoeffMixin, TargetNetworkMixin,
        ValueNetworkMixin
    ],
    get_batch_divisibility_req=lambda p: p.config["sample_batch_size"])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
rllib/agents/ppo/ppo.py
|
Python
|
import logging
from ray.rllib.agents import with_common_config
from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer
from ray.rllib.utils import try_import_tf
tf = try_import_tf()
logger = logging.getLogger(__name__)
# Default PPO hyperparameters, layered over RLlib's common trainer config.
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
    # If true, use the Generalized Advantage Estimator (GAE)
    # with a value function, see https://arxiv.org/pdf/1506.02438.pdf.
    "use_gae": True,
    # The GAE(lambda) parameter.
    "lambda": 1.0,
    # Initial coefficient for KL divergence.
    "kl_coeff": 0.2,
    # Size of batches collected from each worker.
    "sample_batch_size": 200,
    # Number of timesteps collected for each SGD round. This defines the size
    # of each SGD epoch.
    "train_batch_size": 4000,
    # Total SGD batch size across all devices for SGD. This defines the
    # minibatch size within each epoch.
    "sgd_minibatch_size": 128,
    # Whether to shuffle sequences in the batch when training (recommended).
    "shuffle_sequences": True,
    # Number of SGD iterations in each outer loop (i.e., number of epochs to
    # execute per train batch).
    "num_sgd_iter": 30,
    # Stepsize of SGD.
    "lr": 5e-5,
    # Learning rate schedule.
    "lr_schedule": None,
    # Share layers for value function. If you set this to True, it's important
    # to tune vf_loss_coeff.
    "vf_share_layers": False,
    # Coefficient of the value function loss. IMPORTANT: you must tune this if
    # you set vf_share_layers: True.
    "vf_loss_coeff": 1.0,
    # Coefficient of the entropy regularizer.
    "entropy_coeff": 0.0,
    # Decay schedule for the entropy regularizer.
    "entropy_coeff_schedule": None,
    # PPO clip parameter.
    "clip_param": 0.3,
    # Clip param for the value function. Note that this is sensitive to the
    # scale of the rewards. If your expected V is large, increase this.
    "vf_clip_param": 10.0,
    # If specified, clip the global norm of gradients by this amount.
    "grad_clip": None,
    # Target value for KL divergence.
    "kl_target": 0.01,
    # Whether to rollout "complete_episodes" or "truncate_episodes".
    "batch_mode": "truncate_episodes",
    # Which observation filter to apply to the observation.
    "observation_filter": "NoFilter",
    # Uses the sync samples optimizer instead of the multi-gpu one. This is
    # usually slower, but you might want to try it if you run into issues with
    # the default optimizer.
    "simple_optimizer": False,
})
# __sphinx_doc_end__
# yapf: enable
def choose_policy_optimizer(workers, config):
    """Select the policy optimizer for PPO.

    Returns the pipelined multi-GPU optimizer by default; falls back to
    the simple sync-samples optimizer when ``simple_optimizer`` is set.
    """
    if not config["simple_optimizer"]:
        # Default path: multi-GPU minibatch SGD with pre-loaded batches.
        return LocalMultiGPUOptimizer(
            workers,
            sgd_batch_size=config["sgd_minibatch_size"],
            num_sgd_iter=config["num_sgd_iter"],
            num_gpus=config["num_gpus"],
            sample_batch_size=config["sample_batch_size"],
            num_envs_per_worker=config["num_envs_per_worker"],
            train_batch_size=config["train_batch_size"],
            standardize_fields=["advantages"],
            shuffle_sequences=config["shuffle_sequences"])
    # Fallback: single-device sync optimizer (slower, but simpler).
    return SyncSamplesOptimizer(
        workers,
        num_sgd_iter=config["num_sgd_iter"],
        train_batch_size=config["train_batch_size"],
        sgd_minibatch_size=config["sgd_minibatch_size"],
        standardize_fields=["advantages"])
def update_kl(trainer, fetches):
    """Feed the measured KL divergence back into each policy.

    Single-agent fetch dicts carry a top-level "kl" key; multi-agent
    fetch dicts are keyed by policy id instead.
    """
    local_worker = trainer.workers.local_worker()

    if "kl" in fetches:
        # Single-agent: one shared KL value for the default policy.
        local_worker.for_policy(lambda pi: pi.update_kl(fetches["kl"]))
        return

    # Multi-agent: update only the policies that reported stats.
    def _apply(pi, pi_id):
        if pi_id not in fetches:
            logger.debug("No data for {}, not updating kl".format(pi_id))
        else:
            pi.update_kl(fetches[pi_id]["kl"])

    local_worker.foreach_trainable_policy(_apply)
def warn_about_bad_reward_scales(trainer, result):
    """Log warnings when the VF loss or reward scale looks pathological."""
    if result["policy_reward_mean"]:
        return  # Punt on handling multiagent case.

    # Warn about excessively high VF loss relative to the policy loss.
    stats = result["info"]["learner"].get("default_policy")
    if stats is not None:
        scaled_vf_loss = trainer.config["vf_loss_coeff"] * stats["vf_loss"]
        policy_loss = stats["policy_loss"]
        if trainer.config["vf_share_layers"] and scaled_vf_loss > 100:
            logger.warning(
                "The magnitude of your value function loss is extremely large "
                "({}) compared to the policy loss ({}). This can prevent the "
                "policy from learning. Consider scaling down the VF loss by "
                "reducing vf_loss_coeff, or disabling vf_share_layers.".format(
                    scaled_vf_loss, policy_loss))

    # Warn about bad clipping configs: a non-positive clip means rewards can
    # never be matched by the clipped value function.
    vf_clip = trainer.config["vf_clip_param"]
    if vf_clip <= 0:
        rew_scale = float("inf")
    else:
        rew_scale = round(abs(result["episode_reward_mean"]) / vf_clip, 0)
    if rew_scale > 200:
        logger.warning(
            "The magnitude of your environment rewards are more than "
            "{}x the scale of `vf_clip_param`. ".format(rew_scale) +
            "This means that it will take more than "
            "{} iterations for your value ".format(rew_scale) +
            "function to converge. If this is not intended, consider "
            "increasing `vf_clip_param`."
        )
def validate_config(config):
    """Sanity-check a PPO config, normalizing it in place where needed.

    Raises:
        ValueError: on an unsupported framework, a minibatch larger than
            the train batch, or episode truncation without GAE.
        DeprecationWarning: if ``entropy_coeff`` is negative.
    """
    # PyTorch check.
    if config["use_pytorch"]:
        raise ValueError("PPO does not support PyTorch yet! Use tf instead.")

    # Entropy coefficient must be non-negative; normalize ints to float.
    if config["entropy_coeff"] < 0:
        raise DeprecationWarning("entropy_coeff must be >= 0")
    if isinstance(config["entropy_coeff"], int):
        config["entropy_coeff"] = float(config["entropy_coeff"])

    minibatch = config["sgd_minibatch_size"]
    train_batch = config["train_batch_size"]
    if minibatch > train_batch:
        raise ValueError(
            "Minibatch size {} must be <= train batch size {}.".format(
                minibatch, train_batch))

    if not config["use_gae"] and config["batch_mode"] == "truncate_episodes":
        raise ValueError(
            "Episode truncation is not supported without a value "
            "function. Consider setting batch_mode=complete_episodes.")

    if config["multiagent"]["policies"] and not config["simple_optimizer"]:
        logger.info(
            "In multi-agent mode, policies will be optimized sequentially "
            "by the multi-GPU optimizer. Consider setting "
            "simple_optimizer=True if this doesn't work for you.")

    if config["simple_optimizer"]:
        logger.warning(
            "Using the simple minibatch optimizer. This will significantly "
            "reduce performance, consider simple_optimizer=False.")
    elif tf and tf.executing_eagerly():
        # The multi-GPU optimizer does not support eager execution.
        config["simple_optimizer"] = True
# Assemble the PPO trainer from the policy, optimizer chooser and the
# per-step callbacks defined above: KL coefficient updates run after each
# optimizer step, reward-scale warnings after each train result.
PPOTrainer = build_trainer(
    name="PPO",
    default_config=DEFAULT_CONFIG,
    default_policy=PPOTFPolicy,
    make_policy_optimizer=choose_policy_optimizer,
    validate_config=validate_config,
    after_optimizer_step=update_kl,
    after_train_result=warn_about_bad_reward_scales)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.