after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def find_redis_address(address=None):
pids = psutil.pids()
redis_addresses = set()
for pid in pids:
try:
proc = psutil.Process(pid)
# HACK: Workaround for UNIX idiosyncrasy
# Normally, cmdline() is supposed to return the argument list.
# But it in some cases (such as when setproctitle is called),
# an arbitrary string resembling a command-line is stored in
# the first argument.
# Explanation: https://unix.stackexchange.com/a/432681
# More info: https://github.com/giampaolo/psutil/issues/1179
cmdline = proc.cmdline()
# NOTE(kfstorm): To support Windows, we can't use
# `os.path.basename(cmdline[0]) == "raylet"` here.
if len(cmdline) > 0 and "raylet" in os.path.basename(cmdline[0]):
for arglist in cmdline:
# Given we're merely seeking --redis-address, we just split
# every argument on spaces for now.
for arg in arglist.split(" "):
# TODO(ekl): Find a robust solution for locating Redis.
if arg.startswith("--redis-address="):
proc_addr = arg.split("=")[1]
if address is not None and address != proc_addr:
continue
redis_addresses.add(proc_addr)
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
return redis_addresses
|
def find_redis_address(address=None):
pids = psutil.pids()
redis_addresses = set()
for pid in pids:
try:
proc = psutil.Process(pid)
# HACK: Workaround for UNIX idiosyncrasy
# Normally, cmdline() is supposed to return the argument list.
# But it in some cases (such as when setproctitle is called),
# an arbitrary string resembling a command-line is stored in
# the first argument.
# Explanation: https://unix.stackexchange.com/a/432681
# More info: https://github.com/giampaolo/psutil/issues/1179
for arglist in proc.cmdline():
# Given we're merely seeking --redis-address, we just split
# every argument on spaces for now.
for arg in arglist.split(" "):
# TODO(ekl): Find a robust solution for locating Redis.
if arg.startswith("--redis-address="):
proc_addr = arg.split("=")[1]
if address is not None and address != proc_addr:
continue
redis_addresses.add(proc_addr)
except psutil.AccessDenied:
pass
except psutil.NoSuchProcess:
pass
return redis_addresses
|
https://github.com/ray-project/ray/issues/11436
|
Traceback (most recent call last):
File "/home/swang/anaconda3/envs/ray-36/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1462, in main
return cli()
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/ray/scripts/scripts.py", line 479, in start
f"Ray is already running at {default_address}. "
ConnectionError: Ray is already running at 192.168.1.46:6379. Please specify a different port using the `--port` command to `ray start`.
|
ConnectionError
|
def _run_helper(self, final_cmd, with_output=False, exit_on_fail=False, silent=False):
"""Run a command that was already setup with SSH and `bash` settings.
Args:
cmd (List[str]):
Full command to run. Should include SSH options and other
processing that we do.
with_output (bool):
If `with_output` is `True`, command stdout and stderr
will be captured and returned.
exit_on_fail (bool):
If `exit_on_fail` is `True`, the process will exit
if the command fails (exits with a code other than 0).
Raises:
ProcessRunnerError if using new log style and disabled
login shells.
click.ClickException if using login shells.
"""
try:
# For now, if the output is needed we just skip the new logic.
# In the future we could update the new logic to support
# capturing output, but it is probably not needed.
if not cli_logger.old_style and not with_output:
return run_cmd_redirected(
final_cmd,
process_runner=self.process_runner,
silent=silent,
use_login_shells=is_using_login_shells(),
)
if with_output:
return self.process_runner.check_output(final_cmd)
else:
return self.process_runner.check_call(final_cmd)
except subprocess.CalledProcessError as e:
joined_cmd = " ".join(final_cmd)
if not cli_logger.old_style and not is_using_login_shells():
raise ProcessRunnerError(
"Command failed",
"ssh_command_failed",
code=e.returncode,
command=joined_cmd,
)
if exit_on_fail:
raise click.ClickException(
"Command failed:\n\n {}\n".format(joined_cmd)
) from None
else:
fail_msg = "SSH command failed."
if is_output_redirected():
fail_msg += " See above for the output from the failure."
raise click.ClickException(fail_msg) from None
|
def _run_helper(self, final_cmd, with_output=False, exit_on_fail=False, silent=False):
"""Run a command that was already setup with SSH and `bash` settings.
Args:
cmd (List[str]):
Full command to run. Should include SSH options and other
processing that we do.
with_output (bool):
If `with_output` is `True`, command stdout and stderr
will be captured and returned.
exit_on_fail (bool):
If `exit_on_fail` is `True`, the process will exit
if the command fails (exits with a code other than 0).
Raises:
ProcessRunnerError if using new log style and disabled
login shells.
click.ClickException if using login shells.
"""
try:
# For now, if the output is needed we just skip the new logic.
# In the future we could update the new logic to support
# capturing output, but it is probably not needed.
if not cli_logger.old_style and not with_output:
return run_cmd_redirected(
final_cmd,
process_runner=self.process_runner,
silent=silent,
use_login_shells=is_using_login_shells(),
)
if with_output:
return self.process_runner.check_output(final_cmd)
else:
return self.process_runner.check_call(final_cmd)
except subprocess.CalledProcessError as e:
quoted_cmd = " ".join(final_cmd[:-1] + [quote(final_cmd[-1])])
if not cli_logger.old_style and not is_using_login_shells():
raise ProcessRunnerError(
"Command failed",
"ssh_command_failed",
code=e.returncode,
command=quoted_cmd,
)
if exit_on_fail:
raise click.ClickException(
"Command failed:\n\n {}\n".format(quoted_cmd)
) from None
else:
fail_msg = "SSH command failed."
if is_output_redirected():
fail_msg += " See above for the output from the failure."
raise click.ClickException(fail_msg) from None
|
https://github.com/ray-project/ray/issues/11652
|
Traceback (most recent call last):
File "XXX/lib/python3.7/site-packages/ray/autoscaler/command_runner.py", line 248, in run
self.process_runner.check_call(final_cmd, shell=True)
File "/Users/mkoh/.pyenv/versions/3.7.7/lib/python3.7/subprocess.py", line 363, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'kubectl -n nlp exec -it ray-head-22r7w -- bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (tmux kill-session -t flambe)'' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/mkoh/projects/flambe-internal/flambe/runner/run.py", line 86, in main
save_to_db=args.save,
File "/Users/mkoh/projects/flambe-internal/flambe/workflow/workflow.py", line 124, in run_remote_experiment
save_to_db=save_to_db,
File "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 263, in run
self.kill("flambe")
File "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 163, in kill
self.exec_cluster(cmd=cmd)
File "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 60, in exec_cluster
with_output=with_output,
File "XXX/lib/python3.7/site-packages/ray/autoscaler/commands.py", line 868, in exec_cluster
shutdown_after_run=shutdown_after_run)
File "XXX/lib/python3.7/site-packages/ray/autoscaler/commands.py", line 919, in _exec
shutdown_after_run=shutdown_after_run)
File "XXX/lib/python3.7/site-packages/ray/autoscaler/command_runner.py", line 252, in run
[quote(final_cmd[-1])])
TypeError: can only concatenate str (not "list") to str
|
subprocess.CalledProcessError
|
def __init__(
self,
space: Optional[Union[Dict, List[Dict]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
parameter_constraints: Optional[List] = None,
outcome_constraints: Optional[List] = None,
ax_client: Optional[AxClient] = None,
use_early_stopped_trials: Optional[bool] = None,
max_concurrent: Optional[int] = None,
):
assert ax is not None, "Ax must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(AxSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
self._ax = ax_client
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space)
self._space = space
self._parameter_constraints = parameter_constraints
self._outcome_constraints = outcome_constraints
self.max_concurrent = max_concurrent
self._objective_name = metric
self._parameters = []
self._live_trial_mapping = {}
if self._ax or self._space:
self.setup_experiment()
|
def __init__(
self,
space: Optional[List[Dict]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
parameter_constraints: Optional[List] = None,
outcome_constraints: Optional[List] = None,
ax_client: Optional[AxClient] = None,
use_early_stopped_trials: Optional[bool] = None,
max_concurrent: Optional[int] = None,
):
assert ax is not None, "Ax must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(AxSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
self._ax = ax_client
self._space = space
self._parameter_constraints = parameter_constraints
self._outcome_constraints = outcome_constraints
self.max_concurrent = max_concurrent
self._objective_name = metric
self._parameters = []
self._live_trial_mapping = {}
if self._ax or self._space:
self.setup_experiment()
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
utility_kwargs: Optional[Dict] = None,
random_state: int = 42,
random_search_steps: int = 10,
verbose: int = 0,
patience: int = 5,
skip_duplicate: bool = True,
analysis: Optional[ExperimentAnalysis] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
"""Instantiate new BayesOptSearch object.
Args:
space (dict): Continuous search space.
Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
Must provide values for the keys `kind`, `kappa`, and `xi`.
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
patience (int): Must be > 0. If the optimizer suggests a set of
hyperparameters more than 'patience' times,
then the whole experiment will stop.
skip_duplicate (bool): If true, BayesOptSearch will not create
a trial with a previously seen set of hyperparameters. By
default, floating values will be reduced to a digit precision
of 5. You can override this by setting
``searcher.repeat_float_precision``.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
"""
assert byo is not None, (
"BayesOpt must be installed!. You can install BayesOpt with"
" the command: `pip install bayesian-optimization`."
)
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
self._config_counter = defaultdict(int)
self._patience = patience
# int: Precision at which to hash values.
self.repeat_float_precision = 5
if self._patience <= 0:
raise ValueError("patience must be set to a value greater than 0!")
self._skip_duplicate = skip_duplicate
super(BayesOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
if utility_kwargs is None:
# The defaults arguments are the same
# as in the package BayesianOptimization
utility_kwargs = dict(
kind="ucb",
kappa=2.576,
xi=0.0,
)
if mode == "max":
self._metric_op = 1.0
elif mode == "min":
self._metric_op = -1.0
self._live_trial_mapping = {}
self._buffered_trial_results = []
self.random_search_trials = random_search_steps
self._total_random_search_trials = 0
self.utility = byo.UtilityFunction(**utility_kwargs)
# Registering the provided analysis, if given
if analysis is not None:
self.register_analysis(analysis)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space, join=True)
self._space = space
self._verbose = verbose
self._random_state = random_state
self.optimizer = None
if space:
self.setup_optimizer()
|
def __init__(
self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
utility_kwargs: Optional[Dict] = None,
random_state: int = 42,
random_search_steps: int = 10,
verbose: int = 0,
patience: int = 5,
skip_duplicate: bool = True,
analysis: Optional[ExperimentAnalysis] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
"""Instantiate new BayesOptSearch object.
Args:
space (dict): Continuous search space.
Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
Must provide values for the keys `kind`, `kappa`, and `xi`.
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
patience (int): Must be > 0. If the optimizer suggests a set of
hyperparameters more than 'patience' times,
then the whole experiment will stop.
skip_duplicate (bool): If true, BayesOptSearch will not create
a trial with a previously seen set of hyperparameters. By
default, floating values will be reduced to a digit precision
of 5. You can override this by setting
``searcher.repeat_float_precision``.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
"""
assert byo is not None, (
"BayesOpt must be installed!. You can install BayesOpt with"
" the command: `pip install bayesian-optimization`."
)
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
self._config_counter = defaultdict(int)
self._patience = patience
# int: Precision at which to hash values.
self.repeat_float_precision = 5
if self._patience <= 0:
raise ValueError("patience must be set to a value greater than 0!")
self._skip_duplicate = skip_duplicate
super(BayesOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
if utility_kwargs is None:
# The defaults arguments are the same
# as in the package BayesianOptimization
utility_kwargs = dict(
kind="ucb",
kappa=2.576,
xi=0.0,
)
if mode == "max":
self._metric_op = 1.0
elif mode == "min":
self._metric_op = -1.0
self._live_trial_mapping = {}
self._buffered_trial_results = []
self.random_search_trials = random_search_steps
self._total_random_search_trials = 0
self.utility = byo.UtilityFunction(**utility_kwargs)
# Registering the provided analysis, if given
if analysis is not None:
self.register_analysis(analysis)
self._space = space
self._verbose = verbose
self._random_state = random_state
self.optimizer = None
if space:
self.setup_optimizer()
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a BayesOpt search space."
)
def resolve_value(domain: Domain) -> Tuple[float, float]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning(
"BayesOpt search does not support quantization. Dropped quantization."
)
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if domain.sampler is not None:
logger.warning(
"BayesOpt does not support specific sampling methods. "
"The {} sampler will be dropped.".format(sampler)
)
return (domain.lower, domain.upper)
raise ValueError(
"BayesOpt does not support parameters of type `{}`".format(
type(domain).__name__
)
)
# Parameter name is e.g. "a/b/c" for nested dicts
bounds = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
if join:
spec.update(bounds)
bounds = spec
return bounds
|
def convert_search_space(spec: Dict) -> Dict:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a BayesOpt search space."
)
def resolve_value(domain: Domain) -> Tuple[float, float]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning(
"BayesOpt search does not support quantization. Dropped quantization."
)
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if domain.sampler is not None:
logger.warning(
"BayesOpt does not support specific sampling methods. "
"The {} sampler will be dropped.".format(sampler)
)
return (domain.lower, domain.upper)
raise ValueError(
"BayesOpt does not support parameters of type `{}`".format(
type(domain).__name__
)
)
# Parameter name is e.g. "a/b/c" for nested dicts
bounds = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
return bounds
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
space: Optional[Union[Dict, ConfigSpace.ConfigurationSpace]] = None,
bohb_config: Optional[Dict] = None,
max_concurrent: int = 10,
metric: Optional[str] = None,
mode: Optional[str] = None,
):
from hpbandster.optimizers.config_generators.bohb import BOHB
assert BOHB is not None, "HpBandSter must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self._max_concurrent = max_concurrent
self.trial_to_params = {}
self.running = set()
self.paused = set()
self._metric = metric
self._bohb_config = bohb_config
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space)
self._space = space
super(TuneBOHB, self).__init__(metric=self._metric, mode=mode)
if self._space:
self.setup_bohb()
|
def __init__(
self,
space: Optional[ConfigSpace.ConfigurationSpace] = None,
bohb_config: Optional[Dict] = None,
max_concurrent: int = 10,
metric: Optional[str] = None,
mode: Optional[str] = None,
):
from hpbandster.optimizers.config_generators.bohb import BOHB
assert BOHB is not None, "HpBandSter must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self._max_concurrent = max_concurrent
self.trial_to_params = {}
self.running = set()
self.paused = set()
self._metric = metric
self._bohb_config = bohb_config
self._space = space
super(TuneBOHB, self).__init__(metric=self._metric, mode=mode)
if self._space:
self.setup_bohb()
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
optimizer: Optional[BlackboxOptimiser] = None,
domain: Optional[str] = None,
space: Optional[Union[Dict, List[Dict]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[List]] = None,
evaluated_rewards: Optional[List] = None,
**kwargs,
):
assert dragonfly is not None, """dragonfly must be installed!
You can install Dragonfly with the command:
`pip install dragonfly-opt`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(DragonflySearch, self).__init__(metric=metric, mode=mode, **kwargs)
self._opt_arg = optimizer
self._domain = domain
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space)
self._space = space
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._initial_points = []
self._live_trial_mapping = {}
self._opt = None
if isinstance(optimizer, BlackboxOptimiser):
if domain or space:
raise ValueError(
"If you pass an optimizer instance to dragonfly, do not "
"pass a `domain` or `space`."
)
self._opt = optimizer
self.init_dragonfly()
elif self._space:
self.setup_dragonfly()
|
def __init__(
self,
optimizer: Optional[BlackboxOptimiser] = None,
domain: Optional[str] = None,
space: Optional[List[Dict]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[List]] = None,
evaluated_rewards: Optional[List] = None,
**kwargs,
):
assert dragonfly is not None, """dragonfly must be installed!
You can install Dragonfly with the command:
`pip install dragonfly-opt`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(DragonflySearch, self).__init__(metric=metric, mode=mode, **kwargs)
self._opt_arg = optimizer
self._domain = domain
self._space = space
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._initial_points = []
self._live_trial_mapping = {}
self._opt = None
if isinstance(optimizer, BlackboxOptimiser):
if domain or space:
raise ValueError(
"If you pass an optimizer instance to dragonfly, do not "
"pass a `domain` or `space`."
)
self._opt = optimizer
self.init_dragonfly()
elif self._space:
self.setup_dragonfly()
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
n_initial_points: int = 20,
random_state_seed: Optional[int] = None,
gamma: float = 0.25,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
assert hpo is not None, "HyperOpt must be installed! Run `pip install hyperopt`."
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
from hyperopt.fmin import generate_trials_to_calculate
super(HyperOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
self.max_concurrent = max_concurrent
# hyperopt internally minimizes, so "max" => -1
if mode == "max":
self.metric_op = -1.0
elif mode == "min":
self.metric_op = 1.0
if n_initial_points is None:
self.algo = hpo.tpe.suggest
else:
self.algo = partial(hpo.tpe.suggest, n_startup_jobs=n_initial_points)
if gamma is not None:
self.algo = partial(self.algo, gamma=gamma)
if points_to_evaluate is None:
self._hpopt_trials = hpo.Trials()
self._points_to_evaluate = 0
else:
assert isinstance(points_to_evaluate, (list, tuple))
self._hpopt_trials = generate_trials_to_calculate(points_to_evaluate)
self._hpopt_trials.refresh()
self._points_to_evaluate = len(points_to_evaluate)
self._live_trial_mapping = {}
if random_state_seed is None:
self.rstate = np.random.RandomState()
else:
self.rstate = np.random.RandomState(random_state_seed)
self.domain = None
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space)
self.domain = hpo.Domain(lambda spc: spc, space)
|
def __init__(
self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
n_initial_points: int = 20,
random_state_seed: Optional[int] = None,
gamma: float = 0.25,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
assert hpo is not None, "HyperOpt must be installed! Run `pip install hyperopt`."
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
from hyperopt.fmin import generate_trials_to_calculate
super(HyperOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
self.max_concurrent = max_concurrent
# hyperopt internally minimizes, so "max" => -1
if mode == "max":
self.metric_op = -1.0
elif mode == "min":
self.metric_op = 1.0
if n_initial_points is None:
self.algo = hpo.tpe.suggest
else:
self.algo = partial(hpo.tpe.suggest, n_startup_jobs=n_initial_points)
if gamma is not None:
self.algo = partial(self.algo, gamma=gamma)
if points_to_evaluate is None:
self._hpopt_trials = hpo.Trials()
self._points_to_evaluate = 0
else:
assert isinstance(points_to_evaluate, (list, tuple))
self._hpopt_trials = generate_trials_to_calculate(points_to_evaluate)
self._hpopt_trials.refresh()
self._points_to_evaluate = len(points_to_evaluate)
self._live_trial_mapping = {}
if random_state_seed is None:
self.rstate = np.random.RandomState()
else:
self.rstate = np.random.RandomState(random_state_seed)
self.domain = None
if space:
self.domain = hpo.Domain(lambda spc: spc, space)
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
optimizer: Union[None, Optimizer, ConfiguredOptimizer] = None,
space: Optional[Union[Dict, Parameter]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
max_concurrent: Optional[int] = None,
**kwargs,
):
assert ng is not None, "Nevergrad must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(NevergradSearch, self).__init__(
metric=metric, mode=mode, max_concurrent=max_concurrent, **kwargs
)
self._space = None
self._opt_factory = None
self._nevergrad_opt = None
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space)
if isinstance(optimizer, Optimizer):
if space is not None or isinstance(space, list):
raise ValueError(
"If you pass a configured optimizer to Nevergrad, either "
"pass a list of parameter names or None as the `space` "
"parameter."
)
self._parameters = space
self._nevergrad_opt = optimizer
elif isinstance(optimizer, ConfiguredOptimizer):
self._opt_factory = optimizer
self._parameters = None
self._space = space
else:
raise ValueError(
"The `optimizer` argument passed to NevergradSearch must be "
"either an `Optimizer` or a `ConfiguredOptimizer`."
)
self._live_trial_mapping = {}
self.max_concurrent = max_concurrent
if self._nevergrad_opt or self._space:
self.setup_nevergrad()
|
def __init__(
self,
optimizer: Union[None, Optimizer, ConfiguredOptimizer] = None,
space: Optional[Parameter] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
max_concurrent: Optional[int] = None,
**kwargs,
):
assert ng is not None, "Nevergrad must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(NevergradSearch, self).__init__(
metric=metric, mode=mode, max_concurrent=max_concurrent, **kwargs
)
self._space = None
self._opt_factory = None
self._nevergrad_opt = None
if isinstance(optimizer, Optimizer):
if space is not None or isinstance(space, list):
raise ValueError(
"If you pass a configured optimizer to Nevergrad, either "
"pass a list of parameter names or None as the `space` "
"parameter."
)
self._parameters = space
self._nevergrad_opt = optimizer
elif isinstance(optimizer, ConfiguredOptimizer):
self._opt_factory = optimizer
self._parameters = None
self._space = space
else:
raise ValueError(
"The `optimizer` argument passed to NevergradSearch must be "
"either an `Optimizer` or a `ConfiguredOptimizer`."
)
self._live_trial_mapping = {}
self.max_concurrent = max_concurrent
if self._nevergrad_opt or self._space:
self.setup_nevergrad()
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
space: Optional[Union[Dict, List[Tuple]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
sampler: Optional[BaseSampler] = None,
):
assert ot is not None, "Optuna must be installed! Run `pip install optuna`."
super(OptunaSearch, self).__init__(
metric=metric, mode=mode, max_concurrent=None, use_early_stopped_trials=None
)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space)
self._space = space
self._study_name = "optuna" # Fixed study name for in-memory storage
self._sampler = sampler or ot.samplers.TPESampler()
assert isinstance(self._sampler, BaseSampler), (
"You can only pass an instance of `optuna.samplers.BaseSampler` "
"as a sampler to `OptunaSearcher`."
)
self._pruner = ot.pruners.NopPruner()
self._storage = ot.storages.InMemoryStorage()
self._ot_trials = {}
self._ot_study = None
if self._space:
self.setup_study(mode)
|
def __init__(
self,
space: Optional[List[Tuple]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
sampler: Optional[BaseSampler] = None,
):
assert ot is not None, "Optuna must be installed! Run `pip install optuna`."
super(OptunaSearch, self).__init__(
metric=metric, mode=mode, max_concurrent=None, use_early_stopped_trials=None
)
self._space = space
self._study_name = "optuna" # Fixed study name for in-memory storage
self._sampler = sampler or ot.samplers.TPESampler()
assert isinstance(self._sampler, BaseSampler), (
"You can only pass an instance of `optuna.samplers.BaseSampler` "
"as a sampler to `OptunaSearcher`."
)
self._pruner = ot.pruners.NopPruner()
self._storage = ot.storages.InMemoryStorage()
self._ot_trials = {}
self._ot_study = None
if self._space:
self.setup_study(mode)
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
optimizer: Optional[sko.optimizer.Optimizer] = None,
space: Union[List[str], Dict[str, Union[Tuple, List]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[List]] = None,
evaluated_rewards: Optional[List] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
assert sko is not None, """skopt must be installed!
You can install Skopt with the command:
`pip install scikit-optimize`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
super(SkOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
self._initial_points = []
self._parameters = None
self._parameter_names = None
self._parameter_ranges = None
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
space = self.convert_search_space(space, join=True)
self._space = space
if self._space:
if isinstance(optimizer, sko.Optimizer):
if not isinstance(space, list):
raise ValueError(
"You passed an optimizer instance to SkOpt. Your "
"`space` parameter should be a list of parameter"
"names."
)
self._parameter_names = space
else:
self._parameter_names = list(space.keys())
self._parameter_ranges = space.values()
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._skopt_opt = optimizer
if self._skopt_opt or self._space:
self.setup_skopt()
self._live_trial_mapping = {}
|
def __init__(
self,
optimizer: Optional[sko.optimizer.Optimizer] = None,
space: Union[List[str], Dict[str, Union[Tuple, List]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[List]] = None,
evaluated_rewards: Optional[List] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
assert sko is not None, """skopt must be installed!
You can install Skopt with the command:
`pip install scikit-optimize`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
super(SkOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials,
)
self._initial_points = []
self._parameters = None
self._parameter_names = None
self._parameter_ranges = None
self._space = space
if self._space:
if isinstance(optimizer, sko.Optimizer):
if not isinstance(space, list):
raise ValueError(
"You passed an optimizer instance to SkOpt. Your "
"`space` parameter should be a list of parameter"
"names."
)
self._parameter_names = space
else:
self._parameter_names = list(space.keys())
self._parameter_ranges = space.values()
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._skopt_opt = optimizer
if self._skopt_opt or self._space:
self.setup_skopt()
self._live_trial_mapping = {}
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a SkOpt search space."
)
def resolve_value(domain: Domain) -> Union[Tuple, List]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning(
"SkOpt search does not support quantization. Dropped quantization."
)
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if domain.sampler is not None:
logger.warning(
"SkOpt does not support specific sampling methods."
" The {} sampler will be dropped.".format(sampler)
)
return domain.lower, domain.upper
if isinstance(domain, Integer):
if domain.sampler is not None:
logger.warning(
"SkOpt does not support specific sampling methods."
" The {} sampler will be dropped.".format(sampler)
)
return domain.lower, domain.upper
if isinstance(domain, Categorical):
return domain.categories
raise ValueError(
"SkOpt does not support parameters of type `{}`".format(
type(domain).__name__
)
)
# Parameter name is e.g. "a/b/c" for nested dicts
space = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
if join:
spec.update(space)
space = spec
return space
|
def convert_search_space(spec: Dict) -> Dict:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a SkOpt search space."
)
def resolve_value(domain: Domain) -> Union[Tuple, List]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning(
"SkOpt search does not support quantization. Dropped quantization."
)
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if domain.sampler is not None:
logger.warning(
"SkOpt does not support specific sampling methods."
" The {} sampler will be dropped.".format(sampler)
)
return domain.lower, domain.upper
if isinstance(domain, Integer):
if domain.sampler is not None:
logger.warning(
"SkOpt does not support specific sampling methods."
" The {} sampler will be dropped.".format(sampler)
)
return domain.lower, domain.upper
if isinstance(domain, Categorical):
return domain.categories
raise ValueError(
"SkOpt does not support parameters of type `{}`".format(
type(domain).__name__
)
)
# Parameter name is e.g. "a/b/c" for nested dicts
space = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
return space
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
algo: str = "asracos",
budget: Optional[int] = None,
dim_dict: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
**kwargs,
):
assert zoopt is not None, (
"ZOOpt not found - please install zoopt by `pip install -U zoopt`."
)
assert budget is not None, "`budget` should not be None!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
_algo = algo.lower()
assert _algo in ["asracos", "sracos"], (
"`algo` must be in ['asracos', 'sracos'] currently"
)
self._algo = _algo
if isinstance(dim_dict, dict) and dim_dict:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(dim_dict)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(par="dim_dict", cls=type(self))
)
dim_dict = self.convert_search_space(dim_dict, join=True)
self._dim_dict = dim_dict
self._budget = budget
self._metric = metric
if mode == "max":
self._metric_op = -1.0
elif mode == "min":
self._metric_op = 1.0
self._live_trial_mapping = {}
self._dim_keys = []
self.solution_dict = {}
self.best_solution_list = []
self.optimizer = None
self.kwargs = kwargs
super(ZOOptSearch, self).__init__(metric=self._metric, mode=mode)
if self._dim_dict:
self.setup_zoopt()
|
def __init__(
self,
algo: str = "asracos",
budget: Optional[int] = None,
dim_dict: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
**kwargs,
):
assert zoopt is not None, (
"ZOOpt not found - please install zoopt by `pip install -U zoopt`."
)
assert budget is not None, "`budget` should not be None!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
_algo = algo.lower()
assert _algo in ["asracos", "sracos"], (
"`algo` must be in ['asracos', 'sracos'] currently"
)
self._algo = _algo
self._dim_dict = dim_dict
self._budget = budget
self._metric = metric
if mode == "max":
self._metric_op = -1.0
elif mode == "min":
self._metric_op = 1.0
self._live_trial_mapping = {}
self._dim_keys = []
self.solution_dict = {}
self.best_solution_list = []
self.optimizer = None
self.kwargs = kwargs
super(ZOOptSearch, self).__init__(metric=self._metric, mode=mode)
if self._dim_dict:
self.setup_zoopt()
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def convert_search_space(spec: Dict, join: bool = False) -> Dict[str, Tuple]:
spec = copy.deepcopy(spec)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if not domain_vars and not grid_vars:
return {}
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a ZOOpt search space."
)
def resolve_value(domain: Domain) -> Tuple:
quantize = None
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
quantize = sampler.q
sampler = sampler.sampler
if isinstance(domain, Float):
precision = quantize or 1e-12
if isinstance(sampler, Uniform):
return (ValueType.CONTINUOUS, [domain.lower, domain.upper], precision)
elif isinstance(domain, Integer):
if isinstance(sampler, Uniform):
return (ValueType.DISCRETE, [domain.lower, domain.upper], True)
elif isinstance(domain, Categorical):
# Categorical variables would use ValueType.DISCRETE with
# has_partial_order=False, however, currently we do not
# keep track of category values and cannot automatically
# translate back and forth between them.
if isinstance(sampler, Uniform):
return (ValueType.GRID, domain.categories)
raise ValueError(
"ZOOpt does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__, type(domain.sampler).__name__
)
)
conv_spec = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
if join:
spec.update(conv_spec)
conv_spec = spec
return conv_spec
|
def convert_search_space(spec: Dict) -> Dict[str, Tuple]:
spec = copy.deepcopy(spec)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if not domain_vars and not grid_vars:
return []
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a ZOOpt search space."
)
def resolve_value(domain: Domain) -> Tuple:
quantize = None
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
quantize = sampler.q
sampler = sampler.sampler
if isinstance(domain, Float):
precision = quantize or 1e-12
if isinstance(sampler, Uniform):
return (ValueType.CONTINUOUS, [domain.lower, domain.upper], precision)
elif isinstance(domain, Integer):
if isinstance(sampler, Uniform):
return (ValueType.DISCRETE, [domain.lower, domain.upper], True)
elif isinstance(domain, Categorical):
# Categorical variables would use ValueType.DISCRETE with
# has_partial_order=False, however, currently we do not
# keep track of category values and cannot automatically
# translate back and forth between them.
if isinstance(sampler, Uniform):
return (ValueType.GRID, domain.categories)
raise ValueError(
"ZOOpt does not support parameters of type "
"`{}` with samplers of type `{}`".format(
type(domain).__name__, type(domain.sampler).__name__
)
)
spec = {"/".join(path): resolve_value(domain) for path, domain in domain_vars}
return spec
|
https://github.com/ray-project/ray/issues/11434
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_code_search_path=None,
_temp_dir=None,
_load_code_from_local=False,
_lru_evict=False,
_metrics_export_port=None,
_object_spilling_config=None,
_system_config=None,
):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_load_code_from_local: Whether code should be loaded from a local
module or from the GCS.
_java_worker_options: Overwrite the options to start Java workers.
_code_search_path (list): Java classpath or python import path.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_object_spilling_config (str): The configuration json string for object
spilling I/O worker.
_system_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
logger.debug(
"Automatically increasing RLIMIT_NOFILE to max value of {}".format(hard)
)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft)
)
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
'please call ray.init() or ray.init(address="auto") on the '
"driver."
)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.error("Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError(
"Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'."
)
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
load_code_from_local=_load_code_from_local,
java_worker_options=_java_worker_options,
code_search_path=_code_search_path,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
object_spilling_config=_object_spilling_config,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, spawn_reaper=True, ray_params=ray_params
)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided."
)
if resources is not None:
raise ValueError(
"When connecting to an existing cluster, "
"resources must not be provided."
)
if object_store_memory is not None:
raise ValueError(
"When connecting to an existing cluster, "
"object_store_memory must not be provided."
)
if _system_config is not None and len(_system_config) != 0:
raise ValueError(
"When connecting to an existing cluster, "
"_system_config must not be provided."
)
if _lru_evict:
raise ValueError(
"When connecting to an existing cluster, "
"_lru_evict must not be provided."
)
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided."
)
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
load_code_from_local=_load_code_from_local,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True,
)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config,
)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_code_search_path=None,
_temp_dir=None,
_load_code_from_local=False,
_lru_evict=False,
_metrics_export_port=None,
_object_spilling_config=None,
_system_config=None,
):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_load_code_from_local: Whether code should be loaded from a local
module or from the GCS.
_java_worker_options: Overwrite the options to start Java workers.
_code_search_path (list): Java classpath or python import path.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_object_spilling_config (str): The configuration json string for object
spilling I/O worker.
_system_config (str): JSON configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
'please call ray.init() or ray.init(address="auto") on the '
"driver."
)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.error("Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError(
"Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'."
)
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
load_code_from_local=_load_code_from_local,
java_worker_options=_java_worker_options,
code_search_path=_code_search_path,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
object_spilling_config=_object_spilling_config,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, spawn_reaper=True, ray_params=ray_params
)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided."
)
if resources is not None:
raise ValueError(
"When connecting to an existing cluster, "
"resources must not be provided."
)
if object_store_memory is not None:
raise ValueError(
"When connecting to an existing cluster, "
"object_store_memory must not be provided."
)
if _system_config is not None and len(_system_config) != 0:
raise ValueError(
"When connecting to an existing cluster, "
"_system_config must not be provided."
)
if _lru_evict:
raise ValueError(
"When connecting to an existing cluster, "
"_lru_evict must not be provided."
)
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided."
)
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
load_code_from_local=_load_code_from_local,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True,
)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config,
)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
https://github.com/ray-project/ray/issues/11309
|
^CTraceback (most recent call last):
File "test.py", line 72, in <module>
**tune_kwargs)
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/tune.py", line 405, in run
runner.step()
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 375, in step
self._process_events() # blocking
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 475, in _process_events
trial = self.trial_executor.get_next_available_trial() # blocking
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 463, in get_next_available_trial
[result_id], _ = ray.wait(shuffled_results)
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/worker.py", line 1558, in wait
worker.current_task_id,
File "python/ray/_raylet.pyx", line 939, in ray._raylet.CoreWorker.wait
File "python/ray/_raylet.pyx", line 144, in ray._raylet.check_status
KeyboardInterrupt
^CError in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/worker.py", line 784, in shutdown
time.sleep(0.5)
KeyboardInterrupt
|
CError
|
def with_parameters(fn, **kwargs):
"""Wrapper for function trainables to pass arbitrary large data objects.
This wrapper function will store all passed parameters in the Ray
object store and retrieve them when calling the function. It can thus
be used to pass arbitrary data, even datasets, to Tune trainable functions.
This can also be used as an alternative to `functools.partial` to pass
default arguments to trainables.
Args:
fn: function to wrap
**kwargs: parameters to store in object store.
.. code-block:: python
from ray import tune
def train(config, data=None):
for sample in data:
# ...
tune.report(loss=loss)
data = HugeDataset(download=True)
tune.run(
tune.with_parameters(train, data=data),
#...
)
"""
if not callable(fn):
raise ValueError(
"`tune.with_parameters()` only works with the function API. "
"If you want to pass parameters to Trainable _classes_, consider "
"passing them via the `config` parameter."
)
prefix = f"{str(fn)}_"
for k, v in kwargs.items():
parameter_registry.put(prefix + k, v)
use_checkpoint = detect_checkpoint_function(fn)
def inner(config, checkpoint_dir=None):
fn_kwargs = {}
if use_checkpoint:
default = checkpoint_dir
sig = inspect.signature(fn)
if "checkpoint_dir" in sig.parameters:
default = sig.parameters["checkpoint_dir"].default or default
fn_kwargs["checkpoint_dir"] = default
for k in kwargs:
fn_kwargs[k] = parameter_registry.get(prefix + k)
fn(config, **fn_kwargs)
# Use correct function signature if no `checkpoint_dir` parameter is set
if not use_checkpoint:
def _inner(config):
inner(config, checkpoint_dir=None)
return _inner
return inner
|
def with_parameters(fn, **kwargs):
"""Wrapper for function trainables to pass arbitrary large data objects.
This wrapper function will store all passed parameters in the Ray
object store and retrieve them when calling the function. It can thus
be used to pass arbitrary data, even datasets, to Tune trainable functions.
This can also be used as an alternative to `functools.partial` to pass
default arguments to trainables.
Args:
fn: function to wrap
**kwargs: parameters to store in object store.
.. code-block:: python
from ray import tune
def train(config, data=None):
for sample in data:
# ...
tune.report(loss=loss)
data = HugeDataset(download=True)
tune.run(
tune.with_parameters(train, data=data),
#...
)
"""
prefix = f"{str(fn)}_"
for k, v in kwargs.items():
parameter_registry.put(prefix + k, v)
use_checkpoint = detect_checkpoint_function(fn)
def inner(config, checkpoint_dir=None):
fn_kwargs = {}
if use_checkpoint:
default = checkpoint_dir
sig = inspect.signature(fn)
if "checkpoint_dir" in sig.parameters:
default = sig.parameters["checkpoint_dir"].default or default
fn_kwargs["checkpoint_dir"] = default
for k in kwargs:
fn_kwargs[k] = parameter_registry.get(prefix + k)
fn(config, **fn_kwargs)
# Use correct function signature if no `checkpoint_dir` parameter is set
if not use_checkpoint:
def _inner(config):
inner(config, checkpoint_dir=None)
return _inner
return inner
|
https://github.com/ray-project/ray/issues/11047
|
Failure # 1 (occurred at 2020-09-26_16-50-01)
Traceback (most recent call last):
File "/home/karol/PycharmProjects/ray/python/ray/tune/trial_runner.py", line 518, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/karol/PycharmProjects/ray/python/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/home/karol/PycharmProjects/ray/python/ray/worker.py", line 1438, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(TuneError): �[36mray::ImplicitFunc.train()�[39m (pid=25150, ip=141.12.239.114)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/home/karol/PycharmProjects/ray/python/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 346, in step
self._report_thread_runner_error(block=True)
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 464, in _report_thread_runner_error
raise TuneError(("Trial raised an exception. Traceback:\n{}"
ray.tune.error.TuneError: Trial raised an exception. Traceback:
�[36mray::ImplicitFunc.train()�[39m (pid=25150, ip=141.12.239.114)
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 233, in run
self._entrypoint()
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 295, in entrypoint
return self._trainable_func(self.config, self._status_reporter,
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 527, in _trainable_func
output = fn()
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 595, in _inner
inner(config, checkpoint_dir=None)
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 589, in inner
fn(config, **fn_kwargs)
TypeError: __init__() got multiple values for argument 'arch'
|
ray.tune.error.TuneError
|
def wandb_mixin(func: Callable):
"""wandb_mixin
Weights and biases (https://www.wandb.com/) is a tool for experiment
tracking, model optimization, and dataset versioning. This Ray Tune
Trainable mixin helps initializing the Wandb API for use with the
``Trainable`` class or with `@wandb_mixin` for the function API.
For basic usage, just prepend your training function with the
``@wandb_mixin`` decorator:
.. code-block:: python
from ray.tune.integration.wandb import wandb_mixin
@wandb_mixin
def train_fn(config):
wandb.log()
Wandb configuration is done by passing a ``wandb`` key to
the ``config`` parameter of ``tune.run()`` (see example below).
The content of the ``wandb`` config entry is passed to ``wandb.init()``
as keyword arguments. The exception are the following settings, which
are used to configure the ``WandbTrainableMixin`` itself:
Args:
api_key_file (str): Path to file containing the Wandb API KEY. This
file must be on all nodes if using the `wandb_mixin`.
api_key (str): Wandb API Key. Alternative to setting `api_key_file`.
Wandb's ``group``, ``run_id`` and ``run_name`` are automatically selected
by Tune, but can be overwritten by filling out the respective configuration
values.
Please see here for all other valid configuration settings:
https://docs.wandb.com/library/init
Example:
.. code-block:: python
from ray import tune
from ray.tune.integration.wandb import wandb_mixin
@wandb_mixin
def train_fn(config):
for i in range(10):
loss = self.config["a"] + self.config["b"]
wandb.log({"loss": loss})
tune.report(loss=loss, done=True)
tune.run(
train_fn,
config={
# define search space here
"a": tune.choice([1, 2, 3]),
"b": tune.choice([4, 5, 6]),
# wandb configuration
"wandb": {
"project": "Optimization_Project",
"api_key_file": "/path/to/file"
}
})
"""
func.__mixins__ = (WandbTrainableMixin,)
return func
|
def wandb_mixin(func: Callable):
"""wandb_mixin
Weights and biases (https://www.wandb.com/) is a tool for experiment
tracking, model optimization, and dataset versioning. This Ray Tune
Trainable mixin helps initializing the Wandb API for use with the
``Trainable`` class or with `@wandb_mixin` for the function API.
For basic usage, just prepend your training function with the
``@wandb_mixin`` decorator:
.. code-block:: python
from ray.tune.integration.wandb import wandb_mixin
@wandb_mixin
def train_fn(config):
wandb.log()
Wandb configuration is done by passing a ``wandb`` key to
the ``config`` parameter of ``tune.run()`` (see example below).
The content of the ``wandb`` config entry is passed to ``wandb.init()``
as keyword arguments. The exception are the following settings, which
are used to configure the ``WandbTrainableMixin`` itself:
Args:
api_key_file (str): Path to file containing the Wandb API KEY. This
file must be on all nodes if using the `wandb_mixin`.
api_key (str): Wandb API Key. Alternative to setting `api_key_file`.
Wandb's ``group``, ``run_id`` and ``run_name`` are automatically selected
by Tune, but can be overwritten by filling out the respective configuration
values.
Please see here for all other valid configuration settings:
https://docs.wandb.com/library/init
Example:
.. code-block:: python
from ray import tune
from ray.tune.integration.wandb import wandb_mixin
@wandb_mixin
def train_fn(config):
for i in range(10):
loss = self.config["a"] + self.config["b"]
wandb.log({"loss": loss})
tune.report(loss=loss, done=True)
tune.run(
train_fn,
config={
# define search space here
"a": tune.choice([1, 2, 3]),
"b": tune.choice([4, 5, 6]),
# wandb configuration
"wandb": {
"project": "Optimization_Project",
"api_key_file": "/path/to/file"
}
})
"""
func.__mixins__ = (WandbTrainableMixin,)
func.__wandb_group__ = func.__name__
return func
|
https://github.com/ray-project/ray/issues/10911
|
Traceback (most recent call last):
File "/home/ubuntu/run_ray_tune.py", line 222, in <module>
tune_helsinki_(args)
File "/home/ubuntu/run_ray_tune.py", line 106, in tune_helsinki_
ray_wandb_func = wandb_mixin(ray_func)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/tune/integration/wandb.py", line 142, in wandb_mixin
func.__wandb_group__ = func.__name__
AttributeError: 'functools.partial' object has no attribute '__name__'
Shared connection to 34.220.26.193 closed.
Error: Command failed:
|
AttributeError
|
def _init(self):
config = self.config.copy()
config.pop("callbacks", None) # Remove callbacks
try:
if config.get("logger_config", {}).get("wandb"):
logger_config = config.pop("logger_config")
wandb_config = logger_config.get("wandb").copy()
else:
wandb_config = config.pop("wandb").copy()
except KeyError:
raise ValueError(
"Wandb logger specified but no configuration has been passed. "
"Make sure to include a `wandb` key in your `config` dict "
"containing at least a `project` specification."
)
_set_api_key(wandb_config)
exclude_results = self._exclude_results.copy()
# Additional excludes
additional_excludes = wandb_config.pop("excludes", [])
exclude_results += additional_excludes
# Log config keys on each result?
log_config = wandb_config.pop("log_config", False)
if not log_config:
exclude_results += ["config"]
# Fill trial ID and name
trial_id = self.trial.trial_id if self.trial else None
trial_name = str(self.trial) if self.trial else None
# Project name for Wandb
try:
wandb_project = wandb_config.pop("project")
except KeyError:
raise ValueError("You need to specify a `project` in your wandb `config` dict.")
# Grouping
wandb_group = wandb_config.pop(
"group", self.trial.trainable_name if self.trial else None
)
# remove unpickleable items!
config = _clean_log(config)
wandb_init_kwargs = dict(
id=trial_id,
name=trial_name,
resume=True,
reinit=True,
allow_val_change=True,
group=wandb_group,
project=wandb_project,
config=config,
)
wandb_init_kwargs.update(wandb_config)
self._queue = Queue()
self._wandb = self._logger_process_cls(
queue=self._queue,
exclude=exclude_results,
to_config=self._config_results,
**wandb_init_kwargs,
)
self._wandb.start()
|
def _init(self):
config = self.config.copy()
config.pop("callbacks", None) # Remove callbacks
try:
if config.get("logger_config", {}).get("wandb"):
logger_config = config.pop("logger_config")
wandb_config = logger_config.get("wandb").copy()
else:
wandb_config = config.pop("wandb").copy()
except KeyError:
raise ValueError(
"Wandb logger specified but no configuration has been passed. "
"Make sure to include a `wandb` key in your `config` dict "
"containing at least a `project` specification."
)
_set_api_key(wandb_config)
exclude_results = self._exclude_results.copy()
# Additional excludes
additional_excludes = wandb_config.pop("excludes", [])
exclude_results += additional_excludes
# Log config keys on each result?
log_config = wandb_config.pop("log_config", False)
if not log_config:
exclude_results += ["config"]
# Fill trial ID and name
trial_id = self.trial.trial_id
trial_name = str(self.trial)
# Project name for Wandb
try:
wandb_project = wandb_config.pop("project")
except KeyError:
raise ValueError("You need to specify a `project` in your wandb `config` dict.")
# Grouping
wandb_group = wandb_config.pop("group", self.trial.trainable_name)
# remove unpickleable items!
config = _clean_log(config)
wandb_init_kwargs = dict(
id=trial_id,
name=trial_name,
resume=True,
reinit=True,
allow_val_change=True,
group=wandb_group,
project=wandb_project,
config=config,
)
wandb_init_kwargs.update(wandb_config)
self._queue = Queue()
self._wandb = self._logger_process_cls(
queue=self._queue,
exclude=exclude_results,
to_config=self._config_results,
**wandb_init_kwargs,
)
self._wandb.start()
|
https://github.com/ray-project/ray/issues/10911
|
Traceback (most recent call last):
File "/home/ubuntu/run_ray_tune.py", line 222, in <module>
tune_helsinki_(args)
File "/home/ubuntu/run_ray_tune.py", line 106, in tune_helsinki_
ray_wandb_func = wandb_mixin(ray_func)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/tune/integration/wandb.py", line 142, in wandb_mixin
func.__wandb_group__ = func.__name__
AttributeError: 'functools.partial' object has no attribute '__name__'
Shared connection to 34.220.26.193 closed.
Error: Command failed:
|
AttributeError
|
def Concurrently(
ops: List[LocalIterator],
*,
mode="round_robin",
output_indexes=None,
round_robin_weights=None,
):
"""Operator that runs the given parent iterators concurrently.
Args:
mode (str): One of 'round_robin', 'async'. In 'round_robin' mode,
we alternate between pulling items from each parent iterator in
order deterministically. In 'async' mode, we pull from each parent
iterator as fast as they are produced. This is non-deterministic.
output_indexes (list): If specified, only output results from the
given ops. For example, if ``output_indexes=[0]``, only results
from the first op in ops will be returned.
round_robin_weights (list): List of weights to use for round robin
mode. For example, ``[2, 1]`` will cause the iterator to pull twice
as many items from the first iterator as the second. ``[2, 1, *]``
will cause as many items to be pulled as possible from the third
iterator without blocking. This is only allowed in round robin
mode.
Examples:
>>> sim_op = ParallelRollouts(...).for_each(...)
>>> replay_op = LocalReplay(...).for_each(...)
>>> combined_op = Concurrently([sim_op, replay_op], mode="async")
"""
if len(ops) < 2:
raise ValueError("Should specify at least 2 ops.")
if mode == "round_robin":
deterministic = True
elif mode == "async":
deterministic = False
if round_robin_weights:
raise ValueError("round_robin_weights cannot be specified in async mode")
else:
raise ValueError("Unknown mode {}".format(mode))
if round_robin_weights and all(r == "*" for r in round_robin_weights):
raise ValueError("Cannot specify all round robin weights = *")
if output_indexes:
for i in output_indexes:
assert i in range(len(ops)), ("Index out of range", i)
def tag(op, i):
return op.for_each(lambda x: (i, x))
ops = [tag(op, i) for i, op in enumerate(ops)]
output = ops[0].union(
*ops[1:], deterministic=deterministic, round_robin_weights=round_robin_weights
)
if output_indexes:
output = output.filter(lambda tup: tup[0] in output_indexes).for_each(
lambda tup: tup[1]
)
return output
|
def Concurrently(
ops: List[LocalIterator],
*,
mode="round_robin",
output_indexes=None,
round_robin_weights=None,
):
"""Operator that runs the given parent iterators concurrently.
Args:
mode (str): One of {'round_robin', 'async'}.
- In 'round_robin' mode, we alternate between pulling items from
each parent iterator in order deterministically.
- In 'async' mode, we pull from each parent iterator as fast as
they are produced. This is non-deterministic.
output_indexes (list): If specified, only output results from the
given ops. For example, if output_indexes=[0], only results from
the first op in ops will be returned.
round_robin_weights (list): List of weights to use for round robin
mode. For example, [2, 1] will cause the iterator to pull twice
as many items from the first iterator as the second. [2, 1, *] will
cause as many items to be pulled as possible from the third
iterator without blocking. This is only allowed in round robin
mode.
>>> sim_op = ParallelRollouts(...).for_each(...)
>>> replay_op = LocalReplay(...).for_each(...)
>>> combined_op = Concurrently([sim_op, replay_op], mode="async")
"""
if len(ops) < 2:
raise ValueError("Should specify at least 2 ops.")
if mode == "round_robin":
deterministic = True
elif mode == "async":
deterministic = False
if round_robin_weights:
raise ValueError("round_robin_weights cannot be specified in async mode")
else:
raise ValueError("Unknown mode {}".format(mode))
if round_robin_weights and all(r == "*" for r in round_robin_weights):
raise ValueError("Cannot specify all round robin weights = *")
if output_indexes:
for i in output_indexes:
assert i in range(len(ops)), ("Index out of range", i)
def tag(op, i):
return op.for_each(lambda x: (i, x))
ops = [tag(op, i) for i, op in enumerate(ops)]
output = ops[0].union(
*ops[1:], deterministic=deterministic, round_robin_weights=round_robin_weights
)
if output_indexes:
output = output.filter(lambda tup: tup[0] in output_indexes).for_each(
lambda tup: tup[1]
)
return output
|
https://github.com/ray-project/ray/issues/10372
|
Traceback (most recent call last):
File "/home/enes/ws/code/arl/mt/test/consume_experiences.py", line 16, in <module>
results = trainer.train()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 522, in train
raise e
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 508, in train
result = Trainable.train(self)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/tune/trainable.py", line 332, in train
result = self.step()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 110, in step
res = next(self.train_exec_impl)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 1078, in build_union
item = next(it)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
[Previous line repeated 2 more times]
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_ops.py", line 89, in gen_replay
item = local_buffer.replay()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 331, in replay
beta=self.prioritized_replay_beta)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 173, in sample
batch = self._encode_sample(idxes)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 64, in _encode_sample
out = SampleBatch.concat_samples([self._storage[i] for i in idxes])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in concat_samples
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in <listcomp>
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 294, in __getitem__
return self.data[key]
KeyError: 'action_logp'
|
KeyError
|
def ParallelRollouts(
workers: WorkerSet, *, mode="bulk_sync", num_async=1
) -> LocalIterator[SampleBatch]:
"""Operator to collect experiences in parallel from rollout workers.
If there are no remote workers, experiences will be collected serially from
the local worker instance instead.
Args:
workers (WorkerSet): set of rollout workers to use.
mode (str): One of 'async', 'bulk_sync', 'raw'. In 'async' mode,
batches are returned as soon as they are computed by rollout
workers with no order guarantees. In 'bulk_sync' mode, we collect
one batch from each worker and concatenate them together into a
large batch to return. In 'raw' mode, the ParallelIterator object
is returned directly and the caller is responsible for implementing
gather and updating the timesteps counter.
num_async (int): In async mode, the max number of async
requests in flight per actor.
Returns:
A local iterator over experiences collected in parallel.
Examples:
>>> rollouts = ParallelRollouts(workers, mode="async")
>>> batch = next(rollouts)
>>> print(batch.count)
50 # config.rollout_fragment_length
>>> rollouts = ParallelRollouts(workers, mode="bulk_sync")
>>> batch = next(rollouts)
>>> print(batch.count)
200 # config.rollout_fragment_length * config.num_workers
Updates the STEPS_SAMPLED_COUNTER counter in the local iterator context.
"""
# Ensure workers are initially in sync.
workers.sync_weights()
def report_timesteps(batch):
metrics = _get_shared_metrics()
metrics.counters[STEPS_SAMPLED_COUNTER] += batch.count
return batch
if not workers.remote_workers():
# Handle the serial sampling case.
def sampler(_):
while True:
yield workers.local_worker().sample()
return LocalIterator(sampler, SharedMetrics()).for_each(report_timesteps)
# Create a parallel iterator over generated experiences.
rollouts = from_actors(workers.remote_workers())
if mode == "bulk_sync":
return (
rollouts.batch_across_shards()
.for_each(lambda batches: SampleBatch.concat_samples(batches))
.for_each(report_timesteps)
)
elif mode == "async":
return rollouts.gather_async(num_async=num_async).for_each(report_timesteps)
elif mode == "raw":
return rollouts
else:
raise ValueError(
"mode must be one of 'bulk_sync', 'async', 'raw', got '{}'".format(mode)
)
|
def ParallelRollouts(
workers: WorkerSet, *, mode="bulk_sync", num_async=1
) -> LocalIterator[SampleBatch]:
"""Operator to collect experiences in parallel from rollout workers.
If there are no remote workers, experiences will be collected serially from
the local worker instance instead.
Args:
workers (WorkerSet): set of rollout workers to use.
mode (str): One of {'async', 'bulk_sync', 'raw'}.
- In 'async' mode, batches are returned as soon as they are
computed by rollout workers with no order guarantees.
- In 'bulk_sync' mode, we collect one batch from each worker
and concatenate them together into a large batch to return.
- In 'raw' mode, the ParallelIterator object is returned directly
and the caller is responsible for implementing gather and
updating the timesteps counter.
num_async (int): In async mode, the max number of async
requests in flight per actor.
Returns:
A local iterator over experiences collected in parallel.
Examples:
>>> rollouts = ParallelRollouts(workers, mode="async")
>>> batch = next(rollouts)
>>> print(batch.count)
50 # config.rollout_fragment_length
>>> rollouts = ParallelRollouts(workers, mode="bulk_sync")
>>> batch = next(rollouts)
>>> print(batch.count)
200 # config.rollout_fragment_length * config.num_workers
Updates the STEPS_SAMPLED_COUNTER counter in the local iterator context.
"""
# Ensure workers are initially in sync.
workers.sync_weights()
def report_timesteps(batch):
metrics = _get_shared_metrics()
metrics.counters[STEPS_SAMPLED_COUNTER] += batch.count
return batch
if not workers.remote_workers():
# Handle the serial sampling case.
def sampler(_):
while True:
yield workers.local_worker().sample()
return LocalIterator(sampler, SharedMetrics()).for_each(report_timesteps)
# Create a parallel iterator over generated experiences.
rollouts = from_actors(workers.remote_workers())
if mode == "bulk_sync":
return (
rollouts.batch_across_shards()
.for_each(lambda batches: SampleBatch.concat_samples(batches))
.for_each(report_timesteps)
)
elif mode == "async":
return rollouts.gather_async(num_async=num_async).for_each(report_timesteps)
elif mode == "raw":
return rollouts
else:
raise ValueError(
"mode must be one of 'bulk_sync', 'async', 'raw', got '{}'".format(mode)
)
|
https://github.com/ray-project/ray/issues/10372
|
Traceback (most recent call last):
File "/home/enes/ws/code/arl/mt/test/consume_experiences.py", line 16, in <module>
results = trainer.train()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 522, in train
raise e
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 508, in train
result = Trainable.train(self)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/tune/trainable.py", line 332, in train
result = self.step()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 110, in step
res = next(self.train_exec_impl)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 1078, in build_union
item = next(it)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
[Previous line repeated 2 more times]
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_ops.py", line 89, in gen_replay
item = local_buffer.replay()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 331, in replay
beta=self.prioritized_replay_beta)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 173, in sample
batch = self._encode_sample(idxes)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 64, in _encode_sample
out = SampleBatch.concat_samples([self._storage[i] for i in idxes])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in concat_samples
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in <listcomp>
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 294, in __getitem__
return self.data[key]
KeyError: 'action_logp'
|
KeyError
|
def _get_node_specific_docker_config(self, node_id):
if "docker" not in self.config:
return {}
docker_config = copy.deepcopy(self.config.get("docker", {}))
node_specific_docker = self._get_node_type_specific_fields(node_id, "docker")
docker_config.update(node_specific_docker)
return docker_config
|
def _get_node_specific_docker_config(self, node_id):
docker_config = copy.deepcopy(self.config.get("docker", {}))
node_specific_docker = self._get_node_type_specific_fields(node_id, "docker")
docker_config.update(node_specific_docker)
return docker_config
|
https://github.com/ray-project/ray/issues/10690
|
==> /tmp/ray/session_2020-09-09_17-20-39_779593_74/logs/monitor.err <==
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,100 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.13077020645141602}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,112 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.14269304275512695}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,128 ERROR autoscaler.py:123 -- StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 CRITICAL autoscaler.py:130 -- StandardAutoscaler: Too many errors, abort.
Error in monitor loop
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 ERROR autoscaler.py:554 -- StandardAutoscaler: kill_workers triggered
2020-09-09 17:21:11,134 INFO node_provider.py:121 -- KubernetesNodeProvider: calling delete_namespaced_pod
2020-09-09 17:21:11,148 ERROR autoscaler.py:559 -- StandardAutoscaler: terminated 1 node(s)
Error in sys.excepthook:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/worker.py", line 834, in custom_excepthook
worker_id = global_worker.worker_id
AttributeError: 'Worker' object has no attribute 'worker_id'
Original exception was:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 368, in <module>
monitor.run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 380, in <module>
redis_client, ray_constants.MONITOR_DIED_ERROR, message)
File "/root/anaconda3/lib/python3.7/site-packages/ray/utils.py", line 128, in push_error_to_driver_through_redis
pubsub_msg.SerializeAsString())
AttributeError: SerializeAsString
==> /tmp/ray/session_2020-09-09_17-20-39_779593_74/logs/monitor.out <==
Destroying cluster. Confirm [y/N]: y [automatic, due to --yes]
1 random worker nodes will not be shut down. (due to --keep-min-workers)
The head node will not be shut down. (due to --workers-only)
No nodes remaining.
==> /tmp/ray/session_latest/logs/monitor.err <==
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,100 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.13077020645141602}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,112 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.14269304275512695}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,128 ERROR autoscaler.py:123 -- StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 CRITICAL autoscaler.py:130 -- StandardAutoscaler: Too many errors, abort.
Error in monitor loop
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 ERROR autoscaler.py:554 -- StandardAutoscaler: kill_workers triggered
2020-09-09 17:21:11,134 INFO node_provider.py:121 -- KubernetesNodeProvider: calling delete_namespaced_pod
2020-09-09 17:21:11,148 ERROR autoscaler.py:559 -- StandardAutoscaler: terminated 1 node(s)
Error in sys.excepthook:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/worker.py", line 834, in custom_excepthook
worker_id = global_worker.worker_id
AttributeError: 'Worker' object has no attribute 'worker_id'
Original exception was:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 368, in <module>
monitor.run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 380, in <module>
redis_client, ray_constants.MONITOR_DIED_ERROR, message)
File "/root/anaconda3/lib/python3.7/site-packages/ray/utils.py", line 128, in push_error_to_driver_through_redis
pubsub_msg.SerializeAsString())
AttributeError: SerializeAsString
==> /tmp/ray/session_latest/logs/monitor.out <==
Destroying cluster. Confirm [y/N]: y [automatic, due to --yes]
1 random worker nodes will not be shut down. (due to --keep-min-workers)
The head node will not be shut down. (due to --workers-only)
No nodes remaining.
|
KeyError
|
def _clean_log(obj):
# Fixes https://github.com/ray-project/ray/issues/10631
if isinstance(obj, dict):
return {k: _clean_log(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_clean_log(v) for v in obj]
# Else
try:
pickle.dumps(obj)
yaml.dump(
obj,
Dumper=yaml.SafeDumper,
default_flow_style=False,
allow_unicode=True,
encoding="utf-8",
)
return obj
except Exception:
# give up, similar to _SafeFallBackEncoder
return str(obj)
|
def _clean_log(obj):
# Fixes https://github.com/ray-project/ray/issues/10631
if isinstance(obj, dict):
return {k: _clean_log(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_clean_log(v) for v in obj]
# Else
try:
pickle.dumps(obj)
return obj
except Exception:
# give up, similar to _SafeFallBackEncoder
return str(obj)
|
https://github.com/ray-project/ray/issues/10426
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def _init(self):
config = self.config.copy()
config.pop("callbacks", None) # Remove callbacks
try:
if config.get("logger_config", {}).get("wandb"):
logger_config = config.pop("logger_config")
wandb_config = logger_config.get("wandb").copy()
else:
wandb_config = config.pop("wandb").copy()
except KeyError:
raise ValueError(
"Wandb logger specified but no configuration has been passed. "
"Make sure to include a `wandb` key in your `config` dict "
"containing at least a `project` specification."
)
_set_api_key(wandb_config)
exclude_results = self._exclude_results.copy()
# Additional excludes
additional_excludes = wandb_config.pop("excludes", [])
exclude_results += additional_excludes
# Log config keys on each result?
log_config = wandb_config.pop("log_config", False)
if not log_config:
exclude_results += ["config"]
# Fill trial ID and name
trial_id = self.trial.trial_id
trial_name = str(self.trial)
# Project name for Wandb
try:
wandb_project = wandb_config.pop("project")
except KeyError:
raise ValueError("You need to specify a `project` in your wandb `config` dict.")
# Grouping
wandb_group = wandb_config.pop("group", self.trial.trainable_name)
wandb_init_kwargs = dict(
id=trial_id,
name=trial_name,
resume=True,
reinit=True,
allow_val_change=True,
group=wandb_group,
project=wandb_project,
config=config,
)
wandb_init_kwargs.update(wandb_config)
self._queue = Queue()
self._wandb = self._logger_process_cls(
queue=self._queue,
exclude=exclude_results,
to_config=self._config_results,
**wandb_init_kwargs,
)
self._wandb.start()
|
def _init(self):
config = self.config.copy()
try:
if config.get("logger_config", {}).get("wandb"):
logger_config = config.pop("logger_config")
wandb_config = logger_config.get("wandb").copy()
else:
wandb_config = config.pop("wandb").copy()
except KeyError:
raise ValueError(
"Wandb logger specified but no configuration has been passed. "
"Make sure to include a `wandb` key in your `config` dict "
"containing at least a `project` specification."
)
_set_api_key(wandb_config)
exclude_results = self._exclude_results.copy()
# Additional excludes
additional_excludes = wandb_config.pop("excludes", [])
exclude_results += additional_excludes
# Log config keys on each result?
log_config = wandb_config.pop("log_config", False)
if not log_config:
exclude_results += ["config"]
# Fill trial ID and name
trial_id = self.trial.trial_id
trial_name = str(self.trial)
# Project name for Wandb
try:
wandb_project = wandb_config.pop("project")
except KeyError:
raise ValueError("You need to specify a `project` in your wandb `config` dict.")
# Grouping
wandb_group = wandb_config.pop("group", self.trial.trainable_name)
wandb_init_kwargs = dict(
id=trial_id,
name=trial_name,
resume=True,
reinit=True,
allow_val_change=True,
group=wandb_group,
project=wandb_project,
config=config,
)
wandb_init_kwargs.update(wandb_config)
self._queue = Queue()
self._wandb = self._logger_process_cls(
queue=self._queue,
exclude=exclude_results,
to_config=self._config_results,
**wandb_init_kwargs,
)
self._wandb.start()
|
https://github.com/ray-project/ray/issues/10426
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def _clean_log(obj):
# Fixes https://github.com/ray-project/ray/issues/10631
if isinstance(obj, dict):
return {k: _clean_log(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_clean_log(v) for v in obj]
elif _is_allowed_type(obj):
return obj
# Else
try:
pickle.dumps(obj)
yaml.dump(
obj,
Dumper=yaml.SafeDumper,
default_flow_style=False,
allow_unicode=True,
encoding="utf-8",
)
return obj
except Exception:
# give up, similar to _SafeFallBackEncoder
fallback = str(obj)
# Try to convert to int
try:
fallback = int(fallback)
return fallback
except ValueError:
pass
# Try to convert to float
try:
fallback = float(fallback)
return fallback
except ValueError:
pass
# Else, return string
return fallback
|
def _clean_log(obj):
# Fixes https://github.com/ray-project/ray/issues/10631
if isinstance(obj, dict):
return {k: _clean_log(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_clean_log(v) for v in obj]
# Else
try:
pickle.dumps(obj)
yaml.dump(
obj,
Dumper=yaml.SafeDumper,
default_flow_style=False,
allow_unicode=True,
encoding="utf-8",
)
return obj
except Exception:
# give up, similar to _SafeFallBackEncoder
return str(obj)
|
https://github.com/ray-project/ray/issues/10426
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def _handle_result(self, result):
config_update = result.get("config", {}).copy()
log = {}
flat_result = flatten_dict(result, delimiter="/")
for k, v in flat_result.items():
if any(k.startswith(item + "/") or k == item for item in self._to_config):
config_update[k] = v
elif any(k.startswith(item + "/") or k == item for item in self._exclude):
continue
elif not _is_allowed_type(v):
continue
else:
log[k] = v
config_update.pop("callbacks", None) # Remove callbacks
return log, config_update
|
def _handle_result(self, result):
config_update = result.get("config", {}).copy()
log = {}
flat_result = flatten_dict(result, delimiter="/")
for k, v in flat_result.items():
if any(k.startswith(item + "/") or k == item for item in self._to_config):
config_update[k] = v
elif any(k.startswith(item + "/") or k == item for item in self._exclude):
continue
elif not isinstance(v, Number):
continue
else:
log[k] = v
config_update.pop("callbacks", None) # Remove callbacks
return log, config_update
|
https://github.com/ray-project/ray/issues/10426
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def start(
node_ip_address,
redis_address,
address,
redis_port,
port,
num_redis_shards,
redis_max_clients,
redis_password,
redis_shard_ports,
object_manager_port,
node_manager_port,
gcs_server_port,
min_worker_port,
max_worker_port,
memory,
object_store_memory,
redis_max_memory,
num_cpus,
num_gpus,
resources,
head,
include_webui,
webui_host,
include_dashboard,
dashboard_host,
dashboard_port,
block,
plasma_directory,
huge_pages,
autoscaling_config,
no_redirect_worker_output,
no_redirect_output,
plasma_store_socket_name,
raylet_socket_name,
temp_dir,
java_worker_options,
code_search_path,
load_code_from_local,
system_config,
lru_evict,
enable_object_reconstruction,
metrics_export_port,
log_style,
log_color,
verbose,
):
"""Start Ray processes manually on the local machine."""
cli_logger.log_style = log_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
cli_logger.detect_colors()
if gcs_server_port and not head:
raise ValueError(
"gcs_server_port can be only assigned when you specify --head."
)
if redis_address is not None:
cli_logger.abort(
"{} is deprecated. Use {} instead.",
cf.bold("--redis-address"),
cf.bold("--address"),
)
raise DeprecationWarning(
"The --redis-address argument is deprecated. Please use --address instead."
)
if redis_port is not None:
cli_logger.warning(
"{} is being deprecated. Use {} instead.",
cf.bold("--redis-port"),
cf.bold("--port"),
)
cli_logger.old_warning(
logger,
"The --redis-port argument will be deprecated soon. "
"Please use --port instead.",
)
if port is not None and port != redis_port:
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--port"),
cf.bold("--redis-port"),
cf.bold("--port"),
)
raise ValueError(
"Cannot specify both --port and --redis-port "
"as port is a rename of deprecated redis-port"
)
if include_webui is not None:
cli_logger.warning(
"{} is being deprecated. Use {} instead.",
cf.bold("--include-webui"),
cf.bold("--include-dashboard"),
)
cli_logger.old_warning(
logger,
"The --include-webui argument will be deprecated soon"
"Please use --include-dashboard instead.",
)
if include_dashboard is not None:
include_dashboard = include_webui
dashboard_host_default = "localhost"
if webui_host != dashboard_host_default:
cli_logger.warning(
"{} is being deprecated. Use {} instead.",
cf.bold("--webui-host"),
cf.bold("--dashboard-host"),
)
cli_logger.old_warning(
logger,
"The --webui-host argument will be deprecated"
" soon. Please use --dashboard-host instead.",
)
if webui_host != dashboard_host and dashboard_host != "localhost":
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--dashboard-host"),
cf.bold("--webui-host"),
cf.bold("--dashboard-host"),
)
raise ValueError(
"Cannot specify both --webui-host and --dashboard-host,"
" please specify only the latter"
)
else:
dashboard_host = webui_host
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
if address is not None:
(redis_address, redis_address_ip, redis_address_port) = (
services.validate_redis_address(address)
)
try:
resources = json.loads(resources)
except Exception:
cli_logger.error("`{}` is not a valid JSON string.", cf.bold("--resources"))
cli_logger.abort(
"Valid values look like this: `{}`",
cf.bold('--resources=\'"CustomResource3": 1, "CustomResource2": 2}\''),
)
raise Exception(
"Unable to parse the --resources argument using "
"json.loads. Try using a format like\n\n"
' --resources=\'{"CustomResource1": 3, '
'"CustomReseource2": 2}\''
)
redirect_worker_output = None if not no_redirect_worker_output else True
redirect_output = None if not no_redirect_output else True
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
min_worker_port=min_worker_port,
max_worker_port=max_worker_port,
object_manager_port=object_manager_port,
node_manager_port=node_manager_port,
gcs_server_port=gcs_server_port,
memory=memory,
object_store_memory=object_store_memory,
redis_password=redis_password,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
java_worker_options=java_worker_options,
load_code_from_local=load_code_from_local,
code_search_path=code_search_path,
_system_config=system_config,
lru_evict=lru_evict,
enable_object_reconstruction=enable_object_reconstruction,
metrics_export_port=metrics_export_port,
)
if head:
# Start Ray on the head node.
if redis_shard_ports is not None:
redis_shard_ports = redis_shard_ports.split(",")
# Infer the number of Redis shards from the ports if the number is
# not provided.
if num_redis_shards is None:
num_redis_shards = len(redis_shard_ports)
# Check that the arguments match.
if len(redis_shard_ports) != num_redis_shards:
cli_logger.error(
"`{}` must be a comma-separated list of ports, "
"with length equal to `{}` (which defaults to {})",
cf.bold("--redis-shard-ports"),
cf.bold("--num-redis-shards"),
cf.bold("1"),
)
cli_logger.abort(
"Example: `{}`",
cf.bold("--num-redis-shards 3 --redis_shard_ports 6380,6381,6382"),
)
raise Exception(
"If --redis-shard-ports is provided, it must "
"have the form '6380,6381,6382', and the "
"number of ports provided must equal "
"--num-redis-shards (which is 1 if not "
"provided)"
)
if redis_address is not None:
cli_logger.abort(
"`{}` starts a new Redis server, `{}` should not be set.",
cf.bold("--head"),
cf.bold("--address"),
)
raise Exception(
"If --head is passed in, a Redis server will be "
"started, so a Redis address should not be "
"provided."
)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(node_ip_address=services.get_node_ip_address())
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(
logger, "Using IP address {} for this node.", ray_params.node_ip_address
)
ray_params.update_if_absent(
redis_port=port or redis_port,
redis_shard_ports=redis_shard_ports,
redis_max_memory=redis_max_memory,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
autoscaling_config=autoscaling_config,
)
node = ray.node.Node(
ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block
)
redis_address = node.redis_address
# this is a noop if new-style is not set, so the old logger calls
# are still in place
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
with cli_logger.group("Next steps"):
cli_logger.print("To connect to this Ray runtime from another node, run")
cli_logger.print(
cf.bold(" ray start --address='{}'{}"),
redis_address,
f" --redis-password='{redis_password}'" if redis_password else "",
)
cli_logger.newline()
cli_logger.print("Alternatively, use the following Python code:")
with cli_logger.indented():
with cf.with_style("monokai") as c:
cli_logger.print("{} ray", c.magenta("import"))
cli_logger.print(
"ray{}init(address{}{}{})",
c.magenta("."),
c.magenta("="),
c.yellow("'auto'"),
", redis_password{}{}".format(
c.magenta("="), c.yellow("'" + redis_password + "'")
)
if redis_password
else "",
)
cli_logger.newline()
cli_logger.print(
cf.underlined(
"If connection fails, check your "
"firewall settings other "
"network configuration."
)
)
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger,
"\nStarted Ray on this node. You can add additional nodes to "
"the cluster by calling\n\n"
" ray start --address='{}'{}\n\n"
"from the node you wish to add. You can connect a driver to the "
"cluster from Python by running\n\n"
" import ray\n"
" ray.init(address='auto'{})\n\n"
"If you have trouble connecting from a different machine, check "
"that your firewall is configured properly. If you wish to "
"terminate the processes that have been started, run\n\n"
" ray stop".format(
redis_address,
" --redis-password='" + redis_password + "'" if redis_password else "",
", _redis_password='" + redis_password + "'" if redis_password else "",
),
)
else:
# Start Ray on a non-head node.
if not (redis_port is None and port is None):
cli_logger.abort(
"`{}/{}` should not be specified without `{}`.",
cf.bold("--port"),
cf.bold("--redis-port"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --port and --redis-port are not allowed."
)
if redis_shard_ports is not None:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--redis-shard-ports"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --redis-shard-ports is not allowed."
)
if redis_address is None:
cli_logger.abort(
"`{}` is required unless starting with `{}`.",
cf.bold("--address"),
cf.bold("--head"),
)
raise Exception("If --head is not passed in, --address must be provided.")
if num_redis_shards is not None:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--num-redis-shards"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --num-redis-shards must not be provided."
)
if redis_max_clients is not None:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--redis-max-clients"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --redis-max-clients must not be provided."
)
if include_webui:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--include-web-ui"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, the --include-webuiflag is not relevant."
)
if include_dashboard:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--include-dashboard"),
cf.bold("--head"),
)
raise ValueError(
"If --head is not passed in, the --include-dashboard"
"flag is not relevant."
)
# Wait for the Redis server to be started. And throw an exception if we
# can't connect to it.
services.wait_for_redis_to_start(
redis_address_ip, redis_address_port, password=redis_password
)
# Create a Redis client.
redis_client = services.create_redis_client(
redis_address, password=redis_password
)
# Check that the version information on this node matches the version
# information that the cluster was started with.
services.check_version_info(redis_client)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address(redis_address)
)
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(
logger, "Using IP address {} for this node.", ray_params.node_ip_address
)
# Check that there aren't already Redis clients with the same IP
# address connected with this Redis instance. This raises an exception
# if the Redis server already has clients on this node.
check_no_existing_redis_clients(ray_params.node_ip_address, redis_client)
ray_params.update(redis_address=redis_address)
node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block
)
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger,
"\nStarted Ray on this node. If you wish to terminate the "
"processes that have been started, run\n\n"
" ray stop",
)
if block:
cli_logger.newline()
with cli_logger.group(cf.bold("--block")):
cli_logger.print(
"This command will now block until terminated by a signal."
)
cli_logger.print(
"Runing subprocesses are monitored and a message will be "
"printed if any of them terminate unexpectedly."
)
while True:
time.sleep(1)
deceased = node.dead_processes()
if len(deceased) > 0:
cli_logger.newline()
cli_logger.error("Some Ray subprcesses exited unexpectedly:")
cli_logger.old_error(logger, "Ray processes died unexpectedly:")
with cli_logger.indented():
for process_type, process in deceased:
cli_logger.error(
"{}",
cf.bold(str(process_type)),
_tags={"exit code": str(process.returncode)},
)
cli_logger.old_error(
logger,
"\t{} died with exit code {}".format(
process_type, process.returncode
),
)
# shutdown_at_exit will handle cleanup.
cli_logger.newline()
cli_logger.error("Remaining processes will be killed.")
cli_logger.old_error(
logger, "Killing remaining processes and exiting..."
)
sys.exit(1)
|
def start(
node_ip_address,
redis_address,
address,
redis_port,
port,
num_redis_shards,
redis_max_clients,
redis_password,
redis_shard_ports,
object_manager_port,
node_manager_port,
gcs_server_port,
min_worker_port,
max_worker_port,
memory,
object_store_memory,
redis_max_memory,
num_cpus,
num_gpus,
resources,
head,
include_webui,
webui_host,
include_dashboard,
dashboard_host,
dashboard_port,
block,
plasma_directory,
huge_pages,
autoscaling_config,
no_redirect_worker_output,
no_redirect_output,
plasma_store_socket_name,
raylet_socket_name,
temp_dir,
java_worker_options,
code_search_path,
load_code_from_local,
system_config,
lru_evict,
enable_object_reconstruction,
metrics_export_port,
log_style,
log_color,
verbose,
):
"""Start Ray processes manually on the local machine."""
cli_logger.log_style = log_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
cli_logger.detect_colors()
if gcs_server_port and not head:
raise ValueError(
"gcs_server_port can be only assigned when you specify --head."
)
if redis_address is not None:
cli_logger.abort(
"{} is deprecated. Use {} instead.",
cf.bold("--redis-address"),
cf.bold("--address"),
)
raise DeprecationWarning(
"The --redis-address argument is deprecated. Please use --address instead."
)
if redis_port is not None:
cli_logger.warning(
"{} is being deprecated. Use {} instead.",
cf.bold("--redis-port"),
cf.bold("--port"),
)
cli_logger.old_warning(
logger,
"The --redis-port argument will be deprecated soon. "
"Please use --port instead.",
)
if port is not None and port != redis_port:
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--port"),
cf.bold("--redis-port"),
cf.bold("--port"),
)
raise ValueError(
"Cannot specify both --port and --redis-port "
"as port is a rename of deprecated redis-port"
)
if include_webui is not None:
cli_logger.warning(
"{} is being deprecated. Use {} instead.",
cf.bold("--include-webui"),
cf.bold("--include-dashboard"),
)
cli_logger.old_warning(
logger,
"The --include-webui argument will be deprecated soon"
"Please use --include-dashboard instead.",
)
if include_dashboard is not None:
include_dashboard = include_webui
dashboard_host_default = "localhost"
if webui_host != dashboard_host_default:
cli_logger.warning(
"{} is being deprecated. Use {} instead.",
cf.bold("--webui-host"),
cf.bold("--dashboard-host"),
)
cli_logger.old_warning(
logger,
"The --webui-host argument will be deprecated"
" soon. Please use --dashboard-host instead.",
)
if webui_host != dashboard_host and dashboard_host != "localhost":
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--dashboard-host"),
cf.bold("--webui-host"),
cf.bold("--dashboard-host"),
)
raise ValueError(
"Cannot specify both --webui-host and --dashboard-host,"
" please specify only the latter"
)
else:
dashboard_host = webui_host
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
if address is not None:
(redis_address, redis_address_ip, redis_address_port) = (
services.validate_redis_address(address)
)
try:
resources = json.loads(resources)
except Exception:
cli_logger.error("`{}` is not a valid JSON string.", cf.bold("--resources"))
cli_logger.abort(
"Valid values look like this: `{}`",
cf.bold('--resources=\'"CustomResource3": 1, "CustomResource2": 2}\''),
)
raise Exception(
"Unable to parse the --resources argument using "
"json.loads. Try using a format like\n\n"
' --resources=\'{"CustomResource1": 3, '
'"CustomReseource2": 2}\''
)
redirect_worker_output = None if not no_redirect_worker_output else True
redirect_output = None if not no_redirect_output else True
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
min_worker_port=min_worker_port,
max_worker_port=max_worker_port,
object_manager_port=object_manager_port,
node_manager_port=node_manager_port,
gcs_server_port=gcs_server_port,
memory=memory,
object_store_memory=object_store_memory,
redis_password=redis_password,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
java_worker_options=java_worker_options,
load_code_from_local=load_code_from_local,
code_search_path=code_search_path,
_system_config=system_config,
lru_evict=lru_evict,
enable_object_reconstruction=enable_object_reconstruction,
metrics_export_port=metrics_export_port,
)
if head:
# Start Ray on the head node.
if redis_shard_ports is not None:
redis_shard_ports = redis_shard_ports.split(",")
# Infer the number of Redis shards from the ports if the number is
# not provided.
if num_redis_shards is None:
num_redis_shards = len(redis_shard_ports)
# Check that the arguments match.
if len(redis_shard_ports) != num_redis_shards:
cli_logger.error(
"`{}` must be a comma-separated list of ports, "
"with length equal to `{}` (which defaults to {})",
cf.bold("--redis-shard-ports"),
cf.bold("--num-redis-shards"),
cf.bold("1"),
)
cli_logger.abort(
"Example: `{}`",
cf.bold("--num-redis-shards 3 --redis_shard_ports 6380,6381,6382"),
)
raise Exception(
"If --redis-shard-ports is provided, it must "
"have the form '6380,6381,6382', and the "
"number of ports provided must equal "
"--num-redis-shards (which is 1 if not "
"provided)"
)
if redis_address is not None:
cli_logger.abort(
"`{}` starts a new Redis server, `{}` should not be set.",
cf.bold("--head"),
cf.bold("--address"),
)
raise Exception(
"If --head is passed in, a Redis server will be "
"started, so a Redis address should not be "
"provided."
)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(node_ip_address=services.get_node_ip_address())
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(
logger, "Using IP address {} for this node.", ray_params.node_ip_address
)
ray_params.update_if_absent(
redis_port=port or redis_port,
redis_shard_ports=redis_shard_ports,
redis_max_memory=redis_max_memory,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
autoscaling_config=autoscaling_config,
)
node = ray.node.Node(
ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block
)
redis_address = node.redis_address
# this is a noop if new-style is not set, so the old logger calls
# are still in place
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
with cli_logger.group("Next steps"):
cli_logger.print("To connect to this Ray runtime from another node, run")
cli_logger.print(
cf.bold(" ray start --address='{}'{}"),
redis_address,
f" --redis-password='{redis_password}'" if redis_password else "",
)
cli_logger.newline()
cli_logger.print("Alternatively, use the following Python code:")
with cli_logger.indented():
with cf.with_style("monokai") as c:
cli_logger.print("{} ray", c.magenta("import"))
cli_logger.print(
"ray{}init(address{}{}{})",
c.magenta("."),
c.magenta("="),
c.yellow("'auto'"),
", redis_password{}{}".format(
c.magenta("="), c.yellow("'" + redis_password + "'")
)
if redis_password
else "",
)
cli_logger.newline()
cli_logger.print(
cf.underlined(
"If connection fails, check your "
"firewall settings other "
"network configuration."
)
)
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger,
"\nStarted Ray on this node. You can add additional nodes to "
"the cluster by calling\n\n"
" ray start --address='{}'{}\n\n"
"from the node you wish to add. You can connect a driver to the "
"cluster from Python by running\n\n"
" import ray\n"
" ray.init(address='auto'{})\n\n"
"If you have trouble connecting from a different machine, check "
"that your firewall is configured properly. If you wish to "
"terminate the processes that have been started, run\n\n"
" ray stop".format(
redis_address,
" --redis-password='" + redis_password + "'" if redis_password else "",
", redis_password='" + redis_password + "'" if redis_password else "",
),
)
else:
# Start Ray on a non-head node.
if not (redis_port is None and port is None):
cli_logger.abort(
"`{}/{}` should not be specified without `{}`.",
cf.bold("--port"),
cf.bold("--redis-port"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --port and --redis-port are not allowed."
)
if redis_shard_ports is not None:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--redis-shard-ports"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --redis-shard-ports is not allowed."
)
if redis_address is None:
cli_logger.abort(
"`{}` is required unless starting with `{}`.",
cf.bold("--address"),
cf.bold("--head"),
)
raise Exception("If --head is not passed in, --address must be provided.")
if num_redis_shards is not None:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--num-redis-shards"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --num-redis-shards must not be provided."
)
if redis_max_clients is not None:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--redis-max-clients"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, --redis-max-clients must not be provided."
)
if include_webui:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--include-web-ui"),
cf.bold("--head"),
)
raise Exception(
"If --head is not passed in, the --include-webuiflag is not relevant."
)
if include_dashboard:
cli_logger.abort(
"`{}` should not be specified without `{}`.",
cf.bold("--include-dashboard"),
cf.bold("--head"),
)
raise ValueError(
"If --head is not passed in, the --include-dashboard"
"flag is not relevant."
)
# Wait for the Redis server to be started. And throw an exception if we
# can't connect to it.
services.wait_for_redis_to_start(
redis_address_ip, redis_address_port, password=redis_password
)
# Create a Redis client.
redis_client = services.create_redis_client(
redis_address, password=redis_password
)
# Check that the version information on this node matches the version
# information that the cluster was started with.
services.check_version_info(redis_client)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address(redis_address)
)
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(
logger, "Using IP address {} for this node.", ray_params.node_ip_address
)
# Check that there aren't already Redis clients with the same IP
# address connected with this Redis instance. This raises an exception
# if the Redis server already has clients on this node.
check_no_existing_redis_clients(ray_params.node_ip_address, redis_client)
ray_params.update(redis_address=redis_address)
node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block
)
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger,
"\nStarted Ray on this node. If you wish to terminate the "
"processes that have been started, run\n\n"
" ray stop",
)
if block:
cli_logger.newline()
with cli_logger.group(cf.bold("--block")):
cli_logger.print(
"This command will now block until terminated by a signal."
)
cli_logger.print(
"Runing subprocesses are monitored and a message will be "
"printed if any of them terminate unexpectedly."
)
while True:
time.sleep(1)
deceased = node.dead_processes()
if len(deceased) > 0:
cli_logger.newline()
cli_logger.error("Some Ray subprcesses exited unexpectedly:")
cli_logger.old_error(logger, "Ray processes died unexpectedly:")
with cli_logger.indented():
for process_type, process in deceased:
cli_logger.error(
"{}",
cf.bold(str(process_type)),
_tags={"exit code": str(process.returncode)},
)
cli_logger.old_error(
logger,
"\t{} died with exit code {}".format(
process_type, process.returncode
),
)
# shutdown_at_exit will handle cleanup.
cli_logger.newline()
cli_logger.error("Remaining processes will be killed.")
cli_logger.old_error(
logger, "Killing remaining processes and exiting..."
)
sys.exit(1)
|
https://github.com/ray-project/ray/issues/10668
|
$ ray memory
2020-09-09 05:24:50,248 INFO scripts.py:1474 -- Connecting to Ray instance at 172.31.56.46:6379.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1602, in main
return cli()
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1475, in memory
ray.init(address=address, redis_password=redis_password)
TypeError: init() got an unexpected keyword argument 'redis_password'
|
TypeError
|
def memory(address, redis_password):
"""Print object references held in a Ray cluster."""
if not address:
address = services.find_redis_address_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address, _redis_password=redis_password)
print(ray.internal.internal_api.memory_summary())
|
def memory(address, redis_password):
"""Print object references held in a Ray cluster."""
if not address:
address = services.find_redis_address_or_die()
logger.info(f"Connecting to Ray instance at {address}.")
ray.init(address=address, redis_password=redis_password)
print(ray.internal.internal_api.memory_summary())
|
https://github.com/ray-project/ray/issues/10668
|
$ ray memory
2020-09-09 05:24:50,248 INFO scripts.py:1474 -- Connecting to Ray instance at 172.31.56.46:6379.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1602, in main
return cli()
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1475, in memory
ray.init(address=address, redis_password=redis_password)
TypeError: init() got an unexpected keyword argument 'redis_password'
|
TypeError
|
def choose_trial_to_run(self, trial_runner, allow_recurse=True):
"""Fair scheduling within iteration by completion percentage.
List of trials not used since all trials are tracked as state
of scheduler. If iteration is occupied (ie, no trials to run),
then look into next iteration.
"""
for hyperband in self._hyperbands:
# band will have None entries if no resources
# are to be allocated to that bracket.
scrubbed = [b for b in hyperband if b is not None]
for bracket in scrubbed:
for trial in bracket.current_trials():
if trial.status == Trial.PENDING and trial_runner.has_resources(
trial.resources
):
return trial
# MAIN CHANGE HERE!
if not any(t.status == Trial.RUNNING for t in trial_runner.get_trials()):
for hyperband in self._hyperbands:
for bracket in hyperband:
if bracket and any(
trial.status == Trial.PAUSED for trial in bracket.current_trials()
):
# This will change the trial state
self._process_bracket(trial_runner, bracket)
# If there are pending trials now, suggest one.
# This is because there might be both PENDING and
# PAUSED trials now, and PAUSED trials will raise
# an error before the trial runner tries again.
if allow_recurse and any(
trial.status == Trial.PENDING
for trial in bracket.current_trials()
):
return self.choose_trial_to_run(
trial_runner, allow_recurse=False
)
# MAIN CHANGE HERE!
return None
|
def choose_trial_to_run(self, trial_runner):
"""Fair scheduling within iteration by completion percentage.
List of trials not used since all trials are tracked as state
of scheduler. If iteration is occupied (ie, no trials to run),
then look into next iteration.
"""
for hyperband in self._hyperbands:
# band will have None entries if no resources
# are to be allocated to that bracket.
scrubbed = [b for b in hyperband if b is not None]
for bracket in scrubbed:
for trial in bracket.current_trials():
if trial.status == Trial.PENDING and trial_runner.has_resources(
trial.resources
):
return trial
# MAIN CHANGE HERE!
if not any(t.status == Trial.RUNNING for t in trial_runner.get_trials()):
for hyperband in self._hyperbands:
for bracket in hyperband:
if bracket and any(
trial.status == Trial.PAUSED for trial in bracket.current_trials()
):
# This will change the trial state and let the
# trial runner retry.
self._process_bracket(trial_runner, bracket)
# MAIN CHANGE HERE!
return None
|
https://github.com/ray-project/ray/issues/9245
|
== Status ==
Memory usage on this node: 7.0/15.6 GiB
Using HyperBand: num_stopped=832 total_brackets=3
Round #0:
None
Bracket(Max Size (n)=2, Milestone (r)=1458, completed=100.0%): {RUNNING: 1, TERMINATED: 833}
Bracket(Max Size (n)=324, Milestone (r)=8, completed=47.3%): {PAUSED: 166}
Resources requested: 4/32 CPUs, 0/0 GPUs, 0.0/8.69 GiB heap, 0.0/2.98 GiB objects
Result logdir: /home/dl-user/ray_results/MCv0_DQN_BOHB
Number of trials: 1000 (166 PAUSED, 1 RUNNING, 833 TERMINATED)
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
| Trial name | status | loc | batch_mode | lr | train_batch_size | iter | total time (s) | ts | reward |
|-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------|
| DQN_MountainCar-v0_0428be42 | PAUSED | | truncate_episodes | 1.99095e-05 | 408 | 2 | 25.6885 | 4032 | -200 |
| DQN_MountainCar-v0_0428be45 | PAUSED | | truncate_episodes | 0.000382289 | 211 | 2 | 24.7536 | 5040 | -200 |
| DQN_MountainCar-v0_0428be48 | PAUSED | | truncate_episodes | 0.000324929 | 233 | 2 | 25.5532 | 5040 | -200 |
| DQN_MountainCar-v0_0747e5f2 | PAUSED | | truncate_episodes | 0.000114766 | 38 | 2 | 23.8492 | 7056 | -200 |
| DQN_MountainCar-v0_0747e5f5 | PAUSED | | truncate_episodes | 9.1226e-05 | 200 | 2 | 24.2349 | 5040 | -200 |
| DQN_MountainCar-v0_08218bf0 | PAUSED | | truncate_episodes | 0.000284028 | 69 | 2 | 25.3671 | 7056 | -200 |
| DQN_MountainCar-v0_093c0b8c | PAUSED | | truncate_episodes | 0.00237606 | 114 | 2 | 23.3935 | 6048 | -200 |
| DQN_MountainCar-v0_0a55eae6 | PAUSED | | truncate_episodes | 0.000417829 | 111 | 2 | 23.4849 | 6048 | -200 |
| DQN_MountainCar-v0_0b307d56 | PAUSED | | truncate_episodes | 0.000196047 | 59 | 2 | 23.1338 | 7056 | -200 |
| DQN_MountainCar-v0_0eedea91 | PAUSED | | truncate_episodes | 6.58278e-05 | 59 | 2 | 24.0254 | 7056 | -200 |
| DQN_MountainCar-v0_1fcd888b | RUNNING | 172.16.160.219:47910 | truncate_episodes | 0.000237864 | 751 | 88 | 1638.34 | 199584 | -122.05 |
| DQN_MountainCar-v0_0023f4f6 | TERMINATED | | truncate_episodes | 0.000255833 | 158 | 1 | 5.56779 | 1008 | -200 |
| DQN_MountainCar-v0_0023f4f9 | TERMINATED | | complete_episodes | 0.000262904 | 156 | 1 | 5.43817 | 1200 | -200 |
| DQN_MountainCar-v0_0023f4fc | TERMINATED | | complete_episodes | 0.0002605 | 260 | 1 | 5.33452 | 1200 | -200 |
| DQN_MountainCar-v0_0108428e | TERMINATED | | truncate_episodes | 3.89327e-05 | 732 | 4 | 36.2218 | 5040 | -200 |
| DQN_MountainCar-v0_01084291 | TERMINATED | | truncate_episodes | 2.39745e-05 | 714 | 4 | 36.2585 | 5040 | -200 |
| DQN_MountainCar-v0_01084294 | TERMINATED | | truncate_episodes | 4.9252e-05 | 808 | 4 | 38.4182 | 5040 | -200 |
| DQN_MountainCar-v0_01084297 | TERMINATED | | truncate_episodes | 7.42384e-05 | 804 | 4 | 38.0425 | 5040 | -200 |
| DQN_MountainCar-v0_014223c0 | TERMINATED | | truncate_episodes | 0.0520328 | 71 | 1 | 6.21906 | 1008 | -200 |
| DQN_MountainCar-v0_01939ac4 | TERMINATED | | complete_episodes | 8.34678e-05 | 124 | 1 | 5.37302 | 1200 | -200 |
| DQN_MountainCar-v0_01a4cc45 | TERMINATED | | complete_episodes | 0.00973094 | 373 | 3 | 27.2147 | 24000 | -200 |
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
... 980 more trials not shown (156 PAUSED, 823 TERMINATED)
Traceback (most recent call last):
File "/home/dl-user/python-code/modularized_version_ray/ray_BOHB.py", line 123, in <module>
verbose=1,
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/tune.py", line 327, in run
runner.step()
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 342, in step
self.trial_executor.on_no_available_trials(self)
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_executor.py", line 177, in on_no_available_trials
raise TuneError("There are paused trials, but no more pending "
ray.tune.error.TuneError: There are paused trials, but no more pending trials with sufficient resources.
Process finished with exit code 1
|
ray.tune.error.TuneError
|
def debug_string(self):
"""This provides a progress notification for the algorithm.
For each bracket, the algorithm will output a string as follows:
Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
{PENDING: 2, RUNNING: 3, TERMINATED: 2}
"Max Size" indicates the max number of pending/running experiments
set according to the Hyperband algorithm.
"Milestone" indicates the iterations a trial will run for before
the next halving will occur.
"Completed" indicates an approximate progress metric. Some brackets,
like ones that are unfilled, will not reach 100%.
"""
out = "Using HyperBand: "
out += "num_stopped={} total_brackets={}".format(
self._num_stopped, sum(len(band) for band in self._hyperbands)
)
for i, band in enumerate(self._hyperbands):
out += "\nRound #{}:".format(i)
for bracket in band:
if bracket:
out += "\n {}".format(bracket)
return out
|
def debug_string(self):
"""This provides a progress notification for the algorithm.
For each bracket, the algorithm will output a string as follows:
Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
{PENDING: 2, RUNNING: 3, TERMINATED: 2}
"Max Size" indicates the max number of pending/running experiments
set according to the Hyperband algorithm.
"Milestone" indicates the iterations a trial will run for before
the next halving will occur.
"Completed" indicates an approximate progress metric. Some brackets,
like ones that are unfilled, will not reach 100%.
"""
out = "Using HyperBand: "
out += "num_stopped={} total_brackets={}".format(
self._num_stopped, sum(len(band) for band in self._hyperbands)
)
for i, band in enumerate(self._hyperbands):
out += "\nRound #{}:".format(i)
for bracket in band:
out += "\n {}".format(bracket)
return out
|
https://github.com/ray-project/ray/issues/9245
|
== Status ==
Memory usage on this node: 7.0/15.6 GiB
Using HyperBand: num_stopped=832 total_brackets=3
Round #0:
None
Bracket(Max Size (n)=2, Milestone (r)=1458, completed=100.0%): {RUNNING: 1, TERMINATED: 833}
Bracket(Max Size (n)=324, Milestone (r)=8, completed=47.3%): {PAUSED: 166}
Resources requested: 4/32 CPUs, 0/0 GPUs, 0.0/8.69 GiB heap, 0.0/2.98 GiB objects
Result logdir: /home/dl-user/ray_results/MCv0_DQN_BOHB
Number of trials: 1000 (166 PAUSED, 1 RUNNING, 833 TERMINATED)
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
| Trial name | status | loc | batch_mode | lr | train_batch_size | iter | total time (s) | ts | reward |
|-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------|
| DQN_MountainCar-v0_0428be42 | PAUSED | | truncate_episodes | 1.99095e-05 | 408 | 2 | 25.6885 | 4032 | -200 |
| DQN_MountainCar-v0_0428be45 | PAUSED | | truncate_episodes | 0.000382289 | 211 | 2 | 24.7536 | 5040 | -200 |
| DQN_MountainCar-v0_0428be48 | PAUSED | | truncate_episodes | 0.000324929 | 233 | 2 | 25.5532 | 5040 | -200 |
| DQN_MountainCar-v0_0747e5f2 | PAUSED | | truncate_episodes | 0.000114766 | 38 | 2 | 23.8492 | 7056 | -200 |
| DQN_MountainCar-v0_0747e5f5 | PAUSED | | truncate_episodes | 9.1226e-05 | 200 | 2 | 24.2349 | 5040 | -200 |
| DQN_MountainCar-v0_08218bf0 | PAUSED | | truncate_episodes | 0.000284028 | 69 | 2 | 25.3671 | 7056 | -200 |
| DQN_MountainCar-v0_093c0b8c | PAUSED | | truncate_episodes | 0.00237606 | 114 | 2 | 23.3935 | 6048 | -200 |
| DQN_MountainCar-v0_0a55eae6 | PAUSED | | truncate_episodes | 0.000417829 | 111 | 2 | 23.4849 | 6048 | -200 |
| DQN_MountainCar-v0_0b307d56 | PAUSED | | truncate_episodes | 0.000196047 | 59 | 2 | 23.1338 | 7056 | -200 |
| DQN_MountainCar-v0_0eedea91 | PAUSED | | truncate_episodes | 6.58278e-05 | 59 | 2 | 24.0254 | 7056 | -200 |
| DQN_MountainCar-v0_1fcd888b | RUNNING | 172.16.160.219:47910 | truncate_episodes | 0.000237864 | 751 | 88 | 1638.34 | 199584 | -122.05 |
| DQN_MountainCar-v0_0023f4f6 | TERMINATED | | truncate_episodes | 0.000255833 | 158 | 1 | 5.56779 | 1008 | -200 |
| DQN_MountainCar-v0_0023f4f9 | TERMINATED | | complete_episodes | 0.000262904 | 156 | 1 | 5.43817 | 1200 | -200 |
| DQN_MountainCar-v0_0023f4fc | TERMINATED | | complete_episodes | 0.0002605 | 260 | 1 | 5.33452 | 1200 | -200 |
| DQN_MountainCar-v0_0108428e | TERMINATED | | truncate_episodes | 3.89327e-05 | 732 | 4 | 36.2218 | 5040 | -200 |
| DQN_MountainCar-v0_01084291 | TERMINATED | | truncate_episodes | 2.39745e-05 | 714 | 4 | 36.2585 | 5040 | -200 |
| DQN_MountainCar-v0_01084294 | TERMINATED | | truncate_episodes | 4.9252e-05 | 808 | 4 | 38.4182 | 5040 | -200 |
| DQN_MountainCar-v0_01084297 | TERMINATED | | truncate_episodes | 7.42384e-05 | 804 | 4 | 38.0425 | 5040 | -200 |
| DQN_MountainCar-v0_014223c0 | TERMINATED | | truncate_episodes | 0.0520328 | 71 | 1 | 6.21906 | 1008 | -200 |
| DQN_MountainCar-v0_01939ac4 | TERMINATED | | complete_episodes | 8.34678e-05 | 124 | 1 | 5.37302 | 1200 | -200 |
| DQN_MountainCar-v0_01a4cc45 | TERMINATED | | complete_episodes | 0.00973094 | 373 | 3 | 27.2147 | 24000 | -200 |
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
... 980 more trials not shown (156 PAUSED, 823 TERMINATED)
Traceback (most recent call last):
File "/home/dl-user/python-code/modularized_version_ray/ray_BOHB.py", line 123, in <module>
verbose=1,
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/tune.py", line 327, in run
runner.step()
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 342, in step
self.trial_executor.on_no_available_trials(self)
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_executor.py", line 177, in on_no_available_trials
raise TuneError("There are paused trials, but no more pending "
ray.tune.error.TuneError: There are paused trials, but no more pending trials with sufficient resources.
Process finished with exit code 1
|
ray.tune.error.TuneError
|
def run_rsync_up(self, source, target):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call(
[
KUBECTL_RSYNC,
"-avz",
source,
"{}@{}:{}".format(self.node_id, self.namespace, target),
]
)
except Exception as e:
logger.warning(
self.log_prefix
+ "rsync failed: '{}'. Falling back to 'kubectl cp'".format(e)
)
self.run_cp_up(source, target)
|
def run_rsync_up(self, source, target):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call(
[
KUBECTL_RSYNC,
"-avz",
source,
"{}@{}:{}".format(self.node_id, self.namespace, target),
]
)
except Exception as e:
logger.warning(
self.log_prefix
+ "rsync failed: '{}'. Falling back to 'kubectl cp'".format(e)
)
self.process_runner.check_call(
self.kubectl
+ ["cp", source, "{}/{}:{}".format(self.namespace, self.node_id, target)]
)
|
https://github.com/ray-project/ray/issues/9558
|
2020-07-17 21:53:48,101 ERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.
|
ray.tune.error.TuneError
|
def run_rsync_down(self, source, target):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call(
[
KUBECTL_RSYNC,
"-avz",
"{}@{}:{}".format(self.node_id, self.namespace, source),
target,
]
)
except Exception as e:
logger.warning(
self.log_prefix
+ "rsync failed: '{}'. Falling back to 'kubectl cp'".format(e)
)
self.run_cp_down(source, target)
|
def run_rsync_down(self, source, target):
if target.startswith("~"):
target = "/root" + target[1:]
try:
self.process_runner.check_call(
[
KUBECTL_RSYNC,
"-avz",
"{}@{}:{}".format(self.node_id, self.namespace, source),
target,
]
)
except Exception as e:
logger.warning(
self.log_prefix
+ "rsync failed: '{}'. Falling back to 'kubectl cp'".format(e)
)
self.process_runner.check_call(
self.kubectl
+ ["cp", "{}/{}:{}".format(self.namespace, self.node_id, source), target]
)
|
https://github.com/ray-project/ray/issues/9558
|
2020-07-17 21:53:48,101 ERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.
|
ray.tune.error.TuneError
|
def get_node_syncer(local_dir, remote_dir=None, sync_function=None):
"""Returns a NodeSyncer.
Args:
local_dir (str): Source directory for syncing.
remote_dir (str): Target directory for syncing. If not provided, a
noop Syncer is returned.
sync_function (func|str|bool): Function for syncing the local_dir to
remote_dir. If string, then it must be a string template for
syncer to run. If True or not provided, it defaults rsync. If
False, a noop Syncer is returned.
"""
key = (local_dir, remote_dir)
if key in _syncers:
return _syncers[key]
elif isclass(sync_function) and issubclass(sync_function, Syncer):
_syncers[key] = sync_function(local_dir, remote_dir, None)
return _syncers[key]
elif not remote_dir or sync_function is False:
sync_client = NOOP
elif sync_function and sync_function is not True:
sync_client = get_sync_client(sync_function)
else:
sync = log_sync_template()
if sync:
sync_client = CommandBasedClient(sync, sync)
sync_client.set_logdir(local_dir)
else:
sync_client = NOOP
_syncers[key] = NodeSyncer(local_dir, remote_dir, sync_client)
return _syncers[key]
|
def get_node_syncer(local_dir, remote_dir=None, sync_function=None):
"""Returns a NodeSyncer.
Args:
local_dir (str): Source directory for syncing.
remote_dir (str): Target directory for syncing. If not provided, a
noop Syncer is returned.
sync_function (func|str|bool): Function for syncing the local_dir to
remote_dir. If string, then it must be a string template for
syncer to run. If True or not provided, it defaults rsync. If
False, a noop Syncer is returned.
"""
key = (local_dir, remote_dir)
if key in _syncers:
return _syncers[key]
elif not remote_dir or sync_function is False:
sync_client = NOOP
elif sync_function and sync_function is not True:
sync_client = get_sync_client(sync_function)
else:
sync = log_sync_template()
if sync:
sync_client = CommandBasedClient(sync, sync)
sync_client.set_logdir(local_dir)
else:
sync_client = NOOP
_syncers[key] = NodeSyncer(local_dir, remote_dir, sync_client)
return _syncers[key]
|
https://github.com/ray-project/ray/issues/9558
|
2020-07-17 21:53:48,101 ERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.
|
ray.tune.error.TuneError
|
def action_prob(self, batch: SampleBatchType) -> np.ndarray:
"""Returns the probs for the batch actions for the current policy."""
num_state_inputs = 0
for k in batch.keys():
if k.startswith("state_in_"):
num_state_inputs += 1
state_keys = ["state_in_{}".format(i) for i in range(num_state_inputs)]
log_likelihoods: TensorType = self.policy.compute_log_likelihoods(
actions=batch[SampleBatch.ACTIONS],
obs_batch=batch[SampleBatch.CUR_OBS],
state_batches=[batch[k] for k in state_keys],
prev_action_batch=batch.data.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=batch.data.get(SampleBatch.PREV_REWARDS),
)
return convert_to_numpy(log_likelihoods)
|
def action_prob(self, batch: SampleBatchType) -> TensorType:
"""Returns the probs for the batch actions for the current policy."""
num_state_inputs = 0
for k in batch.keys():
if k.startswith("state_in_"):
num_state_inputs += 1
state_keys = ["state_in_{}".format(i) for i in range(num_state_inputs)]
log_likelihoods = self.policy.compute_log_likelihoods(
actions=batch[SampleBatch.ACTIONS],
obs_batch=batch[SampleBatch.CUR_OBS],
state_batches=[batch[k] for k in state_keys],
prev_action_batch=batch.data.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=batch.data.get(SampleBatch.PREV_REWARDS),
)
return log_likelihoods
|
https://github.com/ray-project/ray/issues/10117
|
Traceback (most recent call last):
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\tune\trial_runner.py", line 497, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\tune\ray_trial_executor.py", line 434, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\worker.py", line 1553, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(AttributeError): ray::MARWIL.train() (pid=9136, ip=10.0.0.18)
File "python\ray\_raylet.pyx", line 474, in ray._raylet.execute_task
File "python\ray\_raylet.pyx", line 427, in ray._raylet.execute_task.function_executor
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\function_manager.py", line 567, in actor_method_executor
raise e
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\function_manager.py", line 559, in actor_method_executor
method_returns = method(actor, *args, **kwargs)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\rllib\agents\trainer.py", line 522, in train
raise e
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\rllib\agents\trainer.py", line 508, in train
result = Trainable.train(self)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\tune\trainable.py", line 337, in train
result = self.step()
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\rllib\agents\trainer_template.py", line 110, in step
res = next(self.train_exec_impl)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\util\iter.py", line 758, in __next__
return next(self.built_iterator)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\util\iter.py", line 793, in apply_foreach
result = fn(item)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\rllib\execution\metric_ops.py", line 87, in __call__
res = summarize_episodes(episodes, orig_episodes)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\ray\rllib\evaluation\metrics.py", line 173, in summarize_episodes
metrics[k] = np.mean(v_list)
File "<__array_function__ internals>", line 6, in mean
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\numpy\core\fromnumeric.py", line 3335, in mean
out=out, **kwargs)
File "C:\Users\Julius\Anaconda3\envs\ray\lib\site-packages\numpy\core\_methods.py", line 161, in _mean
ret = ret.dtype.type(ret / rcount)
AttributeError: 'torch.dtype' object has no attribute 'type'
|
AttributeError
|
def run_rsync_up(self, source, target):
# TODO(ilr) Expose this to before NodeUpdater::sync_file_mounts
protected_path = target
if target.find("/root") == 0:
target = target.replace("/root", "/tmp/root")
self.ssh_command_runner.run(f"mkdir -p {os.path.dirname(target.rstrip('/'))}")
self.ssh_command_runner.run_rsync_up(source, target)
if self._check_container_status():
self.ssh_command_runner.run(
"docker cp {} {}:{}".format(
target, self.docker_name, self._docker_expand_user(protected_path)
)
)
|
def run_rsync_up(self, source, target):
protected_path = target
if target.find("/root") == 0:
target = target.replace("/root", "/tmp/root")
self.ssh_command_runner.run(f"mkdir -p {os.path.dirname(target.rstrip('/'))}")
self.ssh_command_runner.run_rsync_up(source, target)
if self._check_container_status():
self.ssh_command_runner.run(
"docker cp {} {}:{}".format(
target, self.docker_name, self._docker_expand_user(protected_path)
)
)
|
https://github.com/ray-project/ray/issues/10077
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def sync_file_mounts(self, sync_cmd, step_numbers=(0, 2)):
# step_numbers is (# of previous steps, total steps)
previous_steps, total_steps = step_numbers
nolog_paths = []
if cli_logger.verbosity == 0:
nolog_paths = ["~/ray_bootstrap_key.pem", "~/ray_bootstrap_config.yaml"]
def do_sync(remote_path, local_path, allow_non_existing_paths=False):
if allow_non_existing_paths and not os.path.exists(local_path):
# Ignore missing source files. In the future we should support
# the --delete-missing-args command to delete files that have
# been removed
return
assert os.path.exists(local_path), local_path
if os.path.isdir(local_path):
if not local_path.endswith("/"):
local_path += "/"
if not remote_path.endswith("/"):
remote_path += "/"
with LogTimer(
self.log_prefix + "Synced {} to {}".format(local_path, remote_path)
):
self.cmd_runner.run(
"mkdir -p {}".format(os.path.dirname(remote_path)), run_env="host"
)
sync_cmd(local_path, remote_path)
if remote_path not in nolog_paths:
# todo: timed here?
cli_logger.print(
"{} from {}", cf.bold(remote_path), cf.bold(local_path)
)
# Rsync file mounts
with cli_logger.group(
"Processing file mounts", _numbered=("[]", previous_steps + 1, total_steps)
):
for remote_path, local_path in self.file_mounts.items():
do_sync(remote_path, local_path)
if self.cluster_synced_files:
with cli_logger.group(
"Processing worker file mounts",
_numbered=("[]", previous_steps + 2, total_steps),
):
for path in self.cluster_synced_files:
do_sync(path, path, allow_non_existing_paths=True)
else:
cli_logger.print(
"No worker file mounts to sync",
_numbered=("[]", previous_steps + 2, total_steps),
)
|
def sync_file_mounts(self, sync_cmd, step_numbers=(0, 2)):
# step_numbers is (# of previous steps, total steps)
previous_steps, total_steps = step_numbers
nolog_paths = []
if cli_logger.verbosity == 0:
nolog_paths = ["~/ray_bootstrap_key.pem", "~/ray_bootstrap_config.yaml"]
def do_sync(remote_path, local_path, allow_non_existing_paths=False):
if allow_non_existing_paths and not os.path.exists(local_path):
# Ignore missing source files. In the future we should support
# the --delete-missing-args command to delete files that have
# been removed
return
assert os.path.exists(local_path), local_path
if os.path.isdir(local_path):
if not local_path.endswith("/"):
local_path += "/"
if not remote_path.endswith("/"):
remote_path += "/"
with LogTimer(
self.log_prefix + "Synced {} to {}".format(local_path, remote_path)
):
self.cmd_runner.run("mkdir -p {}".format(os.path.dirname(remote_path)))
sync_cmd(local_path, remote_path)
if remote_path not in nolog_paths:
# todo: timed here?
cli_logger.print(
"{} from {}", cf.bold(remote_path), cf.bold(local_path)
)
# Rsync file mounts
with cli_logger.group(
"Processing file mounts", _numbered=("[]", previous_steps + 1, total_steps)
):
for remote_path, local_path in self.file_mounts.items():
do_sync(remote_path, local_path)
if self.cluster_synced_files:
with cli_logger.group(
"Processing worker file mounts",
_numbered=("[]", previous_steps + 2, total_steps),
):
for path in self.cluster_synced_files:
do_sync(path, path, allow_non_existing_paths=True)
else:
cli_logger.print(
"No worker file mounts to sync",
_numbered=("[]", previous_steps + 2, total_steps),
)
|
https://github.com/ray-project/ray/issues/10077
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def do_sync(remote_path, local_path, allow_non_existing_paths=False):
if allow_non_existing_paths and not os.path.exists(local_path):
# Ignore missing source files. In the future we should support
# the --delete-missing-args command to delete files that have
# been removed
return
assert os.path.exists(local_path), local_path
if os.path.isdir(local_path):
if not local_path.endswith("/"):
local_path += "/"
if not remote_path.endswith("/"):
remote_path += "/"
with LogTimer(self.log_prefix + "Synced {} to {}".format(local_path, remote_path)):
self.cmd_runner.run(
"mkdir -p {}".format(os.path.dirname(remote_path)), run_env="host"
)
sync_cmd(local_path, remote_path)
if remote_path not in nolog_paths:
# todo: timed here?
cli_logger.print("{} from {}", cf.bold(remote_path), cf.bold(local_path))
|
def do_sync(remote_path, local_path, allow_non_existing_paths=False):
if allow_non_existing_paths and not os.path.exists(local_path):
# Ignore missing source files. In the future we should support
# the --delete-missing-args command to delete files that have
# been removed
return
assert os.path.exists(local_path), local_path
if os.path.isdir(local_path):
if not local_path.endswith("/"):
local_path += "/"
if not remote_path.endswith("/"):
remote_path += "/"
with LogTimer(self.log_prefix + "Synced {} to {}".format(local_path, remote_path)):
self.cmd_runner.run("mkdir -p {}".format(os.path.dirname(remote_path)))
sync_cmd(local_path, remote_path)
if remote_path not in nolog_paths:
# todo: timed here?
cli_logger.print("{} from {}", cf.bold(remote_path), cf.bold(local_path))
|
https://github.com/ray-project/ray/issues/10077
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def wait_ready(self, deadline):
with cli_logger.group(
"Waiting for SSH to become available", _numbered=("[]", 1, 6)
):
with LogTimer(self.log_prefix + "Got remote shell"):
cli_logger.old_info(
logger, "{}Waiting for remote shell...", self.log_prefix
)
cli_logger.print("Running `{}` as a test.", cf.bold("uptime"))
first_conn_refused_time = None
while time.time() < deadline and not self.provider.is_terminated(
self.node_id
):
try:
cli_logger.old_debug(
logger, "{}Waiting for remote shell...", self.log_prefix
)
self.cmd_runner.run("uptime", run_env="host")
cli_logger.old_debug(logger, "Uptime succeeded.")
cli_logger.success("Success.")
return True
except ProcessRunnerError as e:
first_conn_refused_time = cmd_output_util.handle_ssh_fails(
e, first_conn_refused_time, retry_interval=READY_CHECK_INTERVAL
)
time.sleep(READY_CHECK_INTERVAL)
except Exception as e:
# TODO(maximsmol): we should not be ignoring
# exceptions if they get filtered properly
# (new style log + non-interactive shells)
#
# however threading this configuration state
# is a pain and I'm leaving it for later
retry_str = str(e)
if hasattr(e, "cmd"):
retry_str = "(Exit Status {}): {}".format(
e.returncode, " ".join(e.cmd)
)
cli_logger.print(
"SSH still not available {}, retrying in {} seconds.",
cf.gray(retry_str),
cf.bold(str(READY_CHECK_INTERVAL)),
)
cli_logger.old_debug(
logger,
"{}Node not up, retrying: {}",
self.log_prefix,
retry_str,
)
time.sleep(READY_CHECK_INTERVAL)
assert False, "Unable to connect to node"
|
def wait_ready(self, deadline):
with cli_logger.group(
"Waiting for SSH to become available", _numbered=("[]", 1, 6)
):
with LogTimer(self.log_prefix + "Got remote shell"):
cli_logger.old_info(
logger, "{}Waiting for remote shell...", self.log_prefix
)
cli_logger.print("Running `{}` as a test.", cf.bold("uptime"))
first_conn_refused_time = None
while time.time() < deadline and not self.provider.is_terminated(
self.node_id
):
try:
cli_logger.old_debug(
logger, "{}Waiting for remote shell...", self.log_prefix
)
self.cmd_runner.run("uptime")
cli_logger.old_debug(logger, "Uptime succeeded.")
cli_logger.success("Success.")
return True
except ProcessRunnerError as e:
first_conn_refused_time = cmd_output_util.handle_ssh_fails(
e, first_conn_refused_time, retry_interval=READY_CHECK_INTERVAL
)
time.sleep(READY_CHECK_INTERVAL)
except Exception as e:
# TODO(maximsmol): we should not be ignoring
# exceptions if they get filtered properly
# (new style log + non-interactive shells)
#
# however threading this configuration state
# is a pain and I'm leaving it for later
retry_str = str(e)
if hasattr(e, "cmd"):
retry_str = "(Exit Status {}): {}".format(
e.returncode, " ".join(e.cmd)
)
cli_logger.print(
"SSH still not available {}, retrying in {} seconds.",
cf.gray(retry_str),
cf.bold(str(READY_CHECK_INTERVAL)),
)
cli_logger.old_debug(
logger,
"{}Node not up, retrying: {}",
self.log_prefix,
retry_str,
)
time.sleep(READY_CHECK_INTERVAL)
assert False, "Unable to connect to node"
|
https://github.com/ray-project/ray/issues/10077
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def create_or_update_cluster(
config_file: str,
override_min_workers: Optional[int],
override_max_workers: Optional[int],
no_restart: bool,
restart_only: bool,
yes: bool,
override_cluster_name: Optional[str],
no_config_cache: bool,
dump_command_output: bool = True,
use_login_shells: bool = True,
) -> None:
"""Create or updates an autoscaling Ray cluster from a config json."""
set_using_login_shells(use_login_shells)
cmd_output_util.set_output_redirected(not dump_command_output)
if use_login_shells:
cli_logger.warning(
"Commands running under a login shell can produce more "
"output than special processing can handle."
)
cli_logger.warning("Thus, the output from subcommands will be logged as is.")
cli_logger.warning(
"Consider using {}, {}.",
cf.bold("--use-normal-shells"),
cf.underlined("if you tested your workflow and it is compatible"),
)
cli_logger.newline()
cli_logger.detect_colors()
def handle_yaml_error(e):
cli_logger.error("Cluster config invalid\n")
cli_logger.error("Failed to load YAML file " + cf.bold("{}"), config_file)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("PyYAML error:"):
cli_logger.error(e)
cli_logger.abort()
try:
config = yaml.safe_load(open(config_file).read())
except FileNotFoundError:
cli_logger.abort(
"Provided cluster configuration file ({}) does not exist",
cf.bold(config_file),
)
except yaml.parser.ParserError as e:
handle_yaml_error(e)
except yaml.scanner.ScannerError as e:
handle_yaml_error(e)
# todo: validate file_mounts, ssh keys, etc.
importer = NODE_PROVIDERS.get(config["provider"]["type"])
if not importer:
cli_logger.abort(
"Unknown provider type " + cf.bold("{}") + "\nAvailable providers are: {}",
config["provider"]["type"],
cli_logger.render_list(
[k for k in NODE_PROVIDERS.keys() if NODE_PROVIDERS[k] is not None]
),
)
raise NotImplementedError("Unsupported provider {}".format(config["provider"]))
cli_logger.success("Cluster configuration valid\n")
printed_overrides = False
def handle_cli_override(key, override):
if override is not None:
if key in config:
nonlocal printed_overrides
printed_overrides = True
cli_logger.warning(
"`{}` override provided on the command line.\n"
" Using "
+ cf.bold("{}")
+ cf.dimmed(" [configuration file has " + cf.bold("{}") + "]"),
key,
override,
config[key],
)
config[key] = override
handle_cli_override("min_workers", override_min_workers)
handle_cli_override("max_workers", override_max_workers)
handle_cli_override("cluster_name", override_cluster_name)
if printed_overrides:
cli_logger.newline()
cli_logger.labeled_value("Cluster", config["cluster_name"])
# disable the cli_logger here if needed
# because it only supports aws
if config["provider"]["type"] != "aws":
cli_logger.old_style = True
cli_logger.newline()
config = _bootstrap_config(config, no_config_cache)
if config["provider"]["type"] != "aws":
cli_logger.old_style = False
try_logging_config(config)
get_or_create_head_node(
config, config_file, no_restart, restart_only, yes, override_cluster_name
)
|
def create_or_update_cluster(
config_file: str,
override_min_workers: Optional[int],
override_max_workers: Optional[int],
no_restart: bool,
restart_only: bool,
yes: bool,
override_cluster_name: Optional[str],
no_config_cache: bool,
dump_command_output: bool,
use_login_shells: bool,
) -> None:
"""Create or updates an autoscaling Ray cluster from a config json."""
set_using_login_shells(use_login_shells)
cmd_output_util.set_output_redirected(not dump_command_output)
if use_login_shells:
cli_logger.warning(
"Commands running under a login shell can produce more "
"output than special processing can handle."
)
cli_logger.warning("Thus, the output from subcommands will be logged as is.")
cli_logger.warning(
"Consider using {}, {}.",
cf.bold("--use-normal-shells"),
cf.underlined("if you tested your workflow and it is compatible"),
)
cli_logger.newline()
cli_logger.detect_colors()
def handle_yaml_error(e):
cli_logger.error("Cluster config invalid\n")
cli_logger.error("Failed to load YAML file " + cf.bold("{}"), config_file)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("PyYAML error:"):
cli_logger.error(e)
cli_logger.abort()
try:
config = yaml.safe_load(open(config_file).read())
except FileNotFoundError:
cli_logger.abort(
"Provided cluster configuration file ({}) does not exist",
cf.bold(config_file),
)
except yaml.parser.ParserError as e:
handle_yaml_error(e)
except yaml.scanner.ScannerError as e:
handle_yaml_error(e)
# todo: validate file_mounts, ssh keys, etc.
importer = NODE_PROVIDERS.get(config["provider"]["type"])
if not importer:
cli_logger.abort(
"Unknown provider type " + cf.bold("{}") + "\nAvailable providers are: {}",
config["provider"]["type"],
cli_logger.render_list(
[k for k in NODE_PROVIDERS.keys() if NODE_PROVIDERS[k] is not None]
),
)
raise NotImplementedError("Unsupported provider {}".format(config["provider"]))
cli_logger.success("Cluster configuration valid\n")
printed_overrides = False
def handle_cli_override(key, override):
if override is not None:
if key in config:
nonlocal printed_overrides
printed_overrides = True
cli_logger.warning(
"`{}` override provided on the command line.\n"
" Using "
+ cf.bold("{}")
+ cf.dimmed(" [configuration file has " + cf.bold("{}") + "]"),
key,
override,
config[key],
)
config[key] = override
handle_cli_override("min_workers", override_min_workers)
handle_cli_override("max_workers", override_max_workers)
handle_cli_override("cluster_name", override_cluster_name)
if printed_overrides:
cli_logger.newline()
cli_logger.labeled_value("Cluster", config["cluster_name"])
# disable the cli_logger here if needed
# because it only supports aws
if config["provider"]["type"] != "aws":
cli_logger.old_style = True
cli_logger.newline()
config = _bootstrap_config(config, no_config_cache)
if config["provider"]["type"] != "aws":
cli_logger.old_style = False
try_logging_config(config)
get_or_create_head_node(
config, config_file, no_restart, restart_only, yes, override_cluster_name
)
|
https://github.com/ray-project/ray/issues/10082
|
(base) Alexs-MacBook-Pro-2:ray alex$ ray submit --start multi.yaml test.py
Traceback (most recent call last):
File "/Users/alex/miniconda3/bin/ray", line 11, in <module>
load_entry_point('ray', 'console_scripts', 'ray')()
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1587, in main
return cli()
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1237, in submit
True, cluster_name, False)
TypeError: create_or_update_cluster() missing 2 required positional arguments: 'dump_command_output' and 'use_login_shells'
|
TypeError
|
def submit(
cluster_config_file,
screen,
tmux,
stop,
start,
cluster_name,
port_forward,
script,
args,
script_args,
log_new_style,
log_color,
verbose,
):
"""Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
Example:
>>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
"""
cli_logger.old_style = not log_new_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
set_output_redirected(False)
cli_logger.doassert(
not (screen and tmux),
"`{}` and `{}` are incompatible.",
cf.bold("--screen"),
cf.bold("--tmux"),
)
cli_logger.doassert(
not (script_args and args),
"`{0}` and `{1}` are incompatible. Use only `{1}`.\nExample: `{2}`",
cf.bold("--args"),
cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"),
)
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
assert not (script_args and args), "Use -- --arg1 --arg2 for script args."
if args:
cli_logger.warning(
"`{}` is deprecated and will be removed in the future.", cf.bold("--args")
)
cli_logger.warning(
"Use `{}` instead. Example: `{}`.",
cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"),
)
cli_logger.newline()
cli_logger.old_warning(
logger,
"ray submit [yaml] [script.py] --args=... is deprecated and "
"will be removed in a future version of Ray. Use "
"`ray submit [yaml] script.py -- --arg1 --arg2` instead.",
)
if start:
create_or_update_cluster(
config_file=cluster_config_file,
override_min_workers=None,
override_max_workers=None,
no_restart=False,
restart_only=False,
yes=True,
override_cluster_name=cluster_name,
no_config_cache=False,
dump_command_output=True,
use_login_shells=True,
)
target = os.path.basename(script)
target = os.path.join("~", target)
rsync(cluster_config_file, script, target, cluster_name, down=False)
command_parts = ["python", target]
if script_args:
command_parts += list(script_args)
elif args is not None:
command_parts += [args]
port_forward = [(port, port) for port in list(port_forward)]
cmd = " ".join(command_parts)
exec_cluster(
cluster_config_file,
cmd=cmd,
run_env="docker",
screen=screen,
tmux=tmux,
stop=stop,
start=False,
override_cluster_name=cluster_name,
port_forward=port_forward,
)
|
def submit(
cluster_config_file,
screen,
tmux,
stop,
start,
cluster_name,
port_forward,
script,
args,
script_args,
log_new_style,
log_color,
verbose,
):
"""Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
Example:
>>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
"""
cli_logger.old_style = not log_new_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
set_output_redirected(False)
cli_logger.doassert(
not (screen and tmux),
"`{}` and `{}` are incompatible.",
cf.bold("--screen"),
cf.bold("--tmux"),
)
cli_logger.doassert(
not (script_args and args),
"`{0}` and `{1}` are incompatible. Use only `{1}`.\nExample: `{2}`",
cf.bold("--args"),
cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"),
)
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
assert not (script_args and args), "Use -- --arg1 --arg2 for script args."
if args:
cli_logger.warning(
"`{}` is deprecated and will be removed in the future.", cf.bold("--args")
)
cli_logger.warning(
"Use `{}` instead. Example: `{}`.",
cf.bold("-- <args ...>"),
cf.bold("ray submit script.py -- --arg=123 --flag"),
)
cli_logger.newline()
cli_logger.old_warning(
logger,
"ray submit [yaml] [script.py] --args=... is deprecated and "
"will be removed in a future version of Ray. Use "
"`ray submit [yaml] script.py -- --arg1 --arg2` instead.",
)
if start:
create_or_update_cluster(
cluster_config_file, None, None, False, False, True, cluster_name, False
)
target = os.path.basename(script)
target = os.path.join("~", target)
rsync(cluster_config_file, script, target, cluster_name, down=False)
command_parts = ["python", target]
if script_args:
command_parts += list(script_args)
elif args is not None:
command_parts += [args]
port_forward = [(port, port) for port in list(port_forward)]
cmd = " ".join(command_parts)
exec_cluster(
cluster_config_file,
cmd=cmd,
run_env="docker",
screen=screen,
tmux=tmux,
stop=stop,
start=False,
override_cluster_name=cluster_name,
port_forward=port_forward,
)
|
https://github.com/ray-project/ray/issues/10082
|
(base) Alexs-MacBook-Pro-2:ray alex$ ray submit --start multi.yaml test.py
Traceback (most recent call last):
File "/Users/alex/miniconda3/bin/ray", line 11, in <module>
load_entry_point('ray', 'console_scripts', 'ray')()
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1587, in main
return cli()
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1237, in submit
True, cluster_name, False)
TypeError: create_or_update_cluster() missing 2 required positional arguments: 'dump_command_output' and 'use_login_shells'
|
TypeError
|
def handle_boto_error(exc, msg, *args, **kwargs):
if cli_logger.old_style:
# old-style logging doesn't do anything here
# so we exit early
return
error_code = None
error_info = None
# todo: not sure if these exceptions always have response
if hasattr(exc, "response"):
error_info = exc.response.get("Error", None)
if error_info is not None:
error_code = error_info.get("Code", None)
generic_message_args = [
"{}\nError code: {}",
msg.format(*args, **kwargs),
cf.bold(error_code),
]
# apparently
# ExpiredTokenException
# ExpiredToken
# RequestExpired
# are all the same pretty much
credentials_expiration_codes = [
"ExpiredTokenException",
"ExpiredToken",
"RequestExpired",
]
if error_code in credentials_expiration_codes:
# "An error occurred (ExpiredToken) when calling the
# GetInstanceProfile operation: The security token
# included in the request is expired"
# "An error occurred (RequestExpired) when calling the
# DescribeKeyPairs operation: Request has expired."
token_command = (
"aws sts get-session-token "
"--serial-number arn:aws:iam::"
+ cf.underlined("ROOT_ACCOUNT_ID")
+ ":mfa/"
+ cf.underlined("AWS_USERNAME")
+ " --token-code "
+ cf.underlined("TWO_FACTOR_AUTH_CODE")
)
secret_key_var = (
"export AWS_SECRET_ACCESS_KEY = "
+ cf.underlined("REPLACE_ME")
+ " # found at Credentials.SecretAccessKey"
)
session_token_var = (
"export AWS_SESSION_TOKEN = "
+ cf.underlined("REPLACE_ME")
+ " # found at Credentials.SessionToken"
)
access_key_id_var = (
"export AWS_ACCESS_KEY_ID = "
+ cf.underlined("REPLACE_ME")
+ " # found at Credentials.AccessKeyId"
)
# fixme: replace with a Github URL that points
# to our repo
aws_session_script_url = (
"https://gist.github.com/maximsmol/a0284e1d97b25d417bd9ae02e5f450cf"
)
cli_logger.verbose_error(*generic_message_args)
cli_logger.verbose(vars(exc))
cli_logger.abort(
"Your AWS session has expired.\n\n"
"You can request a new one using\n{}\n"
"then expose it to Ray by setting\n{}\n{}\n{}\n\n"
"You can find a script that automates this at:\n{}",
cf.bold(token_command),
cf.bold(secret_key_var),
cf.bold(session_token_var),
cf.bold(access_key_id_var),
cf.underlined(aws_session_script_url),
)
# todo: any other errors that we should catch separately?
cli_logger.error(*generic_message_args)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("Boto3 error:"):
cli_logger.verbose("{}", str(vars(exc)))
cli_logger.error("{}", str(exc))
cli_logger.abort()
|
def handle_boto_error(exc, msg, *args, **kwargs):
if cli_logger.old_style:
# old-style logging doesn't do anything here
# so we exit early
return
error_code = None
error_info = None
# todo: not sure if these exceptions always have response
if hasattr(exc, "response"):
error_info = exc.response.get("Error", None)
if error_info is not None:
error_code = error_info.get("Code", None)
generic_message_args = [
"{}\nError code: {}",
msg.format(*args, **kwargs),
cf.bold(error_code),
]
# apparently
# ExpiredTokenException
# ExpiredToken
# RequestExpired
# are all the same pretty much
credentials_expiration_codes = [
"ExpiredTokenException",
"ExpiredToken",
"RequestExpired",
]
if error_code in credentials_expiration_codes:
# "An error occurred (ExpiredToken) when calling the
# GetInstanceProfile operation: The security token
# included in the request is expired"
# "An error occurred (RequestExpired) when calling the
# DescribeKeyPairs operation: Request has expired."
token_command = (
"aws sts get-session-token "
"--serial-number arn:aws:iam::"
+ cf.underlined("ROOT_ACCOUNT_ID")
+ ":mfa/"
+ cf.underlined("AWS_USERNAME")
+ " --token-code "
+ cf.underlined("TWO_FACTOR_AUTH_CODE")
)
secret_key_var = (
"export AWS_SECRET_ACCESS_KEY = "
+ cf.underlined("REPLACE_ME")
+ " # found at Credentials.SecretAccessKey"
)
session_token_var = (
"export AWS_SESSION_TOKEN = "
+ cf.underlined("REPLACE_ME")
+ " # found at Credentials.SessionToken"
)
access_key_id_var = (
"export AWS_ACCESS_KEY_ID = "
+ cf.underlined("REPLACE_ME")
+ " # found at Credentials.AccessKeyId"
)
# fixme: replace with a Github URL that points
# to our repo
aws_session_script_url = (
"https://gist.github.com/maximsmol/a0284e1d97b25d417bd9ae02e5f450cf"
)
cli_logger.verbose_error(*generic_message_args)
cli_logger.verbose(vars(exc))
cli_logger.abort(
"Your AWS session has expired.\n\n"
"You can request a new one using\n{}\n"
"then expose it to Ray by setting\n{}\n{}\n{}\n\n"
"You can find a script that automates this at:\n{}",
cf.bold(token_command),
cf.bold(secret_key_var),
cf.bold(session_token_var),
cf.bold(access_key_id_var),
cf.underlined(aws_session_script_url),
)
# todo: any other errors that we should catch separately?
cli_logger.error(*generic_message_args)
cli_logger.newline()
with cli_logger.verbatim_error_ctx("Boto3 error:"):
cli_logger.verbose(vars(exc))
cli_logger.error(exc)
cli_logger.abort()
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def group(self, msg, *args, **kwargs):
"""Print a group title in a special color and start an indented block.
For arguments, see `_format_msg`.
"""
self.print(cf.cornflowerBlue(msg), *args, **kwargs)
return self.indented()
|
def group(self, msg, *args, **kwargs):
"""Print a group title in a special color and start an indented block.
For arguments, see `_format_msg`.
"""
self._print(_format_msg(cf.cornflowerBlue(msg), *args, **kwargs))
return self.indented()
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def verbatim_error_ctx(self, msg, *args, **kwargs):
"""Context manager for printing multi-line error messages.
Displays a start sequence "!!! {optional message}"
and a matching end sequence "!!!".
The string "!!!" can be used as a "tombstone" for searching.
For arguments, see `_format_msg`.
"""
cli_logger = self
class VerbatimErorContextManager:
def __enter__(self):
cli_logger.error(cf.bold("!!! ") + "{}", msg, *args, **kwargs)
def __exit__(self, type, value, tb):
cli_logger.error(cf.bold("!!!"))
return VerbatimErorContextManager()
|
def verbatim_error_ctx(self, msg, *args, **kwargs):
"""Context manager for printing multi-line error messages.
Displays a start sequence "!!! {optional message}"
and a matching end sequence "!!!".
The string "!!!" can be used as a "tombstone" for searching.
For arguments, see `_format_msg`.
"""
cli_logger = self
class VerbatimErorContextManager:
def __enter__(self):
cli_logger.error(cf.bold("!!! ") + msg, *args, **kwargs)
def __exit__(self, type, value, tb):
cli_logger.error(cf.bold("!!!"))
return VerbatimErorContextManager()
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def __enter__(self):
cli_logger.error(cf.bold("!!! ") + "{}", msg, *args, **kwargs)
|
def __enter__(self):
cli_logger.error(cf.bold("!!! ") + msg, *args, **kwargs)
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def labeled_value(self, key, msg, *args, **kwargs):
"""Displays a key-value pair with special formatting.
Args:
key (str): Label that is prepended to the message.
For other arguments, see `_format_msg`.
"""
if self.old_style:
return
self._print(cf.cyan(key) + ": " + _format_msg(cf.bold(msg), *args, **kwargs))
|
def labeled_value(self, key, msg, *args, **kwargs):
"""Displays a key-value pair with special formatting.
Args:
key (str): Label that is prepended to the message.
For other arguments, see `_format_msg`.
"""
self._print(cf.cyan(key) + ": " + _format_msg(cf.bold(msg), *args, **kwargs))
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def success(self, msg, *args, **kwargs):
"""Prints a formatted success message.
For arguments, see `_format_msg`.
"""
self.print(cf.green(msg), *args, **kwargs)
|
def success(self, msg, *args, **kwargs):
"""Prints a formatted success message.
For arguments, see `_format_msg`.
"""
self._print(_format_msg(cf.green(msg), *args, **kwargs))
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def warning(self, msg, *args, **kwargs):
"""Prints a formatted warning message.
For arguments, see `_format_msg`.
"""
self.print(cf.yellow(msg), *args, **kwargs)
|
def warning(self, msg, *args, **kwargs):
"""Prints a formatted warning message.
For arguments, see `_format_msg`.
"""
self._print(_format_msg(cf.yellow(msg), *args, **kwargs))
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def error(self, msg, *args, **kwargs):
"""Prints a formatted error message.
For arguments, see `_format_msg`.
"""
self.print(cf.red(msg), *args, **kwargs)
|
def error(self, msg, *args, **kwargs):
"""Prints a formatted error message.
For arguments, see `_format_msg`.
"""
self._print(_format_msg(cf.red(msg), *args, **kwargs))
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def print(self, msg, *args, **kwargs):
"""Prints a message.
For arguments, see `_format_msg`.
"""
if self.old_style:
return
self._print(_format_msg(msg, *args, **kwargs))
|
def print(self, msg, *args, **kwargs):
"""Prints a message.
For arguments, see `_format_msg`.
"""
self._print(_format_msg(msg, *args, **kwargs))
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def _set_ssh_ip_if_required(self):
if self.ssh_ip is not None:
return
# We assume that this never changes.
# I think that's reasonable.
deadline = time.time() + NODE_START_WAIT_S
with LogTimer(self.log_prefix + "Got IP"):
ip = self.wait_for_ip(deadline)
cli_logger.doassert(ip is not None, "Could not get node IP.") # todo: msg
assert ip is not None, "Unable to find IP of node"
self.ssh_ip = ip
# This should run before any SSH commands and therefore ensure that
# the ControlPath directory exists, allowing SSH to maintain
# persistent sessions later on.
try:
os.makedirs(self.ssh_control_path, mode=0o700, exist_ok=True)
except OSError as e:
cli_logger.warning("{}", str(e)) # todo: msg
cli_logger.old_warning(logger, "{}", str(e))
|
def _set_ssh_ip_if_required(self):
if self.ssh_ip is not None:
return
# We assume that this never changes.
# I think that's reasonable.
deadline = time.time() + NODE_START_WAIT_S
with LogTimer(self.log_prefix + "Got IP"):
ip = self.wait_for_ip(deadline)
cli_logger.doassert(ip is not None, "Could not get node IP.") # todo: msg
assert ip is not None, "Unable to find IP of node"
self.ssh_ip = ip
# This should run before any SSH commands and therefore ensure that
# the ControlPath directory exists, allowing SSH to maintain
# persistent sessions later on.
try:
os.makedirs(self.ssh_control_path, mode=0o700, exist_ok=True)
except OSError as e:
cli_logger.warning(e) # todo: msg
cli_logger.old_warning(logger, e)
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def teardown_cluster(
config_file: str,
yes: bool,
workers_only: bool,
override_cluster_name: Optional[str],
keep_min_workers: bool,
log_old_style: bool,
log_color: str,
verbose: int,
):
"""Destroys all nodes of a Ray cluster described by a config json."""
cli_logger.old_style = log_old_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
cli_logger.dump_command_output = verbose == 3 # todo: add a separate flag?
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = prepare_config(config)
validate_config(config)
cli_logger.confirm(yes, "Destroying cluster.", _abort=True)
cli_logger.old_confirm("This will destroy your cluster", yes)
if not workers_only:
try:
exec_cluster(
config_file,
cmd="ray stop",
run_env="auto",
screen=False,
tmux=False,
stop=False,
start=False,
override_cluster_name=override_cluster_name,
port_forward=None,
with_output=False,
)
except Exception as e:
# todo: add better exception info
cli_logger.verbose_error("{}", str(e))
cli_logger.warning(
"Exception occured when stopping the cluster Ray runtime "
"(use -v to dump teardown exceptions)."
)
cli_logger.warning(
"Ignoring the exception and "
"attempting to shut down the cluster nodes anyway."
)
cli_logger.old_exception(
logger, "Ignoring error attempting a clean shutdown."
)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
def remaining_nodes():
workers = provider.non_terminated_nodes(
{TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER}
)
if keep_min_workers:
min_workers = config.get("min_workers", 0)
cli_logger.print(
"{} random worker nodes will not be shut down. "
+ cf.gray("(due to {})"),
cf.bold(min_workers),
cf.bold("--keep-min-workers"),
)
cli_logger.old_info(
logger, "teardown_cluster: Keeping {} nodes...", min_workers
)
workers = random.sample(workers, len(workers) - min_workers)
# todo: it's weird to kill the head node but not all workers
if workers_only:
cli_logger.print(
"The head node will not be shut down. " + cf.gray("(due to {})"),
cf.bold("--workers-only"),
)
return workers
head = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD})
return head + workers
# Loop here to check that both the head and worker nodes are actually
# really gone
A = remaining_nodes()
with LogTimer("teardown_cluster: done."):
while A:
cli_logger.old_info(
logger, "teardown_cluster: Shutting down {} nodes...", len(A)
)
provider.terminate_nodes(A)
cli_logger.print(
"Requested {} nodes to shut down.",
cf.bold(len(A)),
_tags=dict(interval="1s"),
)
time.sleep(1) # todo: interval should be a variable
A = remaining_nodes()
cli_logger.print("{} nodes remaining after 1 second.", cf.bold(len(A)))
finally:
provider.cleanup()
|
def teardown_cluster(
config_file: str,
yes: bool,
workers_only: bool,
override_cluster_name: Optional[str],
keep_min_workers: bool,
log_old_style: bool,
log_color: str,
verbose: int,
):
"""Destroys all nodes of a Ray cluster described by a config json."""
cli_logger.old_style = log_old_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
cli_logger.dump_command_output = verbose == 3 # todo: add a separate flag?
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = prepare_config(config)
validate_config(config)
cli_logger.confirm(yes, "Destroying cluster.", _abort=True)
cli_logger.old_confirm("This will destroy your cluster", yes)
if not workers_only:
try:
exec_cluster(
config_file,
cmd="ray stop",
run_env="auto",
screen=False,
tmux=False,
stop=False,
start=False,
override_cluster_name=override_cluster_name,
port_forward=None,
with_output=False,
)
except Exception as e:
cli_logger.verbose_error(e) # todo: add better exception info
cli_logger.warning(
"Exception occured when stopping the cluster Ray runtime "
"(use -v to dump teardown exceptions)."
)
cli_logger.warning(
"Ignoring the exception and "
"attempting to shut down the cluster nodes anyway."
)
cli_logger.old_exception(
logger, "Ignoring error attempting a clean shutdown."
)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
def remaining_nodes():
workers = provider.non_terminated_nodes(
{TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER}
)
if keep_min_workers:
min_workers = config.get("min_workers", 0)
cli_logger.print(
"{} random worker nodes will not be shut down. "
+ cf.gray("(due to {})"),
cf.bold(min_workers),
cf.bold("--keep-min-workers"),
)
cli_logger.old_info(
logger, "teardown_cluster: Keeping {} nodes...", min_workers
)
workers = random.sample(workers, len(workers) - min_workers)
# todo: it's weird to kill the head node but not all workers
if workers_only:
cli_logger.print(
"The head node will not be shut down. " + cf.gray("(due to {})"),
cf.bold("--workers-only"),
)
return workers
head = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD})
return head + workers
# Loop here to check that both the head and worker nodes are actually
# really gone
A = remaining_nodes()
with LogTimer("teardown_cluster: done."):
while A:
cli_logger.old_info(
logger, "teardown_cluster: Shutting down {} nodes...", len(A)
)
provider.terminate_nodes(A)
cli_logger.print(
"Requested {} nodes to shut down.",
cf.bold(len(A)),
_tags=dict(interval="1s"),
)
time.sleep(1) # todo: interval should be a variable
A = remaining_nodes()
cli_logger.print("{} nodes remaining after 1 second.", cf.bold(len(A)))
finally:
provider.cleanup()
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def run(self):
cli_logger.old_info(logger, "{}Updating to {}", self.log_prefix, self.runtime_hash)
try:
with LogTimer(self.log_prefix + "Applied config {}".format(self.runtime_hash)):
self.do_update()
except Exception as e:
error_str = str(e)
if hasattr(e, "cmd"):
error_str = "(Exit Status {}) {}".format(e.returncode, " ".join(e.cmd))
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED}
)
cli_logger.error("New status: {}", cf.bold(STATUS_UPDATE_FAILED))
cli_logger.old_error(
logger, "{}Error executing: {}\n", self.log_prefix, error_str
)
cli_logger.error("!!!")
if hasattr(e, "cmd"):
cli_logger.error(
"Setup command `{}` failed with exit code {}. stderr:",
cf.bold(e.cmd),
e.returncode,
)
else:
cli_logger.verbose_error("{}", str(vars(e)))
# todo: handle this better somehow?
cli_logger.error("{}", str(e))
# todo: print stderr here
cli_logger.error("!!!")
cli_logger.newline()
if isinstance(e, click.ClickException):
# todo: why do we ignore this here
return
raise
tags_to_set = {
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_RUNTIME_CONFIG: self.runtime_hash,
}
if self.file_mounts_contents_hash is not None:
tags_to_set[TAG_RAY_FILE_MOUNTS_CONTENTS] = self.file_mounts_contents_hash
self.provider.set_node_tags(self.node_id, tags_to_set)
cli_logger.labeled_value("New status", STATUS_UP_TO_DATE)
self.exitcode = 0
|
def run(self):
cli_logger.old_info(logger, "{}Updating to {}", self.log_prefix, self.runtime_hash)
try:
with LogTimer(self.log_prefix + "Applied config {}".format(self.runtime_hash)):
self.do_update()
except Exception as e:
error_str = str(e)
if hasattr(e, "cmd"):
error_str = "(Exit Status {}) {}".format(e.returncode, " ".join(e.cmd))
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED}
)
cli_logger.error("New status: {}", cf.bold(STATUS_UPDATE_FAILED))
cli_logger.old_error(
logger, "{}Error executing: {}\n", self.log_prefix, error_str
)
cli_logger.error("!!!")
if hasattr(e, "cmd"):
cli_logger.error(
"Setup command `{}` failed with exit code {}. stderr:",
cf.bold(e.cmd),
e.returncode,
)
else:
cli_logger.verbose_error(vars(e), _no_format=True)
cli_logger.error(str(e)) # todo: handle this better somehow?
# todo: print stderr here
cli_logger.error("!!!")
cli_logger.newline()
if isinstance(e, click.ClickException):
# todo: why do we ignore this here
return
raise
tags_to_set = {
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_RUNTIME_CONFIG: self.runtime_hash,
}
if self.file_mounts_contents_hash is not None:
tags_to_set[TAG_RAY_FILE_MOUNTS_CONTENTS] = self.file_mounts_contents_hash
self.provider.set_node_tags(self.node_id, tags_to_set)
cli_logger.labeled_value("New status", STATUS_UP_TO_DATE)
self.exitcode = 0
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def do_update(self):
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_WAITING_FOR_SSH}
)
cli_logger.labeled_value("New status", STATUS_WAITING_FOR_SSH)
deadline = time.time() + NODE_START_WAIT_S
self.wait_ready(deadline)
node_tags = self.provider.node_tags(self.node_id)
logger.debug("Node tags: {}".format(str(node_tags)))
# runtime_hash will only change whenever the user restarts
# or updates their cluster with `get_or_create_head_node`
if node_tags.get(TAG_RAY_RUNTIME_CONFIG) == self.runtime_hash and (
self.file_mounts_contents_hash is None
or node_tags.get(TAG_RAY_FILE_MOUNTS_CONTENTS) == self.file_mounts_contents_hash
):
# todo: we lie in the confirmation message since
# full setup might be cancelled here
cli_logger.print(
"Configuration already up to date, "
"skipping file mounts, initalization and setup commands."
)
cli_logger.old_info(
logger,
"{}{} already up-to-date, skip to ray start",
self.log_prefix,
self.node_id,
)
else:
cli_logger.print(
"Updating cluster configuration.", _tags=dict(hash=self.runtime_hash)
)
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SYNCING_FILES}
)
cli_logger.labeled_value("New status", STATUS_SYNCING_FILES)
self.sync_file_mounts(self.rsync_up)
# Only run setup commands if runtime_hash has changed because
# we don't want to run setup_commands every time the head node
# file_mounts folders have changed.
if node_tags.get(TAG_RAY_RUNTIME_CONFIG) != self.runtime_hash:
# Run init commands
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SETTING_UP}
)
cli_logger.labeled_value("New status", STATUS_SETTING_UP)
if self.initialization_commands:
with cli_logger.group(
"Running initialization commands", _numbered=("[]", 4, 6)
): # todo: fix command numbering
with LogTimer(
self.log_prefix + "Initialization commands", show_status=True
):
for cmd in self.initialization_commands:
self.cmd_runner.run(
cmd,
ssh_options_override=SSHOptions(
self.auth_config.get("ssh_private_key")
),
)
else:
cli_logger.print(
"No initialization commands to run.", _numbered=("[]", 4, 6)
)
if self.setup_commands:
with cli_logger.group(
"Running setup commands", _numbered=("[]", 5, 6)
): # todo: fix command numbering
with LogTimer(self.log_prefix + "Setup commands", show_status=True):
total = len(self.setup_commands)
for i, cmd in enumerate(self.setup_commands):
if cli_logger.verbosity == 0:
cmd_to_print = cf.bold(cmd[:30]) + "..."
else:
cmd_to_print = cf.bold(cmd)
cli_logger.print(
"{}", cmd_to_print, _numbered=("()", i, total)
)
self.cmd_runner.run(cmd)
else:
cli_logger.print("No setup commands to run.", _numbered=("[]", 5, 6))
with cli_logger.group("Starting the Ray runtime", _numbered=("[]", 6, 6)):
with LogTimer(self.log_prefix + "Ray start commands", show_status=True):
for cmd in self.ray_start_commands:
self.cmd_runner.run(cmd)
|
def do_update(self):
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_WAITING_FOR_SSH}
)
cli_logger.labeled_value("New status", STATUS_WAITING_FOR_SSH)
deadline = time.time() + NODE_START_WAIT_S
self.wait_ready(deadline)
node_tags = self.provider.node_tags(self.node_id)
logger.debug("Node tags: {}".format(str(node_tags)))
# runtime_hash will only change whenever the user restarts
# or updates their cluster with `get_or_create_head_node`
if node_tags.get(TAG_RAY_RUNTIME_CONFIG) == self.runtime_hash and (
self.file_mounts_contents_hash is None
or node_tags.get(TAG_RAY_FILE_MOUNTS_CONTENTS) == self.file_mounts_contents_hash
):
# todo: we lie in the confirmation message since
# full setup might be cancelled here
cli_logger.print(
"Configuration already up to date, "
"skipping file mounts, initalization and setup commands."
)
cli_logger.old_info(
logger,
"{}{} already up-to-date, skip to ray start",
self.log_prefix,
self.node_id,
)
else:
cli_logger.print(
"Updating cluster configuration.", _tags=dict(hash=self.runtime_hash)
)
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SYNCING_FILES}
)
cli_logger.labeled_value("New status", STATUS_SYNCING_FILES)
self.sync_file_mounts(self.rsync_up)
# Only run setup commands if runtime_hash has changed because
# we don't want to run setup_commands every time the head node
# file_mounts folders have changed.
if node_tags.get(TAG_RAY_RUNTIME_CONFIG) != self.runtime_hash:
# Run init commands
self.provider.set_node_tags(
self.node_id, {TAG_RAY_NODE_STATUS: STATUS_SETTING_UP}
)
cli_logger.labeled_value("New status", STATUS_SETTING_UP)
if self.initialization_commands:
with cli_logger.group(
"Running initialization commands", _numbered=("[]", 4, 6)
): # todo: fix command numbering
with LogTimer(
self.log_prefix + "Initialization commands", show_status=True
):
for cmd in self.initialization_commands:
self.cmd_runner.run(
cmd,
ssh_options_override=SSHOptions(
self.auth_config.get("ssh_private_key")
),
)
else:
cli_logger.print(
"No initialization commands to run.", _numbered=("[]", 4, 6)
)
if self.setup_commands:
with cli_logger.group(
"Running setup commands", _numbered=("[]", 5, 6)
): # todo: fix command numbering
with LogTimer(self.log_prefix + "Setup commands", show_status=True):
total = len(self.setup_commands)
for i, cmd in enumerate(self.setup_commands):
if cli_logger.verbosity == 0:
cmd_to_print = cf.bold(cmd[:30]) + "..."
else:
cmd_to_print = cf.bold(cmd)
cli_logger.print(cmd_to_print, _numbered=("()", i, total))
self.cmd_runner.run(cmd)
else:
cli_logger.print("No setup commands to run.", _numbered=("[]", 5, 6))
with cli_logger.group("Starting the Ray runtime", _numbered=("[]", 6, 6)):
with LogTimer(self.log_prefix + "Ray start commands", show_status=True):
for cmd in self.ray_start_commands:
self.cmd_runner.run(cmd)
|
https://github.com/ray-project/ray/issues/9948
|
Error executing: unmatched '{' in format spec
Exception in thread Thread-2:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 74, in run
self.do_update()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 285, in do_update
cmd_to_print, _numbered=("()", i, total))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 323, in print
self._print(_format_msg(msg, *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: unmatched '{' in format spec
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/allenyin/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/updater.py", line 95, in run
cli_logger.error(str(e)) # todo: handle this better somehow?
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 316, in error
self._print(_format_msg(cf.red(msg), *args, **kwargs))
File "/Users/allenyin/anaconda3/lib/python3.7/site-packages/ray/autoscaler/cli_logger.py", line 96, in _format_msg
return numbering_str + msg.format(*args, **kwargs) + tags_str
ValueError: expected '}' before end of string
|
ValueError
|
def stats(self):
if not self.count:
_quantiles = []
else:
_quantiles = np.nanpercentile(
self.items[: self.count], [0, 10, 50, 90, 100]
).tolist()
return {
self.name + "_count": int(self.count),
self.name + "_mean": float(np.nanmean(self.items[: self.count])),
self.name + "_std": float(np.nanstd(self.items[: self.count])),
self.name + "_quantiles": _quantiles,
}
|
def stats(self):
if not self.count:
quantiles = []
else:
quantiles = np.percentile(
self.items[: self.count], [0, 10, 50, 90, 100]
).tolist()
return {
self.name + "_count": int(self.count),
self.name + "_mean": float(np.mean(self.items[: self.count])),
self.name + "_std": float(np.std(self.items[: self.count])),
self.name + "_quantiles": quantiles,
}
|
https://github.com/ray-project/ray/issues/7910
|
/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/numpy/core/fromnumeric.py:3257: RuntimeWarning: Mean of empty slice.
out=out, **kwargs)
Traceback (most recent call last):
File "/home/axion/anaconda3/envs/trading/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/home/axion/anaconda3/envs/trading/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/axion/git/jengu/jengu/train/impala_train.py", line 43, in <module>
result = agent.train()
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 505, in train
raise e
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 491, in train
result = Trainable.train(self)
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/tune/trainable.py", line 261, in train
result = self._train()
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 161, in _train
res = self.collect_metrics()
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 899, in collect_metrics
selected_workers=selected_workers)
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/rllib/optimizers/policy_optimizer.py", line 113, in collect_metrics
res.update(info=self.stats())
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/rllib/optimizers/async_samples_optimizer.py", line 166, in stats
stats["learner_queue"] = self.learner.learner_queue_size.stats()
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/ray/rllib/utils/window_stat.py", line 25, in stats
self.name + "_mean": float(np.mean(self.items[:self.count])),
File "<__array_function__ internals>", line 6, in mean
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/numpy/core/fromnumeric.py", line 3257, in mean
out=out, **kwargs)
File "/home/axion/anaconda3/envs/trading/lib/python3.7/site-packages/numpy/core/_methods.py", line 161, in _mean
ret = ret.dtype.type(ret / rcount)
FloatingPointError: invalid value encountered in double_scalars
|
FloatingPointError
|
def compute_collision_identifier(self, function_or_class):
"""The identifier is used to detect excessive duplicate exports.
The identifier is used to determine when the same function or class is
exported many times. This can yield false positives.
Args:
function_or_class: The function or class to compute an identifier
for.
Returns:
The identifier. Note that different functions or classes can give
rise to same identifier. However, the same function should
hopefully always give rise to the same identifier. TODO(rkn):
verify if this is actually the case. Note that if the
identifier is incorrect in any way, then we may give warnings
unnecessarily or fail to give warnings, but the application's
behavior won't change.
"""
import io
string_file = io.StringIO()
if sys.version_info[1] >= 7:
dis.dis(function_or_class, file=string_file, depth=2)
else:
dis.dis(function_or_class, file=string_file)
collision_identifier = function_or_class.__name__ + ":" + string_file.getvalue()
# Return a hash of the identifier in case it is too large.
return hashlib.sha1(collision_identifier.encode("utf-8")).digest()
|
def compute_collision_identifier(self, function_or_class):
"""The identifier is used to detect excessive duplicate exports.
The identifier is used to determine when the same function or class is
exported many times. This can yield false positives.
Args:
function_or_class: The function or class to compute an identifier
for.
Returns:
The identifier. Note that different functions or classes can give
rise to same identifier. However, the same function should
hopefully always give rise to the same identifier. TODO(rkn):
verify if this is actually the case. Note that if the
identifier is incorrect in any way, then we may give warnings
unnecessarily or fail to give warnings, but the application's
behavior won't change.
"""
import io
string_file = io.StringIO()
if sys.version_info[1] >= 7:
dis.dis(function_or_class, file=string_file, depth=2)
else:
dis.dis(function_or_class, file=string_file)
collision_identifier = function_or_class.__name__ + ":" + string_file.getvalue()
# Return a hash of the identifier in case it is too large.
return hashlib.sha1(collision_identifier.encode("ascii")).digest()
|
https://github.com/ray-project/ray/issues/9585
|
---------------------------------------------------------------------------
UnicodeEncodeError Traceback (most recent call last)
<ipython-input-6-b5dd33a57b06> in <module>
----> 1 fut = test.remote()
~/l/anaconda3/lib/python3.6/site-packages/ray/remote_function.py in _remote_proxy(*args, **kwargs)
93 @wraps(function)
94 def _remote_proxy(*args, **kwargs):
---> 95 return self._remote(args=args, kwargs=kwargs)
96
97 self.remote = _remote_proxy
~/l/anaconda3/lib/python3.6/site-packages/ray/remote_function.py in _remote(self, args, kwargs, num_return_vals, is_direct_call, num_cpus, num_gpus, memory, object_store_memory, resources, max_retries)
174
175 self._last_export_session_and_job = worker.current_session_and_job
--> 176 worker.function_actor_manager.export(self)
177
178 kwargs = {} if kwargs is None else kwargs
~/l/anaconda3/lib/python3.6/site-packages/ray/function_manager.py in export(self, remote_function)
149 "function": pickled_function,
150 "collision_identifier": self.compute_collision_identifier(
--> 151 function),
152 "max_calls": remote_function._max_calls
153 })
~/l/anaconda3/lib/python3.6/site-packages/ray/function_manager.py in compute_collision_identifier(self, function_or_class)
121
122 # Return a hash of the identifier in case it is too large.
--> 123 return hashlib.sha1(collision_identifier.encode("ascii")).digest()
124
125 def export(self, remote_function):
UnicodeEncodeError: 'ascii' codec can't encode character '\u03c6' in position 101: ordinal not in range(128)
|
UnicodeEncodeError
|
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = [resource_id for resource_id, _ in all_resource_ids.get("GPU", [])]
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
# Give all GPUs in local_mode.
if global_worker.mode == LOCAL_MODE:
max_gpus = global_worker.node.get_resource_spec().num_gpus
return global_worker.original_gpu_ids[:max_gpus]
return assigned_ids
|
def get_gpu_ids():
"""Get the IDs of the GPUs that are available to the worker.
If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
started up, then the IDs returned by this method will be a subset of the
IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
[0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.
Returns:
A list of GPU IDs.
"""
# TODO(ilr) Handle inserting resources in local mode
all_resource_ids = global_worker.core_worker.resource_ids()
assigned_ids = [resource_id for resource_id, _ in all_resource_ids.get("GPU", [])]
# If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
# the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
# returned).
if global_worker.original_gpu_ids is not None:
assigned_ids = [
global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
]
return assigned_ids
|
https://github.com/ray-project/ray/issues/8838
|
$ CUDA_VISIBLE_DEVICES=1 python ./scripts/ray_cuda_issue.py 1 # 0.8.5
...
2020-06-08 18:11:57.675648: E tensorflow/stream_executor/cuda/cuda_driver.cc:351] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected
...
[]
...
2020-06-08 18:11:57,694 ERROR trial_runner.py:519 -- Trial DebugRunner_00000: Error processing event.
Traceback (most recent call last):
File "/users/krinen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 467, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/users/krinen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 431, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/users/krinen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/worker.py", line 1515, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(AssertionError): ray::DebugRunner.train() (pid=11658, ip=163.1.88.121)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 417, in ray._raylet.execute_task.function_executor
File "/users/krinen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trainable.py", line 261, in train
result = self._train()
File "./scripts/ray_cuda_issue.py", line 21, in _train
AssertionError: ('1', '')
== Status ==
Memory usage on this node: 307.8/503.8 GiB
Using FIFO scheduling algorithm.
Resources requested: 0/40 CPUs, 0/1 GPUs, 0.0/130.32 GiB heap, 0.0/41.26 GiB objects
Result logdir: /users/krinen/ray_results/DebugRunner
Number of trials: 1 (1 ERROR)
+-------------------+----------+-------+
| Trial name | status | loc |
|-------------------+----------+-------|
| DebugRunner_00000 | ERROR | |
+-------------------+----------+-------+
Number of errored trials: 1
+-------------------+--------------+-------------------------------------------------------------------------------------------+
| Trial name | # failures | error file |
|-------------------+--------------+-------------------------------------------------------------------------------------------|
| DebugRunner_00000 | 1 | /users/krinen/ray_results/DebugRunner/DebugRunner_0_2020-06-08_18-11-54uby7h2dg/error.txt |
+-------------------+--------------+-------------------------------------------------------------------------------------------+
Traceback (most recent call last):
File "./scripts/ray_cuda_issue.py", line 30, in <module>
tune.run(DebugRunner, resources_per_trial={'gpu': 1})
File "/users/krinen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/tune.py", line 347, in run
raise TuneError("Trials did not complete", incomplete_trials)
ray.tune.error.TuneError: ('Trials did not complete', [DebugRunner_00000])
|
AssertionError
|
def __init__(
self,
ray_params,
head=False,
shutdown_at_exit=True,
spawn_reaper=True,
connect_only=False,
):
"""Start a node.
Args:
ray_params (ray.params.RayParams): The parameters to use to
configure the node.
head (bool): True if this is the head node, which means it will
start additional processes like the Redis servers, monitor
processes, and web UI.
shutdown_at_exit (bool): If true, spawned processes will be cleaned
up if this process exits normally.
spawn_reaper (bool): If true, spawns a process that will clean up
other spawned processes if this process dies unexpectedly.
connect_only (bool): If true, connect to the node without starting
new processes.
"""
if shutdown_at_exit:
if connect_only:
raise ValueError(
"'shutdown_at_exit' and 'connect_only' cannot both be true."
)
self._register_shutdown_hooks()
self.head = head
self.kernel_fate_share = bool(
spawn_reaper and ray.utils.detect_fate_sharing_support()
)
self.all_processes = {}
# Try to get node IP address with the parameters.
if ray_params.node_ip_address:
node_ip_address = ray_params.node_ip_address
elif ray_params.redis_address:
node_ip_address = ray.services.get_node_ip_address(ray_params.redis_address)
else:
node_ip_address = ray.services.get_node_ip_address()
self._node_ip_address = node_ip_address
if ray_params.raylet_ip_address:
raylet_ip_address = ray_params.raylet_ip_address
else:
raylet_ip_address = node_ip_address
if raylet_ip_address != node_ip_address and (not connect_only or head):
raise ValueError(
"The raylet IP address should only be different than the node "
"IP address when connecting to an existing raylet; i.e., when "
"head=False and connect_only=True."
)
self._raylet_ip_address = raylet_ip_address
ray_params.update_if_absent(
include_log_monitor=True,
resources={},
temp_dir=ray.utils.get_ray_temp_dir(),
worker_path=os.path.join(
os.path.dirname(os.path.abspath(__file__)), "workers/default_worker.py"
),
)
self._resource_spec = None
self._localhost = socket.gethostbyname("localhost")
self._ray_params = ray_params
self._redis_address = ray_params.redis_address
self._config = ray_params._internal_config
if head:
redis_client = None
# date including microsecond
date_str = datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
self.session_name = "session_{date_str}_{pid}".format(
pid=os.getpid(), date_str=date_str
)
else:
redis_client = self.create_redis_client()
self.session_name = ray.utils.decode(redis_client.get("session_name"))
self._init_temp(redis_client)
if connect_only:
# Get socket names from the configuration.
self._plasma_store_socket_name = ray_params.plasma_store_socket_name
self._raylet_socket_name = ray_params.raylet_socket_name
# If user does not provide the socket name, get it from Redis.
if (
self._plasma_store_socket_name is None
or self._raylet_socket_name is None
or self._ray_params.node_manager_port is None
):
# Get the address info of the processes to connect to
# from Redis.
address_info = ray.services.get_address_info_from_redis(
self.redis_address,
self._raylet_ip_address,
redis_password=self.redis_password,
)
self._plasma_store_socket_name = address_info["object_store_address"]
self._raylet_socket_name = address_info["raylet_socket_name"]
self._ray_params.node_manager_port = address_info["node_manager_port"]
else:
# If the user specified a socket name, use it.
self._plasma_store_socket_name = self._prepare_socket_file(
self._ray_params.plasma_store_socket_name, default_prefix="plasma_store"
)
self._raylet_socket_name = self._prepare_socket_file(
self._ray_params.raylet_socket_name, default_prefix="raylet"
)
if head:
ray_params.update_if_absent(num_redis_shards=1)
self._webui_url = None
else:
self._webui_url = ray.services.get_webui_url_from_redis(redis_client)
ray_params.include_java = ray.services.include_java_from_redis(redis_client)
if head or not connect_only:
# We need to start a local raylet.
if (
self._ray_params.node_manager_port is None
or self._ray_params.node_manager_port == 0
):
# No port specified. Pick a random port for the raylet to use.
# NOTE: There is a possible but unlikely race condition where
# the port is bound by another process between now and when the
# raylet starts.
self._ray_params.node_manager_port, self._socket = self._get_unused_port(
close_on_exit=False
)
if not connect_only and spawn_reaper and not self.kernel_fate_share:
self.start_reaper_process()
# Start processes.
if head:
self.start_head_processes()
redis_client = self.create_redis_client()
redis_client.set("session_name", self.session_name)
redis_client.set("session_dir", self._session_dir)
redis_client.set("temp_dir", self._temp_dir)
if not connect_only:
self.start_ray_processes()
|
def __init__(
self,
ray_params,
head=False,
shutdown_at_exit=True,
spawn_reaper=True,
connect_only=False,
):
"""Start a node.
Args:
ray_params (ray.params.RayParams): The parameters to use to
configure the node.
head (bool): True if this is the head node, which means it will
start additional processes like the Redis servers, monitor
processes, and web UI.
shutdown_at_exit (bool): If true, spawned processes will be cleaned
up if this process exits normally.
spawn_reaper (bool): If true, spawns a process that will clean up
other spawned processes if this process dies unexpectedly.
connect_only (bool): If true, connect to the node without starting
new processes.
"""
if shutdown_at_exit:
if connect_only:
raise ValueError(
"'shutdown_at_exit' and 'connect_only' cannot both be true."
)
self._register_shutdown_hooks()
self.head = head
self.kernel_fate_share = bool(
spawn_reaper and ray.utils.detect_fate_sharing_support()
)
self.all_processes = {}
# Try to get node IP address with the parameters.
if ray_params.node_ip_address:
node_ip_address = ray_params.node_ip_address
elif ray_params.redis_address:
node_ip_address = ray.services.get_node_ip_address(ray_params.redis_address)
else:
node_ip_address = ray.services.get_node_ip_address()
self._node_ip_address = node_ip_address
if ray_params.raylet_ip_address:
raylet_ip_address = ray_params.raylet_ip_address
else:
raylet_ip_address = node_ip_address
if raylet_ip_address != node_ip_address and (not connect_only or head):
raise ValueError(
"The raylet IP address should only be different than the node "
"IP address when connecting to an existing raylet; i.e., when "
"head=False and connect_only=True."
)
self._raylet_ip_address = raylet_ip_address
ray_params.update_if_absent(
include_log_monitor=True,
resources={},
temp_dir=ray.utils.get_ray_temp_dir(),
worker_path=os.path.join(
os.path.dirname(os.path.abspath(__file__)), "workers/default_worker.py"
),
)
self._resource_spec = None
self._localhost = socket.gethostbyname("localhost")
self._ray_params = ray_params
self._redis_address = ray_params.redis_address
self._config = ray_params._internal_config
if head:
redis_client = None
# date including microsecond
date_str = datetime.datetime.today().strftime("%Y-%m-%d_%H-%M-%S_%f")
self.session_name = "session_{date_str}_{pid}".format(
pid=os.getpid(), date_str=date_str
)
else:
redis_client = self.create_redis_client()
self.session_name = ray.utils.decode(redis_client.get("session_name"))
self._init_temp(redis_client)
if connect_only:
# Get socket names from the configuration.
self._plasma_store_socket_name = ray_params.plasma_store_socket_name
self._raylet_socket_name = ray_params.raylet_socket_name
# If user does not provide the socket name, get it from Redis.
if (
self._plasma_store_socket_name is None
or self._raylet_socket_name is None
or self._ray_params.node_manager_port is None
):
# Get the address info of the processes to connect to
# from Redis.
address_info = ray.services.get_address_info_from_redis(
self.redis_address,
self._raylet_ip_address,
redis_password=self.redis_password,
)
self._plasma_store_socket_name = address_info["object_store_address"]
self._raylet_socket_name = address_info["raylet_socket_name"]
self._ray_params.node_manager_port = address_info["node_manager_port"]
else:
# If the user specified a socket name, use it.
self._plasma_store_socket_name = self._prepare_socket_file(
self._ray_params.plasma_store_socket_name, default_prefix="plasma_store"
)
self._raylet_socket_name = self._prepare_socket_file(
self._ray_params.raylet_socket_name, default_prefix="raylet"
)
if head:
ray_params.update_if_absent(num_redis_shards=1)
self._webui_url = None
else:
self._webui_url = ray.services.get_webui_url_from_redis(redis_client)
ray_params.include_java = ray.services.include_java_from_redis(redis_client)
if head or not connect_only:
# We need to start a local raylet.
if (
self._ray_params.node_manager_port is None
or self._ray_params.node_manager_port == 0
):
# No port specified. Pick a random port for the raylet to use.
# NOTE: There is a possible but unlikely race condition where
# the port is bound by another process between now and when the
# raylet starts.
self._ray_params.node_manager_port = self._get_unused_port()
if not connect_only and spawn_reaper and not self.kernel_fate_share:
self.start_reaper_process()
# Start processes.
if head:
self.start_head_processes()
redis_client = self.create_redis_client()
redis_client.set("session_name", self.session_name)
redis_client.set("session_dir", self._session_dir)
redis_client.set("temp_dir", self._temp_dir)
if not connect_only:
self.start_ray_processes()
|
https://github.com/ray-project/ray/issues/8254
|
�[2m�[33m(pid=raylet)�[0m E0429 02:32:06.263886 22036 process.cc:274] Failed to wait for process 22047 with error system:10: No child processes
E0429 02:32:12.346844 23272 task_manager.cc:288] 3 retries left for task b48f33dc1265b526ffffffff0100, attempting to resubmit.
E0429 02:32:12.346899 23272 core_worker.cc:373] Will resubmit task after a 5000ms delay: Type=NORMAL_TASK, Language=PYTHON, function_descriptor={type=PythonFunctionDescriptor, module_name=__main__, class_name=, function_name=f, function_hash=7d2c6c88e5e801d48a350076f2117e717fe12224}, task_id=b48f33dc1265b526ffffffff0100, job_id=0100, num_args=2, num_returns=1
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.347446 22089 process.cc:274] Failed to wait for process 22100 with error system:10: No child processes
2020-04-29 02:32:12,653 INFO resource_spec.py:212 -- Starting Ray with 27.88 GiB memory available for workers and up to 0.15 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.732946757 22142 server_chttp2.cc:40] {"created":"@1588127532.732848116","description":"No address added out of total 1 resolved","file":"external/com_github_grpc_grpc/src/core/ext/transport/chttp2/server/chttp2_server.cc","file_line":394,"referenced_errors":[{"created":"@1588127532.732846227","description":"Failed to add any wildcard listeners","file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_posix.cc","file_line":341,"referenced_errors":[{"created":"@1588127532.732832876","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732823689","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]},{"created":"@1588127532.732845812","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732843382","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]}]}]}
�[2m�[33m(pid=raylet)�[0m *** Aborted at 1588127532 (unix time) try "date -d @1588127532" if you are using GNU date ***
�[2m�[33m(pid=raylet)�[0m PC: @ 0x0 (unknown)
�[2m�[33m(pid=raylet)�[0m *** SIGSEGV (@0x58) received by PID 22142 (TID 0x7fc3a66d37c0) from PID 88; stack trace: ***
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5c32390 (unknown)
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3957692 grpc::ServerInterface::RegisteredAsyncRequest::IssueRequest()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35b2149 ray::rpc::NodeManagerService::WithAsyncMethod_RequestWorkerLease<>::RequestRequestWorkerLease()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35c7b1b ray::rpc::ServerCallFactoryImpl<>::CreateCall()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e380bfe1 ray::rpc::GrpcServer::Run()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3629acc ray::raylet::NodeManager::NodeManager()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35cbc07 ray::raylet::Raylet::Raylet()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e359848d main
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5459830 __libc_start_main
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35a9391 (unknown)
�[2m�[36m(pid=22153)�[0m E0429 02:32:13.864451 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 1, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.364712 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 2, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.864863 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 3, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:15.365000 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 4, num_retries = 5)
�[2m�[36m(pid=22153)�[0m F0429 02:32:15.865115 22153 raylet_client.cc:78] Could not connect to socket /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8
�[2m�[36m(pid=22153)�[0m *** Check failure stack trace: ***
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b40ed google::LogMessage::Fail()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b555c google::LogMessage::SendToLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3dc9 google::LogMessage::Flush()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3fe1 google::LogMessage::~LogMessage()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b03bb39 ray::RayLog::~RayLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55133 ray::raylet::RayletConnection::RayletConnection()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55abf ray::raylet::RayletClient::RayletClient()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf513b ray::CoreWorker::CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8984 ray::CoreWorkerProcess::CreateWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8efb ray::CoreWorkerProcess::CoreWorkerProcess()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf93fb ray::CoreWorkerProcess::Initialize()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6c06c __pyx_pw_3ray_7_raylet_10CoreWorker_1__cinit__()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6d155 __pyx_tp_new_3ray_7_raylet_CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47965 type_call
�[2m�[36m(pid=22153)�[0m @ 0x55db24db7d7b _PyObject_FastCallDict
�[2m�[36m(pid=22153)�[0m @ 0x55db24e477ce call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e69cba _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e40dae _PyEval_EvalCodeWithName
�[2m�[36m(pid=22153)�[0m @ 0x55db24e41941 fast_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47755 call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e6aa7a _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e42459 PyEval_EvalCodeEx
�[2m�[36m(pid=22153)�[0m @ 0x55db24e431ec PyEval_EvalCode
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebd9a4 run_mod
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdda1 PyRun_FileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdfa4 PyRun_SimpleFileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ec1a9e Py_Main
�[2m�[36m(pid=22153)�[0m @ 0x55db24d894be main
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3cd85830 __libc_start_main
�[2m�[36m(pid=22153)�[0m @ 0x55db24e70773 (unknown)
Traceback (most recent call last):
File "workloads/node_failures.py", line 57, in <module>
cluster.add_node()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 115, in add_node
self._wait_for_node(node)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 165, in _wait_for_node
raise TimeoutError("Timed out while waiting for nodes to join.")
TimeoutError: Timed out while waiting for nodes to join.
�[2m�[33m(pid=raylet)�[0m E0429 02:32:42.965368 13125 process.cc:274] Failed to wait for process 13136 with error system:10: No child processes
�[2m�[33m(pid=raylet)�[0m E0429 02:32:43.045863 1167 process.cc:274] Failed to wait for process 1178 with error system:10: No child processes
2020-04-29 02:32:43,942 ERROR import_thread.py:93 -- ImportThread: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:996 -- print_logs: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:1096 -- listen_error_messages_raylet: Connection closed by server.
E0429 02:32:45.999132 22870 raylet_client.cc:90] IOError: [RayletClient] Connection closed unexpectedly. [RayletClient] Failed to disconnect from raylet.
|
TimeoutError
|
def _get_unused_port(self, close_on_exit=True):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
# Try to generate a port that is far above the 'next available' one.
# This solves issue #8254 where GRPC fails because the port assigned
# from this method has been used by a different process.
for _ in range(NUMBER_OF_PORT_RETRIES):
new_port = random.randint(port, 65535)
new_s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
new_s.bind(("", new_port))
except OSError:
new_s.close()
continue
s.close()
if close_on_exit:
new_s.close()
return new_port, new_s
logger.error("Unable to succeed in selecting a random port.")
if close_on_exit:
s.close()
return port, s
|
def _get_unused_port(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
return port
|
https://github.com/ray-project/ray/issues/8254
|
�[2m�[33m(pid=raylet)�[0m E0429 02:32:06.263886 22036 process.cc:274] Failed to wait for process 22047 with error system:10: No child processes
E0429 02:32:12.346844 23272 task_manager.cc:288] 3 retries left for task b48f33dc1265b526ffffffff0100, attempting to resubmit.
E0429 02:32:12.346899 23272 core_worker.cc:373] Will resubmit task after a 5000ms delay: Type=NORMAL_TASK, Language=PYTHON, function_descriptor={type=PythonFunctionDescriptor, module_name=__main__, class_name=, function_name=f, function_hash=7d2c6c88e5e801d48a350076f2117e717fe12224}, task_id=b48f33dc1265b526ffffffff0100, job_id=0100, num_args=2, num_returns=1
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.347446 22089 process.cc:274] Failed to wait for process 22100 with error system:10: No child processes
2020-04-29 02:32:12,653 INFO resource_spec.py:212 -- Starting Ray with 27.88 GiB memory available for workers and up to 0.15 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.732946757 22142 server_chttp2.cc:40] {"created":"@1588127532.732848116","description":"No address added out of total 1 resolved","file":"external/com_github_grpc_grpc/src/core/ext/transport/chttp2/server/chttp2_server.cc","file_line":394,"referenced_errors":[{"created":"@1588127532.732846227","description":"Failed to add any wildcard listeners","file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_posix.cc","file_line":341,"referenced_errors":[{"created":"@1588127532.732832876","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732823689","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]},{"created":"@1588127532.732845812","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732843382","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]}]}]}
�[2m�[33m(pid=raylet)�[0m *** Aborted at 1588127532 (unix time) try "date -d @1588127532" if you are using GNU date ***
�[2m�[33m(pid=raylet)�[0m PC: @ 0x0 (unknown)
�[2m�[33m(pid=raylet)�[0m *** SIGSEGV (@0x58) received by PID 22142 (TID 0x7fc3a66d37c0) from PID 88; stack trace: ***
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5c32390 (unknown)
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3957692 grpc::ServerInterface::RegisteredAsyncRequest::IssueRequest()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35b2149 ray::rpc::NodeManagerService::WithAsyncMethod_RequestWorkerLease<>::RequestRequestWorkerLease()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35c7b1b ray::rpc::ServerCallFactoryImpl<>::CreateCall()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e380bfe1 ray::rpc::GrpcServer::Run()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3629acc ray::raylet::NodeManager::NodeManager()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35cbc07 ray::raylet::Raylet::Raylet()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e359848d main
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5459830 __libc_start_main
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35a9391 (unknown)
�[2m�[36m(pid=22153)�[0m E0429 02:32:13.864451 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 1, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.364712 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 2, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.864863 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 3, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:15.365000 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 4, num_retries = 5)
�[2m�[36m(pid=22153)�[0m F0429 02:32:15.865115 22153 raylet_client.cc:78] Could not connect to socket /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8
�[2m�[36m(pid=22153)�[0m *** Check failure stack trace: ***
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b40ed google::LogMessage::Fail()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b555c google::LogMessage::SendToLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3dc9 google::LogMessage::Flush()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3fe1 google::LogMessage::~LogMessage()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b03bb39 ray::RayLog::~RayLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55133 ray::raylet::RayletConnection::RayletConnection()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55abf ray::raylet::RayletClient::RayletClient()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf513b ray::CoreWorker::CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8984 ray::CoreWorkerProcess::CreateWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8efb ray::CoreWorkerProcess::CoreWorkerProcess()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf93fb ray::CoreWorkerProcess::Initialize()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6c06c __pyx_pw_3ray_7_raylet_10CoreWorker_1__cinit__()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6d155 __pyx_tp_new_3ray_7_raylet_CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47965 type_call
�[2m�[36m(pid=22153)�[0m @ 0x55db24db7d7b _PyObject_FastCallDict
�[2m�[36m(pid=22153)�[0m @ 0x55db24e477ce call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e69cba _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e40dae _PyEval_EvalCodeWithName
�[2m�[36m(pid=22153)�[0m @ 0x55db24e41941 fast_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47755 call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e6aa7a _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e42459 PyEval_EvalCodeEx
�[2m�[36m(pid=22153)�[0m @ 0x55db24e431ec PyEval_EvalCode
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebd9a4 run_mod
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdda1 PyRun_FileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdfa4 PyRun_SimpleFileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ec1a9e Py_Main
�[2m�[36m(pid=22153)�[0m @ 0x55db24d894be main
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3cd85830 __libc_start_main
�[2m�[36m(pid=22153)�[0m @ 0x55db24e70773 (unknown)
Traceback (most recent call last):
File "workloads/node_failures.py", line 57, in <module>
cluster.add_node()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 115, in add_node
self._wait_for_node(node)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 165, in _wait_for_node
raise TimeoutError("Timed out while waiting for nodes to join.")
TimeoutError: Timed out while waiting for nodes to join.
�[2m�[33m(pid=raylet)�[0m E0429 02:32:42.965368 13125 process.cc:274] Failed to wait for process 13136 with error system:10: No child processes
�[2m�[33m(pid=raylet)�[0m E0429 02:32:43.045863 1167 process.cc:274] Failed to wait for process 1178 with error system:10: No child processes
2020-04-29 02:32:43,942 ERROR import_thread.py:93 -- ImportThread: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:996 -- print_logs: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:1096 -- listen_error_messages_raylet: Connection closed by server.
E0429 02:32:45.999132 22870 raylet_client.cc:90] IOError: [RayletClient] Connection closed unexpectedly. [RayletClient] Failed to disconnect from raylet.
|
TimeoutError
|
def _prepare_socket_file(self, socket_path, default_prefix):
"""Prepare the socket file for raylet and plasma.
This method helps to prepare a socket file.
1. Make the directory if the directory does not exist.
2. If the socket file exists, raise exception.
Args:
socket_path (string): the socket file to prepare.
"""
result = socket_path
is_mac = sys.platform.startswith("darwin")
if sys.platform == "win32":
if socket_path is None:
result = "tcp://{}:{}".format(self._localhost, self._get_unused_port()[0])
else:
if socket_path is None:
result = self._make_inc_temp(
prefix=default_prefix, directory_name=self._sockets_dir
)
else:
if os.path.exists(socket_path):
raise RuntimeError("Socket file {} exists!".format(socket_path))
try_to_create_directory(os.path.dirname(socket_path))
# Check socket path length to make sure it's short enough
maxlen = (104 if is_mac else 108) - 1 # sockaddr_un->sun_path
if len(result.split("://", 1)[-1].encode("utf-8")) > maxlen:
raise OSError(
"AF_UNIX path length cannot exceed {} bytes: {!r}".format(
maxlen, result
)
)
return result
|
def _prepare_socket_file(self, socket_path, default_prefix):
"""Prepare the socket file for raylet and plasma.
This method helps to prepare a socket file.
1. Make the directory if the directory does not exist.
2. If the socket file exists, raise exception.
Args:
socket_path (string): the socket file to prepare.
"""
result = socket_path
is_mac = sys.platform.startswith("darwin")
if sys.platform == "win32":
if socket_path is None:
result = "tcp://{}:{}".format(self._localhost, self._get_unused_port())
else:
if socket_path is None:
result = self._make_inc_temp(
prefix=default_prefix, directory_name=self._sockets_dir
)
else:
if os.path.exists(socket_path):
raise RuntimeError("Socket file {} exists!".format(socket_path))
try_to_create_directory(os.path.dirname(socket_path))
# Check socket path length to make sure it's short enough
maxlen = (104 if is_mac else 108) - 1 # sockaddr_un->sun_path
if len(result.split("://", 1)[-1].encode("utf-8")) > maxlen:
raise OSError(
"AF_UNIX path length cannot exceed {} bytes: {!r}".format(
maxlen, result
)
)
return result
|
https://github.com/ray-project/ray/issues/8254
|
�[2m�[33m(pid=raylet)�[0m E0429 02:32:06.263886 22036 process.cc:274] Failed to wait for process 22047 with error system:10: No child processes
E0429 02:32:12.346844 23272 task_manager.cc:288] 3 retries left for task b48f33dc1265b526ffffffff0100, attempting to resubmit.
E0429 02:32:12.346899 23272 core_worker.cc:373] Will resubmit task after a 5000ms delay: Type=NORMAL_TASK, Language=PYTHON, function_descriptor={type=PythonFunctionDescriptor, module_name=__main__, class_name=, function_name=f, function_hash=7d2c6c88e5e801d48a350076f2117e717fe12224}, task_id=b48f33dc1265b526ffffffff0100, job_id=0100, num_args=2, num_returns=1
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.347446 22089 process.cc:274] Failed to wait for process 22100 with error system:10: No child processes
2020-04-29 02:32:12,653 INFO resource_spec.py:212 -- Starting Ray with 27.88 GiB memory available for workers and up to 0.15 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.732946757 22142 server_chttp2.cc:40] {"created":"@1588127532.732848116","description":"No address added out of total 1 resolved","file":"external/com_github_grpc_grpc/src/core/ext/transport/chttp2/server/chttp2_server.cc","file_line":394,"referenced_errors":[{"created":"@1588127532.732846227","description":"Failed to add any wildcard listeners","file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_posix.cc","file_line":341,"referenced_errors":[{"created":"@1588127532.732832876","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732823689","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]},{"created":"@1588127532.732845812","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732843382","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]}]}]}
�[2m�[33m(pid=raylet)�[0m *** Aborted at 1588127532 (unix time) try "date -d @1588127532" if you are using GNU date ***
�[2m�[33m(pid=raylet)�[0m PC: @ 0x0 (unknown)
�[2m�[33m(pid=raylet)�[0m *** SIGSEGV (@0x58) received by PID 22142 (TID 0x7fc3a66d37c0) from PID 88; stack trace: ***
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5c32390 (unknown)
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3957692 grpc::ServerInterface::RegisteredAsyncRequest::IssueRequest()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35b2149 ray::rpc::NodeManagerService::WithAsyncMethod_RequestWorkerLease<>::RequestRequestWorkerLease()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35c7b1b ray::rpc::ServerCallFactoryImpl<>::CreateCall()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e380bfe1 ray::rpc::GrpcServer::Run()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3629acc ray::raylet::NodeManager::NodeManager()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35cbc07 ray::raylet::Raylet::Raylet()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e359848d main
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5459830 __libc_start_main
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35a9391 (unknown)
�[2m�[36m(pid=22153)�[0m E0429 02:32:13.864451 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 1, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.364712 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 2, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.864863 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 3, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:15.365000 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 4, num_retries = 5)
�[2m�[36m(pid=22153)�[0m F0429 02:32:15.865115 22153 raylet_client.cc:78] Could not connect to socket /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8
�[2m�[36m(pid=22153)�[0m *** Check failure stack trace: ***
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b40ed google::LogMessage::Fail()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b555c google::LogMessage::SendToLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3dc9 google::LogMessage::Flush()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3fe1 google::LogMessage::~LogMessage()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b03bb39 ray::RayLog::~RayLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55133 ray::raylet::RayletConnection::RayletConnection()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55abf ray::raylet::RayletClient::RayletClient()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf513b ray::CoreWorker::CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8984 ray::CoreWorkerProcess::CreateWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8efb ray::CoreWorkerProcess::CoreWorkerProcess()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf93fb ray::CoreWorkerProcess::Initialize()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6c06c __pyx_pw_3ray_7_raylet_10CoreWorker_1__cinit__()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6d155 __pyx_tp_new_3ray_7_raylet_CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47965 type_call
�[2m�[36m(pid=22153)�[0m @ 0x55db24db7d7b _PyObject_FastCallDict
�[2m�[36m(pid=22153)�[0m @ 0x55db24e477ce call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e69cba _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e40dae _PyEval_EvalCodeWithName
�[2m�[36m(pid=22153)�[0m @ 0x55db24e41941 fast_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47755 call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e6aa7a _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e42459 PyEval_EvalCodeEx
�[2m�[36m(pid=22153)�[0m @ 0x55db24e431ec PyEval_EvalCode
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebd9a4 run_mod
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdda1 PyRun_FileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdfa4 PyRun_SimpleFileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ec1a9e Py_Main
�[2m�[36m(pid=22153)�[0m @ 0x55db24d894be main
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3cd85830 __libc_start_main
�[2m�[36m(pid=22153)�[0m @ 0x55db24e70773 (unknown)
Traceback (most recent call last):
File "workloads/node_failures.py", line 57, in <module>
cluster.add_node()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 115, in add_node
self._wait_for_node(node)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 165, in _wait_for_node
raise TimeoutError("Timed out while waiting for nodes to join.")
TimeoutError: Timed out while waiting for nodes to join.
�[2m�[33m(pid=raylet)�[0m E0429 02:32:42.965368 13125 process.cc:274] Failed to wait for process 13136 with error system:10: No child processes
�[2m�[33m(pid=raylet)�[0m E0429 02:32:43.045863 1167 process.cc:274] Failed to wait for process 1178 with error system:10: No child processes
2020-04-29 02:32:43,942 ERROR import_thread.py:93 -- ImportThread: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:996 -- print_logs: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:1096 -- listen_error_messages_raylet: Connection closed by server.
E0429 02:32:45.999132 22870 raylet_client.cc:90] IOError: [RayletClient] Connection closed unexpectedly. [RayletClient] Failed to disconnect from raylet.
|
TimeoutError
|
def start_raylet(self, use_valgrind=False, use_profiler=False):
"""Start the raylet.
Args:
use_valgrind (bool): True if we should start the process in
valgrind.
use_profiler (bool): True if we should start the process in the
valgrind profiler.
"""
stdout_file, stderr_file = self.new_log_files("raylet")
process_info = ray.services.start_raylet(
self._redis_address,
self._node_ip_address,
self._ray_params.node_manager_port,
self._raylet_socket_name,
self._plasma_store_socket_name,
self._ray_params.worker_path,
self._temp_dir,
self._session_dir,
self.get_resource_spec(),
self._ray_params.min_worker_port,
self._ray_params.max_worker_port,
self._ray_params.object_manager_port,
self._ray_params.redis_password,
use_valgrind=use_valgrind,
use_profiler=use_profiler,
stdout_file=stdout_file,
stderr_file=stderr_file,
config=self._config,
include_java=self._ray_params.include_java,
java_worker_options=self._ray_params.java_worker_options,
load_code_from_local=self._ray_params.load_code_from_local,
fate_share=self.kernel_fate_share,
socket_to_use=self.socket,
)
assert ray_constants.PROCESS_TYPE_RAYLET not in self.all_processes
self.all_processes[ray_constants.PROCESS_TYPE_RAYLET] = [process_info]
|
def start_raylet(self, use_valgrind=False, use_profiler=False):
"""Start the raylet.
Args:
use_valgrind (bool): True if we should start the process in
valgrind.
use_profiler (bool): True if we should start the process in the
valgrind profiler.
"""
stdout_file, stderr_file = self.new_log_files("raylet")
process_info = ray.services.start_raylet(
self._redis_address,
self._node_ip_address,
self._ray_params.node_manager_port,
self._raylet_socket_name,
self._plasma_store_socket_name,
self._ray_params.worker_path,
self._temp_dir,
self._session_dir,
self.get_resource_spec(),
self._ray_params.min_worker_port,
self._ray_params.max_worker_port,
self._ray_params.object_manager_port,
self._ray_params.redis_password,
use_valgrind=use_valgrind,
use_profiler=use_profiler,
stdout_file=stdout_file,
stderr_file=stderr_file,
config=self._config,
include_java=self._ray_params.include_java,
java_worker_options=self._ray_params.java_worker_options,
load_code_from_local=self._ray_params.load_code_from_local,
fate_share=self.kernel_fate_share,
)
assert ray_constants.PROCESS_TYPE_RAYLET not in self.all_processes
self.all_processes[ray_constants.PROCESS_TYPE_RAYLET] = [process_info]
|
https://github.com/ray-project/ray/issues/8254
|
�[2m�[33m(pid=raylet)�[0m E0429 02:32:06.263886 22036 process.cc:274] Failed to wait for process 22047 with error system:10: No child processes
E0429 02:32:12.346844 23272 task_manager.cc:288] 3 retries left for task b48f33dc1265b526ffffffff0100, attempting to resubmit.
E0429 02:32:12.346899 23272 core_worker.cc:373] Will resubmit task after a 5000ms delay: Type=NORMAL_TASK, Language=PYTHON, function_descriptor={type=PythonFunctionDescriptor, module_name=__main__, class_name=, function_name=f, function_hash=7d2c6c88e5e801d48a350076f2117e717fe12224}, task_id=b48f33dc1265b526ffffffff0100, job_id=0100, num_args=2, num_returns=1
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.347446 22089 process.cc:274] Failed to wait for process 22100 with error system:10: No child processes
2020-04-29 02:32:12,653 INFO resource_spec.py:212 -- Starting Ray with 27.88 GiB memory available for workers and up to 0.15 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.732946757 22142 server_chttp2.cc:40] {"created":"@1588127532.732848116","description":"No address added out of total 1 resolved","file":"external/com_github_grpc_grpc/src/core/ext/transport/chttp2/server/chttp2_server.cc","file_line":394,"referenced_errors":[{"created":"@1588127532.732846227","description":"Failed to add any wildcard listeners","file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_posix.cc","file_line":341,"referenced_errors":[{"created":"@1588127532.732832876","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732823689","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]},{"created":"@1588127532.732845812","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732843382","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]}]}]}
�[2m�[33m(pid=raylet)�[0m *** Aborted at 1588127532 (unix time) try "date -d @1588127532" if you are using GNU date ***
�[2m�[33m(pid=raylet)�[0m PC: @ 0x0 (unknown)
�[2m�[33m(pid=raylet)�[0m *** SIGSEGV (@0x58) received by PID 22142 (TID 0x7fc3a66d37c0) from PID 88; stack trace: ***
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5c32390 (unknown)
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3957692 grpc::ServerInterface::RegisteredAsyncRequest::IssueRequest()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35b2149 ray::rpc::NodeManagerService::WithAsyncMethod_RequestWorkerLease<>::RequestRequestWorkerLease()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35c7b1b ray::rpc::ServerCallFactoryImpl<>::CreateCall()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e380bfe1 ray::rpc::GrpcServer::Run()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3629acc ray::raylet::NodeManager::NodeManager()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35cbc07 ray::raylet::Raylet::Raylet()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e359848d main
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5459830 __libc_start_main
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35a9391 (unknown)
�[2m�[36m(pid=22153)�[0m E0429 02:32:13.864451 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 1, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.364712 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 2, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.864863 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 3, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:15.365000 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 4, num_retries = 5)
�[2m�[36m(pid=22153)�[0m F0429 02:32:15.865115 22153 raylet_client.cc:78] Could not connect to socket /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8
�[2m�[36m(pid=22153)�[0m *** Check failure stack trace: ***
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b40ed google::LogMessage::Fail()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b555c google::LogMessage::SendToLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3dc9 google::LogMessage::Flush()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3fe1 google::LogMessage::~LogMessage()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b03bb39 ray::RayLog::~RayLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55133 ray::raylet::RayletConnection::RayletConnection()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55abf ray::raylet::RayletClient::RayletClient()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf513b ray::CoreWorker::CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8984 ray::CoreWorkerProcess::CreateWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8efb ray::CoreWorkerProcess::CoreWorkerProcess()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf93fb ray::CoreWorkerProcess::Initialize()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6c06c __pyx_pw_3ray_7_raylet_10CoreWorker_1__cinit__()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6d155 __pyx_tp_new_3ray_7_raylet_CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47965 type_call
�[2m�[36m(pid=22153)�[0m @ 0x55db24db7d7b _PyObject_FastCallDict
�[2m�[36m(pid=22153)�[0m @ 0x55db24e477ce call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e69cba _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e40dae _PyEval_EvalCodeWithName
�[2m�[36m(pid=22153)�[0m @ 0x55db24e41941 fast_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47755 call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e6aa7a _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e42459 PyEval_EvalCodeEx
�[2m�[36m(pid=22153)�[0m @ 0x55db24e431ec PyEval_EvalCode
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebd9a4 run_mod
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdda1 PyRun_FileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdfa4 PyRun_SimpleFileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ec1a9e Py_Main
�[2m�[36m(pid=22153)�[0m @ 0x55db24d894be main
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3cd85830 __libc_start_main
�[2m�[36m(pid=22153)�[0m @ 0x55db24e70773 (unknown)
Traceback (most recent call last):
File "workloads/node_failures.py", line 57, in <module>
cluster.add_node()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 115, in add_node
self._wait_for_node(node)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 165, in _wait_for_node
raise TimeoutError("Timed out while waiting for nodes to join.")
TimeoutError: Timed out while waiting for nodes to join.
�[2m�[33m(pid=raylet)�[0m E0429 02:32:42.965368 13125 process.cc:274] Failed to wait for process 13136 with error system:10: No child processes
�[2m�[33m(pid=raylet)�[0m E0429 02:32:43.045863 1167 process.cc:274] Failed to wait for process 1178 with error system:10: No child processes
2020-04-29 02:32:43,942 ERROR import_thread.py:93 -- ImportThread: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:996 -- print_logs: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:1096 -- listen_error_messages_raylet: Connection closed by server.
E0429 02:32:45.999132 22870 raylet_client.cc:90] IOError: [RayletClient] Connection closed unexpectedly. [RayletClient] Failed to disconnect from raylet.
|
TimeoutError
|
def start_raylet(
redis_address,
node_ip_address,
node_manager_port,
raylet_name,
plasma_store_name,
worker_path,
temp_dir,
session_dir,
resource_spec,
min_worker_port=None,
max_worker_port=None,
object_manager_port=None,
redis_password=None,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
config=None,
include_java=False,
java_worker_options=None,
load_code_from_local=False,
fate_share=None,
socket_to_use=None,
):
"""Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
node_manager_port(int): The port to use for the node manager. This must
not be 0.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
session_dir (str): The path of this session.
resource_spec (ResourceSpec): Resources for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
min_worker_port (int): The lowest port number that workers will bind
on. If not set, random ports will be chosen.
max_worker_port (int): The highest port number that workers will bind
on. If set, min_worker_port must also be set.
redis_password: The password to use when connecting to Redis.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
include_java (bool): If True, the raylet backend can also support
Java worker.
java_worker_options (list): The command options for Java worker.
Returns:
ProcessInfo for the process that was started.
"""
# The caller must provide a node manager port so that we can correctly
# populate the command to start a worker.
assert node_manager_port is not None and node_manager_port != 0
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
if use_valgrind and use_profiler:
raise ValueError("Cannot use valgrind and profiler at the same time.")
assert resource_spec.resolved()
num_initial_workers = resource_spec.num_cpus
static_resources = resource_spec.to_resource_dict()
# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
1, min(multiprocessing.cpu_count(), num_cpus_static)
)
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()]
)
gcs_ip_address, gcs_port = redis_address.split(":")
if include_java is True:
default_cp = os.pathsep.join(DEFAULT_JAVA_WORKER_CLASSPATH)
java_worker_command = build_java_worker_command(
json.loads(java_worker_options)
if java_worker_options
else ["-classpath", default_cp],
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
)
else:
java_worker_command = []
# Create the command that the Raylet will use to start workers.
start_worker_command = [
sys.executable,
worker_path,
"--node-ip-address={}".format(node_ip_address),
"--node-manager-port={}".format(node_manager_port),
"--object-store-name={}".format(plasma_store_name),
"--raylet-name={}".format(raylet_name),
"--redis-address={}".format(redis_address),
"--config-list={}".format(config_str),
"--temp-dir={}".format(temp_dir),
]
if redis_password:
start_worker_command += ["--redis-password={}".format(redis_password)]
# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
object_manager_port = 0
if min_worker_port is None:
min_worker_port = 0
if max_worker_port is None:
max_worker_port = 0
if load_code_from_local:
start_worker_command += ["--load-code-from-local"]
command = [
RAYLET_EXECUTABLE,
"--raylet_socket_name={}".format(raylet_name),
"--store_socket_name={}".format(plasma_store_name),
"--object_manager_port={}".format(object_manager_port),
"--min_worker_port={}".format(min_worker_port),
"--max_worker_port={}".format(max_worker_port),
"--node_manager_port={}".format(node_manager_port),
"--node_ip_address={}".format(node_ip_address),
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--num_initial_workers={}".format(num_initial_workers),
"--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
"--static_resource_list={}".format(resource_argument),
"--config_list={}".format(config_str),
"--python_worker_command={}".format(
subprocess.list2cmdline(start_worker_command)
),
"--java_worker_command={}".format(subprocess.list2cmdline(java_worker_command)),
"--redis_password={}".format(redis_password or ""),
"--temp_dir={}".format(temp_dir),
"--session_dir={}".format(session_dir),
]
if socket_to_use:
socket_to_use.close()
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET,
use_valgrind=use_valgrind,
use_gdb=False,
use_valgrind_profiler=use_profiler,
use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share,
)
return process_info
|
def start_raylet(
redis_address,
node_ip_address,
node_manager_port,
raylet_name,
plasma_store_name,
worker_path,
temp_dir,
session_dir,
resource_spec,
min_worker_port=None,
max_worker_port=None,
object_manager_port=None,
redis_password=None,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
config=None,
include_java=False,
java_worker_options=None,
load_code_from_local=False,
fate_share=None,
):
"""Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
node_manager_port(int): The port to use for the node manager. This must
not be 0.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
session_dir (str): The path of this session.
resource_spec (ResourceSpec): Resources for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
min_worker_port (int): The lowest port number that workers will bind
on. If not set, random ports will be chosen.
max_worker_port (int): The highest port number that workers will bind
on. If set, min_worker_port must also be set.
redis_password: The password to use when connecting to Redis.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
include_java (bool): If True, the raylet backend can also support
Java worker.
java_worker_options (list): The command options for Java worker.
Returns:
ProcessInfo for the process that was started.
"""
# The caller must provide a node manager port so that we can correctly
# populate the command to start a worker.
assert node_manager_port is not None and node_manager_port != 0
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
if use_valgrind and use_profiler:
raise ValueError("Cannot use valgrind and profiler at the same time.")
assert resource_spec.resolved()
num_initial_workers = resource_spec.num_cpus
static_resources = resource_spec.to_resource_dict()
# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
1, min(multiprocessing.cpu_count(), num_cpus_static)
)
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()]
)
gcs_ip_address, gcs_port = redis_address.split(":")
if include_java is True:
default_cp = os.pathsep.join(DEFAULT_JAVA_WORKER_CLASSPATH)
java_worker_command = build_java_worker_command(
json.loads(java_worker_options)
if java_worker_options
else ["-classpath", default_cp],
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
)
else:
java_worker_command = []
# Create the command that the Raylet will use to start workers.
start_worker_command = [
sys.executable,
worker_path,
"--node-ip-address={}".format(node_ip_address),
"--node-manager-port={}".format(node_manager_port),
"--object-store-name={}".format(plasma_store_name),
"--raylet-name={}".format(raylet_name),
"--redis-address={}".format(redis_address),
"--config-list={}".format(config_str),
"--temp-dir={}".format(temp_dir),
]
if redis_password:
start_worker_command += ["--redis-password={}".format(redis_password)]
# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
object_manager_port = 0
if min_worker_port is None:
min_worker_port = 0
if max_worker_port is None:
max_worker_port = 0
if load_code_from_local:
start_worker_command += ["--load-code-from-local"]
command = [
RAYLET_EXECUTABLE,
"--raylet_socket_name={}".format(raylet_name),
"--store_socket_name={}".format(plasma_store_name),
"--object_manager_port={}".format(object_manager_port),
"--min_worker_port={}".format(min_worker_port),
"--max_worker_port={}".format(max_worker_port),
"--node_manager_port={}".format(node_manager_port),
"--node_ip_address={}".format(node_ip_address),
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--num_initial_workers={}".format(num_initial_workers),
"--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
"--static_resource_list={}".format(resource_argument),
"--config_list={}".format(config_str),
"--python_worker_command={}".format(
subprocess.list2cmdline(start_worker_command)
),
"--java_worker_command={}".format(subprocess.list2cmdline(java_worker_command)),
"--redis_password={}".format(redis_password or ""),
"--temp_dir={}".format(temp_dir),
"--session_dir={}".format(session_dir),
]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET,
use_valgrind=use_valgrind,
use_gdb=False,
use_valgrind_profiler=use_profiler,
use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share,
)
return process_info
|
https://github.com/ray-project/ray/issues/8254
|
�[2m�[33m(pid=raylet)�[0m E0429 02:32:06.263886 22036 process.cc:274] Failed to wait for process 22047 with error system:10: No child processes
E0429 02:32:12.346844 23272 task_manager.cc:288] 3 retries left for task b48f33dc1265b526ffffffff0100, attempting to resubmit.
E0429 02:32:12.346899 23272 core_worker.cc:373] Will resubmit task after a 5000ms delay: Type=NORMAL_TASK, Language=PYTHON, function_descriptor={type=PythonFunctionDescriptor, module_name=__main__, class_name=, function_name=f, function_hash=7d2c6c88e5e801d48a350076f2117e717fe12224}, task_id=b48f33dc1265b526ffffffff0100, job_id=0100, num_args=2, num_returns=1
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.347446 22089 process.cc:274] Failed to wait for process 22100 with error system:10: No child processes
2020-04-29 02:32:12,653 INFO resource_spec.py:212 -- Starting Ray with 27.88 GiB memory available for workers and up to 0.15 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
�[2m�[33m(pid=raylet)�[0m E0429 02:32:12.732946757 22142 server_chttp2.cc:40] {"created":"@1588127532.732848116","description":"No address added out of total 1 resolved","file":"external/com_github_grpc_grpc/src/core/ext/transport/chttp2/server/chttp2_server.cc","file_line":394,"referenced_errors":[{"created":"@1588127532.732846227","description":"Failed to add any wildcard listeners","file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_posix.cc","file_line":341,"referenced_errors":[{"created":"@1588127532.732832876","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732823689","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]},{"created":"@1588127532.732845812","description":"Unable to configure socket","fd":44,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":208,"referenced_errors":[{"created":"@1588127532.732843382","description":"Address already in use","errno":98,"file":"external/com_github_grpc_grpc/src/core/lib/iomgr/tcp_server_utils_posix_common.cc","file_line":181,"os_error":"Address already in use","syscall":"bind"}]}]}]}
�[2m�[33m(pid=raylet)�[0m *** Aborted at 1588127532 (unix time) try "date -d @1588127532" if you are using GNU date ***
�[2m�[33m(pid=raylet)�[0m PC: @ 0x0 (unknown)
�[2m�[33m(pid=raylet)�[0m *** SIGSEGV (@0x58) received by PID 22142 (TID 0x7fc3a66d37c0) from PID 88; stack trace: ***
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5c32390 (unknown)
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3957692 grpc::ServerInterface::RegisteredAsyncRequest::IssueRequest()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35b2149 ray::rpc::NodeManagerService::WithAsyncMethod_RequestWorkerLease<>::RequestRequestWorkerLease()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35c7b1b ray::rpc::ServerCallFactoryImpl<>::CreateCall()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e380bfe1 ray::rpc::GrpcServer::Run()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e3629acc ray::raylet::NodeManager::NodeManager()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35cbc07 ray::raylet::Raylet::Raylet()
�[2m�[33m(pid=raylet)�[0m @ 0x5596e359848d main
�[2m�[33m(pid=raylet)�[0m @ 0x7fc3a5459830 __libc_start_main
�[2m�[33m(pid=raylet)�[0m @ 0x5596e35a9391 (unknown)
�[2m�[36m(pid=22153)�[0m E0429 02:32:13.864451 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 1, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.364712 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 2, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:14.864863 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 3, num_retries = 5)
�[2m�[36m(pid=22153)�[0m E0429 02:32:15.365000 22153 raylet_client.cc:69] Retrying to connect to socket for pathname /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8 (num_attempts = 4, num_retries = 5)
�[2m�[36m(pid=22153)�[0m F0429 02:32:15.865115 22153 raylet_client.cc:78] Could not connect to socket /tmp/ray/session_2020-04-28_20-19-44_770473_22870/sockets/raylet.8
�[2m�[36m(pid=22153)�[0m *** Check failure stack trace: ***
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b40ed google::LogMessage::Fail()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b555c google::LogMessage::SendToLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3dc9 google::LogMessage::Flush()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b2b3fe1 google::LogMessage::~LogMessage()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3b03bb39 ray::RayLog::~RayLog()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55133 ray::raylet::RayletConnection::RayletConnection()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ae55abf ray::raylet::RayletClient::RayletClient()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf513b ray::CoreWorker::CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8984 ray::CoreWorkerProcess::CreateWorker()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf8efb ray::CoreWorkerProcess::CoreWorkerProcess()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3adf93fb ray::CoreWorkerProcess::Initialize()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6c06c __pyx_pw_3ray_7_raylet_10CoreWorker_1__cinit__()
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3ad6d155 __pyx_tp_new_3ray_7_raylet_CoreWorker()
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47965 type_call
�[2m�[36m(pid=22153)�[0m @ 0x55db24db7d7b _PyObject_FastCallDict
�[2m�[36m(pid=22153)�[0m @ 0x55db24e477ce call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e69cba _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e40dae _PyEval_EvalCodeWithName
�[2m�[36m(pid=22153)�[0m @ 0x55db24e41941 fast_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e47755 call_function
�[2m�[36m(pid=22153)�[0m @ 0x55db24e6aa7a _PyEval_EvalFrameDefault
�[2m�[36m(pid=22153)�[0m @ 0x55db24e42459 PyEval_EvalCodeEx
�[2m�[36m(pid=22153)�[0m @ 0x55db24e431ec PyEval_EvalCode
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebd9a4 run_mod
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdda1 PyRun_FileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ebdfa4 PyRun_SimpleFileExFlags
�[2m�[36m(pid=22153)�[0m @ 0x55db24ec1a9e Py_Main
�[2m�[36m(pid=22153)�[0m @ 0x55db24d894be main
�[2m�[36m(pid=22153)�[0m @ 0x7f5d3cd85830 __libc_start_main
�[2m�[36m(pid=22153)�[0m @ 0x55db24e70773 (unknown)
Traceback (most recent call last):
File "workloads/node_failures.py", line 57, in <module>
cluster.add_node()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 115, in add_node
self._wait_for_node(node)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/cluster_utils.py", line 165, in _wait_for_node
raise TimeoutError("Timed out while waiting for nodes to join.")
TimeoutError: Timed out while waiting for nodes to join.
�[2m�[33m(pid=raylet)�[0m E0429 02:32:42.965368 13125 process.cc:274] Failed to wait for process 13136 with error system:10: No child processes
�[2m�[33m(pid=raylet)�[0m E0429 02:32:43.045863 1167 process.cc:274] Failed to wait for process 1178 with error system:10: No child processes
2020-04-29 02:32:43,942 ERROR import_thread.py:93 -- ImportThread: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:996 -- print_logs: Connection closed by server.
2020-04-29 02:32:43,942 ERROR worker.py:1096 -- listen_error_messages_raylet: Connection closed by server.
E0429 02:32:45.999132 22870 raylet_client.cc:90] IOError: [RayletClient] Connection closed unexpectedly. [RayletClient] Failed to disconnect from raylet.
|
TimeoutError
|
def checkpoint(self, force=False):
"""Saves execution state to `self._local_checkpoint_dir`.
Overwrites the current session checkpoint, which starts when self
is instantiated. Throttle depends on self._checkpoint_period.
Args:
force (bool): Forces a checkpoint despite checkpoint_period.
"""
if not self._local_checkpoint_dir:
return
now = time.time()
if now - self._last_checkpoint_time < self._checkpoint_period and (not force):
return
self._last_checkpoint_time = now
runner_state = {
"checkpoints": list(self.trial_executor.get_checkpoints().values()),
"runner_data": self.__getstate__(),
"stats": {
"start_time": self._start_time,
"timestamp": self._last_checkpoint_time,
},
}
tmp_file_name = os.path.join(self._local_checkpoint_dir, ".tmp_checkpoint")
with open(tmp_file_name, "w") as f:
json.dump(runner_state, f, indent=2, cls=_TuneFunctionEncoder)
os.replace(tmp_file_name, self.checkpoint_file)
if force:
self._syncer.sync_up()
else:
self._syncer.sync_up_if_needed()
return self._local_checkpoint_dir
|
def checkpoint(self, force=False):
"""Saves execution state to `self._local_checkpoint_dir`.
Overwrites the current session checkpoint, which starts when self
is instantiated. Throttle depends on self._checkpoint_period.
Args:
force (bool): Forces a checkpoint despite checkpoint_period.
"""
if not self._local_checkpoint_dir:
return
now = time.time()
if now - self._last_checkpoint_time < self._checkpoint_period and (not force):
return
self._last_checkpoint_time = now
runner_state = {
"checkpoints": list(self.trial_executor.get_checkpoints().values()),
"runner_data": self.__getstate__(),
"stats": {
"start_time": self._start_time,
"timestamp": self._last_checkpoint_time,
},
}
tmp_file_name = os.path.join(self._local_checkpoint_dir, ".tmp_checkpoint")
with open(tmp_file_name, "w") as f:
json.dump(runner_state, f, indent=2, cls=_TuneFunctionEncoder)
os.rename(tmp_file_name, self.checkpoint_file)
if force:
self._syncer.sync_up()
else:
self._syncer.sync_up_if_needed()
return self._local_checkpoint_dir
|
https://github.com/ray-project/ray/issues/9128
|
...\envs\ray\python.exe <project-dir>/train_a2c.py PongNoFrameskip-v4 --gpus=0
2020-06-24 16:06:40,329 INFO resource_spec.py:212 -- Starting Ray with 9.67 GiB memory available for workers and up to 4.86 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-06-24 16:06:41,104 INFO services.py:1165 -- View the Ray dashboard at localhost:8265
2020-06-24 16:06:42,789 WARNING worker.py:1047 -- The dashboard on node Julius-Desktop failed with the following error:
Traceback (most recent call last):
File "...\envs\ray\lib\site-packages\ray\dashboard/dashboard.py", line 960, in <module>
metrics_export_address=metrics_export_address)
File "...\envs\ray\lib\site-packages\ray\dashboard/dashboard.py", line 513, in __init__
build_dir = setup_static_dir(self.app)
File "...\envs\ray\lib\site-packages\ray\dashboard/dashboard.py", line 414, in setup_static_dir
"&& npm run build)", build_dir)
FileNotFoundError: [Errno 2] Dashboard build directory not found. If installing from source, please follow the additional steps required to build the dashboard(cd python/ray/dashboard/client && npm ci && npm run build): 'C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\dashboard\\client/build'
2020-06-24 16:06:44,532 ERROR syncer.py:46 -- Log sync requires rsync to be installed.
== Status ==
Memory usage on this node: 16.4/31.9 GiB
Using FIFO scheduling algorithm.
Resources requested: 6/6 CPUs, 0/0 GPUs, 0.0/9.67 GiB heap, 0.0/3.32 GiB objects
Result logdir: <project-dir>\results\A2C-Atari
Number of trials: 1 (1 RUNNING)
+------------------------------------+----------+-------+
| Trial name | status | loc |
|------------------------------------+----------+-------|
| A2C_PongNoFrameskip-v4_3a054_00000 | RUNNING | |
+------------------------------------+----------+-------+
(pid=21072) 2020-06-24 16:06:47,281 WARNING deprecation.py:30 -- DeprecationWarning: `use_pytorch` has been deprecated. Use `framework=torch` instead. This will raise an error in the future!
(pid=21072) 2020-06-24 16:06:47,281 INFO trainer.py:612 -- Current log_level is WARN. For more information, set 'log_level': 'INFO' / 'DEBUG' or use the -v and -vv flags.
(pid=21072) 2020-06-24 16:06:51,895 WARNING util.py:37 -- Install gputil for GPU system monitoring.
(pid=3444) ..\torch\csrc\utils\tensor_numpy.cpp:141: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program.
(pid=17404) ..\torch\csrc\utils\tensor_numpy.cpp:141: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program.
(pid=11736) ..\torch\csrc\utils\tensor_numpy.cpp:141: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program.
(pid=10920) ..\torch\csrc\utils\tensor_numpy.cpp:141: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program.
(pid=8356) ..\torch\csrc\utils\tensor_numpy.cpp:141: UserWarning: The given NumPy array is not writeable, and PyTorch does not support non-writeable tensors. This means you can write to the underlying (supposedly non-writeable) NumPy array using the tensor. You may want to copy the array to protect its data or make it writeable before converting it to a tensor. This type of warning will be suppressed for the rest of this program.
Result for A2C_PongNoFrameskip-v4_3a054_00000:
custom_metrics: {}
date: 2020-06-24_16-07-07
done: false
episode_len_mean: .nan
episode_reward_max: .nan
episode_reward_mean: .nan
episode_reward_min: .nan
episodes_this_iter: 0
episodes_total: 0
experiment_id: 64b5943e39e94245b0e446eb0c7c7a58
experiment_tag: '0'
hostname: Desktop
info:
learner:
default_policy:
allreduce_latency: 0.0
grad_gnorm: 3233.239013671875
policy_entropy: 0.00603322172537446
policy_loss: -0.6445010304450989
vf_loss: 4.446309566497803
num_steps_sampled: 500
num_steps_trained: 500
iterations_since_restore: 1
node_ip: 10.0.0.18
num_healthy_workers: 5
off_policy_estimator: {}
perf:
cpu_util_percent: 86.59999999999998
ram_util_percent: 63.64782608695651
pid: 21072
policy_reward_max: {}
policy_reward_mean: {}
policy_reward_min: {}
sampler_perf: {}
time_since_restore: 15.717210054397583
time_this_iter_s: 15.717210054397583
time_total_s: 15.717210054397583
timers:
learn_throughput: 313.668
learn_time_ms: 1594.043
sample_throughput: 35.436
sample_time_ms: 14109.902
update_time_ms: 6.017
timestamp: 1593029227
timesteps_since_restore: 0
timesteps_total: 500
training_iteration: 1
trial_id: 3a054_00000
2020-06-24 16:07:07,624 ERROR trial_runner.py:520 -- Trial A2C_PongNoFrameskip-v4_3a054_00000: Error processing event.
Traceback (most recent call last):
File "...\envs\ray\lib\site-packages\ray\tune\trial_runner.py", line 503, in _process_trial
result, terminate=(decision == TrialScheduler.STOP))
File "...\envs\ray\lib\site-packages\ray\tune\trial.py", line 479, in update_last_result
self.result_logger.on_result(self.last_result)
File "...\envs\ray\lib\site-packages\ray\tune\logger.py", line 336, in on_result
_logger.on_result(result)
File "...\envs\ray\lib\site-packages\ray\tune\logger.py", line 221, in on_result
elif type(value) in [list, np.ndarray] and len(value) > 0:
TypeError: len() of unsized object
== Status ==
Memory usage on this node: 25.9/31.9 GiB
Using FIFO scheduling algorithm.
Resources requested: 0/6 CPUs, 0/0 GPUs, 0.0/9.67 GiB heap, 0.0/3.32 GiB objects
Result logdir: <project-dir>\results\A2C-Atari
Number of trials: 1 (1 ERROR)
+------------------------------------+----------+-------+--------+------------------+------+----------+
| Trial name | status | loc | iter | total time (s) | ts | reward |
|------------------------------------+----------+-------+--------+------------------+------+----------|
| A2C_PongNoFrameskip-v4_3a054_00000 | ERROR | | 1 | 15.7172 | 500 | nan |
+------------------------------------+----------+-------+--------+------------------+------+----------+
Number of errored trials: 1
+------------------------------------+--------------+------------------------------------------------------------------------------------------------------------------------------+
| Trial name | # failures | error file |
|------------------------------------+--------------+------------------------------------------------------------------------------------------------------------------------------|
| A2C_PongNoFrameskip-v4_3a054_00000 | 1 | <project-dir>\results\A2C-Atari\A2C_PongNoFrameskip-v4_0_2020-06-24_16-06-44r41b4z_6\error.txt |
+------------------------------------+--------------+------------------------------------------------------------------------------------------------------------------------------+
2020-06-24 16:07:07,631 ERROR tune.py:334 -- Trial Runner checkpointing failed.
Traceback (most recent call last):
File "...\envs\ray\lib\site-packages\ray\tune\tune.py", line 332, in run
runner.checkpoint(force=True)
File "...\envs\ray\lib\site-packages\ray\tune\trial_runner.py", line 279, in checkpoint
os.rename(tmp_file_name, self.checkpoint_file)
FileExistsError: [WinError 183] Cannot create a file when that file already exists: 'C:\\Users\\Julius\\Documents\\GitHub\\cfrl-rllib\\results\\A2C-Atari\\.tmp_checkpoint' -> 'C:\\Users\\Julius\\Documents\\GitHub\\cfrl-rllib\\results\\A2C-Atari\\experiment_state-2020-06-24_16-06-44.json'
Traceback (most recent call last):
File "<project-dir>/train_a2c.py", line 46, in <module>
main()
File "<project-dir>/train_a2c.py", line 40, in main
== Status ==
'use_pytorch': True,
Memory usage on this node: 25.9/31.9 GiB
File "...\envs\ray\lib\site-packages\ray\tune\tune.py", line 349, in run
raise TuneError("Trials did not complete", incomplete_trials)
ray.tune.error.TuneError: ('Trials did not complete', [A2C_PongNoFrameskip-v4_3a054_00000])
Using FIFO scheduling algorithm.
Resources requested: 0/6 CPUs, 0/0 GPUs, 0.0/9.67 GiB heap, 0.0/3.32 GiB objects
Result logdir: <project-dir>\results\A2C-Atari
Number of trials: 1 (1 ERROR)
+------------------------------------+----------+-------+--------+------------------+------+----------+
| Trial name | status | loc | iter | total time (s) | ts | reward |
|------------------------------------+----------+-------+--------+------------------+------+----------|
| A2C_PongNoFrameskip-v4_3a054_00000 | ERROR | | 1 | 15.7172 | 500 | nan |
+------------------------------------+----------+-------+--------+------------------+------+----------+
Number of errored trials: 1
+------------------------------------+--------------+------------------------------------------------------------------------------------------------------------------------------+
| Trial name | # failures | error file |
|------------------------------------+--------------+------------------------------------------------------------------------------------------------------------------------------|
| A2C_PongNoFrameskip-v4_3a054_00000 | 1 | <project-dir>\results\A2C-Atari\A2C_PongNoFrameskip-v4_0_2020-06-24_16-06-44r41b4z_6\error.txt |
+------------------------------------+--------------+------------------------------------------------------------------------------------------------------------------------------+
(pid=21072) F0624 16:07:07.635511 21072 22432 redis_async_context.cc:57] Check failed: redis_async_context_ redis_async_context_ must not be NULL here
(pid=21072) *** Check failure stack trace: ***
(pid=21072) @ 00007FFE72593A8C public: __cdecl google::LogMessage::~LogMessage(void) __ptr64
(pid=21072) @ 00007FFE72408954 public: virtual __cdecl google::NullStreamFatal::~NullStreamFatal(void) __ptr64
(pid=21072) @ 00007FFE727DC1A0 bool __cdecl google::Symbolize(void * __ptr64,char * __ptr64,int)
(pid=21072) @ 00007FFE727C33E8 bool __cdecl google::Symbolize(void * __ptr64,char * __ptr64,int)
(pid=21072) @ 00007FFE727C3FC9 bool __cdecl google::Symbolize(void * __ptr64,char * __ptr64,int)
(pid=21072) @ 00007FFE724269B0 public: void __cdecl google::NullStreamFatal::`vbase destructor'(void) __ptr64
(pid=21072) @ 00007FFE72420550 public: void __cdecl google::NullStreamFatal::`vbase destructor'(void) __ptr64
(pid=21072) @ 00007FFE7242049B public: void __cdecl google::NullStreamFatal::`vbase destructor'(void) __ptr64
(pid=21072) @ 00007FFE723B2D81 public: class google::LogMessageVoidify & __ptr64 __cdecl google::LogMessageVoidify::operator=(class google::LogMessageVoidify const & __ptr64) __ptr64
(pid=21072) @ 00007FFE7237D439 public: class google::LogMessageVoidify & __ptr64 __cdecl google::LogMessageVoidify::operator=(class google::LogMessageVoidify const & __ptr64) __ptr64
(pid=21072) @ 00007FFEEF0B0E82 _beginthreadex
(pid=21072) @ 00007FFEEF927BD4 BaseThreadInitThunk
(pid=21072) @ 00007FFEF18CCE51 RtlUserThreadStart
Process finished with exit code 1
|
FileNotFoundError
|
def __init__(self, obs_space, action_space, config):
_validate(obs_space, action_space)
config = dict(ray.rllib.agents.qmix.qmix.DEFAULT_CONFIG, **config)
self.framework = "torch"
super().__init__(obs_space, action_space, config)
self.n_agents = len(obs_space.original_space.spaces)
self.n_actions = action_space.spaces[0].n
self.h_size = config["model"]["lstm_cell_size"]
self.has_env_global_state = False
self.has_action_mask = False
self.device = (
torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
)
agent_obs_space = obs_space.original_space.spaces[0]
if isinstance(agent_obs_space, Dict):
space_keys = set(agent_obs_space.spaces.keys())
if "obs" not in space_keys:
raise ValueError("Dict obs space must have subspace labeled `obs`")
self.obs_size = _get_size(agent_obs_space.spaces["obs"])
if "action_mask" in space_keys:
mask_shape = tuple(agent_obs_space.spaces["action_mask"].shape)
if mask_shape != (self.n_actions,):
raise ValueError(
"Action mask shape must be {}, got {}".format(
(self.n_actions,), mask_shape
)
)
self.has_action_mask = True
if ENV_STATE in space_keys:
self.env_global_state_shape = _get_size(agent_obs_space.spaces[ENV_STATE])
self.has_env_global_state = True
else:
self.env_global_state_shape = (self.obs_size, self.n_agents)
# The real agent obs space is nested inside the dict
config["model"]["full_obs_space"] = agent_obs_space
agent_obs_space = agent_obs_space.spaces["obs"]
else:
self.obs_size = _get_size(agent_obs_space)
self.env_global_state_shape = (self.obs_size, self.n_agents)
self.model = ModelCatalog.get_model_v2(
agent_obs_space,
action_space.spaces[0],
self.n_actions,
config["model"],
framework="torch",
name="model",
default_model=RNNModel,
).to(self.device)
self.target_model = ModelCatalog.get_model_v2(
agent_obs_space,
action_space.spaces[0],
self.n_actions,
config["model"],
framework="torch",
name="target_model",
default_model=RNNModel,
).to(self.device)
self.exploration = self._create_exploration()
# Setup the mixer network.
if config["mixer"] is None:
self.mixer = None
self.target_mixer = None
elif config["mixer"] == "qmix":
self.mixer = QMixer(
self.n_agents, self.env_global_state_shape, config["mixing_embed_dim"]
).to(self.device)
self.target_mixer = QMixer(
self.n_agents, self.env_global_state_shape, config["mixing_embed_dim"]
).to(self.device)
elif config["mixer"] == "vdn":
self.mixer = VDNMixer().to(self.device)
self.target_mixer = VDNMixer().to(self.device)
else:
raise ValueError("Unknown mixer type {}".format(config["mixer"]))
self.cur_epsilon = 1.0
self.update_target() # initial sync
# Setup optimizer
self.params = list(self.model.parameters())
if self.mixer:
self.params += list(self.mixer.parameters())
self.loss = QMixLoss(
self.model,
self.target_model,
self.mixer,
self.target_mixer,
self.n_agents,
self.n_actions,
self.config["double_q"],
self.config["gamma"],
)
from torch.optim import RMSprop
self.optimiser = RMSprop(
params=self.params,
lr=config["lr"],
alpha=config["optim_alpha"],
eps=config["optim_eps"],
)
|
def __init__(self, obs_space, action_space, config):
_validate(obs_space, action_space)
config = dict(ray.rllib.agents.qmix.qmix.DEFAULT_CONFIG, **config)
self.framework = "torch"
super().__init__(obs_space, action_space, config)
self.n_agents = len(obs_space.original_space.spaces)
self.n_actions = action_space.spaces[0].n
self.h_size = config["model"]["lstm_cell_size"]
self.has_env_global_state = False
self.has_action_mask = False
self.device = (
torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
)
agent_obs_space = obs_space.original_space.spaces[0]
if isinstance(agent_obs_space, Dict):
space_keys = set(agent_obs_space.spaces.keys())
if "obs" not in space_keys:
raise ValueError("Dict obs space must have subspace labeled `obs`")
self.obs_size = _get_size(agent_obs_space.spaces["obs"])
if "action_mask" in space_keys:
mask_shape = tuple(agent_obs_space.spaces["action_mask"].shape)
if mask_shape != (self.n_actions,):
raise ValueError(
"Action mask shape must be {}, got {}".format(
(self.n_actions,), mask_shape
)
)
self.has_action_mask = True
if ENV_STATE in space_keys:
self.env_global_state_shape = _get_size(agent_obs_space.spaces[ENV_STATE])
self.has_env_global_state = True
else:
self.env_global_state_shape = (self.obs_size, self.n_agents)
# The real agent obs space is nested inside the dict
config["model"]["full_obs_space"] = agent_obs_space
agent_obs_space = agent_obs_space.spaces["obs"]
else:
self.obs_size = _get_size(agent_obs_space)
self.model = ModelCatalog.get_model_v2(
agent_obs_space,
action_space.spaces[0],
self.n_actions,
config["model"],
framework="torch",
name="model",
default_model=RNNModel,
).to(self.device)
self.target_model = ModelCatalog.get_model_v2(
agent_obs_space,
action_space.spaces[0],
self.n_actions,
config["model"],
framework="torch",
name="target_model",
default_model=RNNModel,
).to(self.device)
self.exploration = self._create_exploration()
# Setup the mixer network.
if config["mixer"] is None:
self.mixer = None
self.target_mixer = None
elif config["mixer"] == "qmix":
self.mixer = QMixer(
self.n_agents, self.env_global_state_shape, config["mixing_embed_dim"]
).to(self.device)
self.target_mixer = QMixer(
self.n_agents, self.env_global_state_shape, config["mixing_embed_dim"]
).to(self.device)
elif config["mixer"] == "vdn":
self.mixer = VDNMixer().to(self.device)
self.target_mixer = VDNMixer().to(self.device)
else:
raise ValueError("Unknown mixer type {}".format(config["mixer"]))
self.cur_epsilon = 1.0
self.update_target() # initial sync
# Setup optimizer
self.params = list(self.model.parameters())
if self.mixer:
self.params += list(self.mixer.parameters())
self.loss = QMixLoss(
self.model,
self.target_model,
self.mixer,
self.target_mixer,
self.n_agents,
self.n_actions,
self.config["double_q"],
self.config["gamma"],
)
from torch.optim import RMSprop
self.optimiser = RMSprop(
params=self.params,
lr=config["lr"],
alpha=config["optim_alpha"],
eps=config["optim_eps"],
)
|
https://github.com/ray-project/ray/issues/8714
|
Failure # 1 (occurred at 2020-06-01_14-46-39)
Traceback (most recent call last):
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 467, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/worker.py", line 1522, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(AttributeError): �[36mray::QMIX.train()�[39m (pid=3884, ip=10.0.2.15)
File "python/ray/_raylet.pyx", line 421, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 456, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 459, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 460, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 414, in ray._raylet.execute_task.function_executor
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/agents/trainer_template.py", line 90, in __init__
Trainer.__init__(self, config, env, logger_creator)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 444, in __init__
super().__init__(config, logger_creator)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/tune/trainable.py", line 174, in __init__
self._setup(copy.deepcopy(self.config))
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 613, in _setup
self._init(self.config, self.env_creator)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/agents/trainer_template.py", line 115, in _init
self.config["num_workers"])
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 684, in _make_workers
logdir=self.logdir)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/evaluation/worker_set.py", line 59, in __init__
RolloutWorker, env_creator, policy, 0, self._local_config)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/evaluation/worker_set.py", line 282, in _make_worker
extra_python_environs=extra_python_environs)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/evaluation/rollout_worker.py", line 393, in __init__
policy_dict, policy_config)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/evaluation/rollout_worker.py", line 932, in _build_policy_map
policy_map[name] = cls(obs_space, act_space, merged_conf)
File "/home/ravi/dev/gym-rl/qmix/lib/python3.6/site-packages/ray/rllib/agents/qmix/qmix_policy.py", line 223, in __init__
self.mixer = QMixer(self.n_agents, self.env_global_state_shape,
AttributeError: 'QMixTorchPolicy' object has no attribute 'env_global_state_shape'
|
AttributeError
|
def recover_if_needed(self, node_id, now):
if not self.can_update(node_id):
return
key = self.provider.internal_ip(node_id)
if key not in self.load_metrics.last_heartbeat_time_by_ip:
self.load_metrics.last_heartbeat_time_by_ip[key] = now
last_heartbeat_time = self.load_metrics.last_heartbeat_time_by_ip[key]
delta = now - last_heartbeat_time
if delta < AUTOSCALER_HEARTBEAT_TIMEOUT_S:
return
logger.warning(
"StandardAutoscaler: "
"{}: No heartbeat in {}s, "
"restarting Ray to recover...".format(node_id, delta)
)
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=self.config["provider"],
provider=self.provider,
auth_config=self.config["auth"],
cluster_name=self.config["cluster_name"],
file_mounts={},
initialization_commands=[],
setup_commands=[],
ray_start_commands=with_head_node_ip(self.config["worker_start_ray_commands"]),
runtime_hash=self.runtime_hash,
process_runner=self.process_runner,
use_internal_ip=True,
docker_config=self.config["docker"],
)
updater.start()
self.updaters[node_id] = updater
|
def recover_if_needed(self, node_id, now):
if not self.can_update(node_id):
return
key = self.provider.internal_ip(node_id)
if key not in self.load_metrics.last_heartbeat_time_by_ip:
self.load_metrics.last_heartbeat_time_by_ip[key] = now
last_heartbeat_time = self.load_metrics.last_heartbeat_time_by_ip[key]
delta = now - last_heartbeat_time
if delta < AUTOSCALER_HEARTBEAT_TIMEOUT_S:
return
logger.warning(
"StandardAutoscaler: "
"{}: No heartbeat in {}s, "
"restarting Ray to recover...".format(node_id, delta)
)
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=self.config["provider"],
provider=self.provider,
auth_config=self.config["auth"],
cluster_name=self.config["cluster_name"],
file_mounts={},
initialization_commands=[],
setup_commands=[],
ray_start_commands=with_head_node_ip(self.config["worker_start_ray_commands"]),
runtime_hash=self.runtime_hash,
process_runner=self.process_runner,
use_internal_ip=True,
)
updater.start()
self.updaters[node_id] = updater
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def spawn_updater(self, node_id, init_commands, ray_start_commands):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=self.config["provider"],
provider=self.provider,
auth_config=self.config["auth"],
cluster_name=self.config["cluster_name"],
file_mounts=self.config["file_mounts"],
initialization_commands=with_head_node_ip(
self.config["initialization_commands"]
),
setup_commands=with_head_node_ip(init_commands),
ray_start_commands=with_head_node_ip(ray_start_commands),
runtime_hash=self.runtime_hash,
process_runner=self.process_runner,
use_internal_ip=True,
docker_config=self.config["docker"],
)
updater.start()
self.updaters[node_id] = updater
|
def spawn_updater(self, node_id, init_commands, ray_start_commands):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=self.config["provider"],
provider=self.provider,
auth_config=self.config["auth"],
cluster_name=self.config["cluster_name"],
file_mounts=self.config["file_mounts"],
initialization_commands=with_head_node_ip(
self.config["initialization_commands"]
),
setup_commands=with_head_node_ip(init_commands),
ray_start_commands=with_head_node_ip(ray_start_commands),
runtime_hash=self.runtime_hash,
process_runner=self.process_runner,
use_internal_ip=True,
)
updater.start()
self.updaters[node_id] = updater
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def kill_node(config_file, yes, hard, override_cluster_name):
"""Kills a random Raylet worker."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
confirm("This will kill a node in your cluster", yes)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER})
node = random.choice(nodes)
logger.info("kill_node: Shutdown worker {}".format(node))
if hard:
provider.terminate_node(node)
else:
updater = NodeUpdaterThread(
node_id=node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
docker_config=config["docker"],
)
_exec(updater, "ray stop", False, False)
time.sleep(5)
if config.get("provider", {}).get("use_internal_ips", False) is True:
node_ip = provider.internal_ip(node)
else:
node_ip = provider.external_ip(node)
finally:
provider.cleanup()
return node_ip
|
def kill_node(config_file, yes, hard, override_cluster_name):
"""Kills a random Raylet worker."""
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
confirm("This will kill a node in your cluster", yes)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = provider.non_terminated_nodes({TAG_RAY_NODE_TYPE: NODE_TYPE_WORKER})
node = random.choice(nodes)
logger.info("kill_node: Shutdown worker {}".format(node))
if hard:
provider.terminate_node(node)
else:
updater = NodeUpdaterThread(
node_id=node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
)
_exec(updater, "ray stop", False, False)
time.sleep(5)
if config.get("provider", {}).get("use_internal_ips", False) is True:
node_ip = provider.internal_ip(node)
else:
node_ip = provider.external_ip(node)
finally:
provider.cleanup()
return node_ip
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def get_or_create_head_node(
config, config_file, no_restart, restart_only, yes, override_cluster_name
):
"""Create the cluster head node, which in turn creates the workers."""
provider = get_node_provider(config["provider"], config["cluster_name"])
config_file = os.path.abspath(config_file)
try:
head_node_tags = {
TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD,
}
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) > 0:
head_node = nodes[0]
else:
head_node = None
if not head_node:
confirm("This will create a new cluster", yes)
elif not no_restart:
confirm("This will restart cluster services", yes)
launch_hash = hash_launch_conf(config["head_node"], config["auth"])
if (
head_node is None
or provider.node_tags(head_node).get(TAG_RAY_LAUNCH_CONFIG) != launch_hash
):
if head_node is not None:
confirm("Head node config out-of-date. It will be terminated", yes)
logger.info(
"get_or_create_head_node: "
"Shutting down outdated head node {}".format(head_node)
)
provider.terminate_node(head_node)
logger.info("get_or_create_head_node: Launching new head node...")
head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
config["cluster_name"]
)
provider.create_node(config["head_node"], head_node_tags, 1)
start = time.time()
head_node = None
while True:
if time.time() - start > 5:
raise RuntimeError("Failed to create head node.")
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) == 1:
head_node = nodes[0]
break
time.sleep(1)
# TODO(ekl) right now we always update the head node even if the hash
# matches. We could prompt the user for what they want to do here.
runtime_hash = hash_runtime_conf(config["file_mounts"], config)
logger.info("get_or_create_head_node: Updating files on head node...")
# Rewrite the auth config so that the head node can update the workers
remote_config = copy.deepcopy(config)
if config["provider"]["type"] != "kubernetes":
remote_key_path = "~/ray_bootstrap_key.pem"
remote_config["auth"]["ssh_private_key"] = remote_key_path
# Adjust for new file locations
new_mounts = {}
for remote_path in config["file_mounts"]:
new_mounts[remote_path] = remote_path
remote_config["file_mounts"] = new_mounts
remote_config["no_restart"] = no_restart
# Now inject the rewritten config and SSH key into the head node
remote_config_file = tempfile.NamedTemporaryFile("w", prefix="ray-bootstrap-")
remote_config_file.write(json.dumps(remote_config))
remote_config_file.flush()
config["file_mounts"].update(
{"~/ray_bootstrap_config.yaml": remote_config_file.name}
)
if config["provider"]["type"] != "kubernetes":
config["file_mounts"].update(
{
remote_key_path: config["auth"]["ssh_private_key"],
}
)
if restart_only:
init_commands = []
ray_start_commands = config["head_start_ray_commands"]
elif no_restart:
init_commands = config["head_setup_commands"]
ray_start_commands = []
else:
init_commands = config["head_setup_commands"]
ray_start_commands = config["head_start_ray_commands"]
if not no_restart:
warn_about_bad_start_command(ray_start_commands)
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=config["initialization_commands"],
setup_commands=init_commands,
ray_start_commands=ray_start_commands,
runtime_hash=runtime_hash,
docker_config=config["docker"],
)
updater.start()
updater.join()
# Refresh the node cache so we see the external ip if available
provider.non_terminated_nodes(head_node_tags)
if config.get("provider", {}).get("use_internal_ips", False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
if updater.exitcode != 0:
logger.error(
"get_or_create_head_node: Updating {} failed".format(head_node_ip)
)
sys.exit(1)
logger.info(
"get_or_create_head_node: Head node up-to-date, IP address is: {}".format(
head_node_ip
)
)
monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
use_docker = "docker" in config and bool(config["docker"]["container_name"])
if override_cluster_name:
modifiers = " --cluster-name={}".format(quote(override_cluster_name))
else:
modifiers = ""
print(
"To monitor auto-scaling activity, you can run:\n\n"
" ray exec {} {}{}{}\n".format(
config_file,
"--docker " if use_docker else "",
quote(monitor_str),
modifiers,
)
)
print(
"To open a console on the cluster:\n\n ray attach {}{}\n".format(
config_file, modifiers
)
)
print(
"To get a remote shell to the cluster manually, run:\n\n {}\n".format(
updater.cmd_runner.remote_shell_command_str()
)
)
finally:
provider.cleanup()
|
def get_or_create_head_node(
config, config_file, no_restart, restart_only, yes, override_cluster_name
):
"""Create the cluster head node, which in turn creates the workers."""
provider = get_node_provider(config["provider"], config["cluster_name"])
config_file = os.path.abspath(config_file)
try:
head_node_tags = {
TAG_RAY_NODE_TYPE: NODE_TYPE_HEAD,
}
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) > 0:
head_node = nodes[0]
else:
head_node = None
if not head_node:
confirm("This will create a new cluster", yes)
elif not no_restart:
confirm("This will restart cluster services", yes)
launch_hash = hash_launch_conf(config["head_node"], config["auth"])
if (
head_node is None
or provider.node_tags(head_node).get(TAG_RAY_LAUNCH_CONFIG) != launch_hash
):
if head_node is not None:
confirm("Head node config out-of-date. It will be terminated", yes)
logger.info(
"get_or_create_head_node: "
"Shutting down outdated head node {}".format(head_node)
)
provider.terminate_node(head_node)
logger.info("get_or_create_head_node: Launching new head node...")
head_node_tags[TAG_RAY_LAUNCH_CONFIG] = launch_hash
head_node_tags[TAG_RAY_NODE_NAME] = "ray-{}-head".format(
config["cluster_name"]
)
provider.create_node(config["head_node"], head_node_tags, 1)
start = time.time()
head_node = None
while True:
if time.time() - start > 5:
raise RuntimeError("Failed to create head node.")
nodes = provider.non_terminated_nodes(head_node_tags)
if len(nodes) == 1:
head_node = nodes[0]
break
time.sleep(1)
# TODO(ekl) right now we always update the head node even if the hash
# matches. We could prompt the user for what they want to do here.
runtime_hash = hash_runtime_conf(config["file_mounts"], config)
logger.info("get_or_create_head_node: Updating files on head node...")
# Rewrite the auth config so that the head node can update the workers
remote_config = copy.deepcopy(config)
if config["provider"]["type"] != "kubernetes":
remote_key_path = "~/ray_bootstrap_key.pem"
remote_config["auth"]["ssh_private_key"] = remote_key_path
# Adjust for new file locations
new_mounts = {}
for remote_path in config["file_mounts"]:
new_mounts[remote_path] = remote_path
remote_config["file_mounts"] = new_mounts
remote_config["no_restart"] = no_restart
# Now inject the rewritten config and SSH key into the head node
remote_config_file = tempfile.NamedTemporaryFile("w", prefix="ray-bootstrap-")
remote_config_file.write(json.dumps(remote_config))
remote_config_file.flush()
config["file_mounts"].update(
{"~/ray_bootstrap_config.yaml": remote_config_file.name}
)
if config["provider"]["type"] != "kubernetes":
config["file_mounts"].update(
{
remote_key_path: config["auth"]["ssh_private_key"],
}
)
if restart_only:
init_commands = []
ray_start_commands = config["head_start_ray_commands"]
elif no_restart:
init_commands = config["head_setup_commands"]
ray_start_commands = []
else:
init_commands = config["head_setup_commands"]
ray_start_commands = config["head_start_ray_commands"]
if not no_restart:
warn_about_bad_start_command(ray_start_commands)
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=config["initialization_commands"],
setup_commands=init_commands,
ray_start_commands=ray_start_commands,
runtime_hash=runtime_hash,
)
updater.start()
updater.join()
# Refresh the node cache so we see the external ip if available
provider.non_terminated_nodes(head_node_tags)
if config.get("provider", {}).get("use_internal_ips", False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
if updater.exitcode != 0:
logger.error(
"get_or_create_head_node: Updating {} failed".format(head_node_ip)
)
sys.exit(1)
logger.info(
"get_or_create_head_node: Head node up-to-date, IP address is: {}".format(
head_node_ip
)
)
monitor_str = "tail -n 100 -f /tmp/ray/session_*/logs/monitor*"
use_docker = "docker" in config and bool(config["docker"]["container_name"])
if override_cluster_name:
modifiers = " --cluster-name={}".format(quote(override_cluster_name))
else:
modifiers = ""
print(
"To monitor auto-scaling activity, you can run:\n\n"
" ray exec {} {}{}{}\n".format(
config_file,
"--docker " if use_docker else "",
quote(monitor_str),
modifiers,
)
)
print(
"To open a console on the cluster:\n\n ray attach {}{}\n".format(
config_file, modifiers
)
)
print(
"To get a remote shell to the cluster manually, run:\n\n {}\n".format(
updater.cmd_runner.remote_shell_command_str()
)
)
finally:
provider.cleanup()
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def exec_cluster(
config_file,
cmd=None,
docker=False,
screen=False,
tmux=False,
stop=False,
start=False,
override_cluster_name=None,
port_forward=None,
with_output=False,
):
"""Runs a command on the specified cluster.
Arguments:
config_file: path to the cluster yaml
cmd: command to run
docker: whether to run command in docker container of config
screen: whether to run in a screen
tmux: whether to run in a tmux session
stop: whether to stop the cluster after command run
start: whether to start the cluster if it isn't up
override_cluster_name: set the name of the cluster
port_forward (int or list[int]): port(s) to forward
"""
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=start
)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
docker_config=config["docker"],
)
def wrap_docker(command):
container_name = config["docker"]["container_name"]
if not container_name:
raise ValueError("Docker container not specified in config.")
return with_docker_exec([command], container_name=container_name)[0]
if cmd:
cmd = wrap_docker(cmd) if docker else cmd
if stop:
shutdown_cmd = (
"ray stop; ray teardown ~/ray_bootstrap_config.yaml "
"--yes --workers-only"
)
if docker:
shutdown_cmd = wrap_docker(shutdown_cmd)
cmd += "; {}; sudo shutdown -h now".format(shutdown_cmd)
result = _exec(
updater,
cmd,
screen,
tmux,
port_forward=port_forward,
with_output=with_output,
)
if tmux or screen:
attach_command_parts = ["ray attach", config_file]
if override_cluster_name is not None:
attach_command_parts.append(
"--cluster-name={}".format(override_cluster_name)
)
if tmux:
attach_command_parts.append("--tmux")
elif screen:
attach_command_parts.append("--screen")
attach_command = " ".join(attach_command_parts)
attach_info = "Use `{}` to check on command status.".format(attach_command)
logger.info(attach_info)
return result
finally:
provider.cleanup()
|
def exec_cluster(
config_file,
cmd=None,
docker=False,
screen=False,
tmux=False,
stop=False,
start=False,
override_cluster_name=None,
port_forward=None,
with_output=False,
):
"""Runs a command on the specified cluster.
Arguments:
config_file: path to the cluster yaml
cmd: command to run
docker: whether to run command in docker container of config
screen: whether to run in a screen
tmux: whether to run in a tmux session
stop: whether to stop the cluster after command run
start: whether to start the cluster if it isn't up
override_cluster_name: set the name of the cluster
port_forward (int or list[int]): port(s) to forward
"""
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=start
)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
updater = NodeUpdaterThread(
node_id=head_node,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
)
def wrap_docker(command):
container_name = config["docker"]["container_name"]
if not container_name:
raise ValueError("Docker container not specified in config.")
return with_docker_exec([command], container_name=container_name)[0]
if cmd:
cmd = wrap_docker(cmd) if docker else cmd
if stop:
shutdown_cmd = (
"ray stop; ray teardown ~/ray_bootstrap_config.yaml "
"--yes --workers-only"
)
if docker:
shutdown_cmd = wrap_docker(shutdown_cmd)
cmd += "; {}; sudo shutdown -h now".format(shutdown_cmd)
result = _exec(
updater,
cmd,
screen,
tmux,
port_forward=port_forward,
with_output=with_output,
)
if tmux or screen:
attach_command_parts = ["ray attach", config_file]
if override_cluster_name is not None:
attach_command_parts.append(
"--cluster-name={}".format(override_cluster_name)
)
if tmux:
attach_command_parts.append("--tmux")
elif screen:
attach_command_parts.append("--screen")
attach_command = " ".join(attach_command_parts)
attach_info = "Use `{}` to check on command status.".format(attach_command)
logger.info(attach_info)
return result
finally:
provider.cleanup()
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def rsync(config_file, source, target, override_cluster_name, down, all_nodes=False):
"""Rsyncs files.
Arguments:
config_file: path to the cluster yaml
source: source dir
target: target dir
override_cluster_name: set the name of the cluster
down: whether we're syncing remote -> local
all_nodes: whether to sync worker nodes in addition to the head node
"""
assert bool(source) == bool(target), (
"Must either provide both or neither source and target."
)
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = []
if all_nodes:
# technically we re-open the provider for no reason
# in get_worker_nodes but it's cleaner this way
# and _get_head_node does this too
nodes = _get_worker_nodes(config, override_cluster_name)
nodes += [
_get_head_node(
config, config_file, override_cluster_name, create_if_needed=False
)
]
for node_id in nodes:
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
docker_config=config["docker"],
)
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
rsync(source, target)
else:
updater.sync_file_mounts(rsync)
finally:
provider.cleanup()
|
def rsync(config_file, source, target, override_cluster_name, down, all_nodes=False):
"""Rsyncs files.
Arguments:
config_file: path to the cluster yaml
source: source dir
target: target dir
override_cluster_name: set the name of the cluster
down: whether we're syncing remote -> local
all_nodes: whether to sync worker nodes in addition to the head node
"""
assert bool(source) == bool(target), (
"Must either provide both or neither source and target."
)
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config)
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
nodes = []
if all_nodes:
# technically we re-open the provider for no reason
# in get_worker_nodes but it's cleaner this way
# and _get_head_node does this too
nodes = _get_worker_nodes(config, override_cluster_name)
nodes += [
_get_head_node(
config, config_file, override_cluster_name, create_if_needed=False
)
]
for node_id in nodes:
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
)
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
rsync(source, target)
else:
updater.sync_file_mounts(rsync)
finally:
provider.cleanup()
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def docker_start_cmds(user, image, mount, cname, user_options):
cmds = []
# create flags
# ports for the redis, object manager, and tune client
port_flags = " ".join(
["-p {port}:{port}".format(port=port) for port in ["6379", "8076", "4321"]]
)
mount_flags = " ".join(
["-v {src}:{dest}".format(src=k, dest=v) for k, v in mount.items()]
)
# for click, used in ray cli
env_vars = {"LC_ALL": "C.UTF-8", "LANG": "C.UTF-8"}
env_flags = " ".join(
["-e {name}={val}".format(name=k, val=v) for k, v in env_vars.items()]
)
user_options_str = " ".join(user_options)
# docker run command
docker_check = check_docker_running_cmd(cname) + " || "
docker_run = [
"docker",
"run",
"--rm",
"--name {}".format(cname),
"-d",
"-it",
port_flags,
mount_flags,
env_flags,
user_options_str,
"--net=host",
image,
"bash",
]
cmds.append(docker_check + " ".join(docker_run))
return cmds
|
def docker_start_cmds(user, image, mount, cname, user_options):
cmds = []
# create flags
# ports for the redis, object manager, and tune client
port_flags = " ".join(
["-p {port}:{port}".format(port=port) for port in ["6379", "8076", "4321"]]
)
mount_flags = " ".join(
["-v {src}:{dest}".format(src=k, dest=v) for k, v in mount.items()]
)
# for click, used in ray cli
env_vars = {"LC_ALL": "C.UTF-8", "LANG": "C.UTF-8"}
env_flags = " ".join(
["-e {name}={val}".format(name=k, val=v) for k, v in env_vars.items()]
)
user_options_str = " ".join(user_options)
# docker run command
docker_check = ["docker", "inspect", "-f", "'{{.State.Running}}'", cname, "||"]
docker_run = [
"docker",
"run",
"--rm",
"--name {}".format(cname),
"-d",
"-it",
port_flags,
mount_flags,
env_flags,
user_options_str,
"--net=host",
image,
"bash",
]
cmds.append(" ".join(docker_check + docker_run))
return cmds
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def get_command_runner(
self,
log_prefix,
node_id,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
docker_config=None,
):
return KubernetesCommandRunner(
log_prefix, self.namespace, node_id, auth_config, process_runner
)
|
def get_command_runner(
self,
log_prefix,
node_id,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
):
return KubernetesCommandRunner(
log_prefix, self.namespace, node_id, auth_config, process_runner
)
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def get_command_runner(
self,
log_prefix,
node_id,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
docker_config=None,
):
"""Returns the CommandRunner class used to perform SSH commands.
Args:
log_prefix(str): stores "NodeUpdater: {}: ".format(<node_id>). Used
to print progress in the CommandRunner.
node_id(str): the node ID.
auth_config(dict): the authentication configs from the autoscaler
yaml file.
cluster_name(str): the name of the cluster.
process_runner(module): the module to use to run the commands
in the CommandRunner. E.g., subprocess.
use_internal_ip(bool): whether the node_id belongs to an internal ip
or external ip.
docker_config(dict): If set, the docker information of the docker
container that commands should be run on.
"""
common_args = {
"log_prefix": log_prefix,
"node_id": node_id,
"provider": self,
"auth_config": auth_config,
"cluster_name": cluster_name,
"process_runner": process_runner,
"use_internal_ip": use_internal_ip,
}
if docker_config and docker_config["container_name"] != "":
return DockerCommandRunner(docker_config, **common_args)
else:
return SSHCommandRunner(**common_args)
|
def get_command_runner(
self,
log_prefix,
node_id,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
):
"""Returns the CommandRunner class used to perform SSH commands.
Args:
log_prefix(str): stores "NodeUpdater: {}: ".format(<node_id>). Used
to print progress in the CommandRunner.
node_id(str): the node ID.
auth_config(dict): the authentication configs from the autoscaler
yaml file.
cluster_name(str): the name of the cluster.
process_runner(module): the module to use to run the commands
in the CommandRunner. E.g., subprocess.
use_internal_ip(bool): whether the node_id belongs to an internal ip
or external ip.
"""
return SSHCommandRunner(
log_prefix,
node_id,
self,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
)
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def __init__(
self,
node_id,
provider_config,
provider,
auth_config,
cluster_name,
file_mounts,
initialization_commands,
setup_commands,
ray_start_commands,
runtime_hash,
process_runner=subprocess,
use_internal_ip=False,
docker_config=None,
):
self.log_prefix = "NodeUpdater: {}: ".format(node_id)
use_internal_ip = use_internal_ip or provider_config.get("use_internal_ips", False)
self.cmd_runner = provider.get_command_runner(
self.log_prefix,
node_id,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
docker_config,
)
self.daemon = True
self.process_runner = process_runner
self.node_id = node_id
self.provider = provider
self.file_mounts = {
remote: os.path.expanduser(local) for remote, local in file_mounts.items()
}
self.initialization_commands = initialization_commands
self.setup_commands = setup_commands
self.ray_start_commands = ray_start_commands
self.runtime_hash = runtime_hash
|
def __init__(
self,
node_id,
provider_config,
provider,
auth_config,
cluster_name,
file_mounts,
initialization_commands,
setup_commands,
ray_start_commands,
runtime_hash,
process_runner=subprocess,
use_internal_ip=False,
):
self.log_prefix = "NodeUpdater: {}: ".format(node_id)
use_internal_ip = use_internal_ip or provider_config.get("use_internal_ips", False)
self.cmd_runner = provider.get_command_runner(
self.log_prefix,
node_id,
auth_config,
cluster_name,
process_runner,
use_internal_ip,
)
self.daemon = True
self.process_runner = process_runner
self.node_id = node_id
self.provider = provider
self.file_mounts = {
remote: os.path.expanduser(local) for remote, local in file_mounts.items()
}
self.initialization_commands = initialization_commands
self.setup_commands = setup_commands
self.ray_start_commands = ray_start_commands
self.runtime_hash = runtime_hash
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def run_rsync_up(self, source, target):
self.ssh_command_runner.run_rsync_up(source, target)
if self.check_container_status():
self.ssh_command_runner.run(
"docker cp {} {}:{}".format(
target, self.docker_name, self.docker_expand_user(target)
)
)
|
def run_rsync_up(self, source, target):
self.set_ssh_ip_if_required()
self.process_runner.check_call(
[
"rsync",
"--rsh",
" ".join(["ssh"] + self.get_default_ssh_options(120)),
"-avz",
source,
"{}@{}:{}".format(self.ssh_user, self.ssh_ip, target),
]
)
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def run_rsync_down(self, source, target):
self.ssh_command_runner.run(
"docker cp {}:{} {}".format(
self.docker_name, self.docker_expand_user(source), source
)
)
self.ssh_command_runner.run_rsync_down(source, target)
|
def run_rsync_down(self, source, target):
self.set_ssh_ip_if_required()
self.process_runner.check_call(
[
"rsync",
"--rsh",
" ".join(["ssh"] + self.get_default_ssh_options(120)),
"-avz",
"{}@{}:{}".format(self.ssh_user, self.ssh_ip, source),
target,
]
)
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def remote_shell_command_str(self):
inner_str = (
self.ssh_command_runner.remote_shell_command_str()
.replace("ssh", "ssh -tt", 1)
.strip("\n")
)
return inner_str + " docker exec -it {} /bin/bash\n".format(self.docker_name)
|
def remote_shell_command_str(self):
return "ssh -o IdentitiesOnly=yes -i {} {}@{}\n".format(
self.ssh_private_key, self.ssh_user, self.ssh_ip
)
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def submit(
cluster_config_file,
docker,
screen,
tmux,
stop,
start,
cluster_name,
port_forward,
script,
args,
script_args,
):
"""Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
Example:
>>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
"""
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
assert not (script_args and args), "Use -- --arg1 --arg2 for script args."
if args:
logger.warning(
"ray submit [yaml] [script.py] --args=... is deprecated and "
"will be removed in a future version of Ray. Use "
"`ray submit [yaml] script.py -- --arg1 --arg2` instead."
)
if start:
create_or_update_cluster(
cluster_config_file, None, None, False, False, True, cluster_name
)
target = os.path.basename(script)
if not docker:
target = os.path.join("~", target)
rsync(cluster_config_file, script, target, cluster_name, down=False)
command_parts = ["python", target]
if script_args:
command_parts += list(script_args)
elif args is not None:
command_parts += [args]
port_forward = [(port, port) for port in list(port_forward)]
cmd = " ".join(command_parts)
exec_cluster(
cluster_config_file,
cmd,
docker,
screen,
tmux,
stop,
start=False,
override_cluster_name=cluster_name,
port_forward=port_forward,
)
|
def submit(
cluster_config_file,
docker,
screen,
tmux,
stop,
start,
cluster_name,
port_forward,
script,
args,
script_args,
):
"""Uploads and runs a script on the specified cluster.
The script is automatically synced to the following location:
os.path.join("~", os.path.basename(script))
Example:
>>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
"""
assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
assert not (script_args and args), "Use -- --arg1 --arg2 for script args."
if args:
logger.warning(
"ray submit [yaml] [script.py] --args=... is deprecated and "
"will be removed in a future version of Ray. Use "
"`ray submit [yaml] script.py -- --arg1 --arg2` instead."
)
if start:
create_or_update_cluster(
cluster_config_file, None, None, False, False, True, cluster_name
)
target = os.path.join("~", os.path.basename(script))
rsync(cluster_config_file, script, target, cluster_name, down=False)
command_parts = ["python", target]
if script_args:
command_parts += list(script_args)
elif args is not None:
command_parts += [args]
port_forward = [(port, port) for port in list(port_forward)]
cmd = " ".join(command_parts)
exec_cluster(
cluster_config_file,
cmd,
docker,
screen,
tmux,
stop,
start=False,
override_cluster_name=cluster_name,
port_forward=port_forward,
)
|
https://github.com/ray-project/ray/issues/8830
|
ray up config/example_full.yaml
2020-06-08 01:13:10,526 INFO config.py:143 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::<redacted>:instance-profile/ray-autoscaler-v1
2020-06-08 01:13:11,089 INFO config.py:194 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-west-2
2020-06-08 01:13:11,365 INFO config.py:235 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,366 INFO config.py:241 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-<redacted>', 'us-west-2b'), ('subnet-<redacted>', 'us-west-2a')]
2020-06-08 01:13:11,725 INFO config.py:261 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-default (sg-<redacted>)
2020-06-08 01:13:11,725 INFO config.py:268 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-default (sg-<redacted>)
This will restart cluster services [y/N]: y
2020-06-08 01:13:15,214 INFO commands.py:238 -- get_or_create_head_node: Updating files on head node...
2020-06-08 01:13:15,215 INFO updater.py:379 -- NodeUpdater: i-<redacted>: Updating to <redacted>
2020-06-08 01:13:15,216 INFO updater.py:423 -- NodeUpdater: i-<redacted>: Waiting for remote shell...
2020-06-08 01:13:15,422 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-<redacted>
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 383, in run
self.do_update()
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 450, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/ray/python/ray/autoscaler/updater.py", line 444, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-06-08 01:11:59,679 ERROR commands.py:304 -- get_or_create_head_node: Updating <redacted> failed
2020-06-08 01:11:59,701 INFO log_timer.py:22 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-<redacted>'] [LogTimer=333ms]
|
AssertionError
|
def get_async(object_id):
"""Asyncio compatible version of ray.get"""
# Delayed import because raylet import this file and
# it creates circular imports.
from ray.experimental.async_api import init as async_api_init, as_future
from ray.experimental.async_plasma import PlasmaObjectFuture
assert isinstance(object_id, ray.ObjectID), "Batched get is not supported."
# Setup
async_api_init()
loop = asyncio.get_event_loop()
core_worker = ray.worker.global_worker.core_worker
# Here's the callback used to implement async get logic.
# What we want:
# - If direct call, first try to get it from in memory store.
# If the object if promoted to plasma, retry it from plasma API.
# - If not direct call, directly use plasma API to get it.
user_future = loop.create_future()
# We have three future objects here.
# user_future is directly returned to the user from this function.
# and it will be eventually fulfilled by the final result.
# inner_future is the first attempt to retrieve the object. It can be
# fulfilled by either core_worker.get_async or plasma_api.as_future.
# When inner_future completes, done_callback will be invoked. This
# callback set the final object in user_future if the object hasn't
# been promoted by plasma, otherwise it will retry from plasma.
# retry_plasma_future is only created when we are getting objects that's
# promoted to plasma. It will also invoke the done_callback when it's
# fulfilled.
def done_callback(future):
result = future.result()
# Result from async plasma, transparently pass it to user future
if isinstance(future, PlasmaObjectFuture):
if isinstance(result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.as_instanceof_cause())
else:
user_future.set_result(result)
else:
# Result from direct call.
assert isinstance(result, AsyncGetResponse), result
if result.plasma_fallback_id is None:
# If this future has result set already, we just need to
# skip the set result/exception procedure.
if user_future.done():
return
if isinstance(result.result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.result.as_instanceof_cause())
else:
user_future.set_result(result.result)
else:
# Schedule plasma to async get, use the the same callback.
retry_plasma_future = as_future(result.plasma_fallback_id)
retry_plasma_future.add_done_callback(done_callback)
# A hack to keep reference to the future so it doesn't get GC.
user_future.retry_plasma_future = retry_plasma_future
inner_future = loop.create_future()
# We must add the done_callback before sending to in_memory_store_get
inner_future.add_done_callback(done_callback)
core_worker.in_memory_store_get_async(object_id, inner_future)
# A hack to keep reference to inner_future so it doesn't get GC.
user_future.inner_future = inner_future
# A hack to keep a reference to the object ID for ref counting.
user_future.object_id = object_id
return user_future
|
def get_async(object_id):
"""Asyncio compatible version of ray.get"""
# Delayed import because raylet import this file and
# it creates circular imports.
from ray.experimental.async_api import init as async_api_init, as_future
from ray.experimental.async_plasma import PlasmaObjectFuture
assert isinstance(object_id, ray.ObjectID), "Batched get is not supported."
# Setup
async_api_init()
loop = asyncio.get_event_loop()
core_worker = ray.worker.global_worker.core_worker
# Here's the callback used to implement async get logic.
# What we want:
# - If direct call, first try to get it from in memory store.
# If the object if promoted to plasma, retry it from plasma API.
# - If not direct call, directly use plasma API to get it.
user_future = loop.create_future()
# We have three future objects here.
# user_future is directly returned to the user from this function.
# and it will be eventually fulfilled by the final result.
# inner_future is the first attempt to retrieve the object. It can be
# fulfilled by either core_worker.get_async or plasma_api.as_future.
# When inner_future completes, done_callback will be invoked. This
# callback set the final object in user_future if the object hasn't
# been promoted by plasma, otherwise it will retry from plasma.
# retry_plasma_future is only created when we are getting objects that's
# promoted to plasma. It will also invoke the done_callback when it's
# fulfilled.
def done_callback(future):
result = future.result()
# Result from async plasma, transparently pass it to user future
if isinstance(future, PlasmaObjectFuture):
if isinstance(result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.as_instanceof_cause())
else:
user_future.set_result(result)
else:
# Result from direct call.
assert isinstance(result, AsyncGetResponse), result
if result.plasma_fallback_id is None:
if isinstance(result.result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.result.as_instanceof_cause())
else:
user_future.set_result(result.result)
else:
# Schedule plasma to async get, use the the same callback.
retry_plasma_future = as_future(result.plasma_fallback_id)
retry_plasma_future.add_done_callback(done_callback)
# A hack to keep reference to the future so it doesn't get GC.
user_future.retry_plasma_future = retry_plasma_future
inner_future = loop.create_future()
# We must add the done_callback before sending to in_memory_store_get
inner_future.add_done_callback(done_callback)
core_worker.in_memory_store_get_async(object_id, inner_future)
# A hack to keep reference to inner_future so it doesn't get GC.
user_future.inner_future = inner_future
# A hack to keep a reference to the object ID for ref counting.
user_future.object_id = object_id
return user_future
|
https://github.com/ray-project/ray/issues/8841
|
Exception in callback get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65
handle: <Handle get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65>
Traceback (most recent call last):
File "/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py", line 83, in done_callback
user_future.set_result(result.result)
asyncio.base_futures.InvalidStateError: invalid state
|
asyncio.base_futures.InvalidStateError
|
def done_callback(future):
result = future.result()
# Result from async plasma, transparently pass it to user future
if isinstance(future, PlasmaObjectFuture):
if isinstance(result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.as_instanceof_cause())
else:
user_future.set_result(result)
else:
# Result from direct call.
assert isinstance(result, AsyncGetResponse), result
if result.plasma_fallback_id is None:
# If this future has result set already, we just need to
# skip the set result/exception procedure.
if user_future.done():
return
if isinstance(result.result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.result.as_instanceof_cause())
else:
user_future.set_result(result.result)
else:
# Schedule plasma to async get, use the the same callback.
retry_plasma_future = as_future(result.plasma_fallback_id)
retry_plasma_future.add_done_callback(done_callback)
# A hack to keep reference to the future so it doesn't get GC.
user_future.retry_plasma_future = retry_plasma_future
|
def done_callback(future):
result = future.result()
# Result from async plasma, transparently pass it to user future
if isinstance(future, PlasmaObjectFuture):
if isinstance(result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.as_instanceof_cause())
else:
user_future.set_result(result)
else:
# Result from direct call.
assert isinstance(result, AsyncGetResponse), result
if result.plasma_fallback_id is None:
if isinstance(result.result, ray.exceptions.RayTaskError):
ray.worker.last_task_error_raise_time = time.time()
user_future.set_exception(result.result.as_instanceof_cause())
else:
user_future.set_result(result.result)
else:
# Schedule plasma to async get, use the the same callback.
retry_plasma_future = as_future(result.plasma_fallback_id)
retry_plasma_future.add_done_callback(done_callback)
# A hack to keep reference to the future so it doesn't get GC.
user_future.retry_plasma_future = retry_plasma_future
|
https://github.com/ray-project/ray/issues/8841
|
Exception in callback get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65
handle: <Handle get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65>
Traceback (most recent call last):
File "/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "/Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py", line 83, in done_callback
user_future.set_result(result.result)
asyncio.base_futures.InvalidStateError: invalid state
|
asyncio.base_futures.InvalidStateError
|
def _unpack_observation(self, obs_batch):
"""Unpacks the observation, action mask, and state (if present)
from agent grouping.
Returns:
obs (np.ndarray): obs tensor of shape [B, n_agents, obs_size]
mask (np.ndarray): action mask, if any
state (np.ndarray or None): state tensor of shape [B, state_size]
or None if it is not in the batch
"""
unpacked = _unpack_obs(
np.array(obs_batch, dtype=np.float32),
self.observation_space.original_space,
tensorlib=np,
)
if isinstance(unpacked[0], dict):
unpacked_obs = [np.concatenate(tree.flatten(u["obs"]), 1) for u in unpacked]
else:
unpacked_obs = unpacked
obs = np.concatenate(unpacked_obs, axis=1).reshape(
[len(obs_batch), self.n_agents, self.obs_size]
)
if self.has_action_mask:
action_mask = np.concatenate(
[o["action_mask"] for o in unpacked], axis=1
).reshape([len(obs_batch), self.n_agents, self.n_actions])
else:
action_mask = np.ones(
[len(obs_batch), self.n_agents, self.n_actions], dtype=np.float32
)
if self.has_env_global_state:
state = unpacked[0][ENV_STATE]
else:
state = None
return obs, action_mask, state
|
def _unpack_observation(self, obs_batch):
"""Unpacks the observation, action mask, and state (if present)
from agent grouping.
Returns:
obs (np.ndarray): obs tensor of shape [B, n_agents, obs_size]
mask (np.ndarray): action mask, if any
state (np.ndarray or None): state tensor of shape [B, state_size]
or None if it is not in the batch
"""
unpacked = _unpack_obs(
np.array(obs_batch, dtype=np.float32),
self.observation_space.original_space,
tensorlib=np,
)
if self.has_action_mask:
obs = np.concatenate([o["obs"] for o in unpacked], axis=1).reshape(
[len(obs_batch), self.n_agents, self.obs_size]
)
action_mask = np.concatenate(
[o["action_mask"] for o in unpacked], axis=1
).reshape([len(obs_batch), self.n_agents, self.n_actions])
else:
if isinstance(unpacked[0], dict):
unpacked_obs = [u["obs"] for u in unpacked]
else:
unpacked_obs = unpacked
obs = np.concatenate(unpacked_obs, axis=1).reshape(
[len(obs_batch), self.n_agents, self.obs_size]
)
action_mask = np.ones(
[len(obs_batch), self.n_agents, self.n_actions], dtype=np.float32
)
if self.has_env_global_state:
state = unpacked[0][ENV_STATE]
else:
state = None
return obs, action_mask, state
|
https://github.com/ray-project/ray/issues/8523
|
2020-05-20 15:40:03,477 ERROR trial_runner.py:519 -- Trial QMIX_grouped_twostep_c960c_00002: Error processing event.
Traceback (most recent call last):
File "[...]/ray/tune/trial_runner.py", line 467, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "[...]/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "[...]/ray/worker.py", line 1516, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::QMIX.train() (pid=54834, ip=128.232.69.20)
File "python/ray/_raylet.pyx", line 460, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 414, in ray._raylet.execute_task.function_executor
File "[...]/ray/rllib/agents/trainer.py", line 504, in train
raise e
File "[...]/ray/rllib/agents/trainer.py", line 490, in train
result = Trainable.train(self)
File "[...]/ray/tune/trainable.py", line 260, in train
result = self._train()
File "[...]/ray/rllib/agents/trainer_template.py", line 138, in _train
return self._train_exec_impl()
File "[...]/ray/rllib/agents/trainer_template.py", line 173, in _train_exec_impl
res = next(self.train_exec_impl)
File "[...]/ray/util/iter.py", line 689, in __next__
return next(self.built_iterator)
File "[...]/ray/util/iter.py", line 702, in apply_foreach
for item in it:
File "[...]/ray/util/iter.py", line 772, in apply_filter
for item in it:
File "[...]/ray/util/iter.py", line 772, in apply_filter
for item in it:
File "[...]/ray/util/iter.py", line 702, in apply_foreach
for item in it:
File "[...]/ray/util/iter.py", line 772, in apply_filter
for item in it:
File "[...]/ray/util/iter.py", line 977, in build_union
item = next(it)
File "[...]/ray/util/iter.py", line 689, in __next__
return next(self.built_iterator)
File "[...]/ray/util/iter.py", line 702, in apply_foreach
for item in it:
File "[...]/ray/util/iter.py", line 702, in apply_foreach
for item in it:
File "[...]/ray/util/iter.py", line 702, in apply_foreach
for item in it:
File "[...]/ray/rllib/execution/rollout_ops.py", line 70, in sampler
yield workers.local_worker().sample()
File "[...]/ray/rllib/evaluation/rollout_worker.py", line 515, in sample
batches = [self.input_reader.next()]
File "[...]/ray/rllib/evaluation/sampler.py", line 56, in next
batches = [self.get_data()]
File "[...]/ray/rllib/evaluation/sampler.py", line 101, in get_data
item = next(self.rollout_provider)
File "[...]/ray/rllib/evaluation/sampler.py", line 367, in _env_runner
active_episodes)
File "[...]/ray/rllib/evaluation/sampler.py", line 637, in _do_policy_eval
timestep=policy.global_timestep)
File "[...]/ray/rllib/agents/qmix/qmix_policy.py", line 260, in compute_actions
obs_batch, action_mask, _ = self._unpack_observation(obs_batch)
File "[...]/ray/rllib/agents/qmix/qmix_policy.py", line 486, in _unpack_observation
axis=1).reshape([len(obs_batch), self.n_agents, self.obs_size])
File "<__array_function__ internals>", line 6, in concatenate
ValueError: zero-dimensional arrays cannot be concatenated
|
ValueError
|
def train_epoch(self, *pargs, **kwargs):
def benchmark():
self.optimizer.zero_grad()
output = self.model(self.data)
loss = F.cross_entropy(output, self.target)
loss.backward()
self.optimizer.step()
print("Running warmup...")
if self.global_step == 0:
timeit.timeit(benchmark, number=args.num_warmup_batches)
self.global_step += 1
print("Running benchmark...")
time = timeit.timeit(benchmark, number=args.num_batches_per_iter)
img_sec = args.batch_size * args.num_batches_per_iter / time
return {"img_sec": img_sec}
|
def train_epoch(self, *pargs, **kwargs):
# print(self.model)
def benchmark():
self.optimizer.zero_grad()
output = self.model(self.data)
loss = F.cross_entropy(output, self.target)
loss.backward()
self.optimizer.step()
# print("Running warmup...")
if self.global_step == 0:
timeit.timeit(benchmark, number=args.num_warmup_batches)
self.global_step += 1
# print("Running benchmark...")
time = timeit.timeit(benchmark, number=args.num_batches_per_iter)
img_sec = args.batch_size * args.num_batches_per_iter / time
return {"img_sec": img_sec}
|
https://github.com/ray-project/ray/issues/8002
|
$ python dcgan.py --num-workers 44
2020-04-13 16:32:44,305 INFO resource_spec.py:204 -- Starting Ray with 120.21 GiB memory available for workers and up to 55.52 GiB for objects. You can adjust these settings with ray.init(me
mory=<bytes>, object_store_memory=<bytes>).
2020-04-13 16:32:44,732 INFO services.py:1146 -- View the Ray dashboard at localhost:8265
Traceback (most recent call last):
File "dcgan.py", line 283, in <module>
trainer = train_example(
File "dcgan.py", line 236, in train_example
trainer = TorchTrainer(
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/torch_trainer.py", line 233, in __init__
self._start_workers(self.max_replicas)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/torch_trainer.py", line 320, in _start_workers
self.local_worker.setup(address, 0, num_workers)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 46, in setup
self._setup_training()
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 92, in _setup_training
self.training_operator = self.training_operator_cls(
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/training_operator.py", line 96, in __init__
self.setup(config)
File "dcgan.py", line 136, in setup
torch.load(config["classification_model_path"]))
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 525, in load
with _open_file_like(f, 'rb') as opened_file:
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 212, in _open_file_like
return _open_file(name_or_buffer, mode)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 193, in __init__
super(_open_file, self).__init__(open(name, mode))
FileNotFoundError: [Errno 2] No such file or directory: '/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/examples/mnist_cnn.pt'
2020-04-13 16:32:49,606 ERROR worker.py:1011 -- Possible unhandled error from worker: ray::DistributedTorchRunner.setup() (pid=67029, ip=10.125.21.189)
File "python/ray/_raylet.pyx", line 452, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 407, in ray._raylet.execute_task.function_executor
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 46, in setup
self._setup_training()
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 92, in _setup_training
self.training_operator = self.training_operator_cls(
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/training_operator.py", line 96, in __init__
self.setup(config)
File "dcgan.py", line 136, in setup
torch.load(config["classification_model_path"]))
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 525, in load
with _open_file_like(f, 'rb') as opened_file:
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 212, in _open_file_like
return _open_file(name_or_buffer, mode)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 193, in __init__
super(_open_file, self).__init__(open(name, mode))
FileNotFoundError: [Errno 2] No such file or directory: '/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/examples/mnist_cnn.pt'
|
FileNotFoundError
|
def train_example(num_workers=1, use_gpu=False, test_mode=False):
config = {
"test_mode": test_mode,
"batch_size": 16 if test_mode else 512 // num_workers,
"classification_model_path": MODEL_PATH,
}
trainer = TorchTrainer(
model_creator=model_creator,
data_creator=data_creator,
optimizer_creator=optimizer_creator,
loss_creator=nn.BCELoss,
training_operator_cls=GANOperator,
num_workers=num_workers,
config=config,
use_gpu=use_gpu,
use_tqdm=True,
)
from tabulate import tabulate
pbar = trange(5, unit="epoch")
for itr in pbar:
stats = trainer.train(info=dict(epoch_idx=itr, num_epochs=5))
pbar.set_postfix(dict(loss_g=stats["loss_g"], loss_d=stats["loss_d"]))
formatted = tabulate([stats], headers="keys")
if itr > 0: # Get the last line of the stats.
formatted = formatted.split("\n")[-1]
pbar.write(formatted)
return trainer
|
def train_example(num_workers=1, use_gpu=False, test_mode=False):
config = {
"test_mode": test_mode,
"batch_size": 16 if test_mode else 512 // num_workers,
"classification_model_path": os.path.join(
os.path.dirname(ray.__file__), "util/sgd/torch/examples/mnist_cnn.pt"
),
}
trainer = TorchTrainer(
model_creator=model_creator,
data_creator=data_creator,
optimizer_creator=optimizer_creator,
loss_creator=nn.BCELoss,
training_operator_cls=GANOperator,
num_workers=num_workers,
config=config,
use_gpu=use_gpu,
use_tqdm=True,
)
from tabulate import tabulate
pbar = trange(5, unit="epoch")
for itr in pbar:
stats = trainer.train(info=dict(epoch_idx=itr, num_epochs=5))
pbar.set_postfix(dict(loss_g=stats["loss_g"], loss_d=stats["loss_d"]))
formatted = tabulate([stats], headers="keys")
if itr > 0: # Get the last line of the stats.
formatted = formatted.split("\n")[-1]
pbar.write(formatted)
return trainer
|
https://github.com/ray-project/ray/issues/8002
|
$ python dcgan.py --num-workers 44
2020-04-13 16:32:44,305 INFO resource_spec.py:204 -- Starting Ray with 120.21 GiB memory available for workers and up to 55.52 GiB for objects. You can adjust these settings with ray.init(me
mory=<bytes>, object_store_memory=<bytes>).
2020-04-13 16:32:44,732 INFO services.py:1146 -- View the Ray dashboard at localhost:8265
Traceback (most recent call last):
File "dcgan.py", line 283, in <module>
trainer = train_example(
File "dcgan.py", line 236, in train_example
trainer = TorchTrainer(
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/torch_trainer.py", line 233, in __init__
self._start_workers(self.max_replicas)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/torch_trainer.py", line 320, in _start_workers
self.local_worker.setup(address, 0, num_workers)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 46, in setup
self._setup_training()
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 92, in _setup_training
self.training_operator = self.training_operator_cls(
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/training_operator.py", line 96, in __init__
self.setup(config)
File "dcgan.py", line 136, in setup
torch.load(config["classification_model_path"]))
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 525, in load
with _open_file_like(f, 'rb') as opened_file:
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 212, in _open_file_like
return _open_file(name_or_buffer, mode)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 193, in __init__
super(_open_file, self).__init__(open(name, mode))
FileNotFoundError: [Errno 2] No such file or directory: '/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/examples/mnist_cnn.pt'
2020-04-13 16:32:49,606 ERROR worker.py:1011 -- Possible unhandled error from worker: ray::DistributedTorchRunner.setup() (pid=67029, ip=10.125.21.189)
File "python/ray/_raylet.pyx", line 452, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 407, in ray._raylet.execute_task.function_executor
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 46, in setup
self._setup_training()
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/distributed_torch_runner.py", line 92, in _setup_training
self.training_operator = self.training_operator_cls(
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/training_operator.py", line 96, in __init__
self.setup(config)
File "dcgan.py", line 136, in setup
torch.load(config["classification_model_path"]))
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 525, in load
with _open_file_like(f, 'rb') as opened_file:
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 212, in _open_file_like
return _open_file(name_or_buffer, mode)
File "/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/torch/serialization.py", line 193, in __init__
super(_open_file, self).__init__(open(name, mode))
FileNotFoundError: [Errno 2] No such file or directory: '/rpovelik/installed/miniconda3/envs/ray/lib/python3.8/site-packages/ray/util/sgd/torch/examples/mnist_cnn.pt'
|
FileNotFoundError
|
def try_import_torch(error=False):
"""
Args:
error (bool): Whether to raise an error if torch cannot be imported.
Returns:
tuple: torch AND torch.nn modules.
"""
if "RLLIB_TEST_NO_TORCH_IMPORT" in os.environ:
logger.warning("Not importing Torch for test purposes.")
return None, None
try:
import torch
import torch.nn as nn
return torch, nn
except ImportError as e:
if error:
raise e
nn = NNStub()
nn.Module = ModuleStub
return None, nn
|
def try_import_torch(error=False):
"""
Args:
error (bool): Whether to raise an error if torch cannot be imported.
Returns:
tuple: torch AND torch.nn modules.
"""
if "RLLIB_TEST_NO_TORCH_IMPORT" in os.environ:
logger.warning("Not importing Torch for test purposes.")
return None, None
try:
import torch
import torch.nn as nn
return torch, nn
except ImportError as e:
if error:
raise e
return None, None
|
https://github.com/ray-project/ray/issues/7776
|
Traceback (most recent call last):
File "/private/var/tmp/_bazel_travis/7c557718f3877739c657a427203800b1/execroot/com_github_ray_project_ray/bazel-out/darwin-opt/bin/python/ray/tune/test_trial_runner.runfiles/com_github_ray_project_ray/python/ray/tune/tests/test_trial_runner.py", line 6, in <module>
from ray.rllib import _register_all
File "/Users/travis/build/ray-project/ray/python/ray/rllib/__init__.py", line 60, in <module>
_register_all()
File "/Users/travis/build/ray-project/ray/python/ray/rllib/__init__.py", line 37, in _register_all
register_trainable(key, get_agent_class(key))
File "/Users/travis/build/ray-project/ray/python/ray/rllib/agents/registry.py", line 130, in get_agent_class
return _get_agent_class(alg)
File "/Users/travis/build/ray-project/ray/python/ray/rllib/agents/registry.py", line 140, in _get_agent_class
return CONTRIBUTED_ALGORITHMS[alg]()
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/registry.py", line 21, in _import_bandit_lints
from ray.rllib.contrib.bandits.agents.lin_ts import LinTSTrainer
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/agents/__init__.py", line 1, in <module>
from ray.rllib.contrib.bandits.agents.lin_ts import LinTSTrainer
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/agents/lin_ts.py", line 5, in <module>
from ray.rllib.contrib.bandits.agents.policy import BanditPolicy
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/agents/policy.py", line 6, in <module>
from ray.rllib.contrib.bandits.models.linear_regression import \
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/models/linear_regression.py", line 9, in <module>
class OnlineLinearRegression(nn.Module):
AttributeError: 'NoneType' object has no attribute 'Module'
================================================================================
==================== Test output for //python/ray/tune:test_trial_runner:
/Users/travis/miniconda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/Users/travis/miniconda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
lz4 not available, disabling sample compression. This will significantly impact RLlib performance. To install lz4, run `pip install lz4`.
Traceback (most recent call last):
File "/private/var/tmp/_bazel_travis/7c557718f3877739c657a427203800b1/execroot/com_github_ray_project_ray/bazel-out/darwin-opt/bin/python/ray/tune/test_trial_runner.runfiles/com_github_ray_project_ray/python/ray/tune/tests/test_trial_runner.py", line 6, in <module>
from ray.rllib import _register_all
File "/Users/travis/build/ray-project/ray/python/ray/rllib/__init__.py", line 60, in <module>
_register_all()
File "/Users/travis/build/ray-project/ray/python/ray/rllib/__init__.py", line 37, in _register_all
register_trainable(key, get_agent_class(key))
File "/Users/travis/build/ray-project/ray/python/ray/rllib/agents/registry.py", line 130, in get_agent_class
return _get_agent_class(alg)
File "/Users/travis/build/ray-project/ray/python/ray/rllib/agents/registry.py", line 140, in _get_agent_class
return CONTRIBUTED_ALGORITHMS[alg]()
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/registry.py", line 21, in _import_bandit_lints
from ray.rllib.contrib.bandits.agents.lin_ts import LinTSTrainer
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/agents/__init__.py", line 1, in <module>
from ray.rllib.contrib.bandits.agents.lin_ts import LinTSTrainer
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/agents/lin_ts.py", line 5, in <module>
from ray.rllib.contrib.bandits.agents.policy import BanditPolicy
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/agents/policy.py", line 6, in <module>
from ray.rllib.contrib.bandits.models.linear_regression import \
File "/Users/travis/build/ray-project/ray/python/ray/rllib/contrib/bandits/models/linear_regression.py", line 9, in <module>
class OnlineLinearRegression(nn.Module):
AttributeError: 'NoneType' object has no attribute 'Module'
|
AttributeError
|
def run(args, parser):
config = {}
# Load configuration from checkpoint file.
config_dir = os.path.dirname(args.checkpoint)
config_path = os.path.join(config_dir, "params.pkl")
# Try parent directory.
if not os.path.exists(config_path):
config_path = os.path.join(config_dir, "../params.pkl")
# If no pkl file found, require command line `--config`.
if not os.path.exists(config_path):
if not args.config:
raise ValueError(
"Could not find params.pkl in either the checkpoint dir or "
"its parent directory AND no config given on command line!"
)
# Load the config from pickled.
else:
with open(config_path, "rb") as f:
config = pickle.load(f)
# Set num_workers to be at least 2.
if "num_workers" in config:
config["num_workers"] = min(2, config["num_workers"])
# Merge with `evaluation_config`.
evaluation_config = copy.deepcopy(config.get("evaluation_config", {}))
config = merge_dicts(config, evaluation_config)
# Merge with command line `--config` settings.
config = merge_dicts(config, args.config)
if not args.env:
if not config.get("env"):
parser.error("the following arguments are required: --env")
args.env = config.get("env")
ray.init()
# Create the Trainer from config.
cls = get_trainable_cls(args.run)
agent = cls(env=args.env, config=config)
# Load state from checkpoint.
agent.restore(args.checkpoint)
num_steps = int(args.steps)
num_episodes = int(args.episodes)
# Determine the video output directory.
# Deprecated way: Use (--out|~/ray_results) + "/monitor" as dir.
video_dir = None
if args.monitor:
video_dir = os.path.join(
os.path.dirname(args.out or "") or os.path.expanduser("~/ray_results/"),
"monitor",
)
# New way: Allow user to specify a video output path.
elif args.video_dir:
video_dir = os.path.expanduser(args.video_dir)
# Do the actual rollout.
with RolloutSaver(
args.out,
args.use_shelve,
write_update_file=args.track_progress,
target_steps=num_steps,
target_episodes=num_episodes,
save_info=args.save_info,
) as saver:
rollout(
agent, args.env, num_steps, num_episodes, saver, args.no_render, video_dir
)
|
def run(args, parser):
config = {}
# Load configuration from checkpoint file.
config_dir = os.path.dirname(args.checkpoint)
config_path = os.path.join(config_dir, "params.pkl")
# Try parent directory.
if not os.path.exists(config_path):
config_path = os.path.join(config_dir, "../params.pkl")
# If no pkl file found, require command line `--config`.
if not os.path.exists(config_path):
if not args.config:
raise ValueError(
"Could not find params.pkl in either the checkpoint dir or "
"its parent directory AND no config given on command line!"
)
# Load the config from pickled.
else:
with open(config_path, "rb") as f:
config = pickle.load(f)
# Set num_workers to be at least 2.
if "num_workers" in config:
config["num_workers"] = min(2, config["num_workers"])
# Merge with `evaluation_config`.
evaluation_config = copy.deepcopy(config.get("evaluation_config", {}))
config = merge_dicts(config, evaluation_config)
# Merge with command line `--config` settings.
config = merge_dicts(config, args.config)
if not args.env:
if not config.get("env"):
parser.error("the following arguments are required: --env")
args.env = config.get("env")
ray.init()
# Create the Trainer from config.
cls = get_agent_class(args.run)
agent = cls(env=args.env, config=config)
# Load state from checkpoint.
agent.restore(args.checkpoint)
num_steps = int(args.steps)
num_episodes = int(args.episodes)
# Determine the video output directory.
# Deprecated way: Use (--out|~/ray_results) + "/monitor" as dir.
video_dir = None
if args.monitor:
video_dir = os.path.join(
os.path.dirname(args.out or "") or os.path.expanduser("~/ray_results/"),
"monitor",
)
# New way: Allow user to specify a video output path.
elif args.video_dir:
video_dir = os.path.expanduser(args.video_dir)
# Do the actual rollout.
with RolloutSaver(
args.out,
args.use_shelve,
write_update_file=args.track_progress,
target_steps=num_steps,
target_episodes=num_episodes,
save_info=args.save_info,
) as saver:
rollout(
agent, args.env, num_steps, num_episodes, saver, args.no_render, video_dir
)
|
https://github.com/ray-project/ray/issues/7757
|
#generate checkpoint
rllib train --run DQN --env CartPole-v0 --stop '{"timesteps_total": 5000}' --checkpoint-freq 1
python rollout.py PATH_TO_CHECKPOINT --run OtherDQN --episodes 10 --out rollout.pkl
2020-03-26 16:28:25,858 INFO resource_spec.py:212 -- Starting Ray with 11.62 GiB memory available for workers and up to 5.83 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-03-26 16:28:26,332 INFO services.py:1123 -- View the Ray dashboard at localhost:8265
Traceback (most recent call last):
File "rollout.py", line 475, in <module>
run(args, parser)
File "rollout.py", line 285, in run
cls = get_agent_class(args.run)
File "/home/carl/miniconda3/envs/rollout_test/lib/python3.7/site-packages/ray/rllib/agents/registry.py", line 130, in get_agent_class
return _get_agent_class(alg)
File "/home/carl/miniconda3/envs/rollout_test/lib/python3.7/site-packages/ray/rllib/agents/registry.py", line 154, in _get_agent_class
raise Exception(("Unknown algorithm {}.").format(alg))
Exception: Unknown algorithm OtherDQN.
|
Exception
|
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
q_hiddens=(256,),
dueling=False,
num_atoms=1,
use_noisy=False,
v_min=-10.0,
v_max=10.0,
sigma0=0.5,
parameter_noise=False,
):
"""Initialize variables of this model.
Extra model kwargs:
q_hiddens (list): defines size of hidden layers for the q head.
These will be used to postprocess the model output for the
purposes of computing Q values.
dueling (bool): whether to build the state value head for DDQN
num_atoms (int): if >1, enables distributional DQN
use_noisy (bool): use noisy nets
v_min (float): min value support for distributional DQN
v_max (float): max value support for distributional DQN
sigma0 (float): initial value of noisy nets
parameter_noise (bool): enable layer norm for param noise
Note that the core layers for forward() are not defined here, this
only defines the layers for the Q head. Those layers for forward()
should be defined in subclasses of DistributionalQModel.
"""
super(DistributionalQModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
# setup the Q head output (i.e., model for get_q_values)
self.model_out = tf.keras.layers.Input(shape=(num_outputs,), name="model_out")
def build_action_value(model_out):
if q_hiddens:
action_out = model_out
for i in range(len(q_hiddens)):
if use_noisy:
action_out = self._noisy_layer(
"hidden_%d" % i, action_out, q_hiddens[i], sigma0
)
elif parameter_noise:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i],
activation_fn=tf.nn.relu,
normalizer_fn=tf.keras.layers.LayerNormalization,
)(action_out)
else:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i], activation=tf.nn.relu, name="hidden_%d" % i
)(action_out)
else:
# Avoid postprocessing the outputs. This enables custom models
# to be used for parametric action DQN.
action_out = model_out
if use_noisy:
action_scores = self._noisy_layer(
"output",
action_out,
self.action_space.n * num_atoms,
sigma0,
non_linear=False,
)
elif q_hiddens:
action_scores = tf.keras.layers.Dense(
units=self.action_space.n * num_atoms, activation=None
)(action_out)
else:
action_scores = model_out
if num_atoms > 1:
# Distributional Q-learning uses a discrete support z
# to represent the action value distribution
z = tf.range(num_atoms, dtype=tf.float32)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
def _layer(x):
support_logits_per_action = tf.reshape(
tensor=x, shape=(-1, self.action_space.n, num_atoms)
)
support_prob_per_action = tf.nn.softmax(
logits=support_logits_per_action
)
x = tf.reduce_sum(input_tensor=z * support_prob_per_action, axis=-1)
logits = support_logits_per_action
dist = support_prob_per_action
return [x, z, support_logits_per_action, logits, dist]
return tf.keras.layers.Lambda(_layer)(action_scores)
else:
logits = tf.expand_dims(tf.ones_like(action_scores), -1)
dist = tf.expand_dims(tf.ones_like(action_scores), -1)
return [action_scores, logits, dist]
def build_state_score(model_out):
state_out = model_out
for i in range(len(q_hiddens)):
if use_noisy:
state_out = self._noisy_layer(
"dueling_hidden_%d" % i, state_out, q_hiddens[i], sigma0
)
elif parameter_noise:
state_out = tf.keras.layers.Dense(
units=q_hiddens[i],
activation_fn=tf.nn.relu,
normalizer_fn=tf.contrib.layers.layer_norm,
)(state_out)
else:
state_out = tf.keras.layers.Dense(
units=q_hiddens[i], activation=tf.nn.relu
)(state_out)
if use_noisy:
state_score = self._noisy_layer(
"dueling_output", state_out, num_atoms, sigma0, non_linear=False
)
else:
state_score = tf.keras.layers.Dense(units=num_atoms, activation=None)(
state_out
)
return state_score
if tf.executing_eagerly():
from tensorflow.python.ops import variable_scope
# Have to use a variable store to reuse variables in eager mode
store = variable_scope.EagerVariableStore()
# Save the scope objects, since in eager we will execute this
# path repeatedly and there is no guarantee it will always be run
# in the same original scope.
with tf.variable_scope(name + "/action_value") as action_scope:
pass
with tf.variable_scope(name + "/state_value") as state_scope:
pass
def build_action_value_in_scope(model_out):
with store.as_default():
with tf.variable_scope(action_scope, reuse=tf.AUTO_REUSE):
return build_action_value(model_out)
def build_state_score_in_scope(model_out):
with store.as_default():
with tf.variable_scope(state_scope, reuse=tf.AUTO_REUSE):
return build_state_score(model_out)
else:
def build_action_value_in_scope(model_out):
with tf.variable_scope(name + "/action_value", reuse=tf.AUTO_REUSE):
return build_action_value(model_out)
def build_state_score_in_scope(model_out):
with tf.variable_scope(name + "/state_value", reuse=tf.AUTO_REUSE):
return build_state_score(model_out)
q_out = build_action_value_in_scope(self.model_out)
self.q_value_head = tf.keras.Model(self.model_out, q_out)
self.register_variables(self.q_value_head.variables)
if dueling:
state_out = build_state_score_in_scope(self.model_out)
self.state_value_head = tf.keras.Model(self.model_out, state_out)
self.register_variables(self.state_value_head.variables)
|
def __init__(
self,
obs_space,
action_space,
num_outputs,
model_config,
name,
q_hiddens=(256,),
dueling=False,
num_atoms=1,
use_noisy=False,
v_min=-10.0,
v_max=10.0,
sigma0=0.5,
parameter_noise=False,
):
"""Initialize variables of this model.
Extra model kwargs:
q_hiddens (list): defines size of hidden layers for the q head.
These will be used to postprocess the model output for the
purposes of computing Q values.
dueling (bool): whether to build the state value head for DDQN
num_atoms (int): if >1, enables distributional DQN
use_noisy (bool): use noisy nets
v_min (float): min value support for distributional DQN
v_max (float): max value support for distributional DQN
sigma0 (float): initial value of noisy nets
parameter_noise (bool): enable layer norm for param noise
Note that the core layers for forward() are not defined here, this
only defines the layers for the Q head. Those layers for forward()
should be defined in subclasses of DistributionalQModel.
"""
super(DistributionalQModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
# setup the Q head output (i.e., model for get_q_values)
self.model_out = tf.keras.layers.Input(shape=(num_outputs,), name="model_out")
def build_action_value(model_out):
if q_hiddens:
action_out = model_out
for i in range(len(q_hiddens)):
if use_noisy:
action_out = self._noisy_layer(
"hidden_%d" % i, action_out, q_hiddens[i], sigma0
)
elif parameter_noise:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i],
activation_fn=tf.nn.relu,
normalizer_fn=tf.keras.layers.LayerNormalization,
)(action_out)
else:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i], activation=tf.nn.relu, name="hidden_%d" % i
)(action_out)
else:
# Avoid postprocessing the outputs. This enables custom models
# to be used for parametric action DQN.
action_out = model_out
if use_noisy:
action_scores = self._noisy_layer(
"output",
action_out,
self.action_space.n * num_atoms,
sigma0,
non_linear=False,
)
elif q_hiddens:
action_scores = tf.keras.layers.Dense(
units=self.action_space.n * num_atoms, activation=None
)(action_out)
else:
action_scores = model_out
if num_atoms > 1:
# Distributional Q-learning uses a discrete support z
# to represent the action value distribution
z = tf.range(num_atoms, dtype=tf.float32)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
support_logits_per_action = tf.reshape(
tensor=action_scores, shape=(-1, self.action_space.n, num_atoms)
)
support_prob_per_action = tf.nn.softmax(logits=support_logits_per_action)
action_scores = tf.reduce_sum(
input_tensor=z * support_prob_per_action, axis=-1
)
logits = support_logits_per_action
dist = support_prob_per_action
return [action_scores, z, support_logits_per_action, logits, dist]
else:
logits = tf.expand_dims(tf.ones_like(action_scores), -1)
dist = tf.expand_dims(tf.ones_like(action_scores), -1)
return [action_scores, logits, dist]
def build_state_score(model_out):
state_out = model_out
for i in range(len(q_hiddens)):
if use_noisy:
state_out = self._noisy_layer(
"dueling_hidden_%d" % i, state_out, q_hiddens[i], sigma0
)
elif parameter_noise:
state_out = tf.keras.layers.Dense(
units=q_hiddens[i],
activation_fn=tf.nn.relu,
normalizer_fn=tf.contrib.layers.layer_norm,
)(state_out)
else:
state_out = tf.keras.layers.Dense(
units=q_hiddens[i], activation=tf.nn.relu
)(state_out)
if use_noisy:
state_score = self._noisy_layer(
"dueling_output", state_out, num_atoms, sigma0, non_linear=False
)
else:
state_score = tf.keras.layers.Dense(units=num_atoms, activation=None)(
state_out
)
return state_score
if tf.executing_eagerly():
from tensorflow.python.ops import variable_scope
# Have to use a variable store to reuse variables in eager mode
store = variable_scope.EagerVariableStore()
# Save the scope objects, since in eager we will execute this
# path repeatedly and there is no guarantee it will always be run
# in the same original scope.
with tf.variable_scope(name + "/action_value") as action_scope:
pass
with tf.variable_scope(name + "/state_value") as state_scope:
pass
def build_action_value_in_scope(model_out):
with store.as_default():
with tf.variable_scope(action_scope, reuse=tf.AUTO_REUSE):
return build_action_value(model_out)
def build_state_score_in_scope(model_out):
with store.as_default():
with tf.variable_scope(state_scope, reuse=tf.AUTO_REUSE):
return build_state_score(model_out)
else:
def build_action_value_in_scope(model_out):
with tf.variable_scope(name + "/action_value", reuse=tf.AUTO_REUSE):
return build_action_value(model_out)
def build_state_score_in_scope(model_out):
with tf.variable_scope(name + "/state_value", reuse=tf.AUTO_REUSE):
return build_state_score(model_out)
q_out = build_action_value_in_scope(self.model_out)
self.q_value_head = tf.keras.Model(self.model_out, q_out)
self.register_variables(self.q_value_head.variables)
if dueling:
state_out = build_state_score_in_scope(self.model_out)
self.state_value_head = tf.keras.Model(self.model_out, state_out)
self.register_variables(self.state_value_head.variables)
|
https://github.com/ray-project/ray/issues/7635
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 459, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 377, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 1504, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(AttributeError): ray::DQN.__init__() (pid=96308, ip=192.168.120.74)
File "python/ray/_raylet.pyx", line 437, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 450, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 452, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 430, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 86, in __init__
Trainer.__init__(self, config, env, logger_creator)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 447, in __init__
super().__init__(config, logger_creator)
File "/usr/local/lib/python3.7/site-packages/ray/tune/trainable.py", line 172, in __init__
self._setup(copy.deepcopy(self.config))
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 591, in _setup
self._init(self.config, self.env_creator)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 105, in _init
self.config["num_workers"])
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 658, in _make_workers
logdir=self.logdir)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/worker_set.py", line 60, in __init__
RolloutWorker, env_creator, policy, 0, self._local_config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/worker_set.py", line 262, in _make_worker
_fake_sampler=config.get("_fake_sampler", False))
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/rollout_worker.py", line 355, in __init__
self._build_policy_map(policy_dict, policy_config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/rollout_worker.py", line 820, in _build_policy_map
policy_map[name] = cls(obs_space, act_space, merged_conf)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/policy/tf_policy_template.py", line 138, in __init__
obs_include_prev_action_reward=obs_include_prev_action_reward)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/policy/dynamic_tf_policy.py", line 137, in __init__
self.model = make_model(self, obs_space, action_space, config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/dqn_policy.py", line 183, in build_q_model
parameter_noise=config["parameter_noise"])
File "/usr/local/lib/python3.7/site-packages/ray/rllib/models/catalog.py", line 349, in get_model_v2
name, **model_kwargs)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 185, in __init__
q_out = build_action_value_in_scope(self.model_out)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 178, in build_action_value_in_scope
return build_action_value(model_out)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 68, in build_action_value
"hidden_%d" % i, action_out, q_hiddens[i], sigma0)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 259, in _noisy_layer
initializer=tf.initializers.GlorotUniform())
File "/usr/local/lib/python3.7/site-packages/tensorflow_core/python/util/module_wrapper.py", line 193, in __getattr__
attr = getattr(self._tfmw_wrapped_module, name)
AttributeError: module 'tensorflow._api.v1.compat.v1.initializers' has no attribute 'GlorotUniform'
|
AttributeError
|
def build_action_value(model_out):
if q_hiddens:
action_out = model_out
for i in range(len(q_hiddens)):
if use_noisy:
action_out = self._noisy_layer(
"hidden_%d" % i, action_out, q_hiddens[i], sigma0
)
elif parameter_noise:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i],
activation_fn=tf.nn.relu,
normalizer_fn=tf.keras.layers.LayerNormalization,
)(action_out)
else:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i], activation=tf.nn.relu, name="hidden_%d" % i
)(action_out)
else:
# Avoid postprocessing the outputs. This enables custom models
# to be used for parametric action DQN.
action_out = model_out
if use_noisy:
action_scores = self._noisy_layer(
"output",
action_out,
self.action_space.n * num_atoms,
sigma0,
non_linear=False,
)
elif q_hiddens:
action_scores = tf.keras.layers.Dense(
units=self.action_space.n * num_atoms, activation=None
)(action_out)
else:
action_scores = model_out
if num_atoms > 1:
# Distributional Q-learning uses a discrete support z
# to represent the action value distribution
z = tf.range(num_atoms, dtype=tf.float32)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
def _layer(x):
support_logits_per_action = tf.reshape(
tensor=x, shape=(-1, self.action_space.n, num_atoms)
)
support_prob_per_action = tf.nn.softmax(logits=support_logits_per_action)
x = tf.reduce_sum(input_tensor=z * support_prob_per_action, axis=-1)
logits = support_logits_per_action
dist = support_prob_per_action
return [x, z, support_logits_per_action, logits, dist]
return tf.keras.layers.Lambda(_layer)(action_scores)
else:
logits = tf.expand_dims(tf.ones_like(action_scores), -1)
dist = tf.expand_dims(tf.ones_like(action_scores), -1)
return [action_scores, logits, dist]
|
def build_action_value(model_out):
if q_hiddens:
action_out = model_out
for i in range(len(q_hiddens)):
if use_noisy:
action_out = self._noisy_layer(
"hidden_%d" % i, action_out, q_hiddens[i], sigma0
)
elif parameter_noise:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i],
activation_fn=tf.nn.relu,
normalizer_fn=tf.keras.layers.LayerNormalization,
)(action_out)
else:
action_out = tf.keras.layers.Dense(
units=q_hiddens[i], activation=tf.nn.relu, name="hidden_%d" % i
)(action_out)
else:
# Avoid postprocessing the outputs. This enables custom models
# to be used for parametric action DQN.
action_out = model_out
if use_noisy:
action_scores = self._noisy_layer(
"output",
action_out,
self.action_space.n * num_atoms,
sigma0,
non_linear=False,
)
elif q_hiddens:
action_scores = tf.keras.layers.Dense(
units=self.action_space.n * num_atoms, activation=None
)(action_out)
else:
action_scores = model_out
if num_atoms > 1:
# Distributional Q-learning uses a discrete support z
# to represent the action value distribution
z = tf.range(num_atoms, dtype=tf.float32)
z = v_min + z * (v_max - v_min) / float(num_atoms - 1)
support_logits_per_action = tf.reshape(
tensor=action_scores, shape=(-1, self.action_space.n, num_atoms)
)
support_prob_per_action = tf.nn.softmax(logits=support_logits_per_action)
action_scores = tf.reduce_sum(input_tensor=z * support_prob_per_action, axis=-1)
logits = support_logits_per_action
dist = support_prob_per_action
return [action_scores, z, support_logits_per_action, logits, dist]
else:
logits = tf.expand_dims(tf.ones_like(action_scores), -1)
dist = tf.expand_dims(tf.ones_like(action_scores), -1)
return [action_scores, logits, dist]
|
https://github.com/ray-project/ray/issues/7635
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 459, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 377, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 1504, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(AttributeError): ray::DQN.__init__() (pid=96308, ip=192.168.120.74)
File "python/ray/_raylet.pyx", line 437, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 450, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 452, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 430, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 86, in __init__
Trainer.__init__(self, config, env, logger_creator)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 447, in __init__
super().__init__(config, logger_creator)
File "/usr/local/lib/python3.7/site-packages/ray/tune/trainable.py", line 172, in __init__
self._setup(copy.deepcopy(self.config))
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 591, in _setup
self._init(self.config, self.env_creator)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 105, in _init
self.config["num_workers"])
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 658, in _make_workers
logdir=self.logdir)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/worker_set.py", line 60, in __init__
RolloutWorker, env_creator, policy, 0, self._local_config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/worker_set.py", line 262, in _make_worker
_fake_sampler=config.get("_fake_sampler", False))
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/rollout_worker.py", line 355, in __init__
self._build_policy_map(policy_dict, policy_config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/rollout_worker.py", line 820, in _build_policy_map
policy_map[name] = cls(obs_space, act_space, merged_conf)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/policy/tf_policy_template.py", line 138, in __init__
obs_include_prev_action_reward=obs_include_prev_action_reward)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/policy/dynamic_tf_policy.py", line 137, in __init__
self.model = make_model(self, obs_space, action_space, config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/dqn_policy.py", line 183, in build_q_model
parameter_noise=config["parameter_noise"])
File "/usr/local/lib/python3.7/site-packages/ray/rllib/models/catalog.py", line 349, in get_model_v2
name, **model_kwargs)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 185, in __init__
q_out = build_action_value_in_scope(self.model_out)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 178, in build_action_value_in_scope
return build_action_value(model_out)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 68, in build_action_value
"hidden_%d" % i, action_out, q_hiddens[i], sigma0)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 259, in _noisy_layer
initializer=tf.initializers.GlorotUniform())
File "/usr/local/lib/python3.7/site-packages/tensorflow_core/python/util/module_wrapper.py", line 193, in __getattr__
attr = getattr(self._tfmw_wrapped_module, name)
AttributeError: module 'tensorflow._api.v1.compat.v1.initializers' has no attribute 'GlorotUniform'
|
AttributeError
|
def _noisy_layer(self, prefix, action_in, out_size, sigma0, non_linear=True):
"""
a common dense layer: y = w^{T}x + b
a noisy layer: y = (w + \\epsilon_w*\\sigma_w)^{T}x +
(b+\\epsilon_b*\\sigma_b)
where \epsilon are random variables sampled from factorized normal
distributions and \\sigma are trainable variables which are expected to
vanish along the training procedure
"""
in_size = int(action_in.shape[1])
epsilon_in = tf.random_normal(shape=[in_size])
epsilon_out = tf.random_normal(shape=[out_size])
epsilon_in = self._f_epsilon(epsilon_in)
epsilon_out = self._f_epsilon(epsilon_out)
epsilon_w = tf.matmul(
a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0)
)
epsilon_b = epsilon_out
sigma_w = tf.get_variable(
name=prefix + "_sigma_w",
shape=[in_size, out_size],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(
minval=-1.0 / np.sqrt(float(in_size)), maxval=1.0 / np.sqrt(float(in_size))
),
)
# TF noise generation can be unreliable on GPU
# If generating the noise on the CPU,
# lowering sigma0 to 0.1 may be helpful
sigma_b = tf.get_variable(
name=prefix + "_sigma_b",
shape=[out_size],
dtype=tf.float32, # 0.5~GPU, 0.1~CPU
initializer=tf.constant_initializer(sigma0 / np.sqrt(float(in_size))),
)
w = tf.get_variable(
name=prefix + "_fc_w",
shape=[in_size, out_size],
dtype=tf.float32,
initializer=tf.initializers.glorot_uniform(),
)
b = tf.get_variable(
name=prefix + "_fc_b",
shape=[out_size],
dtype=tf.float32,
initializer=tf.zeros_initializer(),
)
action_activation = tf.keras.layers.Lambda(
lambda x: tf.matmul(x, w + sigma_w * epsilon_w) + b + sigma_b * epsilon_b
)(action_in)
if not non_linear:
return action_activation
return tf.nn.relu(action_activation)
|
def _noisy_layer(self, prefix, action_in, out_size, sigma0, non_linear=True):
"""
a common dense layer: y = w^{T}x + b
a noisy layer: y = (w + \\epsilon_w*\\sigma_w)^{T}x +
(b+\\epsilon_b*\\sigma_b)
where \epsilon are random variables sampled from factorized normal
distributions and \\sigma are trainable variables which are expected to
vanish along the training procedure
"""
in_size = int(action_in.shape[1])
epsilon_in = tf.random_normal(shape=[in_size])
epsilon_out = tf.random_normal(shape=[out_size])
epsilon_in = self._f_epsilon(epsilon_in)
epsilon_out = self._f_epsilon(epsilon_out)
epsilon_w = tf.matmul(
a=tf.expand_dims(epsilon_in, -1), b=tf.expand_dims(epsilon_out, 0)
)
epsilon_b = epsilon_out
sigma_w = tf.get_variable(
name=prefix + "_sigma_w",
shape=[in_size, out_size],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(
minval=-1.0 / np.sqrt(float(in_size)), maxval=1.0 / np.sqrt(float(in_size))
),
)
# TF noise generation can be unreliable on GPU
# If generating the noise on the CPU,
# lowering sigma0 to 0.1 may be helpful
sigma_b = tf.get_variable(
name=prefix + "_sigma_b",
shape=[out_size],
dtype=tf.float32, # 0.5~GPU, 0.1~CPU
initializer=tf.constant_initializer(sigma0 / np.sqrt(float(in_size))),
)
w = tf.get_variable(
name=prefix + "_fc_w",
shape=[in_size, out_size],
dtype=tf.float32,
initializer=tf.initializers.GlorotUniform(),
)
b = tf.get_variable(
name=prefix + "_fc_b",
shape=[out_size],
dtype=tf.float32,
initializer=tf.zeros_initializer(),
)
action_activation = tf.nn.xw_plus_b(
action_in, w + sigma_w * epsilon_w, b + sigma_b * epsilon_b
)
if not non_linear:
return action_activation
return tf.nn.relu(action_activation)
|
https://github.com/ray-project/ray/issues/7635
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 459, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 377, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.7/site-packages/ray/worker.py", line 1504, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(AttributeError): ray::DQN.__init__() (pid=96308, ip=192.168.120.74)
File "python/ray/_raylet.pyx", line 437, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 449, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 450, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 452, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 430, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 86, in __init__
Trainer.__init__(self, config, env, logger_creator)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 447, in __init__
super().__init__(config, logger_creator)
File "/usr/local/lib/python3.7/site-packages/ray/tune/trainable.py", line 172, in __init__
self._setup(copy.deepcopy(self.config))
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 591, in _setup
self._init(self.config, self.env_creator)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 105, in _init
self.config["num_workers"])
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 658, in _make_workers
logdir=self.logdir)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/worker_set.py", line 60, in __init__
RolloutWorker, env_creator, policy, 0, self._local_config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/worker_set.py", line 262, in _make_worker
_fake_sampler=config.get("_fake_sampler", False))
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/rollout_worker.py", line 355, in __init__
self._build_policy_map(policy_dict, policy_config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/evaluation/rollout_worker.py", line 820, in _build_policy_map
policy_map[name] = cls(obs_space, act_space, merged_conf)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/policy/tf_policy_template.py", line 138, in __init__
obs_include_prev_action_reward=obs_include_prev_action_reward)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/policy/dynamic_tf_policy.py", line 137, in __init__
self.model = make_model(self, obs_space, action_space, config)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/dqn_policy.py", line 183, in build_q_model
parameter_noise=config["parameter_noise"])
File "/usr/local/lib/python3.7/site-packages/ray/rllib/models/catalog.py", line 349, in get_model_v2
name, **model_kwargs)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 185, in __init__
q_out = build_action_value_in_scope(self.model_out)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 178, in build_action_value_in_scope
return build_action_value(model_out)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 68, in build_action_value
"hidden_%d" % i, action_out, q_hiddens[i], sigma0)
File "/usr/local/lib/python3.7/site-packages/ray/rllib/agents/dqn/distributional_q_model.py", line 259, in _noisy_layer
initializer=tf.initializers.GlorotUniform())
File "/usr/local/lib/python3.7/site-packages/tensorflow_core/python/util/module_wrapper.py", line 193, in __getattr__
attr = getattr(self._tfmw_wrapped_module, name)
AttributeError: module 'tensorflow._api.v1.compat.v1.initializers' has no attribute 'GlorotUniform'
|
AttributeError
|
def close(self):
if self._file_writer is not None:
if self.trial and self.trial.evaluated_params and self.last_result:
flat_result = flatten_dict(self.last_result, delimiter="/")
scrubbed_result = {
k: value
for k, value in flat_result.items()
if type(value) in VALID_SUMMARY_TYPES
}
self._try_log_hparams(scrubbed_result)
self._file_writer.close()
|
def close(self):
if self._file_writer is not None:
if self.trial and self.trial.evaluated_params and self.last_result:
scrubbed_result = {
k: value
for k, value in self.last_result.items()
if type(value) in VALID_SUMMARY_TYPES
}
self._try_log_hparams(scrubbed_result)
self._file_writer.close()
|
https://github.com/ray-project/ray/issues/7695
|
$ python ./tests/tune_done_test.py
2020-03-22 12:17:21,455 INFO resource_spec.py:212 -- Starting Ray with 10.3 GiB memory available for workers and up to 5.17 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-03-22 12:17:21,746 INFO services.py:1078 -- View the Ray dashboard at localhost:8265
== Status ==
Memory usage on this node: 17.8/32.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 1/16 CPUs, 0/0 GPUs, 0.0/10.3 GiB heap, 0.0/3.56 GiB objects
Result logdir: /Users/hartikainen/ray_results/done-test
Number of trials: 1 (1 RUNNING)
+---------------------------+----------+-------+-------+
| Trial name | status | loc | a/b |
|---------------------------+----------+-------+-------|
| MyTrainableClass_14fda478 | RUNNING | | |
+---------------------------+----------+-------+-------+
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: false
episode_reward_mean: 1
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 1
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 3.0994415283203125e-06
time_this_iter_s: 3.0994415283203125e-06
time_total_s: 3.0994415283203125e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 1
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: true
episode_reward_mean: 3
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 3
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 9.775161743164062e-06
time_this_iter_s: 2.86102294921875e-06
time_total_s: 9.775161743164062e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 3
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
2020-03-22 12:17:23,204 ERROR trial_runner.py:513 -- Trial MyTrainableClass_14fda478: Error processing event.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./tests/tune_done_test.py", line 24, in <module>
'b': tune.sample_from(lambda spec: spec['config']['b']['c']),
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/tune.py", line 324, in run
runner.step()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 335, in step
self._process_events() # blocking
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 444, in _process_events
self._process_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 514, in _process_trial
self._process_trial_failure(trial, traceback.format_exc())
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 580, in _process_trial_failure
trial, error=True, error_msg=error_msg)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
|
NotImplementedError
|
def _try_log_hparams(self, result):
# TBX currently errors if the hparams value is None.
flat_params = flatten_dict(self.trial.evaluated_params)
scrubbed_params = {k: v for k, v in flat_params.items() if v is not None}
from tensorboardX.summary import hparams
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result
)
self._file_writer.file_writer.add_summary(experiment_tag)
self._file_writer.file_writer.add_summary(session_start_tag)
self._file_writer.file_writer.add_summary(session_end_tag)
|
def _try_log_hparams(self, result):
# TBX currently errors if the hparams value is None.
scrubbed_params = {
k: v for k, v in self.trial.evaluated_params.items() if v is not None
}
from tensorboardX.summary import hparams
experiment_tag, session_start_tag, session_end_tag = hparams(
hparam_dict=scrubbed_params, metric_dict=result
)
self._file_writer.file_writer.add_summary(experiment_tag)
self._file_writer.file_writer.add_summary(session_start_tag)
self._file_writer.file_writer.add_summary(session_end_tag)
|
https://github.com/ray-project/ray/issues/7695
|
$ python ./tests/tune_done_test.py
2020-03-22 12:17:21,455 INFO resource_spec.py:212 -- Starting Ray with 10.3 GiB memory available for workers and up to 5.17 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-03-22 12:17:21,746 INFO services.py:1078 -- View the Ray dashboard at localhost:8265
== Status ==
Memory usage on this node: 17.8/32.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 1/16 CPUs, 0/0 GPUs, 0.0/10.3 GiB heap, 0.0/3.56 GiB objects
Result logdir: /Users/hartikainen/ray_results/done-test
Number of trials: 1 (1 RUNNING)
+---------------------------+----------+-------+-------+
| Trial name | status | loc | a/b |
|---------------------------+----------+-------+-------|
| MyTrainableClass_14fda478 | RUNNING | | |
+---------------------------+----------+-------+-------+
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: false
episode_reward_mean: 1
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 1
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 3.0994415283203125e-06
time_this_iter_s: 3.0994415283203125e-06
time_total_s: 3.0994415283203125e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 1
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: true
episode_reward_mean: 3
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 3
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 9.775161743164062e-06
time_this_iter_s: 2.86102294921875e-06
time_total_s: 9.775161743164062e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 3
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
2020-03-22 12:17:23,204 ERROR trial_runner.py:513 -- Trial MyTrainableClass_14fda478: Error processing event.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./tests/tune_done_test.py", line 24, in <module>
'b': tune.sample_from(lambda spec: spec['config']['b']['c']),
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/tune.py", line 324, in run
runner.step()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 335, in step
self._process_events() # blocking
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 444, in _process_events
self._process_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 514, in _process_trial
self._process_trial_failure(trial, traceback.format_exc())
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 580, in _process_trial_failure
trial, error=True, error_msg=error_msg)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
|
NotImplementedError
|
def __init__(self, config=None, logger_creator=None):
"""Initialize an Trainable.
Sets up logging and points ``self.logdir`` to a directory in which
training outputs should be placed.
Subclasses should prefer defining ``_setup()`` instead of overriding
``__init__()`` directly.
Args:
config (dict): Trainable-specific configuration data. By default
will be saved as ``self.config``.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
self._experiment_id = uuid.uuid4().hex
self.config = config or {}
trial_info = self.config.pop(TRIAL_INFO, None)
if logger_creator:
self._result_logger = logger_creator(self.config)
self._logdir = self._result_logger.logdir
else:
logdir_prefix = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
ray.utils.try_to_create_directory(DEFAULT_RESULTS_DIR)
self._logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
self._result_logger = UnifiedLogger(self.config, self._logdir, loggers=None)
self._iteration = 0
self._time_total = 0.0
self._timesteps_total = None
self._episodes_total = None
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = False
self._trial_info = trial_info
start_time = time.time()
self._setup(copy.deepcopy(self.config))
setup_time = time.time() - start_time
if setup_time > SETUP_TIME_THRESHOLD:
logger.info(
"_setup took {:.3f} seconds. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(setup_time)
)
self._local_ip = self.get_current_ip()
log_sys_usage = self.config.get("log_sys_usage", False)
self._monitor = UtilMonitor(start=log_sys_usage)
|
def __init__(self, config=None, logger_creator=None):
"""Initialize an Trainable.
Sets up logging and points ``self.logdir`` to a directory in which
training outputs should be placed.
Subclasses should prefer defining ``_setup()`` instead of overriding
``__init__()`` directly.
Args:
config (dict): Trainable-specific configuration data. By default
will be saved as ``self.config``.
logger_creator (func): Function that creates a ray.tune.Logger
object. If unspecified, a default logger is created.
"""
self._experiment_id = uuid.uuid4().hex
self.config = config or {}
trial_info = self.config.pop(TRIAL_INFO, None)
if logger_creator:
self._result_logger = logger_creator(self.config)
self._logdir = self._result_logger.logdir
else:
logdir_prefix = datetime.today().strftime("%Y-%m-%d_%H-%M-%S")
ray.utils.try_to_create_directory(DEFAULT_RESULTS_DIR)
self._logdir = tempfile.mkdtemp(prefix=logdir_prefix, dir=DEFAULT_RESULTS_DIR)
self._result_logger = UnifiedLogger(self.config, self._logdir, loggers=None)
self._iteration = 0
self._time_total = 0.0
self._timesteps_total = None
self._episodes_total = None
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = False
self._trial_info = trial_info
start_time = time.time()
self._setup(copy.deepcopy(self.config))
setup_time = time.time() - start_time
if setup_time > SETUP_TIME_THRESHOLD:
logger.info(
"_setup took {:.3f} seconds. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(setup_time)
)
self._local_ip = ray.services.get_node_ip_address()
log_sys_usage = self.config.get("log_sys_usage", False)
self._monitor = UtilMonitor(start=log_sys_usage)
|
https://github.com/ray-project/ray/issues/7695
|
$ python ./tests/tune_done_test.py
2020-03-22 12:17:21,455 INFO resource_spec.py:212 -- Starting Ray with 10.3 GiB memory available for workers and up to 5.17 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-03-22 12:17:21,746 INFO services.py:1078 -- View the Ray dashboard at localhost:8265
== Status ==
Memory usage on this node: 17.8/32.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 1/16 CPUs, 0/0 GPUs, 0.0/10.3 GiB heap, 0.0/3.56 GiB objects
Result logdir: /Users/hartikainen/ray_results/done-test
Number of trials: 1 (1 RUNNING)
+---------------------------+----------+-------+-------+
| Trial name | status | loc | a/b |
|---------------------------+----------+-------+-------|
| MyTrainableClass_14fda478 | RUNNING | | |
+---------------------------+----------+-------+-------+
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: false
episode_reward_mean: 1
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 1
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 3.0994415283203125e-06
time_this_iter_s: 3.0994415283203125e-06
time_total_s: 3.0994415283203125e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 1
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: true
episode_reward_mean: 3
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 3
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 9.775161743164062e-06
time_this_iter_s: 2.86102294921875e-06
time_total_s: 9.775161743164062e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 3
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
2020-03-22 12:17:23,204 ERROR trial_runner.py:513 -- Trial MyTrainableClass_14fda478: Error processing event.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./tests/tune_done_test.py", line 24, in <module>
'b': tune.sample_from(lambda spec: spec['config']['b']['c']),
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/tune.py", line 324, in run
runner.step()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 335, in step
self._process_events() # blocking
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 444, in _process_events
self._process_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 514, in _process_trial
self._process_trial_failure(trial, traceback.format_exc())
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 580, in _process_trial_failure
trial, error=True, error_msg=error_msg)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
|
NotImplementedError
|
def restore(self, checkpoint_path):
"""Restores training state from a given model checkpoint.
These checkpoints are returned from calls to save().
Subclasses should override ``_restore()`` instead to restore state.
This method restores additional metadata saved with the checkpoint.
"""
with open(checkpoint_path + ".tune_metadata", "rb") as f:
metadata = pickle.load(f)
self._experiment_id = metadata["experiment_id"]
self._iteration = metadata["iteration"]
self._timesteps_total = metadata["timesteps_total"]
self._time_total = metadata["time_total"]
self._episodes_total = metadata["episodes_total"]
saved_as_dict = metadata["saved_as_dict"]
if saved_as_dict:
with open(checkpoint_path, "rb") as loaded_state:
checkpoint_dict = pickle.load(loaded_state)
checkpoint_dict.update(tune_checkpoint_path=checkpoint_path)
self._restore(checkpoint_dict)
else:
self._restore(checkpoint_path)
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = True
logger.info(
"Restored on %s from checkpoint: %s", self.get_current_ip(), checkpoint_path
)
state = {
"_iteration": self._iteration,
"_timesteps_total": self._timesteps_total,
"_time_total": self._time_total,
"_episodes_total": self._episodes_total,
}
logger.info("Current state after restoring: %s", state)
|
def restore(self, checkpoint_path):
"""Restores training state from a given model checkpoint.
These checkpoints are returned from calls to save().
Subclasses should override ``_restore()`` instead to restore state.
This method restores additional metadata saved with the checkpoint.
"""
with open(checkpoint_path + ".tune_metadata", "rb") as f:
metadata = pickle.load(f)
self._experiment_id = metadata["experiment_id"]
self._iteration = metadata["iteration"]
self._timesteps_total = metadata["timesteps_total"]
self._time_total = metadata["time_total"]
self._episodes_total = metadata["episodes_total"]
saved_as_dict = metadata["saved_as_dict"]
if saved_as_dict:
with open(checkpoint_path, "rb") as loaded_state:
checkpoint_dict = pickle.load(loaded_state)
checkpoint_dict.update(tune_checkpoint_path=checkpoint_path)
self._restore(checkpoint_dict)
else:
self._restore(checkpoint_path)
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = True
logger.info(
"Restored on %s from checkpoint: %s", self.current_ip(), checkpoint_path
)
state = {
"_iteration": self._iteration,
"_timesteps_total": self._timesteps_total,
"_time_total": self._time_total,
"_episodes_total": self._episodes_total,
}
logger.info("Current state after restoring: %s", state)
|
https://github.com/ray-project/ray/issues/7695
|
$ python ./tests/tune_done_test.py
2020-03-22 12:17:21,455 INFO resource_spec.py:212 -- Starting Ray with 10.3 GiB memory available for workers and up to 5.17 GiB for objects. You can adjust these settings with ray.init(memory=<bytes>, object_store_memory=<bytes>).
2020-03-22 12:17:21,746 INFO services.py:1078 -- View the Ray dashboard at localhost:8265
== Status ==
Memory usage on this node: 17.8/32.0 GiB
Using FIFO scheduling algorithm.
Resources requested: 1/16 CPUs, 0/0 GPUs, 0.0/10.3 GiB heap, 0.0/3.56 GiB objects
Result logdir: /Users/hartikainen/ray_results/done-test
Number of trials: 1 (1 RUNNING)
+---------------------------+----------+-------+-------+
| Trial name | status | loc | a/b |
|---------------------------+----------+-------+-------|
| MyTrainableClass_14fda478 | RUNNING | | |
+---------------------------+----------+-------+-------+
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: false
episode_reward_mean: 1
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 1
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 3.0994415283203125e-06
time_this_iter_s: 3.0994415283203125e-06
time_total_s: 3.0994415283203125e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 1
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
Result for MyTrainableClass_14fda478:
date: 2020-03-22_12-17-23
done: true
episode_reward_mean: 3
experiment_id: 3549c7334c884d3abc309232da4f6679
experiment_tag: '0_b={''d'': ''4''}'
hostname: catz-48fe.stcatz.ox.ac.uk
iterations_since_restore: 3
node_ip: 129.67.48.254
pid: 73820
time_since_restore: 9.775161743164062e-06
time_this_iter_s: 2.86102294921875e-06
time_total_s: 9.775161743164062e-06
timestamp: 1584879443
timesteps_since_restore: 0
training_iteration: 3
trial_id: 14fda478
what:
'1': '2'
'3': 4
'5':
'6': 4
2020-03-22 12:17:23,204 ERROR trial_runner.py:513 -- Trial MyTrainableClass_14fda478: Error processing event.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
Traceback (most recent call last):
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 511, in _process_trial
self._execute_action(trial, decision)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 595, in _execute_action
self.trial_executor.stop_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "./tests/tune_done_test.py", line 24, in <module>
'b': tune.sample_from(lambda spec: spec['config']['b']['c']),
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/tune.py", line 324, in run
runner.step()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 335, in step
self._process_events() # blocking
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 444, in _process_events
self._process_trial(trial)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 514, in _process_trial
self._process_trial_failure(trial, traceback.format_exc())
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 580, in _process_trial_failure
trial, error=True, error_msg=error_msg)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 263, in stop_trial
trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 204, in _stop_trial
trial.close_logger()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/trial.py", line 315, in close_logger
self.result_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 305, in close
_logger.close()
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 233, in close
self._try_log_hparams(self.last_result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/ray/tune/logger.py", line 244, in _try_log_hparams
hparam_dict=scrubbed_params, metric_dict=result)
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/summary.py", line 102, in hparams
v = make_np(v)[0]
File "/Users/hartikainen/conda/envs/softlearning-tf2/lib/python3.7/site-packages/tensorboardX/x2num.py", line 34, in make_np
'Got {}, but expected numpy array or torch tensor.'.format(type(x)))
NotImplementedError: Got <class 'dict'>, but expected numpy array or torch tensor.
|
NotImplementedError
|
def _actor_table(self, actor_id):
"""Fetch and parse the actor table information for a single actor ID.
Args:
actor_id: A actor ID to get information about.
Returns:
A dictionary with information about the actor ID in question.
"""
assert isinstance(actor_id, ray.ActorID)
message = self.redis_client.execute_command(
"RAY.TABLE_LOOKUP", gcs_utils.TablePrefix.Value("ACTOR"), "", actor_id.binary()
)
if message is None:
return {}
gcs_entries = gcs_utils.GcsEntry.FromString(message)
assert len(gcs_entries.entries) > 0
actor_table_data = gcs_utils.ActorTableData.FromString(gcs_entries.entries[-1])
actor_info = {
"ActorID": binary_to_hex(actor_table_data.actor_id),
"JobID": binary_to_hex(actor_table_data.job_id),
"Address": {
"IPAddress": actor_table_data.address.ip_address,
"Port": actor_table_data.address.port,
},
"OwnerAddress": {
"IPAddress": actor_table_data.owner_address.ip_address,
"Port": actor_table_data.owner_address.port,
},
"IsDirectCall": actor_table_data.is_direct_call,
"State": actor_table_data.state,
"Timestamp": actor_table_data.timestamp,
}
return actor_info
|
def _actor_table(self, actor_id):
"""Fetch and parse the actor table information for a single actor ID.
Args:
actor_id: A actor ID to get information about.
Returns:
A dictionary with information about the actor ID in question.
"""
assert isinstance(actor_id, ray.ActorID)
message = self.redis_client.execute_command(
"RAY.TABLE_LOOKUP", gcs_utils.TablePrefix.Value("ACTOR"), "", actor_id.binary()
)
if message is None:
return {}
gcs_entries = gcs_utils.GcsEntry.FromString(message)
assert len(gcs_entries.entries) == 1
actor_table_data = gcs_utils.ActorTableData.FromString(gcs_entries.entries[0])
actor_info = {
"ActorID": binary_to_hex(actor_table_data.actor_id),
"JobID": binary_to_hex(actor_table_data.job_id),
"Address": {
"IPAddress": actor_table_data.address.ip_address,
"Port": actor_table_data.address.port,
},
"OwnerAddress": {
"IPAddress": actor_table_data.owner_address.ip_address,
"Port": actor_table_data.owner_address.port,
},
"IsDirectCall": actor_table_data.is_direct_call,
"State": actor_table_data.state,
"Timestamp": actor_table_data.timestamp,
}
return actor_info
|
https://github.com/ray-project/ray/issues/7310
|
Traceback (most recent call last):
File "/Users/rkn/opt/anaconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/rkn/opt/anaconda3/lib/python3.7/site-packages/ray/dashboard/dashboard.py", line 546, in run
current_actor_table = ray.actors()
File "/Users/rkn/opt/anaconda3/lib/python3.7/site-packages/ray/state.py", line 1133, in actors
return state.actor_table(actor_id=actor_id)
File "/Users/rkn/opt/anaconda3/lib/python3.7/site-packages/ray/state.py", line 369, in actor_table
ray.ActorID(actor_id_binary))
File "/Users/rkn/opt/anaconda3/lib/python3.7/site-packages/ray/state.py", line 321, in _actor_table
assert len(gcs_entries.entries) == 1
AssertionError
|
AssertionError
|
def build_torch_policy(
name,
loss_fn,
get_default_config=None,
stats_fn=None,
postprocess_fn=None,
extra_action_out_fn=None,
extra_grad_process_fn=None,
optimizer_fn=None,
before_init=None,
after_init=None,
make_model_and_action_dist=None,
mixins=None,
):
"""Helper function for creating a torch policy at runtime.
Arguments:
name (str): name of the policy (e.g., "PPOTorchPolicy")
loss_fn (func): function that returns a loss tensor as arguments
(policy, model, dist_class, train_batch)
get_default_config (func): optional function that returns the default
config to merge with any overrides
stats_fn (func): optional function that returns a dict of
values given the policy and batch input tensors
postprocess_fn (func): optional experience postprocessing function
that takes the same args as Policy.postprocess_trajectory()
extra_action_out_fn (func): optional function that returns
a dict of extra values to include in experiences
extra_grad_process_fn (func): optional function that is called after
gradients are computed and returns processing info
optimizer_fn (func): optional function that returns a torch optimizer
given the policy and config
before_init (func): optional function to run at the beginning of
policy init that takes the same arguments as the policy constructor
after_init (func): optional function to run at the end of policy init
that takes the same arguments as the policy constructor
make_model_and_action_dist (func): optional func that takes the same
arguments as policy init and returns a tuple of model instance and
torch action distribution class. If not specified, the default
model and action dist from the catalog will be used
mixins (list): list of any class mixins for the returned policy class.
These mixins will be applied in order and will have higher
precedence than the TorchPolicy class
Returns:
a TorchPolicy instance that uses the specified args
"""
original_kwargs = locals().copy()
base = add_mixins(TorchPolicy, mixins)
class policy_cls(base):
def __init__(self, obs_space, action_space, config):
if get_default_config:
config = dict(get_default_config(), **config)
self.config = config
if before_init:
before_init(self, obs_space, action_space, config)
if make_model_and_action_dist:
self.model, self.dist_class = make_model_and_action_dist(
self, obs_space, action_space, config
)
# Make sure, we passed in a correct Model factory.
assert isinstance(self.model, TorchModelV2), (
"ERROR: TorchPolicy::make_model_and_action_dist must "
"return a TorchModelV2 object!"
)
else:
self.dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"], framework="torch"
)
self.model = ModelCatalog.get_model_v2(
obs_space,
action_space,
logit_dim,
self.config["model"],
framework="torch",
)
TorchPolicy.__init__(
self,
obs_space,
action_space,
config,
self.model,
loss_fn,
self.dist_class,
)
if after_init:
after_init(self, obs_space, action_space, config)
@override(Policy)
def postprocess_trajectory(
self, sample_batch, other_agent_batches=None, episode=None
):
if not postprocess_fn:
return sample_batch
# Do all post-processing always with no_grad().
# Not using this here will introduce a memory leak (issue #6962).
with torch.no_grad():
return postprocess_fn(
self,
convert_to_non_torch_type(sample_batch),
convert_to_non_torch_type(other_agent_batches),
episode,
)
@override(TorchPolicy)
def extra_grad_process(self):
if extra_grad_process_fn:
return extra_grad_process_fn(self)
else:
return TorchPolicy.extra_grad_process(self)
@override(TorchPolicy)
def extra_action_out(self, input_dict, state_batches, model, action_dist=None):
with torch.no_grad():
if extra_action_out_fn:
stats_dict = extra_action_out_fn(
self, input_dict, state_batches, model, action_dist
)
else:
stats_dict = TorchPolicy.extra_action_out(
self, input_dict, state_batches, model, action_dist
)
return convert_to_non_torch_type(stats_dict)
@override(TorchPolicy)
def optimizer(self):
if optimizer_fn:
return optimizer_fn(self, self.config)
else:
return TorchPolicy.optimizer(self)
@override(TorchPolicy)
def extra_grad_info(self, train_batch):
with torch.no_grad():
if stats_fn:
stats_dict = stats_fn(self, train_batch)
else:
stats_dict = TorchPolicy.extra_grad_info(self, train_batch)
return convert_to_non_torch_type(stats_dict)
def with_updates(**overrides):
return build_torch_policy(**dict(original_kwargs, **overrides))
policy_cls.with_updates = staticmethod(with_updates)
policy_cls.__name__ = name
policy_cls.__qualname__ = name
return policy_cls
|
def build_torch_policy(
name,
loss_fn,
get_default_config=None,
stats_fn=None,
postprocess_fn=None,
extra_action_out_fn=None,
extra_grad_process_fn=None,
optimizer_fn=None,
before_init=None,
after_init=None,
make_model_and_action_dist=None,
mixins=None,
):
"""Helper function for creating a torch policy at runtime.
Arguments:
name (str): name of the policy (e.g., "PPOTorchPolicy")
loss_fn (func): function that returns a loss tensor as arguments
(policy, model, dist_class, train_batch)
get_default_config (func): optional function that returns the default
config to merge with any overrides
stats_fn (func): optional function that returns a dict of
values given the policy and batch input tensors
postprocess_fn (func): optional experience postprocessing function
that takes the same args as Policy.postprocess_trajectory()
extra_action_out_fn (func): optional function that returns
a dict of extra values to include in experiences
extra_grad_process_fn (func): optional function that is called after
gradients are computed and returns processing info
optimizer_fn (func): optional function that returns a torch optimizer
given the policy and config
before_init (func): optional function to run at the beginning of
policy init that takes the same arguments as the policy constructor
after_init (func): optional function to run at the end of policy init
that takes the same arguments as the policy constructor
make_model_and_action_dist (func): optional func that takes the same
arguments as policy init and returns a tuple of model instance and
torch action distribution class. If not specified, the default
model and action dist from the catalog will be used
mixins (list): list of any class mixins for the returned policy class.
These mixins will be applied in order and will have higher
precedence than the TorchPolicy class
Returns:
a TorchPolicy instance that uses the specified args
"""
original_kwargs = locals().copy()
base = add_mixins(TorchPolicy, mixins)
class policy_cls(base):
def __init__(self, obs_space, action_space, config):
if get_default_config:
config = dict(get_default_config(), **config)
self.config = config
if before_init:
before_init(self, obs_space, action_space, config)
if make_model_and_action_dist:
self.model, self.dist_class = make_model_and_action_dist(
self, obs_space, action_space, config
)
# Make sure, we passed in a correct Model factory.
assert isinstance(self.model, TorchModelV2), (
"ERROR: TorchPolicy::make_model_and_action_dist must "
"return a TorchModelV2 object!"
)
else:
self.dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"], framework="torch"
)
self.model = ModelCatalog.get_model_v2(
obs_space,
action_space,
logit_dim,
self.config["model"],
framework="torch",
)
TorchPolicy.__init__(
self,
obs_space,
action_space,
config,
self.model,
loss_fn,
self.dist_class,
)
if after_init:
after_init(self, obs_space, action_space, config)
@override(Policy)
def postprocess_trajectory(
self, sample_batch, other_agent_batches=None, episode=None
):
if not postprocess_fn:
return sample_batch
# Do all post-processing always with no_grad().
# Not using this here will introduce a memory leak (issue #6962).
with torch.no_grad():
return postprocess_fn(self, sample_batch, other_agent_batches, episode)
@override(TorchPolicy)
def extra_grad_process(self):
if extra_grad_process_fn:
return extra_grad_process_fn(self)
else:
return TorchPolicy.extra_grad_process(self)
@override(TorchPolicy)
def extra_action_out(self, input_dict, state_batches, model, action_dist=None):
with torch.no_grad():
if extra_action_out_fn:
stats_dict = extra_action_out_fn(
self, input_dict, state_batches, model, action_dist
)
else:
stats_dict = TorchPolicy.extra_action_out(
self, input_dict, state_batches, model, action_dist
)
return convert_to_non_torch_type(stats_dict)
@override(TorchPolicy)
def optimizer(self):
if optimizer_fn:
return optimizer_fn(self, self.config)
else:
return TorchPolicy.optimizer(self)
@override(TorchPolicy)
def extra_grad_info(self, train_batch):
with torch.no_grad():
if stats_fn:
stats_dict = stats_fn(self, train_batch)
else:
stats_dict = TorchPolicy.extra_grad_info(self, train_batch)
return convert_to_non_torch_type(stats_dict)
def with_updates(**overrides):
return build_torch_policy(**dict(original_kwargs, **overrides))
policy_cls.with_updates = staticmethod(with_updates)
policy_cls.__name__ = name
policy_cls.__qualname__ = name
return policy_cls
|
https://github.com/ray-project/ray/issues/7421
|
Traceback (most recent call last):
File "issue_serving_server.py", line 71, in <module>
ppo.train()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 497, in train
raise e
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 483, in train
result = Trainable.train(self)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/tune/trainable.py", line 254, in train
result = self._train()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer_template.py", line 133, in _train
fetches = self.optimizer.step()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/optimizers/sync_samples_optimizer.py", line 62, in step
samples.append(self.workers.local_worker().sample())
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/rollout_worker.py", line 488, in sample
batches = [self.input_reader.next()]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 52, in next
batches = [self.get_data()]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 95, in get_data
item = next(self.rollout_provider)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 315, in _env_runner
soft_horizon, no_done_at_end)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 461, in _process_observations
episode.batch_builder.postprocess_batch_so_far(episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sample_batch_builder.py", line 152, in postprocess_batch_so_far
pre_batch, other_batches, episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/policy/torch_policy_template.py", line 109, in postprocess_trajectory
episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/ppo/ppo_tf_policy.py", line 191, in postprocess_ppo_gae
use_gae=policy.config["use_gae"])
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/postprocessing.py", line 45, in compute_advantages
traj[key] = np.stack(rollout[key])
File "<__array_function__ internals>", line 6, in stack
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/shape_base.py", line 420, in stack
arrays = [asanyarray(arr) for arr in arrays]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/shape_base.py", line 420, in <listcomp>
arrays = [asanyarray(arr) for arr in arrays]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/_asarray.py", line 138, in asanyarray
return array(a, dtype, copy=False, order=order, subok=True)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/torch/tensor.py", line 486, in __array__
return self.numpy()
TypeError: can't convert CUDA tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
|
TypeError
|
def postprocess_trajectory(self, sample_batch, other_agent_batches=None, episode=None):
if not postprocess_fn:
return sample_batch
# Do all post-processing always with no_grad().
# Not using this here will introduce a memory leak (issue #6962).
with torch.no_grad():
return postprocess_fn(
self,
convert_to_non_torch_type(sample_batch),
convert_to_non_torch_type(other_agent_batches),
episode,
)
|
def postprocess_trajectory(self, sample_batch, other_agent_batches=None, episode=None):
if not postprocess_fn:
return sample_batch
# Do all post-processing always with no_grad().
# Not using this here will introduce a memory leak (issue #6962).
with torch.no_grad():
return postprocess_fn(self, sample_batch, other_agent_batches, episode)
|
https://github.com/ray-project/ray/issues/7421
|
Traceback (most recent call last):
File "issue_serving_server.py", line 71, in <module>
ppo.train()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 497, in train
raise e
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 483, in train
result = Trainable.train(self)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/tune/trainable.py", line 254, in train
result = self._train()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer_template.py", line 133, in _train
fetches = self.optimizer.step()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/optimizers/sync_samples_optimizer.py", line 62, in step
samples.append(self.workers.local_worker().sample())
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/rollout_worker.py", line 488, in sample
batches = [self.input_reader.next()]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 52, in next
batches = [self.get_data()]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 95, in get_data
item = next(self.rollout_provider)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 315, in _env_runner
soft_horizon, no_done_at_end)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 461, in _process_observations
episode.batch_builder.postprocess_batch_so_far(episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sample_batch_builder.py", line 152, in postprocess_batch_so_far
pre_batch, other_batches, episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/policy/torch_policy_template.py", line 109, in postprocess_trajectory
episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/ppo/ppo_tf_policy.py", line 191, in postprocess_ppo_gae
use_gae=policy.config["use_gae"])
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/postprocessing.py", line 45, in compute_advantages
traj[key] = np.stack(rollout[key])
File "<__array_function__ internals>", line 6, in stack
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/shape_base.py", line 420, in stack
arrays = [asanyarray(arr) for arr in arrays]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/shape_base.py", line 420, in <listcomp>
arrays = [asanyarray(arr) for arr in arrays]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/_asarray.py", line 138, in asanyarray
return array(a, dtype, copy=False, order=order, subok=True)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/torch/tensor.py", line 486, in __array__
return self.numpy()
TypeError: can't convert CUDA tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
|
TypeError
|
def convert_to_non_torch_type(stats):
"""Converts values in stats_dict to non-Tensor numpy or python types.
Args:
stats (any): Any (possibly nested) struct, the values in which will be
converted and returned as a new struct with all torch tensors
being converted to numpy types.
Returns:
dict: A new dict with the same structure as stats_dict, but with all
values converted to non-torch Tensor types.
"""
# The mapping function used to numpyize torch Tensors.
def mapping(item):
if isinstance(item, torch.Tensor):
return item.cpu().item() if len(item.size()) == 0 else item.cpu().numpy()
else:
return item
return tree.map_structure(mapping, stats)
|
def convert_to_non_torch_type(stats_dict):
"""Converts values in stats_dict to non-Tensor numpy or python types.
Args:
stats_dict (dict): A flat key, value dict, the values of which will be
converted and returned as a new dict.
Returns:
dict: A new dict with the same structure as stats_dict, but with all
values converted to non-torch Tensor types.
"""
ret = {}
for k, v in stats_dict.items():
if isinstance(v, torch.Tensor):
ret[k] = v.cpu().item() if len(v.size()) == 0 else v.cpu().numpy()
else:
ret[k] = v
return ret
|
https://github.com/ray-project/ray/issues/7421
|
Traceback (most recent call last):
File "issue_serving_server.py", line 71, in <module>
ppo.train()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 497, in train
raise e
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 483, in train
result = Trainable.train(self)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/tune/trainable.py", line 254, in train
result = self._train()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/trainer_template.py", line 133, in _train
fetches = self.optimizer.step()
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/optimizers/sync_samples_optimizer.py", line 62, in step
samples.append(self.workers.local_worker().sample())
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/rollout_worker.py", line 488, in sample
batches = [self.input_reader.next()]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 52, in next
batches = [self.get_data()]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 95, in get_data
item = next(self.rollout_provider)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 315, in _env_runner
soft_horizon, no_done_at_end)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sampler.py", line 461, in _process_observations
episode.batch_builder.postprocess_batch_so_far(episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/sample_batch_builder.py", line 152, in postprocess_batch_so_far
pre_batch, other_batches, episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/policy/torch_policy_template.py", line 109, in postprocess_trajectory
episode)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/agents/ppo/ppo_tf_policy.py", line 191, in postprocess_ppo_gae
use_gae=policy.config["use_gae"])
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/ray/rllib/evaluation/postprocessing.py", line 45, in compute_advantages
traj[key] = np.stack(rollout[key])
File "<__array_function__ internals>", line 6, in stack
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/shape_base.py", line 420, in stack
arrays = [asanyarray(arr) for arr in arrays]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/shape_base.py", line 420, in <listcomp>
arrays = [asanyarray(arr) for arr in arrays]
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/numpy/core/_asarray.py", line 138, in asanyarray
return array(a, dtype, copy=False, order=order, subok=True)
File "/auto/homes/jb2270/master-project/venv_ray_master/lib/python3.6/site-packages/torch/tensor.py", line 486, in __array__
return self.numpy()
TypeError: can't convert CUDA tensor to numpy. Use Tensor.cpu() to copy the tensor to host memory first.
|
TypeError
|
def compute_action(self, observation, *args, **kwargs):
return self.policy.compute(observation, update=True)[0]
|
def compute_action(self, observation):
return self.policy.compute(observation, update=True)[0]
|
https://github.com/ray-project/ray/issues/7136
|
2020-02-12 05:41:30,922 INFO trainable.py:423 -- Current state after restoring: {'_iteration': 50, '_timesteps_total': 16979666, '_time_total': 2578.4048051834106, '_episodes_total': None}
Traceback (most recent call last):
File "/home/andriy/miniconda3/envs/myproj/bin/rllib", line 8, in <module>
sys.exit(cli())
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/scripts.py", line 36, in cli
rollout.run(options, rollout_parser)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 265, in run
args.no_render, args.monitor)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 364, in rollout
prev_action=prev_actions[agent_id],
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 272, in __missing__
self[key] = value = self.default_factory(key)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 340, in <lambda>
lambda agent_id: action_init[mapping_cache[agent_id]])
NameError: free variable 'action_init' referenced before assignment in enclosing scope
|
NameError
|
def compute_action(self, observation, *args, **kwargs):
return self.policy.compute(observation, update=False)[0]
|
def compute_action(self, observation):
return self.policy.compute(observation, update=False)[0]
|
https://github.com/ray-project/ray/issues/7136
|
2020-02-12 05:41:30,922 INFO trainable.py:423 -- Current state after restoring: {'_iteration': 50, '_timesteps_total': 16979666, '_time_total': 2578.4048051834106, '_episodes_total': None}
Traceback (most recent call last):
File "/home/andriy/miniconda3/envs/myproj/bin/rllib", line 8, in <module>
sys.exit(cli())
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/scripts.py", line 36, in cli
rollout.run(options, rollout_parser)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 265, in run
args.no_render, args.monitor)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 364, in rollout
prev_action=prev_actions[agent_id],
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 272, in __missing__
self[key] = value = self.default_factory(key)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 340, in <lambda>
lambda agent_id: action_init[mapping_cache[agent_id]])
NameError: free variable 'action_init' referenced before assignment in enclosing scope
|
NameError
|
def rollout(
agent,
env_name,
num_steps,
num_episodes=0,
saver=None,
no_render=True,
video_dir=None,
):
policy_agent_mapping = default_policy_agent_mapping
if saver is None:
saver = RolloutSaver()
if hasattr(agent, "workers") and isinstance(agent.workers, WorkerSet):
env = agent.workers.local_worker().env
multiagent = isinstance(env, MultiAgentEnv)
if agent.workers.local_worker().multiagent:
policy_agent_mapping = agent.config["multiagent"]["policy_mapping_fn"]
policy_map = agent.workers.local_worker().policy_map
state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
else:
env = gym.make(env_name)
multiagent = False
try:
policy_map = {DEFAULT_POLICY_ID: agent.policy}
except AttributeError:
raise AttributeError(
"Agent ({}) does not have a `policy` property! This is needed "
"for performing (trained) agent rollouts.".format(agent)
)
use_lstm = {DEFAULT_POLICY_ID: False}
action_init = {
p: _flatten_action(m.action_space.sample()) for p, m in policy_map.items()
}
# If monitoring has been requested, manually wrap our environment with a
# gym monitor, which is set to record every episode.
if video_dir:
env = gym.wrappers.Monitor(
env=env, directory=video_dir, video_callable=lambda x: True, force=True
)
steps = 0
episodes = 0
while keep_going(steps, num_steps, episodes, num_episodes):
mapping_cache = {} # in case policy_agent_mapping is stochastic
saver.begin_rollout()
obs = env.reset()
agent_states = DefaultMapping(
lambda agent_id: state_init[mapping_cache[agent_id]]
)
prev_actions = DefaultMapping(
lambda agent_id: action_init[mapping_cache[agent_id]]
)
prev_rewards = collections.defaultdict(lambda: 0.0)
done = False
reward_total = 0.0
while not done and keep_going(steps, num_steps, episodes, num_episodes):
multi_obs = obs if multiagent else {_DUMMY_AGENT_ID: obs}
action_dict = {}
for agent_id, a_obs in multi_obs.items():
if a_obs is not None:
policy_id = mapping_cache.setdefault(
agent_id, policy_agent_mapping(agent_id)
)
p_use_lstm = use_lstm[policy_id]
if p_use_lstm:
a_action, p_state, _ = agent.compute_action(
a_obs,
state=agent_states[agent_id],
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id,
)
agent_states[agent_id] = p_state
else:
a_action = agent.compute_action(
a_obs,
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id,
)
a_action = _flatten_action(a_action) # tuple actions
action_dict[agent_id] = a_action
prev_actions[agent_id] = a_action
action = action_dict
action = action if multiagent else action[_DUMMY_AGENT_ID]
next_obs, reward, done, info = env.step(action)
if multiagent:
for agent_id, r in reward.items():
prev_rewards[agent_id] = r
else:
prev_rewards[_DUMMY_AGENT_ID] = reward
if multiagent:
done = done["__all__"]
reward_total += sum(reward.values())
else:
reward_total += reward
if not no_render:
env.render()
saver.append_step(obs, action, next_obs, reward, done, info)
steps += 1
obs = next_obs
saver.end_rollout()
print("Episode #{}: reward: {}".format(episodes, reward_total))
if done:
episodes += 1
|
def rollout(
agent,
env_name,
num_steps,
num_episodes=0,
saver=None,
no_render=True,
video_dir=None,
):
policy_agent_mapping = default_policy_agent_mapping
if saver is None:
saver = RolloutSaver()
if hasattr(agent, "workers"):
env = agent.workers.local_worker().env
multiagent = isinstance(env, MultiAgentEnv)
if agent.workers.local_worker().multiagent:
policy_agent_mapping = agent.config["multiagent"]["policy_mapping_fn"]
policy_map = agent.workers.local_worker().policy_map
state_init = {p: m.get_initial_state() for p, m in policy_map.items()}
use_lstm = {p: len(s) > 0 for p, s in state_init.items()}
action_init = {
p: _flatten_action(m.action_space.sample()) for p, m in policy_map.items()
}
else:
env = gym.make(env_name)
multiagent = False
use_lstm = {DEFAULT_POLICY_ID: False}
# If monitoring has been requested, manually wrap our environment with a
# gym monitor, which is set to record every episode.
if video_dir:
env = gym.wrappers.Monitor(
env=env, directory=video_dir, video_callable=lambda x: True, force=True
)
steps = 0
episodes = 0
while keep_going(steps, num_steps, episodes, num_episodes):
mapping_cache = {} # in case policy_agent_mapping is stochastic
saver.begin_rollout()
obs = env.reset()
agent_states = DefaultMapping(
lambda agent_id: state_init[mapping_cache[agent_id]]
)
prev_actions = DefaultMapping(
lambda agent_id: action_init[mapping_cache[agent_id]]
)
prev_rewards = collections.defaultdict(lambda: 0.0)
done = False
reward_total = 0.0
while not done and keep_going(steps, num_steps, episodes, num_episodes):
multi_obs = obs if multiagent else {_DUMMY_AGENT_ID: obs}
action_dict = {}
for agent_id, a_obs in multi_obs.items():
if a_obs is not None:
policy_id = mapping_cache.setdefault(
agent_id, policy_agent_mapping(agent_id)
)
p_use_lstm = use_lstm[policy_id]
if p_use_lstm:
a_action, p_state, _ = agent.compute_action(
a_obs,
state=agent_states[agent_id],
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id,
)
agent_states[agent_id] = p_state
else:
a_action = agent.compute_action(
a_obs,
prev_action=prev_actions[agent_id],
prev_reward=prev_rewards[agent_id],
policy_id=policy_id,
)
a_action = _flatten_action(a_action) # tuple actions
action_dict[agent_id] = a_action
prev_actions[agent_id] = a_action
action = action_dict
action = action if multiagent else action[_DUMMY_AGENT_ID]
next_obs, reward, done, info = env.step(action)
if multiagent:
for agent_id, r in reward.items():
prev_rewards[agent_id] = r
else:
prev_rewards[_DUMMY_AGENT_ID] = reward
if multiagent:
done = done["__all__"]
reward_total += sum(reward.values())
else:
reward_total += reward
if not no_render:
env.render()
saver.append_step(obs, action, next_obs, reward, done, info)
steps += 1
obs = next_obs
saver.end_rollout()
print("Episode #{}: reward: {}".format(episodes, reward_total))
if done:
episodes += 1
|
https://github.com/ray-project/ray/issues/7136
|
2020-02-12 05:41:30,922 INFO trainable.py:423 -- Current state after restoring: {'_iteration': 50, '_timesteps_total': 16979666, '_time_total': 2578.4048051834106, '_episodes_total': None}
Traceback (most recent call last):
File "/home/andriy/miniconda3/envs/myproj/bin/rllib", line 8, in <module>
sys.exit(cli())
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/scripts.py", line 36, in cli
rollout.run(options, rollout_parser)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 265, in run
args.no_render, args.monitor)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 364, in rollout
prev_action=prev_actions[agent_id],
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 272, in __missing__
self[key] = value = self.default_factory(key)
File "/home/andriy/miniconda3/envs/myproj/lib/python3.7/site-packages/ray/rllib/rollout.py", line 340, in <lambda>
lambda agent_id: action_init[mapping_cache[agent_id]])
NameError: free variable 'action_init' referenced before assignment in enclosing scope
|
NameError
|
def _read_utilization(self):
with self.lock:
if psutil is not None:
self.values["cpu_util_percent"].append(
float(psutil.cpu_percent(interval=None))
)
self.values["ram_util_percent"].append(
float(getattr(psutil.virtual_memory(), "percent"))
)
if GPUtil is not None:
gpu_list = []
try:
gpu_list = GPUtil.getGPUs()
except Exception:
logger.debug("GPUtil failed to retrieve GPUs.")
for gpu in gpu_list:
self.values["gpu_util_percent" + str(gpu.id)].append(float(gpu.load))
self.values["vram_util_percent" + str(gpu.id)].append(
float(gpu.memoryUtil)
)
|
def _read_utilization(self):
with self.lock:
if psutil is not None:
self.values["cpu_util_percent"].append(
float(psutil.cpu_percent(interval=None))
)
self.values["ram_util_percent"].append(
float(getattr(psutil.virtual_memory(), "percent"))
)
if GPUtil is not None:
for gpu in GPUtil.getGPUs():
self.values["gpu_util_percent" + str(gpu.id)].append(float(gpu.load))
self.values["vram_util_percent" + str(gpu.id)].append(
float(gpu.memoryUtil)
)
|
https://github.com/ray-project/ray/issues/7349
|
(pid=4628) Exception in thread Thread-2:
(pid=4628) Traceback (most recent call last):
(pid=4628) File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=4628) self.run()
(pid=4628) File "/home/ubuntu/algo/lib/python3.6/site-packages/ray/tune/utils/util.py", line 89, in run
(pid=4628) self._read_utilization()
(pid=4628) File "/home/ubuntu/algo/lib/python3.6/site-packages/ray/tune/utils/util.py", line 65, in _read_utilization
(pid=4628) for gpu in GPUtil.getGPUs():
(pid=4628) File "/home/ubuntu/algo/lib/python3.6/site-packages/GPUtil/GPUtil.py", line 102, in getGPUs
(pid=4628) deviceIds = int(vals[i])
(pid=4628) ValueError: invalid literal for int() with base 10: "NVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running."
|
ValueError
|
def _configure_iam_role(config):
if "IamInstanceProfile" in config["head_node"]:
return config
profile = _get_instance_profile(DEFAULT_RAY_INSTANCE_PROFILE, config)
if profile is None:
logger.info(
"Creating new instance profile {}".format(DEFAULT_RAY_INSTANCE_PROFILE)
)
client = _client("iam", config)
client.create_instance_profile(InstanceProfileName=DEFAULT_RAY_INSTANCE_PROFILE)
profile = _get_instance_profile(DEFAULT_RAY_INSTANCE_PROFILE, config)
time.sleep(15) # wait for propagation
assert profile is not None, "Failed to create instance profile"
if not profile.roles:
role = _get_role(DEFAULT_RAY_IAM_ROLE, config)
if role is None:
logger.info("Creating new role {}".format(DEFAULT_RAY_IAM_ROLE))
iam = _resource("iam", config)
iam.create_role(
RoleName=DEFAULT_RAY_IAM_ROLE,
AssumeRolePolicyDocument=json.dumps(
{
"Statement": [
{
"Effect": "Allow",
"Principal": {"Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole",
},
],
}
),
)
role = _get_role(DEFAULT_RAY_IAM_ROLE, config)
assert role is not None, "Failed to create role"
role.attach_policy(PolicyArn="arn:aws:iam::aws:policy/AmazonEC2FullAccess")
role.attach_policy(PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess")
profile.add_role(RoleName=role.name)
time.sleep(15) # wait for propagation
logger.info("Role not specified for head node, using {}".format(profile.arn))
config["head_node"]["IamInstanceProfile"] = {"Arn": profile.arn}
return config
|
def _configure_iam_role(config):
if "IamInstanceProfile" in config["head_node"]:
return config
profile = _get_instance_profile(DEFAULT_RAY_INSTANCE_PROFILE, config)
if profile is None:
logger.info(
"Creating new instance profile {}".format(DEFAULT_RAY_INSTANCE_PROFILE)
)
client = _client("iam", config)
client.create_instance_profile(InstanceProfileName=DEFAULT_RAY_INSTANCE_PROFILE)
profile = _get_instance_profile(DEFAULT_RAY_INSTANCE_PROFILE, config)
time.sleep(15) # wait for propagation
assert profile is not None, "Failed to create instance profile"
if not profile.roles:
role = _get_role(DEFAULT_RAY_IAM_ROLE, config)
if role is None:
logger.info("Creating new role {}".format(DEFAULT_RAY_IAM_ROLE))
iam = _resource("iam", config)
iam.create_role(
RoleName=DEFAULT_RAY_IAM_ROLE,
AssumeRolePolicyDocument=json.dumps(
{
"Statement": [
{
"Effect": "Allow",
"Principal": {"Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole",
},
],
}
),
)
role = _get_role(DEFAULT_RAY_IAM_ROLE, config)
assert role is not None, "Failed to create role"
role.attach_policy(PolicyArn="arn:aws:iam::aws:policy/AmazonEC2FullAccess")
role.attach_policy(PolicyArn="arn:aws:iam::aws:policy/AmazonS3FullAccess")
profile.add_role(RoleName=role.name)
time.sleep(15) # wait for propagation
logger.info("Role not specified for head node, using {}".format(profile.arn))
config["head_node"]["IamInstanceProfile"] = {"Arn": profile.arn}
config["worker_nodes"]["IamInstanceProfile"] = {"Arn": profile.arn}
return config
|
https://github.com/ray-project/ray/issues/3190
|
ubuntu@ip-172-31-22-20:/tmp/ray/session_2018-11-01_22-01-45_16654/logs$ cat monitor.err
StandardAutoscaler: {'cluster_name': 'mastertest', 'min_workers': 200, 'max_workers': 200, 'docker': {'image': '', 'container_name': ''}, 'target_utilization_fraction': 0.8, 'idle_timeout_minutes': 5, 'provider': {'type': 'aws', 'region': 'us-west-2', 'availability_zone': 'us-west-2a,us-west-2b'}, 'auth': {'ssh_user': 'ubuntu', 'ssh_private_key': '~/ray_bootstrap_key.pem'}, 'head_node': {'InstanceType': 'm5.2xlarge', 'ImageId': 'ami-3b6bce43', 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 50}}], 'IamInstanceProfile': {'Arn': 'arn:aws:iam::339530224232:instance-profile/ray-autoscaler-v1'}, 'KeyName': 'ray-autoscaler_us-west-2', 'SubnetIds': ['subnet-2154c944', 'subnet-4645f631'], 'SecurityGroupIds': ['sg-030b1764c1812e998']}, 'worker_nodes': {'InstanceType': 'm5.large', 'ImageId': 'ami-3b6bce43', 'InstanceMarketOptions': {'MarketType': 'spot'}, 'IamInstanceProfile': {'Arn': 'arn:aws:iam::339530224232:instance-profile/ray-autoscaler-v1'}, 'KeyName': 'ray-autoscaler_us-west-2', 'SubnetIds': ['subnet-2154c944', 'subnet-4645f631'], 'SecurityGroupIds': ['sg-030b1764c1812e998']}, 'file_mounts': {}, 'setup_commands': ['echo \'export PATH="$HOME/anaconda3/envs/tensorflow_p36/bin:$PATH"\' >> ~/.bashrc'], 'head_setup_commands': ['pip install boto3==1.4.8'], 'worker_setup_commands': [], 'head_start_ray_commands': ['ray stop', 'ulimit -n 65536; ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml'], 'worker_start_ray_commands': ['ray stop', 'ulimit -n 65536; ray start --redis-address=$RAY_HEAD_IP:6379 --object-manager-port=8076'], 'no_restart': False}
StandardAutoscaler [2018-11-01 22:01:46.799401]: 0/200 target nodes (0 pending)
- NodeIdleSeconds: Min=-1 Mean=-1 Max=-1
- NumNodesConnected: 0
- NumNodesUsed: 0.0
- ResourceUsage:
- TimeSinceLastHeartbeat: Min=-1 Mean=-1 Max=-1
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:01:46.830794]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=-1 Mean=-1 Max=-1
- NumNodesConnected: 0
- NumNodesUsed: 0.0
- ResourceUsage:
- TimeSinceLastHeartbeat: Min=-1 Mean=-1 Max=-1
Exception in thread Thread-1:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 251, in run
self._launch_node(config, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 242, in _launch_node
}, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/aws/node_provider.py", line 163, in create_node
self.ec2.create_instances(**conf)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/factory.py", line 520, in do_action
response = action(self, *args, **kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/action.py", line 83, in __call__
response = getattr(parent.meta.client, operation_name)(**params)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 314, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 612, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (UnauthorizedOperation) when calling the RunInstances operation: You are not authorized to perform this operation. Encoded authorization failure message: JUmMphVKvdVLB83WorEP2n5lOkl7LJT5E1K6lCJAxAjpvkJzdk--3vcnRXFh0Yj6Ez-XjrAda9hU472FJ_o2JIzby0EqMD2WD_qg7SqlgmahgDWBSwxOu4uCn_Py-OV1Cwj6XqFy6xJ9QcqsIfttWB9DstSHNQR_8y6SZ0-KYgUzzP51lLcYbTm2CK-D5mghExYd30aoyIV1YQpZ_8JyudvA8JhOFGVNrAIsYK_fT0iqsJOalAeTJAu-TUQNFxzUW6NENFT6xfN3bov6MPB2z0UvnFkMzH9fyerYzUXblO0qzdoEgyfxhcvhnq-7Dd6OJIBlycL5QF2XnR8czqmiSE3aQ09USKgKj1Oaru04EonLCRr64oVKMqpR80jTIGET7TTKDy-qqra-_uS2oQajd0T21V_y__GB7197KlSMi6JPSFHni7H6pZxOp3YTOneNBCydPrHCEmf6OFrbBtD7US-xIo_mW-LWMfHygRINgdAlPTQxBNfCWNpd7Mo9TK_02i0uNQaxR2Eb4sHQgPjWRRyN1gtsEA
StandardAutoscaler [2018-11-01 22:01:51.687415]: 0/200 target nodes (0 pending)
- NodeIdleSeconds: Min=4 Mean=4 Max=4
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:01:51.703377]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=4 Mean=4 Max=4
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
Exception in thread Thread-2:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 251, in run
self._launch_node(config, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 242, in _launch_node
}, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/aws/node_provider.py", line 163, in create_node
self.ec2.create_instances(**conf)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/factory.py", line 520, in do_action
response = action(self, *args, **kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/action.py", line 83, in __call__
response = getattr(parent.meta.client, operation_name)(**params)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 314, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 612, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (UnauthorizedOperation) when calling the RunInstances operation: You are not authorized to perform this operation. Encoded authorization failure message: QOnTis6VbTYrt-07yg40sRrg8_SPtfkGI7lGq8R4bvzaISfzl48BMA5e6PeMiilTnDI40xDfLhJCVrCBIvvPoKljhdk-fssOX9yc174zfGLQRWZZGra1F8P3_bAEaAZn-KlWMZKbfh2XPeMw6U83Lbln2wuEWNaTxydbXyO4ADjz9PUsyXDijXopkp7VylGRBNcQy20718--eGZ5kIZlc5BJ40YTFU9JfaiMGLWTAV_hNcUrB5DOAtklZI4de4tYjxt88imIpp-slkiTbtJXcP4--bU1keOqIgbIP5vZMTrECvo95av4Tyq2BsydvK4PVviQSpqnZ9FlFUJi232jrxNYoH7-meIqGRsU9Xl715WOos2Yorgfqp5KyJn_aaxwWzwdPReN_aaIetpmQHJJtUj6c5R5jiJxtl9LxJNicT47JHvmPyJYb4iYNFHdAUMcrJryQCvZ6aNvz9IiG5ZVXyueSabABGskplcTZkpQ9wEib0kGbMyBncGR9kyAkmHrqOcauAjx1qpGawbrz16JXUED1FZJfQ
StandardAutoscaler [2018-11-01 22:01:56.744077]: 0/200 target nodes (0 pending)
- NodeIdleSeconds: Min=9 Mean=9 Max=9
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:01:56.767962]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=9 Mean=9 Max=9
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:01.814536]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=14 Mean=14 Max=14
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:02:01.841278]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=14 Mean=14 Max=14
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:06.875743]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=19 Mean=19 Max=19
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:06.893544]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=19 Mean=19 Max=19
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:11.929128]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=24 Mean=24 Max=24
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:11.945196]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=25 Mean=25 Max=25
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:16.983206]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=30 Mean=30 Max=30
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:16.997781]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=30 Mean=30 Max=30
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:22.051389]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=35 Mean=35 Max=35
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:22.067704]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=35 Mean=35 Max=35
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:27.109345]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=40 Mean=40 Max=40
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:27.125878]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=40 Mean=40 Max=40
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:32.157747]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=45 Mean=45 Max=45
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:32.172933]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=45 Mean=45 Max=45
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
|
botocore.exceptions.ClientError
|
def _configure_iam_role(config):
"""Setup a gcp service account with IAM roles.
Creates a gcp service acconut and binds IAM roles which allow it to control
control storage/compute services. Specifically, the head node needs to have
an IAM role that allows it to create further gce instances and store items
in google cloud storage.
TODO: Allow the name/id of the service account to be configured
"""
email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
account_id=DEFAULT_SERVICE_ACCOUNT_ID,
project_id=config["provider"]["project_id"],
)
service_account = _get_service_account(email, config)
if service_account is None:
logger.info(
"Creating new service account {}".format(DEFAULT_SERVICE_ACCOUNT_ID)
)
service_account = _create_service_account(
DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config
)
assert service_account is not None, "Failed to create service account"
_add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)
config["head_node"]["serviceAccounts"] = [
{
"email": service_account["email"],
# NOTE: The amount of access is determined by the scope + IAM
# role of the service account. Even if the cloud-platform scope
# gives (scope) access to the whole cloud-platform, the service
# account is limited by the IAM rights specified below.
"scopes": ["https://www.googleapis.com/auth/cloud-platform"],
}
]
return config
|
def _configure_iam_role(config):
"""Setup a gcp service account with IAM roles.
Creates a gcp service acconut and binds IAM roles which allow it to control
control storage/compute services. Specifically, the head node needs to have
an IAM role that allows it to create further gce instances and store items
in google cloud storage.
TODO: Allow the name/id of the service account to be configured
"""
email = SERVICE_ACCOUNT_EMAIL_TEMPLATE.format(
account_id=DEFAULT_SERVICE_ACCOUNT_ID,
project_id=config["provider"]["project_id"],
)
service_account = _get_service_account(email, config)
if service_account is None:
logger.info(
"Creating new service account {}".format(DEFAULT_SERVICE_ACCOUNT_ID)
)
service_account = _create_service_account(
DEFAULT_SERVICE_ACCOUNT_ID, DEFAULT_SERVICE_ACCOUNT_CONFIG, config
)
assert service_account is not None, "Failed to create service account"
_add_iam_policy_binding(service_account, DEFAULT_SERVICE_ACCOUNT_ROLES)
# NOTE: The amount of access is determined by the scope + IAM
# role of the service account. Even if the cloud-platform scope
# gives (scope) access to the whole cloud-platform, the service
# account is limited by the IAM rights specified below.
config["head_node"]["serviceAccounts"] = [
{
"email": service_account["email"],
"scopes": ["https://www.googleapis.com/auth/cloud-platform"],
}
]
config["worker_nodes"]["serviceAccounts"] = [
{
"email": service_account["email"],
"scopes": ["https://www.googleapis.com/auth/cloud-platform"],
}
]
return config
|
https://github.com/ray-project/ray/issues/3190
|
ubuntu@ip-172-31-22-20:/tmp/ray/session_2018-11-01_22-01-45_16654/logs$ cat monitor.err
StandardAutoscaler: {'cluster_name': 'mastertest', 'min_workers': 200, 'max_workers': 200, 'docker': {'image': '', 'container_name': ''}, 'target_utilization_fraction': 0.8, 'idle_timeout_minutes': 5, 'provider': {'type': 'aws', 'region': 'us-west-2', 'availability_zone': 'us-west-2a,us-west-2b'}, 'auth': {'ssh_user': 'ubuntu', 'ssh_private_key': '~/ray_bootstrap_key.pem'}, 'head_node': {'InstanceType': 'm5.2xlarge', 'ImageId': 'ami-3b6bce43', 'BlockDeviceMappings': [{'DeviceName': '/dev/sda1', 'Ebs': {'VolumeSize': 50}}], 'IamInstanceProfile': {'Arn': 'arn:aws:iam::339530224232:instance-profile/ray-autoscaler-v1'}, 'KeyName': 'ray-autoscaler_us-west-2', 'SubnetIds': ['subnet-2154c944', 'subnet-4645f631'], 'SecurityGroupIds': ['sg-030b1764c1812e998']}, 'worker_nodes': {'InstanceType': 'm5.large', 'ImageId': 'ami-3b6bce43', 'InstanceMarketOptions': {'MarketType': 'spot'}, 'IamInstanceProfile': {'Arn': 'arn:aws:iam::339530224232:instance-profile/ray-autoscaler-v1'}, 'KeyName': 'ray-autoscaler_us-west-2', 'SubnetIds': ['subnet-2154c944', 'subnet-4645f631'], 'SecurityGroupIds': ['sg-030b1764c1812e998']}, 'file_mounts': {}, 'setup_commands': ['echo \'export PATH="$HOME/anaconda3/envs/tensorflow_p36/bin:$PATH"\' >> ~/.bashrc'], 'head_setup_commands': ['pip install boto3==1.4.8'], 'worker_setup_commands': [], 'head_start_ray_commands': ['ray stop', 'ulimit -n 65536; ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml'], 'worker_start_ray_commands': ['ray stop', 'ulimit -n 65536; ray start --redis-address=$RAY_HEAD_IP:6379 --object-manager-port=8076'], 'no_restart': False}
StandardAutoscaler [2018-11-01 22:01:46.799401]: 0/200 target nodes (0 pending)
- NodeIdleSeconds: Min=-1 Mean=-1 Max=-1
- NumNodesConnected: 0
- NumNodesUsed: 0.0
- ResourceUsage:
- TimeSinceLastHeartbeat: Min=-1 Mean=-1 Max=-1
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:01:46.830794]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=-1 Mean=-1 Max=-1
- NumNodesConnected: 0
- NumNodesUsed: 0.0
- ResourceUsage:
- TimeSinceLastHeartbeat: Min=-1 Mean=-1 Max=-1
Exception in thread Thread-1:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 251, in run
self._launch_node(config, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 242, in _launch_node
}, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/aws/node_provider.py", line 163, in create_node
self.ec2.create_instances(**conf)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/factory.py", line 520, in do_action
response = action(self, *args, **kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/action.py", line 83, in __call__
response = getattr(parent.meta.client, operation_name)(**params)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 314, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 612, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (UnauthorizedOperation) when calling the RunInstances operation: You are not authorized to perform this operation. Encoded authorization failure message: JUmMphVKvdVLB83WorEP2n5lOkl7LJT5E1K6lCJAxAjpvkJzdk--3vcnRXFh0Yj6Ez-XjrAda9hU472FJ_o2JIzby0EqMD2WD_qg7SqlgmahgDWBSwxOu4uCn_Py-OV1Cwj6XqFy6xJ9QcqsIfttWB9DstSHNQR_8y6SZ0-KYgUzzP51lLcYbTm2CK-D5mghExYd30aoyIV1YQpZ_8JyudvA8JhOFGVNrAIsYK_fT0iqsJOalAeTJAu-TUQNFxzUW6NENFT6xfN3bov6MPB2z0UvnFkMzH9fyerYzUXblO0qzdoEgyfxhcvhnq-7Dd6OJIBlycL5QF2XnR8czqmiSE3aQ09USKgKj1Oaru04EonLCRr64oVKMqpR80jTIGET7TTKDy-qqra-_uS2oQajd0T21V_y__GB7197KlSMi6JPSFHni7H6pZxOp3YTOneNBCydPrHCEmf6OFrbBtD7US-xIo_mW-LWMfHygRINgdAlPTQxBNfCWNpd7Mo9TK_02i0uNQaxR2Eb4sHQgPjWRRyN1gtsEA
StandardAutoscaler [2018-11-01 22:01:51.687415]: 0/200 target nodes (0 pending)
- NodeIdleSeconds: Min=4 Mean=4 Max=4
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:01:51.703377]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=4 Mean=4 Max=4
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
Exception in thread Thread-2:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 251, in run
self._launch_node(config, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/autoscaler.py", line 242, in _launch_node
}, count)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/ray/autoscaler/aws/node_provider.py", line 163, in create_node
self.ec2.create_instances(**conf)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/factory.py", line 520, in do_action
response = action(self, *args, **kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/boto3/resources/action.py", line 83, in __call__
response = getattr(parent.meta.client, operation_name)(**params)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 314, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/ubuntu/anaconda3/envs/tensorflow_p36/lib/python3.6/site-packages/botocore/client.py", line 612, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (UnauthorizedOperation) when calling the RunInstances operation: You are not authorized to perform this operation. Encoded authorization failure message: QOnTis6VbTYrt-07yg40sRrg8_SPtfkGI7lGq8R4bvzaISfzl48BMA5e6PeMiilTnDI40xDfLhJCVrCBIvvPoKljhdk-fssOX9yc174zfGLQRWZZGra1F8P3_bAEaAZn-KlWMZKbfh2XPeMw6U83Lbln2wuEWNaTxydbXyO4ADjz9PUsyXDijXopkp7VylGRBNcQy20718--eGZ5kIZlc5BJ40YTFU9JfaiMGLWTAV_hNcUrB5DOAtklZI4de4tYjxt88imIpp-slkiTbtJXcP4--bU1keOqIgbIP5vZMTrECvo95av4Tyq2BsydvK4PVviQSpqnZ9FlFUJi232jrxNYoH7-meIqGRsU9Xl715WOos2Yorgfqp5KyJn_aaxwWzwdPReN_aaIetpmQHJJtUj6c5R5jiJxtl9LxJNicT47JHvmPyJYb4iYNFHdAUMcrJryQCvZ6aNvz9IiG5ZVXyueSabABGskplcTZkpQ9wEib0kGbMyBncGR9kyAkmHrqOcauAjx1qpGawbrz16JXUED1FZJfQ
StandardAutoscaler [2018-11-01 22:01:56.744077]: 0/200 target nodes (0 pending)
- NodeIdleSeconds: Min=9 Mean=9 Max=9
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:01:56.767962]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=9 Mean=9 Max=9
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:01.814536]: 0/200 target nodes (5 pending)
- NodeIdleSeconds: Min=14 Mean=14 Max=14
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 5 new nodes
StandardAutoscaler [2018-11-01 22:02:01.841278]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=14 Mean=14 Max=14
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:06.875743]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=19 Mean=19 Max=19
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:06.893544]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=19 Mean=19 Max=19
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:11.929128]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=24 Mean=24 Max=24
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:11.945196]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=25 Mean=25 Max=25
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:16.983206]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=30 Mean=30 Max=30
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:16.997781]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=30 Mean=30 Max=30
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:22.051389]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=35 Mean=35 Max=35
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:22.067704]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=35 Mean=35 Max=35
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:27.109345]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=40 Mean=40 Max=40
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:27.125878]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=40 Mean=40 Max=40
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler [2018-11-01 22:02:32.157747]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=45 Mean=45 Max=45
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
StandardAutoscaler: Launching 0 new nodes
StandardAutoscaler [2018-11-01 22:02:32.172933]: 0/200 target nodes (10 pending)
- NodeIdleSeconds: Min=45 Mean=45 Max=45
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/8.0 b'CPU', 0.0/0.0 b'GPU'
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
|
botocore.exceptions.ClientError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.