Initialize the collector worker thread. The log path will be checked first,
and records in the DB backend will be cleared.
def _initialize(self):
"""Initialize the collector worker thread. The log path will be checked first,
and records in the DB backend will be cleared.
"""
if not os.path.exists(self._logdir):
raise CollectorError("Log directory %s does not exist" % self._logdir)
self.logger.info("Collector started, taking %s as parent directory "
"for all job logs." % self._logdir)
# clear old records
JobRecord.objects.filter().delete()
TrialRecord.objects.filter().delete()
ResultRecord.objects.filter().delete() |
Load information of the job with the given job name.
1. Traverse each experiment sub-directory and sync information
for each trial.
2. Create or update the job information, together with the job
meta file.
Args:
job_name (str): Name of the Tune experiment.
def sync_job_info(self, job_name):
"""Load information of the job with the given job name.
1. Traverse each experiment sub-directory and sync information
for each trial.
2. Create or update the job information, together with the job
meta file.
Args:
job_name (str): Name of the Tune experiment.
"""
job_path = os.path.join(self._logdir, job_name)
if job_name not in self._monitored_jobs:
self._create_job_info(job_path)
self._monitored_jobs.add(job_name)
else:
self._update_job_info(job_path)
expr_dirs = filter(lambda d: os.path.isdir(os.path.join(job_path, d)),
os.listdir(job_path))
for expr_dir_name in expr_dirs:
self.sync_trial_info(job_path, expr_dir_name)
self._update_job_info(job_path) |
Load information of the trial from the given experiment directory.
Create or update the trial information, together with the trial
meta file.
Args:
job_path (str): Directory path of the job.
expr_dir_name (str): Name of the experiment directory.
def sync_trial_info(self, job_path, expr_dir_name):
"""Load information of the trial from the given experiment directory.
Create or update the trial information, together with the trial
meta file.
Args:
job_path (str): Directory path of the job.
expr_dir_name (str): Name of the experiment directory.
"""
expr_name = expr_dir_name[-8:]
expr_path = os.path.join(job_path, expr_dir_name)
if expr_name not in self._monitored_trials:
self._create_trial_info(expr_path)
self._monitored_trials.add(expr_name)
else:
self._update_trial_info(expr_path) |
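Note: both sync methods above key trials by the last 8 characters of the
experiment directory name. A minimal illustrative sketch (the directory name
below is hypothetical):
expr_dir_name = "train_func_0_lr=0.01_abc12345"
print(expr_dir_name[-8:])  # "abc12345" -- the value used as the monitored trial key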
Create information for given job.
Meta file will be loaded if it exists, and the job information will
be saved in the DB backend.
Args:
job_dir (str): Directory path of the job.
def _create_job_info(self, job_dir):
"""Create information for given job.
Meta file will be loaded if it exists, and the job information will
be saved in the DB backend.
Args:
job_dir (str): Directory path of the job.
"""
meta = self._build_job_meta(job_dir)
self.logger.debug("Create job: %s" % meta)
job_record = JobRecord.from_json(meta)
job_record.save() |
Update information for given job.
Meta file will be loaded if it exists, and the job information in
the DB backend will be updated.
Args:
job_dir (str): Directory path of the job.
Return:
Updated dict of job meta info
def _update_job_info(cls, job_dir):
"""Update information for given job.
Meta file will be loaded if it exists, and the job information in
the DB backend will be updated.
Args:
job_dir (str): Directory path of the job.
Return:
Updated dict of job meta info
"""
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if meta:
logging.debug("Update job info for %s" % meta["job_id"])
JobRecord.objects \
.filter(job_id=meta["job_id"]) \
.update(end_time=timestamp2date(meta["end_time"])) |
Create information for given trial.
Meta file will be loaded if it exists, and the trial information
will be saved in the DB backend.
Args:
expr_dir (str): Directory path of the experiment.
def _create_trial_info(self, expr_dir):
"""Create information for given trial.
Meta file will be loaded if it exists, and the trial information
will be saved in the DB backend.
Args:
expr_dir (str): Directory path of the experiment.
"""
meta = self._build_trial_meta(expr_dir)
self.logger.debug("Create trial for %s" % meta)
trial_record = TrialRecord.from_json(meta)
trial_record.save() |
Update information for given trial.
Meta file will be loaded if it exists, and the trial information
in the DB backend will be updated.
Args:
expr_dir (str): Directory path of the experiment.
def _update_trial_info(self, expr_dir):
"""Update information for given trial.
Meta file will be loaded if it exists, and the trial information
in the DB backend will be updated.
Args:
expr_dir (str): Directory path of the experiment.
"""
trial_id = expr_dir[-8:]
meta_file = os.path.join(expr_dir, EXPR_META_FILE)
meta = parse_json(meta_file)
result_file = os.path.join(expr_dir, EXPR_RESULT_FILE)
offset = self._result_offsets.get(trial_id, 0)
results, new_offset = parse_multiple_json(result_file, offset)
self._add_results(results, trial_id)
self._result_offsets[trial_id] = new_offset
if meta:
TrialRecord.objects \
.filter(trial_id=trial_id) \
.update(trial_status=meta["status"],
end_time=timestamp2date(meta.get("end_time", None)))
elif len(results) > 0:
metrics = {
"episode_reward": results[-1].get("episode_reward_mean", None),
"accuracy": results[-1].get("mean_accuracy", None),
"loss": results[-1].get("loss", None)
}
if results[-1].get("done"):
TrialRecord.objects \
.filter(trial_id=trial_id) \
.update(trial_status="TERMINATED",
end_time=results[-1].get("date", None),
metrics=str(metrics))
else:
TrialRecord.objects \
.filter(trial_id=trial_id) \
.update(metrics=str(metrics)) |
Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info.
def _build_job_meta(cls, job_dir):
"""Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info.
"""
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if not meta:
job_name = job_dir.split("/")[-1]
user = os.environ.get("USER", None)
meta = {
"job_id": job_name,
"job_name": job_name,
"user": user,
"type": "ray",
"start_time": os.path.getctime(job_dir),
"end_time": None,
"best_trial_id": None,
}
if meta.get("start_time", None):
meta["start_time"] = timestamp2date(meta["start_time"])
return meta |
Build meta file for trial.
Args:
expr_dir (str): Directory path of the experiment.
Return:
A dict of trial meta info.
def _build_trial_meta(cls, expr_dir):
"""Build meta file for trial.
Args:
expr_dir (str): Directory path of the experiment.
Return:
A dict of trial meta info.
"""
meta_file = os.path.join(expr_dir, EXPR_META_FILE)
meta = parse_json(meta_file)
if not meta:
job_id = expr_dir.split("/")[-2]
trial_id = expr_dir[-8:]
params = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))
meta = {
"trial_id": trial_id,
"job_id": job_id,
"status": "RUNNING",
"type": "TUNE",
"start_time": os.path.getctime(expr_dir),
"end_time": None,
"progress_offset": 0,
"result_offset": 0,
"params": params
}
if not meta.get("start_time", None):
meta["start_time"] = os.path.getctime(expr_dir)
if isinstance(meta["start_time"], float):
meta["start_time"] = timestamp2date(meta["start_time"])
if meta.get("end_time", None):
meta["end_time"] = timestamp2date(meta["end_time"])
meta["params"] = parse_json(os.path.join(expr_dir, EXPR_PARARM_FILE))
return meta |
Add a list of results into db.
Args:
results (list): A list of json results.
trial_id (str): Id of the trial.
def _add_results(self, results, trial_id):
"""Add a list of results into db.
Args:
results (list): A list of json results.
trial_id (str): Id of the trial.
"""
for result in results:
self.logger.debug("Appending result: %s" % result)
result["trial_id"] = trial_id
result_record = ResultRecord.from_json(result)
result_record.save() |
Adds a time dimension to padded inputs.
Arguments:
padded_inputs (Tensor): a padded batch of sequences. That is,
for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where
A, B, C are sequence elements and * denotes padding.
seq_lens (Tensor): the sequence lengths within the input batch,
suitable for passing to tf.nn.dynamic_rnn().
Returns:
Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].
def add_time_dimension(padded_inputs, seq_lens):
"""Adds a time dimension to padded inputs.
Arguments:
padded_inputs (Tensor): a padded batch of sequences. That is,
for seq_lens=[1, 2, 2], then inputs=[A, *, B, B, C, C], where
A, B, C are sequence elements and * denotes padding.
seq_lens (Tensor): the sequence lengths within the input batch,
suitable for passing to tf.nn.dynamic_rnn().
Returns:
Reshaped tensor of shape [NUM_SEQUENCES, MAX_SEQ_LEN, ...].
"""
# Sequence lengths have to be specified for LSTM batch inputs. The
# input batch must be padded to the max seq length given here. That is,
# batch_size == len(seq_lens) * max(seq_lens)
padded_batch_size = tf.shape(padded_inputs)[0]
max_seq_len = padded_batch_size // tf.shape(seq_lens)[0]
# Dynamically reshape the padded batch to introduce a time dimension.
new_batch_size = padded_batch_size // max_seq_len
new_shape = ([new_batch_size, max_seq_len] +
padded_inputs.get_shape().as_list()[1:])
return tf.reshape(padded_inputs, new_shape) |
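A minimal NumPy sketch of the same reshape, assuming the batch was already
padded so that batch_size == len(seq_lens) * max_seq_len (here three sequences
padded to length 2, feature dimension 3):
import numpy as np
padded_inputs = np.arange(18).reshape(6, 3)        # rows [A, *, B, B, C, C]
seq_lens = np.array([1, 2, 2])
max_seq_len = padded_inputs.shape[0] // seq_lens.shape[0]   # 2
with_time = padded_inputs.reshape(
    (seq_lens.shape[0], max_seq_len) + padded_inputs.shape[1:])
print(with_time.shape)   # (3, 2, 3) == [NUM_SEQUENCES, MAX_SEQ_LEN, ...]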
Truncate and pad experiences into fixed-length sequences.
Arguments:
episode_ids (list): List of episode ids for each step.
unroll_ids (list): List of identifiers for the sample batch. This is
used to make sure sequences are cut between sample batches.
agent_indices (list): List of agent ids for each step. Note that this
has to be combined with episode_ids for uniqueness.
feature_columns (list): List of arrays containing features.
state_columns (list): List of arrays containing LSTM state values.
max_seq_len (int): Max length of sequences before truncation.
dynamic_max (bool): Whether to dynamically shrink the max seq len.
For example, if max len is 20 and the actual max seq len in the
data is 7, it will be shrunk to 7.
_extra_padding (int): Add extra padding to the end of sequences.
Returns:
f_pad (list): Padded feature columns. These will be of shape
[NUM_SEQUENCES * MAX_SEQ_LEN, ...].
s_init (list): Initial states for each sequence, of shape
[NUM_SEQUENCES, ...].
seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES].
Examples:
>>> f_pad, s_init, seq_lens = chop_into_sequences(
episode_ids=[1, 1, 5, 5, 5, 5],
unroll_ids=[4, 4, 4, 4, 4, 4],
agent_indices=[0, 0, 0, 0, 0, 0],
feature_columns=[[4, 4, 8, 8, 8, 8],
[1, 1, 0, 1, 1, 0]],
state_columns=[[4, 5, 4, 5, 5, 5]],
max_seq_len=3)
>>> print(f_pad)
[[4, 4, 0, 8, 8, 8, 8, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 0]]
>>> print(s_init)
[[4, 4, 5]]
>>> print(seq_lens)
[2, 3, 1]
def chop_into_sequences(episode_ids,
unroll_ids,
agent_indices,
feature_columns,
state_columns,
max_seq_len,
dynamic_max=True,
_extra_padding=0):
"""Truncate and pad experiences into fixed-length sequences.
Arguments:
episode_ids (list): List of episode ids for each step.
unroll_ids (list): List of identifiers for the sample batch. This is
used to make sure sequences are cut between sample batches.
agent_indices (list): List of agent ids for each step. Note that this
has to be combined with episode_ids for uniqueness.
feature_columns (list): List of arrays containing features.
state_columns (list): List of arrays containing LSTM state values.
max_seq_len (int): Max length of sequences before truncation.
dynamic_max (bool): Whether to dynamically shrink the max seq len.
For example, if max len is 20 and the actual max seq len in the
data is 7, it will be shrunk to 7.
_extra_padding (int): Add extra padding to the end of sequences.
Returns:
f_pad (list): Padded feature columns. These will be of shape
[NUM_SEQUENCES * MAX_SEQ_LEN, ...].
s_init (list): Initial states for each sequence, of shape
[NUM_SEQUENCES, ...].
seq_lens (list): List of sequence lengths, of shape [NUM_SEQUENCES].
Examples:
>>> f_pad, s_init, seq_lens = chop_into_sequences(
episode_ids=[1, 1, 5, 5, 5, 5],
unroll_ids=[4, 4, 4, 4, 4, 4],
agent_indices=[0, 0, 0, 0, 0, 0],
feature_columns=[[4, 4, 8, 8, 8, 8],
[1, 1, 0, 1, 1, 0]],
state_columns=[[4, 5, 4, 5, 5, 5]],
max_seq_len=3)
>>> print(f_pad)
[[4, 4, 0, 8, 8, 8, 8, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 0]]
>>> print(s_init)
[[4, 4, 5]]
>>> print(seq_lens)
[2, 3, 1]
"""
prev_id = None
seq_lens = []
seq_len = 0
unique_ids = np.add(
np.add(episode_ids, agent_indices),
np.array(unroll_ids) << 32)
for uid in unique_ids:
if (prev_id is not None and uid != prev_id) or \
seq_len >= max_seq_len:
seq_lens.append(seq_len)
seq_len = 0
seq_len += 1
prev_id = uid
if seq_len:
seq_lens.append(seq_len)
assert sum(seq_lens) == len(unique_ids)
# Dynamically shrink max len as needed to optimize memory usage
if dynamic_max:
max_seq_len = max(seq_lens) + _extra_padding
feature_sequences = []
for f in feature_columns:
f = np.array(f)
f_pad = np.zeros((len(seq_lens) * max_seq_len, ) + np.shape(f)[1:])
seq_base = 0
i = 0
for l in seq_lens:
for seq_offset in range(l):
f_pad[seq_base + seq_offset] = f[i]
i += 1
seq_base += max_seq_len
assert i == len(unique_ids), f
feature_sequences.append(f_pad)
initial_states = []
for s in state_columns:
s = np.array(s)
s_init = []
i = 0
for l in seq_lens:
s_init.append(s[i])
i += l
initial_states.append(np.array(s_init))
return feature_sequences, initial_states, np.array(seq_lens) |
Return a config perturbed as specified.
Args:
config (dict): Original hyperparameter configuration.
mutations (dict): Specification of mutations to perform as documented
in the PopulationBasedTraining scheduler.
resample_probability (float): Probability of allowing resampling of a
particular variable.
custom_explore_fn (func): Custom explore function applied after the
built-in config perturbations.
def explore(config, mutations, resample_probability, custom_explore_fn):
"""Return a config perturbed as specified.
Args:
config (dict): Original hyperparameter configuration.
mutations (dict): Specification of mutations to perform as documented
in the PopulationBasedTraining scheduler.
resample_probability (float): Probability of allowing resampling of a
particular variable.
custom_explore_fn (func): Custom explore function applied after the
built-in config perturbations.
"""
new_config = copy.deepcopy(config)
for key, distribution in mutations.items():
if isinstance(distribution, dict):
new_config.update({
key: explore(config[key], mutations[key], resample_probability,
None)
})
elif isinstance(distribution, list):
if random.random() < resample_probability or \
config[key] not in distribution:
new_config[key] = random.choice(distribution)
elif random.random() > 0.5:
new_config[key] = distribution[max(
0,
distribution.index(config[key]) - 1)]
else:
new_config[key] = distribution[min(
len(distribution) - 1,
distribution.index(config[key]) + 1)]
else:
if random.random() < resample_probability:
new_config[key] = distribution()
elif random.random() > 0.5:
new_config[key] = config[key] * 1.2
else:
new_config[key] = config[key] * 0.8
if type(config[key]) is int:
new_config[key] = int(new_config[key])
if custom_explore_fn:
new_config = custom_explore_fn(new_config)
assert new_config is not None, \
"Custom explore fn failed to return new config"
logger.info("[explore] perturbed config from {} -> {}".format(
config, new_config))
return new_config |
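A hedged usage sketch of explore() above; the mutation spec mirrors what the
PopulationBasedTraining scheduler accepts (a list of choices, or a callable
that resamples a value), and it assumes the module-level logger used by
explore() is configured:
import random
config = {"lr": 0.01, "batch_size": 32}
mutations = {
    "lr": lambda: random.uniform(1e-4, 1e-1),  # resampled, or scaled by 0.8/1.2
    "batch_size": [16, 32, 64],                # shifted to a neighbor, or resampled
}
new_config = explore(config, mutations,
                     resample_probability=0.25,
                     custom_explore_fn=None)
# e.g. {"lr": 0.012, "batch_size": 64}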
Appends perturbed params to the trial name to show in the console.
def make_experiment_tag(orig_tag, config, mutations):
"""Appends perturbed params to the trial name to show in the console."""
resolved_vars = {}
for k in mutations.keys():
resolved_vars[("config", k)] = config[k]
return "{}@perturbed[{}]".format(orig_tag, format_vars(resolved_vars)) |
Logs the transition during an explore/exploit step.
For each step, logs: [target trial tag, clone trial tag, target trial
iteration, clone trial iteration, old config, new config].
def _log_config_on_step(self, trial_state, new_state, trial,
trial_to_clone, new_config):
"""Logs the transition during an explore/exploit step.
For each step, logs: [target trial tag, clone trial tag, target trial
iteration, clone trial iteration, old config, new config].
"""
trial_name, trial_to_clone_name = (trial_state.orig_tag,
new_state.orig_tag)
trial_id = "".join(itertools.takewhile(str.isdigit, trial_name))
trial_to_clone_id = "".join(
itertools.takewhile(str.isdigit, trial_to_clone_name))
trial_path = os.path.join(trial.local_dir,
"pbt_policy_" + trial_id + ".txt")
trial_to_clone_path = os.path.join(
trial_to_clone.local_dir,
"pbt_policy_" + trial_to_clone_id + ".txt")
policy = [
trial_name, trial_to_clone_name,
trial.last_result[TRAINING_ITERATION],
trial_to_clone.last_result[TRAINING_ITERATION],
trial_to_clone.config, new_config
]
# Log to global file.
with open(os.path.join(trial.local_dir, "pbt_global.txt"), "a+") as f:
f.write(json.dumps(policy) + "\n")
# Overwrite state in target trial from trial_to_clone.
if os.path.exists(trial_to_clone_path):
shutil.copyfile(trial_to_clone_path, trial_path)
# Log new exploit in target trial log.
with open(trial_path, "a+") as f:
f.write(json.dumps(policy) + "\n") |
Transfers perturbed state from trial_to_clone -> trial.
If specified, also logs the updated hyperparam state.
def _exploit(self, trial_executor, trial, trial_to_clone):
"""Transfers perturbed state from trial_to_clone -> trial.
If specified, also logs the updated hyperparam state."""
trial_state = self._trial_state[trial]
new_state = self._trial_state[trial_to_clone]
if not new_state.last_checkpoint:
logger.info("[pbt]: no checkpoint for trial."
" Skip exploit for Trial {}".format(trial))
return
new_config = explore(trial_to_clone.config, self._hyperparam_mutations,
self._resample_probability,
self._custom_explore_fn)
logger.info("[exploit] transferring weights from trial "
"{} (score {}) -> {} (score {})".format(
trial_to_clone, new_state.last_score, trial,
trial_state.last_score))
if self._log_config:
self._log_config_on_step(trial_state, new_state, trial,
trial_to_clone, new_config)
new_tag = make_experiment_tag(trial_state.orig_tag, new_config,
self._hyperparam_mutations)
reset_successful = trial_executor.reset_trial(trial, new_config,
new_tag)
if reset_successful:
trial_executor.restore(
trial, Checkpoint.from_object(new_state.last_checkpoint))
else:
trial_executor.stop_trial(trial, stop_logger=False)
trial.config = new_config
trial.experiment_tag = new_tag
trial_executor.start_trial(
trial, Checkpoint.from_object(new_state.last_checkpoint))
self._num_perturbations += 1
# Transfer over the last perturbation time as well
trial_state.last_perturbation_time = new_state.last_perturbation_time |
Returns trials in the lower and upper `quantile` of the population.
If there is not enough data to compute this, returns empty lists.
def _quantiles(self):
"""Returns trials in the lower and upper `quantile` of the population.
If there is not enough data to compute this, returns empty lists."""
trials = []
for trial, state in self._trial_state.items():
if state.last_score is not None and not trial.is_finished():
trials.append(trial)
trials.sort(key=lambda t: self._trial_state[t].last_score)
if len(trials) <= 1:
return [], []
else:
return (trials[:int(math.ceil(len(trials) * PBT_QUANTILE))],
trials[int(math.floor(-len(trials) * PBT_QUANTILE)):]) |
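A small numeric sketch of the split above, assuming PBT_QUANTILE = 0.25 and
eight trials already sorted by last_score ascending (worst first):
import math
PBT_QUANTILE = 0.25
trials = list("ABCDEFGH")
lower = trials[:int(math.ceil(len(trials) * PBT_QUANTILE))]    # ['A', 'B']
upper = trials[int(math.floor(-len(trials) * PBT_QUANTILE)):]  # ['G', 'H']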
Ensures all trials get a fair share of time (as defined by time_attr).
This enables the PBT scheduler to support a greater number of
concurrent trials than can fit in the cluster at any given time.
def choose_trial_to_run(self, trial_runner):
"""Ensures all trials get a fair share of time (as defined by time_attr).
This enables the PBT scheduler to support a greater number of
concurrent trials than can fit in the cluster at any given time.
"""
candidates = []
for trial in trial_runner.get_trials():
if trial.status in [Trial.PENDING, Trial.PAUSED] and \
trial_runner.has_resources(trial.resources):
candidates.append(trial)
candidates.sort(
key=lambda trial: self._trial_state[trial].last_perturbation_time)
return candidates[0] if candidates else None |
Returns the ith default (aws_key_pair_name, key_pair_path).
def key_pair(i, region):
"""Returns the ith default (aws_key_pair_name, key_pair_path)."""
if i == 0:
return ("{}_{}".format(RAY, region),
os.path.expanduser("~/.ssh/{}_{}.pem".format(RAY, region)))
return ("{}_{}_{}".format(RAY, i, region),
os.path.expanduser("~/.ssh/{}_{}_{}.pem".format(RAY, i, region))) |
Process the flattened inputs.
Note that dict inputs will be flattened into a vector. To define a
model that processes the components separately, use _build_layers_v2().
def _build_layers(self, inputs, num_outputs, options):
"""Process the flattened inputs.
Note that dict inputs will be flattened into a vector. To define a
model that processes the components separately, use _build_layers_v2().
"""
hiddens = options.get("fcnet_hiddens")
activation = get_activation_fn(options.get("fcnet_activation"))
with tf.name_scope("fc_net"):
i = 1
last_layer = inputs
for size in hiddens:
label = "fc{}".format(i)
last_layer = slim.fully_connected(
last_layer,
size,
weights_initializer=normc_initializer(1.0),
activation_fn=activation,
scope=label)
i += 1
label = "fc_out"
output = slim.fully_connected(
last_layer,
num_outputs,
weights_initializer=normc_initializer(0.01),
activation_fn=None,
scope=label)
return output, last_layer |
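Sketch of the options dict this builder reads; the keys match the model
config entries it accesses above ("fcnet_hiddens", "fcnet_activation"), while
the model/inputs names are placeholders:
options = {"fcnet_hiddens": [256, 256], "fcnet_activation": "tanh"}
# output, last_layer = model._build_layers(inputs, num_outputs=4, options=options)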
Returns the given config dict merged with a base agent conf.
def with_base_config(base_config, extra_config):
"""Returns the given config dict merged with a base agent conf."""
config = copy.deepcopy(base_config)
config.update(extra_config)
return config |
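Minimal usage sketch: keys in extra_config override the deep-copied base, and
the base dict itself is left untouched (note the merge is a shallow update):
base = {"lr": 0.001, "gamma": 0.99}
merged = with_base_config(base, {"lr": 0.0005, "num_workers": 4})
# merged == {"lr": 0.0005, "gamma": 0.99, "num_workers": 4}; base is unchanged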
Returns the class of a known agent given its name.
def get_agent_class(alg):
"""Returns the class of a known agent given its name."""
try:
return _get_agent_class(alg)
except ImportError:
from ray.rllib.agents.mock import _agent_import_failed
return _agent_import_failed(traceback.format_exc()) |
Return the first IP address for an ethernet interface on the system.
def determine_ip_address():
"""Return the first IP address for an ethernet interface on the system."""
addrs = [
x.address for k, v in psutil.net_if_addrs().items() if k[0] == "e"
for x in v if x.family == AddressFamily.AF_INET
]
return addrs[0] |
Get any changes to the log files and push updates to Redis.
def perform_iteration(self):
"""Get any changes to the log files and push updates to Redis."""
stats = self.get_all_stats()
self.redis_client.publish(
self.redis_key,
jsonify_asdict(stats),
) |
Run the reporter.
def run(self):
"""Run the reporter."""
while True:
try:
self.perform_iteration()
except Exception:
traceback.print_exc()
pass
time.sleep(ray_constants.REPORTER_UPDATE_INTERVAL_MS / 1000) |
Throws an exception if Ray cannot serialize this class efficiently.
Args:
cls (type): The class to be serialized.
Raises:
Exception: An exception is raised if Ray cannot serialize this class
efficiently.
def check_serializable(cls):
"""Throws an exception if Ray cannot serialize this class efficiently.
Args:
cls (type): The class to be serialized.
Raises:
Exception: An exception is raised if Ray cannot serialize this class
efficiently.
"""
if is_named_tuple(cls):
# This case works.
return
if not hasattr(cls, "__new__"):
print("The class {} does not have a '__new__' attribute and is "
"probably an old-stye class. Please make it a new-style class "
"by inheriting from 'object'.")
raise RayNotDictionarySerializable("The class {} does not have a "
"'__new__' attribute and is "
"probably an old-style class. We "
"do not support this. Please make "
"it a new-style class by "
"inheriting from 'object'."
.format(cls))
try:
obj = cls.__new__(cls)
except Exception:
raise RayNotDictionarySerializable("The class {} has overridden "
"'__new__', so Ray may not be able "
"to serialize it efficiently."
.format(cls))
if not hasattr(obj, "__dict__"):
raise RayNotDictionarySerializable("Objects of the class {} do not "
"have a '__dict__' attribute, so "
"Ray cannot serialize it "
"efficiently.".format(cls))
if hasattr(obj, "__slots__"):
raise RayNotDictionarySerializable("The class {} uses '__slots__', so "
"Ray may not be able to serialize "
"it efficiently.".format(cls)) |
Return True if cls is a namedtuple and False otherwise.
def is_named_tuple(cls):
"""Return True if cls is a namedtuple and False otherwise."""
b = cls.__bases__
if len(b) != 1 or b[0] != tuple:
return False
f = getattr(cls, "_fields", None)
if not isinstance(f, tuple):
return False
return all(type(n) == str for n in f) |
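A hedged sketch exercising the two helpers above: a namedtuple and a plain
new-style class pass check_serializable(), while a class that declares
__slots__ (and therefore has no per-instance __dict__) raises
RayNotDictionarySerializable:
import collections
Point = collections.namedtuple("Point", ["x", "y"])
class Plain(object):
    def __init__(self, x):
        self.x = x
class Slotted(object):
    __slots__ = ("x",)
print(is_named_tuple(Point))   # True
print(is_named_tuple(Plain))   # False
check_serializable(Point)      # returns immediately (namedtuple case)
check_serializable(Plain)      # passes the __new__/__dict__ checks
try:
    check_serializable(Slotted)
except Exception as e:
    print(type(e).__name__)    # RayNotDictionarySerializable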
Register a trainable function or class.
Args:
name (str): Name to register.
trainable (obj): Function or tune.Trainable class. Functions must
take (config, status_reporter) as arguments and will be
automatically converted into a class during registration.
def register_trainable(name, trainable):
"""Register a trainable function or class.
Args:
name (str): Name to register.
trainable (obj): Function or tune.Trainable class. Functions must
take (config, status_reporter) as arguments and will be
automatically converted into a class during registration.
"""
from ray.tune.trainable import Trainable
from ray.tune.function_runner import wrap_function
if isinstance(trainable, type):
logger.debug("Detected class for trainable.")
elif isinstance(trainable, FunctionType):
logger.debug("Detected function for trainable.")
trainable = wrap_function(trainable)
elif callable(trainable):
logger.warning(
"Detected unknown callable for trainable. Converting to class.")
trainable = wrap_function(trainable)
if not issubclass(trainable, Trainable):
raise TypeError("Second argument must be convertible to Trainable",
trainable)
_global_registry.register(TRAINABLE_CLASS, name, trainable) |
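A hedged usage sketch of register_trainable() with a function trainable; per
the docstring it takes (config, reporter) and is wrapped into a Trainable
class during registration (the metric names reported here are illustrative):
def my_trainable(config, reporter):
    for step in range(10):
        reporter(timesteps_total=step, mean_accuracy=config["lr"] * step)
register_trainable("my_trainable", my_trainable)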
Register a custom environment for use with RLlib.
Args:
name (str): Name to register.
env_creator (obj): Function that creates an env.
def register_env(name, env_creator):
"""Register a custom environment for use with RLlib.
Args:
name (str): Name to register.
env_creator (obj): Function that creates an env.
"""
if not isinstance(env_creator, FunctionType):
raise TypeError("Second argument must be a function.", env_creator)
_global_registry.register(ENV_CREATOR, name, env_creator) |
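A hedged usage sketch of register_env(), assuming gym is available; the
creator receives the env_config dict that RLlib passes through:
import gym
def env_creator(env_config):
    return gym.make("CartPole-v0")
register_env("my_cartpole", env_creator)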
Return optimization stats reported from the policy graph.
Example:
>>> grad_info = evaluator.learn_on_batch(samples)
>>> print(get_stats(grad_info))
{"vf_loss": ..., "policy_loss": ...}
def get_learner_stats(grad_info):
"""Return optimization stats reported from the policy graph.
Example:
>>> grad_info = evaluator.learn_on_batch(samples)
>>> print(get_stats(grad_info))
{"vf_loss": ..., "policy_loss": ...}
"""
if LEARNER_STATS_KEY in grad_info:
return grad_info[LEARNER_STATS_KEY]
multiagent_stats = {}
for k, v in grad_info.items():
if type(v) is dict:
if LEARNER_STATS_KEY in v:
multiagent_stats[k] = v[LEARNER_STATS_KEY]
return multiagent_stats |
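Sketch of the two grad_info shapes handled above, assuming LEARNER_STATS_KEY
is the string key emitted by the policy graphs (e.g. "learner_stats"):
single = {"learner_stats": {"policy_loss": 0.2, "vf_loss": 0.1}}
multi = {
    "policy_0": {"learner_stats": {"policy_loss": 0.2}},
    "policy_1": {"learner_stats": {"policy_loss": 0.5}},
}
# get_learner_stats(single) -> {"policy_loss": 0.2, "vf_loss": 0.1}
# get_learner_stats(multi)  -> {"policy_0": {"policy_loss": 0.2},
#                               "policy_1": {"policy_loss": 0.5}}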
Gathers episode metrics from PolicyEvaluator instances.
def collect_metrics(local_evaluator=None,
remote_evaluators=[],
timeout_seconds=180):
"""Gathers episode metrics from PolicyEvaluator instances."""
episodes, num_dropped = collect_episodes(
local_evaluator, remote_evaluators, timeout_seconds=timeout_seconds)
metrics = summarize_episodes(episodes, episodes, num_dropped)
return metrics |
Gathers new episode metrics tuples from the given evaluators.
def collect_episodes(local_evaluator=None,
remote_evaluators=[],
timeout_seconds=180):
"""Gathers new episode metrics tuples from the given evaluators."""
pending = [
a.apply.remote(lambda ev: ev.get_metrics()) for a in remote_evaluators
]
collected, _ = ray.wait(
pending, num_returns=len(pending), timeout=timeout_seconds * 1.0)
num_metric_batches_dropped = len(pending) - len(collected)
if pending and len(collected) == 0:
raise ValueError(
"Timed out waiting for metrics from workers. You can configure "
"this timeout with `collect_metrics_timeout`.")
metric_lists = ray_get_and_free(collected)
if local_evaluator:
metric_lists.append(local_evaluator.get_metrics())
episodes = []
for metrics in metric_lists:
episodes.extend(metrics)
return episodes, num_metric_batches_dropped |
Summarizes a set of episode metrics tuples.
Arguments:
episodes: smoothed set of episodes including historical ones
new_episodes: just the new episodes in this iteration
num_dropped: number of workers that haven't returned their metrics
def summarize_episodes(episodes, new_episodes, num_dropped):
"""Summarizes a set of episode metrics tuples.
Arguments:
episodes: smoothed set of episodes including historical ones
new_episodes: just the new episodes in this iteration
num_dropped: number of workers that haven't returned their metrics
"""
if num_dropped > 0:
logger.warning("WARNING: {} workers have NOT returned metrics".format(
num_dropped))
episodes, estimates = _partition(episodes)
new_episodes, _ = _partition(new_episodes)
episode_rewards = []
episode_lengths = []
policy_rewards = collections.defaultdict(list)
custom_metrics = collections.defaultdict(list)
perf_stats = collections.defaultdict(list)
for episode in episodes:
episode_lengths.append(episode.episode_length)
episode_rewards.append(episode.episode_reward)
for k, v in episode.custom_metrics.items():
custom_metrics[k].append(v)
for k, v in episode.perf_stats.items():
perf_stats[k].append(v)
for (_, policy_id), reward in episode.agent_rewards.items():
if policy_id != DEFAULT_POLICY_ID:
policy_rewards[policy_id].append(reward)
if episode_rewards:
min_reward = min(episode_rewards)
max_reward = max(episode_rewards)
else:
min_reward = float("nan")
max_reward = float("nan")
avg_reward = np.mean(episode_rewards)
avg_length = np.mean(episode_lengths)
for policy_id, rewards in policy_rewards.copy().items():
policy_rewards[policy_id] = np.mean(rewards)
for k, v_list in custom_metrics.copy().items():
custom_metrics[k + "_mean"] = np.mean(v_list)
filt = [v for v in v_list if not np.isnan(v)]
if filt:
custom_metrics[k + "_min"] = np.min(filt)
custom_metrics[k + "_max"] = np.max(filt)
else:
custom_metrics[k + "_min"] = float("nan")
custom_metrics[k + "_max"] = float("nan")
del custom_metrics[k]
for k, v_list in perf_stats.copy().items():
perf_stats[k] = np.mean(v_list)
estimators = collections.defaultdict(lambda: collections.defaultdict(list))
for e in estimates:
acc = estimators[e.estimator_name]
for k, v in e.metrics.items():
acc[k].append(v)
for name, metrics in estimators.items():
for k, v_list in metrics.items():
metrics[k] = np.mean(v_list)
estimators[name] = dict(metrics)
return dict(
episode_reward_max=max_reward,
episode_reward_min=min_reward,
episode_reward_mean=avg_reward,
episode_len_mean=avg_length,
episodes_this_iter=len(new_episodes),
policy_reward_mean=dict(policy_rewards),
custom_metrics=dict(custom_metrics),
sampler_perf=dict(perf_stats),
off_policy_estimator=dict(estimators),
num_metric_batches_dropped=num_dropped) |
Divides metrics data into true rollouts vs off-policy estimates.
def _partition(episodes):
"""Divides metrics data into true rollouts vs off-policy estimates."""
from ray.rllib.evaluation.sampler import RolloutMetrics
rollouts, estimates = [], []
for e in episodes:
if isinstance(e, RolloutMetrics):
rollouts.append(e)
elif isinstance(e, OffPolicyEstimate):
estimates.append(e)
else:
raise ValueError("Unknown metric type: {}".format(e))
return rollouts, estimates |
Sets status and checkpoints metadata if needed.
Only checkpoints metadata if trial status is a terminal condition.
PENDING, PAUSED, and RUNNING switches have checkpoints taken care of
in the TrialRunner.
Args:
trial (Trial): Trial to checkpoint.
status (Trial.status): Status to set trial to.
def set_status(self, trial, status):
"""Sets status and checkpoints metadata if needed.
Only checkpoints metadata if trial status is a terminal condition.
PENDING, PAUSED, and RUNNING switches have checkpoints taken care of
in the TrialRunner.
Args:
trial (Trial): Trial to checkpoint.
status (Trial.status): Status to set trial to.
"""
trial.status = status
if status in [Trial.TERMINATED, Trial.ERROR]:
self.try_checkpoint_metadata(trial) |
Checkpoints metadata.
Args:
trial (Trial): Trial to checkpoint.
def try_checkpoint_metadata(self, trial):
"""Checkpoints metadata.
Args:
trial (Trial): Trial to checkpoint.
"""
if trial._checkpoint.storage == Checkpoint.MEMORY:
logger.debug("Not saving data for trial w/ memory checkpoint.")
return
try:
logger.debug("Saving trial metadata.")
self._cached_trial_state[trial.trial_id] = trial.__getstate__()
except Exception:
logger.exception("Error checkpointing trial metadata.") |
Pauses the trial.
We want to release resources (specifically GPUs) when pausing an
experiment. This results in a PAUSED state that is similar to TERMINATED.
def pause_trial(self, trial):
"""Pauses the trial.
We want to release resources (specifically GPUs) when pausing an
experiment. This results in a PAUSED state that is similar to TERMINATED.
"""
assert trial.status == Trial.RUNNING, trial.status
try:
self.save(trial, Checkpoint.MEMORY)
self.stop_trial(trial, stop_logger=False)
self.set_status(trial, Trial.PAUSED)
except Exception:
logger.exception("Error pausing runner.")
self.set_status(trial, Trial.ERROR) |
Sets PAUSED trial to pending to allow scheduler to start.
def unpause_trial(self, trial):
"""Sets PAUSED trial to pending to allow scheduler to start."""
assert trial.status == Trial.PAUSED, trial.status
self.set_status(trial, Trial.PENDING) |
Resumes PAUSED trials. This is a blocking call.
def resume_trial(self, trial):
"""Resumes PAUSED trials. This is a blocking call."""
assert trial.status == Trial.PAUSED, trial.status
self.start_trial(trial) |
Passes the result to Nevergrad unless early terminated or errored.
The result is internally negated when interacting with Nevergrad
so that Nevergrad Optimizers can "maximize" this value,
as it minimizes by default.
def on_trial_complete(self,
trial_id,
result=None,
error=False,
early_terminated=False):
"""Passes the result to Nevergrad unless early terminated or errored.
The result is internally negated when interacting with Nevergrad
so that Nevergrad Optimizers can "maximize" this value,
as it minimizes by default.
"""
ng_trial_info = self._live_trial_mapping.pop(trial_id)
if result:
self._nevergrad_opt.tell(ng_trial_info, -result[self._reward_attr]) |
Start the import thread.
def start(self):
"""Start the import thread."""
self.t = threading.Thread(target=self._run, name="ray_import_thread")
# Making the thread a daemon causes it to exit
# when the main thread exits.
self.t.daemon = True
self.t.start() |
Process the given export key from redis.
def _process_key(self, key):
"""Process the given export key from redis."""
# Handle the driver case first.
if self.mode != ray.WORKER_MODE:
if key.startswith(b"FunctionsToRun"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
# Return because FunctionsToRun are the only things that
# the driver should import.
return
if key.startswith(b"RemoteFunction"):
with profiling.profile("register_remote_function"):
(self.worker.function_actor_manager.
fetch_and_register_remote_function(key))
elif key.startswith(b"FunctionsToRun"):
with profiling.profile("fetch_and_run_function"):
self.fetch_and_execute_function_to_run(key)
elif key.startswith(b"ActorClass"):
# Keep track of the fact that this actor class has been
# exported so that we know it is safe to turn this worker
# into an actor of that class.
self.worker.function_actor_manager.imported_actor_classes.add(key)
# TODO(rkn): We may need to bring back the case of
# fetching actor classes here.
else:
raise Exception("This code should be unreachable.") |
Run an arbitrary function on the worker.
def fetch_and_execute_function_to_run(self, key):
"""Run an arbitrary function on the worker."""
(driver_id, serialized_function,
run_on_other_drivers) = self.redis_client.hmget(
key, ["driver_id", "function", "run_on_other_drivers"])
if (utils.decode(run_on_other_drivers) == "False"
and self.worker.mode == ray.SCRIPT_MODE
and driver_id != self.worker.task_driver_id.binary()):
return
try:
# Deserialize the function.
function = pickle.loads(serialized_function)
# Run the function.
function({"worker": self.worker})
except Exception:
# If an exception was thrown when the function was run, we record
# the traceback and notify the scheduler of the failure.
traceback_str = traceback.format_exc()
# Log the error message.
utils.push_error_to_driver(
self.worker,
ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
traceback_str,
driver_id=ray.DriverID(driver_id)) |
Called to clip actions to the specified range of this policy.
Arguments:
action: Single action.
space: Action space the actions should be present in.
Returns:
Clipped action (or nested structure of clipped actions for Tuple spaces).
def clip_action(action, space):
"""Called to clip actions to the specified range of this policy.
Arguments:
action: Single action.
space: Action space the actions should be present in.
Returns:
Clipped action (or nested structure of clipped actions for Tuple spaces).
"""
if isinstance(space, gym.spaces.Box):
return np.clip(action, space.low, space.high)
elif isinstance(space, gym.spaces.Tuple):
if type(action) not in (tuple, list):
raise ValueError("Expected tuple space for actions {}: {}".format(
action, space))
out = []
for a, s in zip(action, space.spaces):
out.append(clip_action(a, s))
return out
else:
return action |
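Minimal sketch of clip_action() on a Box space and a Tuple space, assuming
gym is available (the Discrete component is returned unchanged by the
fall-through branch):
import numpy as np
import gym
box = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
print(clip_action(np.array([1.7, -3.0]), box))        # [ 1. -1.]
tup = gym.spaces.Tuple([box, gym.spaces.Discrete(4)])
print(clip_action((np.array([2.0, 0.5]), 3), tup))    # [array([1. , 0.5]), 3]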
Passes the result to skopt unless early terminated or errored.
The result is internally negated when interacting with Skopt
so that Skopt Optimizers can "maximize" this value,
as it minimizes by default.
def on_trial_complete(self,
trial_id,
result=None,
error=False,
early_terminated=False):
"""Passes the result to skopt unless early terminated or errored.
The result is internally negated when interacting with Skopt
so that Skopt Optimizers can "maximize" this value,
as it minimizes by default.
"""
skopt_trial_info = self._live_trial_mapping.pop(trial_id)
if result:
self._skopt_opt.tell(skopt_trial_info, -result[self._reward_attr]) |
Convert a hostname to a numerical IP address in an address.
This should be a no-op if address already contains an actual numerical IP
address.
Args:
address: This can be either a string containing a hostname (or an IP
address) and a port or it can be just an IP address.
Returns:
The same address but with the hostname replaced by a numerical IP
address.
def address_to_ip(address):
"""Convert a hostname to a numerical IP address in an address.
This should be a no-op if address already contains an actual numerical IP
address.
Args:
address: This can be either a string containing a hostname (or an IP
address) and a port or it can be just an IP address.
Returns:
The same address but with the hostname replaced by a numerical IP
address.
"""
address_parts = address.split(":")
ip_address = socket.gethostbyname(address_parts[0])
# Make sure localhost isn't resolved to the loopback ip
if ip_address == "127.0.0.1":
ip_address = get_node_ip_address()
return ":".join([ip_address] + address_parts[1:]) |
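Hedged usage sketch (actual output depends on the host's DNS and interfaces):
print(address_to_ip("localhost:6379"))   # e.g. "192.168.1.5:6379", loopback avoided
print(address_to_ip("10.0.0.3:6379"))    # "10.0.0.3:6379", already numeric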
Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node.
def get_node_ip_address(address="8.8.8.8:53"):
"""Determine the IP address of the local node.
Args:
address (str): The IP address and port of any known live service on the
network you care about.
Returns:
The IP address of the current node.
"""
ip_address, port = address.split(":")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# This command will raise an exception if there is no internet
# connection.
s.connect((ip_address, int(port)))
node_ip_address = s.getsockname()[0]
except Exception as e:
node_ip_address = "127.0.0.1"
# [Errno 101] Network is unreachable
if e.errno == 101:
try:
# try get node ip address from host name
host_name = socket.getfqdn(socket.gethostname())
node_ip_address = socket.gethostbyname(host_name)
except Exception:
pass
finally:
s.close()
return node_ip_address |
Create a Redis client.
Args:
redis_address (str): The IP address and port of the Redis server.
password (str): The password of the Redis server.
Returns:
A Redis client.
def create_redis_client(redis_address, password=None):
"""Create a Redis client.
Args:
redis_address (str): The IP address and port of the Redis server.
password (str): The password of the Redis server.
Returns:
A Redis client.
"""
redis_ip_address, redis_port = redis_address.split(":")
# For this command to work, some other client (on the same machine
# as Redis) must have run "CONFIG SET protected-mode no".
return redis.StrictRedis(
host=redis_ip_address, port=int(redis_port), password=password) |
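Minimal usage sketch, assuming a Redis server is reachable at the given
address and accepts unauthenticated connections:
client = create_redis_client("127.0.0.1:6379", password=None)
client.ping()   # raises redis.ConnectionError if the server is not up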
Start one of the Ray processes.
TODO(rkn): We need to figure out how these commands interact. For example,
it may only make sense to start a process in gdb if we also start it in
tmux. Similarly, certain combinations probably don't make sense, like
simultaneously running the process in valgrind and the profiler.
Args:
command (List[str]): The command to use to start the Ray process.
process_type (str): The type of the process that is being started
(e.g., "raylet").
env_updates (dict): A dictionary of additional environment variables to
run the command with (in addition to the caller's environment
variables).
cwd (str): The directory to run the process in.
use_valgrind (bool): True if we should start the process in valgrind.
use_gdb (bool): True if we should start the process in gdb.
use_valgrind_profiler (bool): True if we should start the process in
the valgrind profiler.
use_perftools_profiler (bool): True if we should profile the process
using perftools.
use_tmux (bool): True if we should start the process in tmux.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
Returns:
Information about the process that was started including a handle to
the process that was started.
def start_ray_process(command,
process_type,
env_updates=None,
cwd=None,
use_valgrind=False,
use_gdb=False,
use_valgrind_profiler=False,
use_perftools_profiler=False,
use_tmux=False,
stdout_file=None,
stderr_file=None):
"""Start one of the Ray processes.
TODO(rkn): We need to figure out how these commands interact. For example,
it may only make sense to start a process in gdb if we also start it in
tmux. Similarly, certain combinations probably don't make sense, like
simultaneously running the process in valgrind and the profiler.
Args:
command (List[str]): The command to use to start the Ray process.
process_type (str): The type of the process that is being started
(e.g., "raylet").
env_updates (dict): A dictionary of additional environment variables to
run the command with (in addition to the caller's environment
variables).
cwd (str): The directory to run the process in.
use_valgrind (bool): True if we should start the process in valgrind.
use_gdb (bool): True if we should start the process in gdb.
use_valgrind_profiler (bool): True if we should start the process in
the valgrind profiler.
use_perftools_profiler (bool): True if we should profile the process
using perftools.
use_tmux (bool): True if we should start the process in tmux.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
Returns:
Information about the process that was started including a handle to
the process that was started.
"""
# Detect which flags are set through environment variables.
valgrind_env_var = "RAY_{}_VALGRIND".format(process_type.upper())
if os.environ.get(valgrind_env_var) == "1":
logger.info("Detected environment variable '%s'.", valgrind_env_var)
use_valgrind = True
valgrind_profiler_env_var = "RAY_{}_VALGRIND_PROFILER".format(
process_type.upper())
if os.environ.get(valgrind_profiler_env_var) == "1":
logger.info("Detected environment variable '%s'.",
valgrind_profiler_env_var)
use_valgrind_profiler = True
perftools_profiler_env_var = "RAY_{}_PERFTOOLS_PROFILER".format(
process_type.upper())
if os.environ.get(perftools_profiler_env_var) == "1":
logger.info("Detected environment variable '%s'.",
perftools_profiler_env_var)
use_perftools_profiler = True
tmux_env_var = "RAY_{}_TMUX".format(process_type.upper())
if os.environ.get(tmux_env_var) == "1":
logger.info("Detected environment variable '%s'.", tmux_env_var)
use_tmux = True
gdb_env_var = "RAY_{}_GDB".format(process_type.upper())
if os.environ.get(gdb_env_var) == "1":
logger.info("Detected environment variable '%s'.", gdb_env_var)
use_gdb = True
if sum(
[use_gdb, use_valgrind, use_valgrind_profiler, use_perftools_profiler
]) > 1:
raise ValueError(
"At most one of the 'use_gdb', 'use_valgrind', "
"'use_valgrind_profiler', and 'use_perftools_profiler' flags can "
"be used at a time.")
if env_updates is None:
env_updates = {}
if not isinstance(env_updates, dict):
raise ValueError("The 'env_updates' argument must be a dictionary.")
modified_env = os.environ.copy()
modified_env.update(env_updates)
if use_gdb:
if not use_tmux:
raise ValueError(
"If 'use_gdb' is true, then 'use_tmux' must be true as well.")
# TODO(suquark): Any better temp file creation here?
gdb_init_path = "/tmp/ray/gdb_init_{}_{}".format(
process_type, time.time())
ray_process_path = command[0]
ray_process_args = command[1:]
run_args = " ".join(["'{}'".format(arg) for arg in ray_process_args])
with open(gdb_init_path, "w") as gdb_init_file:
gdb_init_file.write("run {}".format(run_args))
command = ["gdb", ray_process_path, "-x", gdb_init_path]
if use_valgrind:
command = [
"valgrind", "--track-origins=yes", "--leak-check=full",
"--show-leak-kinds=all", "--leak-check-heuristics=stdstring",
"--error-exitcode=1"
] + command
if use_valgrind_profiler:
command = ["valgrind", "--tool=callgrind"] + command
if use_perftools_profiler:
modified_env["LD_PRELOAD"] = os.environ["PERFTOOLS_PATH"]
modified_env["CPUPROFILE"] = os.environ["PERFTOOLS_LOGFILE"]
if use_tmux:
# The command has to be created exactly as below to ensure that it
# works on all versions of tmux. (Tested with tmux 1.8-5, travis'
# version, and tmux 2.1)
command = ["tmux", "new-session", "-d", "{}".format(" ".join(command))]
process = subprocess.Popen(
command,
env=modified_env,
cwd=cwd,
stdout=stdout_file,
stderr=stderr_file)
return ProcessInfo(
process=process,
stdout_file=stdout_file.name if stdout_file is not None else None,
stderr_file=stderr_file.name if stderr_file is not None else None,
use_valgrind=use_valgrind,
use_gdb=use_gdb,
use_valgrind_profiler=use_valgrind_profiler,
use_perftools_profiler=use_perftools_profiler,
use_tmux=use_tmux) |
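A hedged usage sketch: launching a throwaway command through the helper, plus
the environment-variable overrides it checks (the process_type "dummy" and
the log path are illustrative):
with open("/tmp/dummy_out.log", "w") as out:
    info = start_ray_process(
        ["sleep", "5"],
        process_type="dummy",
        stdout_file=out,
        stderr_file=None)
# Exporting RAY_DUMMY_TMUX=1 before the call would instead wrap the command
# in `tmux new-session -d ...`; RAY_DUMMY_GDB=1 additionally requires tmux.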
Wait for a Redis server to be available.
This is accomplished by creating a Redis client and sending a random
command to the server until the command gets through.
Args:
redis_ip_address (str): The IP address of the redis server.
redis_port (int): The port of the redis server.
password (str): The password of the redis server.
num_retries (int): The number of times to try connecting with redis.
The client will sleep for one second between attempts.
Raises:
Exception: An exception is raised if we could not connect with Redis.
def wait_for_redis_to_start(redis_ip_address,
redis_port,
password=None,
num_retries=5):
"""Wait for a Redis server to be available.
This is accomplished by creating a Redis client and sending a random
command to the server until the command gets through.
Args:
redis_ip_address (str): The IP address of the redis server.
redis_port (int): The port of the redis server.
password (str): The password of the redis server.
num_retries (int): The number of times to try connecting with redis.
The client will sleep for one second between attempts.
Raises:
Exception: An exception is raised if we could not connect with Redis.
"""
redis_client = redis.StrictRedis(
host=redis_ip_address, port=redis_port, password=password)
# Wait for the Redis server to start.
counter = 0
while counter < num_retries:
try:
# Run some random command and see if it worked.
logger.info(
"Waiting for redis server at {}:{} to respond...".format(
redis_ip_address, redis_port))
redis_client.client_list()
except redis.ConnectionError:
# Wait a little bit.
time.sleep(1)
logger.info("Failed to connect to the redis server, retrying.")
counter += 1
else:
break
if counter == num_retries:
raise Exception("Unable to connect to Redis. If the Redis instance is "
"on a different machine, check that your firewall is "
"configured properly.") |
Attempt to detect the number of GPUs on this machine.
TODO(rkn): This currently assumes Nvidia GPUs and Linux.
Returns:
The number of GPUs if any were detected, otherwise 0.
def _autodetect_num_gpus():
"""Attempt to detect the number of GPUs on this machine.
TODO(rkn): This currently assumes Nvidia GPUs and Linux.
Returns:
The number of GPUs if any were detected, otherwise 0.
"""
proc_gpus_path = "/proc/driver/nvidia/gpus"
if os.path.isdir(proc_gpus_path):
return len(os.listdir(proc_gpus_path))
return 0 |
Compute the versions of Python, pyarrow, and Ray.
Returns:
A tuple containing the version information.
def _compute_version_info():
"""Compute the versions of Python, pyarrow, and Ray.
Returns:
A tuple containing the version information.
"""
ray_version = ray.__version__
python_version = ".".join(map(str, sys.version_info[:3]))
pyarrow_version = pyarrow.__version__
return ray_version, python_version, pyarrow_version |
Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
different versions of Python, pyarrow, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch.
def check_version_info(redis_client):
"""Check if various version info of this process is correct.
This will be used to detect if workers or drivers are started using
different versions of Python, pyarrow, or Ray. If the version
information is not present in Redis, then no check is done.
Args:
redis_client: A client for the primary Redis shard.
Raises:
Exception: An exception is raised if there is a version mismatch.
"""
redis_reply = redis_client.get("VERSION_INFO")
# Don't do the check if there is no version information in Redis. This
# is to make it easier to do things like start the processes by hand.
if redis_reply is None:
return
true_version_info = tuple(json.loads(ray.utils.decode(redis_reply)))
version_info = _compute_version_info()
if version_info != true_version_info:
node_ip_address = ray.services.get_node_ip_address()
error_message = ("Version mismatch: The cluster was started with:\n"
" Ray: " + true_version_info[0] + "\n"
" Python: " + true_version_info[1] + "\n"
" Pyarrow: " + str(true_version_info[2]) + "\n"
"This process on node " + node_ip_address +
" was started with:" + "\n"
" Ray: " + version_info[0] + "\n"
" Python: " + version_info[1] + "\n"
" Pyarrow: " + str(version_info[2]))
if version_info[:2] != true_version_info[:2]:
raise Exception(error_message)
else:
logger.warning(error_message) |
Start the Redis global state store.
Args:
node_ip_address: The IP address of the current node. This is only used
for recording the log filenames in Redis.
redirect_files: The list of (stdout, stderr) file pairs.
port (int): If provided, the primary Redis shard will be started on
this port.
redis_shard_ports: A list of the ports to use for the non-primary Redis
shards.
num_redis_shards (int): If provided, the number of Redis shards to
start, in addition to the primary one. The default value is one
shard.
redis_max_clients: If this is provided, Ray will attempt to configure
Redis with this maxclients number.
redirect_worker_output (bool): True if worker output should be
redirected to a file and false otherwise. Workers will have access
to this value when they start up.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
use_credis: If True, additionally load the chain-replicated libraries
into the redis servers. Defaults to None, which means its value is
set by the presence of "RAY_USE_NEW_GCS" in os.environ.
redis_max_memory: The max amount of memory (in bytes) to allow each
redis shard to use. Once the limit is exceeded, redis will start
LRU eviction of entries. This only applies to the sharded redis
tables (task, object, and profile tables). By default, this is
capped at 10GB but can be set higher.
include_java (bool): If True, the raylet backend can also support
Java worker.
Returns:
A tuple of the address for the primary Redis shard, a list of
addresses for the remaining shards, and the processes that were
started.
def start_redis(node_ip_address,
redirect_files,
port=None,
redis_shard_ports=None,
num_redis_shards=1,
redis_max_clients=None,
redirect_worker_output=False,
password=None,
use_credis=None,
redis_max_memory=None,
include_java=False):
"""Start the Redis global state store.
Args:
node_ip_address: The IP address of the current node. This is only used
for recording the log filenames in Redis.
redirect_files: The list of (stdout, stderr) file pairs.
port (int): If provided, the primary Redis shard will be started on
this port.
redis_shard_ports: A list of the ports to use for the non-primary Redis
shards.
num_redis_shards (int): If provided, the number of Redis shards to
start, in addition to the primary one. The default value is one
shard.
redis_max_clients: If this is provided, Ray will attempt to configure
Redis with this maxclients number.
redirect_worker_output (bool): True if worker output should be
redirected to a file and false otherwise. Workers will have access
to this value when they start up.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
use_credis: If True, additionally load the chain-replicated libraries
into the redis servers. Defaults to None, which means its value is
set by the presence of "RAY_USE_NEW_GCS" in os.environ.
redis_max_memory: The max amount of memory (in bytes) to allow each
redis shard to use. Once the limit is exceeded, redis will start
LRU eviction of entries. This only applies to the sharded redis
tables (task, object, and profile tables). By default, this is
capped at 10GB but can be set higher.
include_java (bool): If True, the raylet backend can also support
Java worker.
Returns:
A tuple of the address for the primary Redis shard, a list of
addresses for the remaining shards, and the processes that were
started.
"""
if len(redirect_files) != 1 + num_redis_shards:
raise ValueError("The number of redirect file pairs should be equal "
"to the number of redis shards (including the "
"primary shard) we will start.")
if redis_shard_ports is None:
redis_shard_ports = num_redis_shards * [None]
elif len(redis_shard_ports) != num_redis_shards:
raise Exception("The number of Redis shard ports does not match the "
"number of Redis shards.")
processes = []
if use_credis is None:
use_credis = ("RAY_USE_NEW_GCS" in os.environ)
if use_credis:
if password is not None:
# TODO(pschafhalter) remove this once credis supports
# authenticating Redis ports
raise Exception("Setting the `redis_password` argument is not "
"supported in credis. To run Ray with "
"password-protected Redis ports, ensure that "
"the environment variable `RAY_USE_NEW_GCS=off`.")
assert num_redis_shards == 1, (
"For now, RAY_USE_NEW_GCS supports 1 shard, and credis "
"supports 1-node chain for that shard only.")
if use_credis:
redis_executable = CREDIS_EXECUTABLE
# TODO(suquark): We need credis here because some symbols need to be
# imported from credis dynamically through dlopen when Ray is built
# with RAY_USE_NEW_GCS=on. We should remove them later for the primary
# shard.
# See src/ray/gcs/redis_module/ray_redis_module.cc
redis_modules = [CREDIS_MASTER_MODULE, REDIS_MODULE]
else:
redis_executable = REDIS_EXECUTABLE
redis_modules = [REDIS_MODULE]
redis_stdout_file, redis_stderr_file = redirect_files[0]
# Start the primary Redis shard.
port, p = _start_redis_instance(
redis_executable,
modules=redis_modules,
port=port,
password=password,
redis_max_clients=redis_max_clients,
# Below we use None to indicate no limit on the memory of the
# primary Redis shard.
redis_max_memory=None,
stdout_file=redis_stdout_file,
stderr_file=redis_stderr_file)
processes.append(p)
redis_address = address(node_ip_address, port)
# Register the number of Redis shards in the primary shard, so that clients
# know how many redis shards to expect under RedisShards.
primary_redis_client = redis.StrictRedis(
host=node_ip_address, port=port, password=password)
primary_redis_client.set("NumRedisShards", str(num_redis_shards))
# Put the redirect_worker_output bool in the Redis shard so that workers
# can access it and know whether or not to redirect their output.
primary_redis_client.set("RedirectOutput", 1
if redirect_worker_output else 0)
# put the include_java bool to primary redis-server, so that other nodes
# can access it and know whether or not to enable cross-languages.
primary_redis_client.set("INCLUDE_JAVA", 1 if include_java else 0)
# Store version information in the primary Redis shard.
_put_version_info_in_redis(primary_redis_client)
# Calculate the redis memory.
system_memory = ray.utils.get_system_memory()
if redis_max_memory is None:
redis_max_memory = min(
ray_constants.DEFAULT_REDIS_MAX_MEMORY_BYTES,
max(
int(system_memory * 0.2),
ray_constants.REDIS_MINIMUM_MEMORY_BYTES))
if redis_max_memory < ray_constants.REDIS_MINIMUM_MEMORY_BYTES:
raise ValueError("Attempting to cap Redis memory usage at {} bytes, "
"but the minimum allowed is {} bytes.".format(
redis_max_memory,
ray_constants.REDIS_MINIMUM_MEMORY_BYTES))
# Start other Redis shards. Each Redis shard logs to a separate file,
# prefixed by "redis-<shard number>".
redis_shards = []
for i in range(num_redis_shards):
redis_stdout_file, redis_stderr_file = redirect_files[i + 1]
if use_credis:
redis_executable = CREDIS_EXECUTABLE
# It is important to load the credis module BEFORE the ray module,
# as the latter contains an extern declaration that the former
# supplies.
redis_modules = [CREDIS_MEMBER_MODULE, REDIS_MODULE]
else:
redis_executable = REDIS_EXECUTABLE
redis_modules = [REDIS_MODULE]
redis_shard_port, p = _start_redis_instance(
redis_executable,
modules=redis_modules,
port=redis_shard_ports[i],
password=password,
redis_max_clients=redis_max_clients,
redis_max_memory=redis_max_memory,
stdout_file=redis_stdout_file,
stderr_file=redis_stderr_file)
processes.append(p)
shard_address = address(node_ip_address, redis_shard_port)
redis_shards.append(shard_address)
# Store redis shard information in the primary redis shard.
primary_redis_client.rpush("RedisShards", shard_address)
if use_credis:
# Configure the chain state. The way it is intended to work is
# the following:
#
# PRIMARY_SHARD
#
# SHARD_1 (master replica) -> SHARD_1 (member replica)
# -> SHARD_1 (member replica)
#
# SHARD_2 (master replica) -> SHARD_2 (member replica)
# -> SHARD_2 (member replica)
# ...
#
#
# If we have credis members in future, their modules should be:
# [CREDIS_MEMBER_MODULE, REDIS_MODULE], and they will be initialized by
# execute_command("MEMBER.CONNECT_TO_MASTER", node_ip_address, port)
#
# Currently we have num_redis_shards == 1, so only one chain will be
# created, and the chain only contains master.
# TODO(suquark): Currently, this is not correct because we are
# using the master replica as the primary shard. This should be
# fixed later. I had tried to fix it but failed because of heartbeat
# issues.
primary_client = redis.StrictRedis(
host=node_ip_address, port=port, password=password)
shard_client = redis.StrictRedis(
host=node_ip_address, port=redis_shard_port, password=password)
primary_client.execute_command("MASTER.ADD", node_ip_address,
redis_shard_port)
shard_client.execute_command("MEMBER.CONNECT_TO_MASTER",
node_ip_address, port)
return redis_address, redis_shards, processes |
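A worked sketch of the redis_max_memory default computed above: on a hypothetical node with 64 GB of system memory, 20% of system memory exceeds the 10 GB cap mentioned in the docstring, so the cap wins. The constant values below are illustrative stand-ins for ray_constants, not the real values.
# Illustrative values standing in for ray_constants; not the real constants.
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10 * 10**9   # the 10 GB cap
REDIS_MINIMUM_MEMORY_BYTES = 10 * 10**6       # assumed minimum
system_memory = 64 * 10**9                    # hypothetical 64 GB node

redis_max_memory = min(
    DEFAULT_REDIS_MAX_MEMORY_BYTES,
    max(int(system_memory * 0.2), REDIS_MINIMUM_MEMORY_BYTES))
print(redis_max_memory)  # 10000000000 -> 0.2 * 64 GB = 12.8 GB, capped at 10 GB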
Start a single Redis server.
Notes:
If "port" is not None, then we will only use this port and try
only once. Otherwise, random ports will be used and the maximum
retries count is "num_retries".
Args:
executable (str): Full path of the redis-server executable.
modules (list of str): A list of pathnames, pointing to the redis
module(s) that will be loaded in this redis server.
port (int): If provided, start a Redis server with this port.
redis_max_clients: If this is provided, Ray will attempt to configure
Redis with this maxclients number.
num_retries (int): The number of times to attempt to start Redis. If a
port is provided, this defaults to 1.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
redis_max_memory: The max amount of memory (in bytes) to allow redis
to use, or None for no limit. Once the limit is exceeded, redis
will start LRU eviction of entries.
Returns:
A tuple of the port used by Redis and ProcessInfo for the process that
was started. If a port is passed in, then the returned port value
is the same.
Raises:
Exception: An exception is raised if Redis could not be started.
def _start_redis_instance(executable,
modules,
port=None,
redis_max_clients=None,
num_retries=20,
stdout_file=None,
stderr_file=None,
password=None,
redis_max_memory=None):
"""Start a single Redis server.
Notes:
If "port" is not None, then we will only use this port and try
only once. Otherwise, random ports will be used and the maximum
retries count is "num_retries".
Args:
executable (str): Full path of the redis-server executable.
modules (list of str): A list of pathnames, pointing to the redis
module(s) that will be loaded in this redis server.
port (int): If provided, start a Redis server with this port.
redis_max_clients: If this is provided, Ray will attempt to configure
Redis with this maxclients number.
num_retries (int): The number of times to attempt to start Redis. If a
port is provided, this defaults to 1.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
password (str): Prevents external clients without the password
from connecting to Redis if provided.
redis_max_memory: The max amount of memory (in bytes) to allow redis
to use, or None for no limit. Once the limit is exceeded, redis
will start LRU eviction of entries.
Returns:
A tuple of the port used by Redis and ProcessInfo for the process that
was started. If a port is passed in, then the returned port value
is the same.
Raises:
Exception: An exception is raised if Redis could not be started.
"""
assert os.path.isfile(executable)
for module in modules:
assert os.path.isfile(module)
counter = 0
if port is not None:
# If a port is specified, then try only once to connect.
# This ensures that we will use the given port.
num_retries = 1
else:
port = new_port()
load_module_args = []
for module in modules:
load_module_args += ["--loadmodule", module]
while counter < num_retries:
if counter > 0:
logger.warning("Redis failed to start, retrying now.")
# Construct the command to start the Redis server.
command = [executable]
if password:
command += ["--requirepass", password]
command += (
["--port", str(port), "--loglevel", "warning"] + load_module_args)
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_REDIS_SERVER,
stdout_file=stdout_file,
stderr_file=stderr_file)
time.sleep(0.1)
# Check if Redis successfully started (or at least if the executable
# did not exit within 0.1 seconds).
if process_info.process.poll() is None:
break
port = new_port()
counter += 1
if counter == num_retries:
raise Exception("Couldn't start Redis. Check log files: {} {}".format(
stdout_file.name, stderr_file.name))
# Create a Redis client just for configuring Redis.
redis_client = redis.StrictRedis(
host="127.0.0.1", port=port, password=password)
# Wait for the Redis server to start.
wait_for_redis_to_start("127.0.0.1", port, password=password)
# Configure Redis to generate keyspace notifications. TODO(rkn): Change
# this to only generate notifications for the export keys.
redis_client.config_set("notify-keyspace-events", "Kl")
# Configure Redis to not run in protected mode so that processes on other
# hosts can connect to it. TODO(rkn): Do this in a more secure way.
redis_client.config_set("protected-mode", "no")
# Discard old task and object metadata.
if redis_max_memory is not None:
redis_client.config_set("maxmemory", str(redis_max_memory))
redis_client.config_set("maxmemory-policy", "allkeys-lru")
redis_client.config_set("maxmemory-samples", "10")
logger.info("Starting Redis shard with {} GB max memory.".format(
round(redis_max_memory / 1e9, 2)))
# If redis_max_clients is provided, attempt to raise the maximum number
# of Redis clients.
if redis_max_clients is not None:
redis_client.config_set("maxclients", str(redis_max_clients))
else:
# If redis_max_clients is not provided, determine the current ulimit.
# We will use this to attempt to raise the maximum number of Redis
# clients.
current_max_clients = int(
redis_client.config_get("maxclients")["maxclients"])
# The below command should be the same as doing ulimit -n.
ulimit_n = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# The quantity redis_client_buffer appears to be the required buffer
# between the maximum number of redis clients and ulimit -n. That is,
# if ulimit -n returns 10000, then we can set maxclients to
# 10000 - redis_client_buffer.
redis_client_buffer = 32
if current_max_clients < ulimit_n - redis_client_buffer:
redis_client.config_set("maxclients",
ulimit_n - redis_client_buffer)
# Increase the hard and soft limits for the redis client pubsub buffer to
# 128MB. This is a hack to make it less likely for pubsub messages to be
# dropped and for pubsub connections to therefore be killed.
cur_config = (redis_client.config_get("client-output-buffer-limit")[
"client-output-buffer-limit"])
cur_config_list = cur_config.split()
assert len(cur_config_list) == 12
cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
redis_client.config_set("client-output-buffer-limit",
" ".join(cur_config_list))
# Put a time stamp in Redis to indicate when it was started.
redis_client.set("redis_start_time", time.time())
return port, process_info |
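To illustrate the pubsub buffer-limit rewrite near the end of this function, here is a self-contained sketch; the starting string below is Redis's stock client-output-buffer-limit default and is used only as an illustrative input.
# Redis's stock default for client-output-buffer-limit (illustrative input).
cur_config = ("normal 0 0 0 "
              "slave 268435456 67108864 60 "
              "pubsub 33554432 8388608 60")
cur_config_list = cur_config.split()
assert len(cur_config_list) == 12
# Raise the pubsub hard and soft limits to 128MB, as done above.
cur_config_list[8:] = ["pubsub", "134217728", "134217728", "60"]
print(" ".join(cur_config_list))
# normal 0 0 0 slave 268435456 67108864 60 pubsub 134217728 134217728 60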
Start a log monitor process.
Args:
redis_address (str): The address of the Redis instance.
logs_dir (str): The directory of logging files.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
def start_log_monitor(redis_address,
logs_dir,
stdout_file=None,
stderr_file=None,
redis_password=None):
"""Start a log monitor process.
Args:
redis_address (str): The address of the Redis instance.
logs_dir (str): The directory of logging files.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
"""
log_monitor_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "log_monitor.py")
command = [
sys.executable, "-u", log_monitor_filepath,
"--redis-address={}".format(redis_address),
"--logs-dir={}".format(logs_dir)
]
if redis_password:
command += ["--redis-password", redis_password]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_LOG_MONITOR,
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info |
Start a reporter process.
Args:
redis_address (str): The address of the Redis instance.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
def start_reporter(redis_address,
stdout_file=None,
stderr_file=None,
redis_password=None):
"""Start a reporter process.
Args:
redis_address (str): The address of the Redis instance.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
"""
reporter_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "reporter.py")
command = [
sys.executable, "-u", reporter_filepath,
"--redis-address={}".format(redis_address)
]
if redis_password:
command += ["--redis-password", redis_password]
try:
import psutil # noqa: F401
except ImportError:
logger.warning("Failed to start the reporter. The reporter requires "
"'pip install psutil'.")
return None
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_REPORTER,
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info |
Start a dashboard process.
Args:
redis_address (str): The address of the Redis instance.
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
A tuple of the dashboard URL and ProcessInfo for the process that was
started.
def start_dashboard(redis_address,
temp_dir,
stdout_file=None,
stderr_file=None,
redis_password=None):
"""Start a dashboard process.
Args:
redis_address (str): The address of the Redis instance.
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
Returns:
A tuple of the dashboard URL and ProcessInfo for the process that was
started.
"""
port = 8080
while True:
try:
port_test_socket = socket.socket()
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
break
except socket.error:
port += 1
token = ray.utils.decode(binascii.hexlify(os.urandom(24)))
dashboard_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "dashboard/dashboard.py")
command = [
sys.executable,
"-u",
dashboard_filepath,
"--redis-address={}".format(redis_address),
"--http-port={}".format(port),
"--token={}".format(token),
"--temp-dir={}".format(temp_dir),
]
if redis_password:
command += ["--redis-password", redis_password]
if sys.version_info <= (3, 0):
return None, None
try:
import aiohttp # noqa: F401
import psutil # noqa: F401
except ImportError:
raise ImportError(
"Failed to start the dashboard. The dashboard requires Python 3 "
"as well as 'pip install aiohttp psutil'.")
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_DASHBOARD,
stdout_file=stdout_file,
stderr_file=stderr_file)
dashboard_url = "http://{}:{}/?token={}".format(
ray.services.get_node_ip_address(), port, token)
print("\n" + "=" * 70)
print("View the dashboard at {}".format(dashboard_url))
print("=" * 70 + "\n")
return dashboard_url, process_info |
Sanity check a resource dictionary and add sensible defaults.
Args:
num_cpus: The number of CPUs.
num_gpus: The number of GPUs.
resources: A dictionary mapping resource names to resource quantities.
Returns:
A new resource dictionary.
def check_and_update_resources(num_cpus, num_gpus, resources):
"""Sanity check a resource dictionary and add sensible defaults.
Args:
num_cpus: The number of CPUs.
num_gpus: The number of GPUs.
resources: A dictionary mapping resource names to resource quantities.
Returns:
A new resource dictionary.
"""
if resources is None:
resources = {}
resources = resources.copy()
assert "CPU" not in resources
assert "GPU" not in resources
if num_cpus is not None:
resources["CPU"] = num_cpus
if num_gpus is not None:
resources["GPU"] = num_gpus
if "CPU" not in resources:
# By default, use the number of hardware execution threads for the
# number of cores.
resources["CPU"] = multiprocessing.cpu_count()
# See if CUDA_VISIBLE_DEVICES has already been set.
gpu_ids = ray.utils.get_cuda_visible_devices()
# Check that the number of GPUs that the raylet wants doesn't
# exceed the amount allowed by CUDA_VISIBLE_DEVICES.
if ("GPU" in resources and gpu_ids is not None
and resources["GPU"] > len(gpu_ids)):
raise Exception("Attempting to start raylet with {} GPUs, "
"but CUDA_VISIBLE_DEVICES contains {}.".format(
resources["GPU"], gpu_ids))
if "GPU" not in resources:
# Try to automatically detect the number of GPUs.
resources["GPU"] = _autodetect_num_gpus()
# Don't use more GPUs than allowed by CUDA_VISIBLE_DEVICES.
if gpu_ids is not None:
resources["GPU"] = min(resources["GPU"], len(gpu_ids))
resources = {
resource_label: resource_quantity
for resource_label, resource_quantity in resources.items()
if resource_quantity != 0
}
# Check types.
for _, resource_quantity in resources.items():
assert (isinstance(resource_quantity, int)
or isinstance(resource_quantity, float))
if (isinstance(resource_quantity, float)
and not resource_quantity.is_integer()):
raise ValueError(
"Resource quantities must all be whole numbers. Received {}.".
format(resources))
if resource_quantity < 0:
raise ValueError(
"Resource quantities must be nonnegative. Received {}.".format(
resources))
if resource_quantity > ray_constants.MAX_RESOURCE_QUANTITY:
raise ValueError("Resource quantities must be at most {}.".format(
ray_constants.MAX_RESOURCE_QUANTITY))
return resources |
Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
num_cpus: The CPUs allocated for this raylet.
num_gpus: The GPUs allocated for this raylet.
resources: The custom resources allocated for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
node_manager_port: The port to use for the node manager. If this is
None, then the node manager will choose its own port.
redis_password: The password to use when connecting to Redis.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
include_java (bool): If True, the raylet backend can also support
Java workers.
java_worker_options (str): The command options for Java worker.
Returns:
ProcessInfo for the process that was started.
def start_raylet(redis_address,
node_ip_address,
raylet_name,
plasma_store_name,
worker_path,
temp_dir,
num_cpus=None,
num_gpus=None,
resources=None,
object_manager_port=None,
node_manager_port=None,
redis_password=None,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
config=None,
include_java=False,
java_worker_options=None,
load_code_from_local=False):
"""Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
num_cpus: The CPUs allocated for this raylet.
num_gpus: The GPUs allocated for this raylet.
resources: The custom resources allocated for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
node_manager_port: The port to use for the node manager. If this is
None, then the node manager will choose its own port.
redis_password: The password to use when connecting to Redis.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
include_java (bool): If True, the raylet backend can also support
Java workers.
java_worker_options (str): The command options for Java worker.
Returns:
ProcessInfo for the process that was started.
"""
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
if use_valgrind and use_profiler:
raise Exception("Cannot use valgrind and profiler at the same time.")
num_initial_workers = (num_cpus if num_cpus is not None else
multiprocessing.cpu_count())
static_resources = check_and_update_resources(num_cpus, num_gpus,
resources)
# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
1, min(multiprocessing.cpu_count(), num_cpus_static))
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()])
gcs_ip_address, gcs_port = redis_address.split(":")
if include_java is True:
java_worker_options = (java_worker_options
or DEFAULT_JAVA_WORKER_OPTIONS)
java_worker_command = build_java_worker_command(
java_worker_options,
redis_address,
plasma_store_name,
raylet_name,
redis_password,
os.path.join(temp_dir, "sockets"),
)
else:
java_worker_command = ""
# Create the command that the Raylet will use to start workers.
start_worker_command = ("{} {} "
"--node-ip-address={} "
"--object-store-name={} "
"--raylet-name={} "
"--redis-address={} "
"--temp-dir={}".format(
sys.executable, worker_path, node_ip_address,
plasma_store_name, raylet_name, redis_address,
temp_dir))
if redis_password:
start_worker_command += " --redis-password {}".format(redis_password)
# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
object_manager_port = 0
# If the node manager port is None, then use 0 to cause the node manager
# to choose its own port.
if node_manager_port is None:
node_manager_port = 0
if load_code_from_local:
start_worker_command += " --load-code-from-local "
command = [
RAYLET_EXECUTABLE,
"--raylet_socket_name={}".format(raylet_name),
"--store_socket_name={}".format(plasma_store_name),
"--object_manager_port={}".format(object_manager_port),
"--node_manager_port={}".format(node_manager_port),
"--node_ip_address={}".format(node_ip_address),
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--num_initial_workers={}".format(num_initial_workers),
"--maximum_startup_concurrency={}".format(maximum_startup_concurrency),
"--static_resource_list={}".format(resource_argument),
"--config_list={}".format(config_str),
"--python_worker_command={}".format(start_worker_command),
"--java_worker_command={}".format(java_worker_command),
"--redis_password={}".format(redis_password or ""),
"--temp_dir={}".format(temp_dir),
]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET,
use_valgrind=use_valgrind,
use_gdb=False,
use_valgrind_profiler=use_profiler,
use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info |
This method assembles the command used to start a Java worker.
Args:
java_worker_options (str): The command options for Java worker.
redis_address (str): Redis address of GCS.
plasma_store_name (str): The name of the plasma store socket to connect
to.
raylet_name (str): The name of the raylet socket to create.
redis_password (str): The password used to connect to Redis.
temp_dir (str): The path of the temporary directory Ray will use.
Returns:
The command string for starting Java worker.
def build_java_worker_command(
java_worker_options,
redis_address,
plasma_store_name,
raylet_name,
redis_password,
temp_dir,
):
"""This method assembles the command used to start a Java worker.
Args:
java_worker_options (str): The command options for Java worker.
redis_address (str): Redis address of GCS.
plasma_store_name (str): The name of the plasma store socket to connect
to.
raylet_name (str): The name of the raylet socket to create.
redis_password (str): The password used to connect to Redis.
temp_dir (str): The path of the temporary directory Ray will use.
Returns:
The command string for starting Java worker.
"""
assert java_worker_options is not None
command = "java ".format(java_worker_options)
if redis_address is not None:
command += "-Dray.redis.address={} ".format(redis_address)
if plasma_store_name is not None:
command += (
"-Dray.object-store.socket-name={} ".format(plasma_store_name))
if raylet_name is not None:
command += "-Dray.raylet.socket-name={} ".format(raylet_name)
if redis_password is not None:
command += "-Dray.redis.password={} ".format(redis_password)
command += "-Dray.home={} ".format(RAY_HOME)
# TODO(suquark): We should use temp_dir as the input of a java worker.
command += "-Dray.log-dir={} ".format(os.path.join(temp_dir, "sockets"))
if java_worker_options:
# Put `java_worker_options` in the last, so it can overwrite the
# above options.
command += java_worker_options + " "
command += "org.ray.runtime.runner.worker.DefaultWorker"
return command |
Figure out how to configure the plasma object store.
This will determine which directory to use for the plasma store (e.g.,
/tmp or /dev/shm) and how much memory to start the store with. On Linux,
we will try to use /dev/shm unless the shared memory file system is too
small, in which case we will fall back to /tmp. If any of the object store
memory or plasma directory parameters are specified by the user, then those
values will be preserved.
Args:
object_store_memory (int): The user-specified object store memory
parameter.
plasma_directory (str): The user-specified plasma directory parameter.
huge_pages (bool): The user-specified huge pages parameter.
Returns:
A tuple of the object store memory to use and the plasma directory to
use. If either of these values is specified by the user, then that
value will be preserved.
def determine_plasma_store_config(object_store_memory=None,
plasma_directory=None,
huge_pages=False):
"""Figure out how to configure the plasma object store.
This will determine which directory to use for the plasma store (e.g.,
/tmp or /dev/shm) and how much memory to start the store with. On Linux,
we will try to use /dev/shm unless the shared memory file system is too
small, in which case we will fall back to /tmp. If any of the object store
memory or plasma directory parameters are specified by the user, then those
values will be preserved.
Args:
object_store_memory (int): The user-specified object store memory
parameter.
plasma_directory (str): The user-specified plasma directory parameter.
huge_pages (bool): The user-specified huge pages parameter.
Returns:
A tuple of the object store memory to use and the plasma directory to
use. If either of these values is specified by the user, then that
value will be preserved.
"""
system_memory = ray.utils.get_system_memory()
# Choose a default object store size.
if object_store_memory is None:
object_store_memory = int(system_memory * 0.3)
# Cap memory to avoid memory waste and perf issues on large nodes
if (object_store_memory >
ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES):
logger.warning(
"Warning: Capping object memory store to {}GB. ".format(
ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES // 1e9)
+ "To increase this further, specify `object_store_memory` "
"when calling ray.init() or ray start.")
object_store_memory = (
ray_constants.DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES)
# Determine which directory to use. By default, use /tmp on MacOS and
# /dev/shm on Linux, unless the shared-memory file system is too small,
# in which case we default to /tmp on Linux.
if plasma_directory is None:
if sys.platform == "linux" or sys.platform == "linux2":
shm_avail = ray.utils.get_shared_memory_bytes()
# Compare the requested memory size to the memory available in
# /dev/shm.
if shm_avail > object_store_memory:
plasma_directory = "/dev/shm"
else:
plasma_directory = "/tmp"
logger.warning(
"WARNING: The object store is using /tmp instead of "
"/dev/shm because /dev/shm has only {} bytes available. "
"This may slow down performance! You may be able to free "
"up space by deleting files in /dev/shm or terminating "
"any running plasma_store_server processes. If you are "
"inside a Docker container, you may need to pass an "
"argument with the flag '--shm-size' to 'docker run'.".
format(shm_avail))
else:
plasma_directory = "/tmp"
# Do some sanity checks.
if object_store_memory > system_memory:
raise Exception(
"The requested object store memory size is greater "
"than the total available memory.")
else:
plasma_directory = os.path.abspath(plasma_directory)
logger.warning("WARNING: object_store_memory is not verified when "
"plasma_directory is set.")
if not os.path.isdir(plasma_directory):
raise Exception(
"The file {} does not exist or is not a directory.".format(
plasma_directory))
return object_store_memory, plasma_directory |
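A worked sketch of the default sizing above: on a hypothetical 16 GB node, the default is 30% of system memory, then capped. The cap constant below is an illustrative stand-in for ray_constants.
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 20 * 10**9  # illustrative cap
system_memory = 16 * 10**9                          # hypothetical 16 GB node

object_store_memory = int(system_memory * 0.3)      # 30% default
object_store_memory = min(object_store_memory,
                          DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES)
assert object_store_memory <= system_memory
print(object_store_memory)                          # 4800000000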
Start a plasma store process.
Args:
plasma_store_memory (int): The amount of memory in bytes to start the
plasma store with.
use_valgrind (bool): True if the plasma store should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the plasma store should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: a boolean flag indicating whether to start the
Object Store with hugetlbfs support. Requires plasma_directory.
socket_name (str): If provided, it will specify the socket
name used by the plasma store.
Return:
ProcessInfo for the plasma store process that was started.
def _start_plasma_store(plasma_store_memory,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
plasma_directory=None,
huge_pages=False,
socket_name=None):
"""Start a plasma store process.
Args:
plasma_store_memory (int): The amount of memory in bytes to start the
plasma store with.
use_valgrind (bool): True if the plasma store should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the plasma store should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: a boolean flag indicating whether to start the
Object Store with hugetlbfs support. Requires plasma_directory.
socket_name (str): If provided, it will specify the socket
name used by the plasma store.
Return:
ProcessInfo for the plasma store process that was started.
"""
if use_valgrind and use_profiler:
raise Exception("Cannot use valgrind and profiler at the same time.")
if huge_pages and not (sys.platform == "linux"
or sys.platform == "linux2"):
raise Exception("The huge_pages argument is only supported on "
"Linux.")
if huge_pages and plasma_directory is None:
raise Exception("If huge_pages is True, then the "
"plasma_directory argument must be provided.")
if not isinstance(plasma_store_memory, int):
raise Exception("plasma_store_memory should be an integer.")
command = [
PLASMA_STORE_EXECUTABLE, "-s", socket_name, "-m",
str(plasma_store_memory)
]
if plasma_directory is not None:
command += ["-d", plasma_directory]
if huge_pages:
command += ["-h"]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_PLASMA_STORE,
use_valgrind=use_valgrind,
use_valgrind_profiler=use_profiler,
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info |
This method starts an object store process.
Args:
stdout_file: A file handle opened for writing to redirect stdout
to. If no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr
to. If no redirection should happen, then this should be None.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
Returns:
ProcessInfo for the process that was started.
def start_plasma_store(stdout_file=None,
stderr_file=None,
object_store_memory=None,
plasma_directory=None,
huge_pages=False,
plasma_store_socket_name=None):
"""This method starts an object store process.
Args:
stdout_file: A file handle opened for writing to redirect stdout
to. If no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr
to. If no redirection should happen, then this should be None.
object_store_memory: The amount of memory (in bytes) to start the
object store with.
plasma_directory: A directory where the Plasma memory mapped files will
be created.
huge_pages: Boolean flag indicating whether to start the Object
Store with hugetlbfs support. Requires plasma_directory.
Returns:
ProcessInfo for the process that was started.
"""
object_store_memory, plasma_directory = determine_plasma_store_config(
object_store_memory, plasma_directory, huge_pages)
if object_store_memory < ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES:
raise ValueError("Attempting to cap object store memory usage at {} "
"bytes, but the minimum allowed is {} bytes.".format(
object_store_memory,
ray_constants.OBJECT_STORE_MINIMUM_MEMORY_BYTES))
# Print the object store memory using two decimal places.
object_store_memory_str = (object_store_memory / 10**7) / 10**2
logger.info("Starting the Plasma object store with {} GB memory "
"using {}.".format(
round(object_store_memory_str, 2), plasma_directory))
# Start the Plasma store.
process_info = _start_plasma_store(
object_store_memory,
use_profiler=RUN_PLASMA_STORE_PROFILER,
stdout_file=stdout_file,
stderr_file=stderr_file,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
socket_name=plasma_store_socket_name)
return process_info |
This method starts a worker process.
Args:
node_ip_address (str): The IP address of the node that this worker is
running on.
object_store_name (str): The socket name of the object store.
raylet_name (str): The socket name of the raylet server.
redis_address (str): The address that the Redis server is listening on.
worker_path (str): The path of the source code which the worker process
will run.
temp_dir (str): The path of the temp dir.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
Returns:
ProcessInfo for the process that was started.
def start_worker(node_ip_address,
object_store_name,
raylet_name,
redis_address,
worker_path,
temp_dir,
stdout_file=None,
stderr_file=None):
"""This method starts a worker process.
Args:
node_ip_address (str): The IP address of the node that this worker is
running on.
object_store_name (str): The socket name of the object store.
raylet_name (str): The socket name of the raylet server.
redis_address (str): The address that the Redis server is listening on.
worker_path (str): The path of the source code which the worker process
will run.
temp_dir (str): The path of the temp dir.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
Returns:
ProcessInfo for the process that was started.
"""
command = [
sys.executable, "-u", worker_path,
"--node-ip-address=" + node_ip_address,
"--object-store-name=" + object_store_name,
"--raylet-name=" + raylet_name,
"--redis-address=" + str(redis_address), "--temp-dir=" + temp_dir
]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_WORKER,
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info |
Run a process to monitor the other processes.
Args:
redis_address (str): The address that the Redis server is listening on.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
autoscaling_config: path to autoscaling config file.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
def start_monitor(redis_address,
stdout_file=None,
stderr_file=None,
autoscaling_config=None,
redis_password=None):
"""Run a process to monitor the other processes.
Args:
redis_address (str): The address that the Redis server is listening on.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
autoscaling_config: path to autoscaling config file.
redis_password (str): The password of the redis server.
Returns:
ProcessInfo for the process that was started.
"""
monitor_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "monitor.py")
command = [
sys.executable, "-u", monitor_path,
"--redis-address=" + str(redis_address)
]
if autoscaling_config:
command.append("--autoscaling-config=" + str(autoscaling_config))
if redis_password:
command.append("--redis-password=" + redis_password)
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_MONITOR,
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info |
Run a process to monitor the other processes.
Args:
redis_address (str): The address that the Redis server is listening on.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
config (dict|None): Optional configuration that will
override defaults in RayConfig.
Returns:
ProcessInfo for the process that was started.
def start_raylet_monitor(redis_address,
stdout_file=None,
stderr_file=None,
redis_password=None,
config=None):
"""Run a process to monitor the other processes.
Args:
redis_address (str): The address that the Redis server is listening on.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
config (dict|None): Optional configuration that will
override defaults in RayConfig.
Returns:
ProcessInfo for the process that was started.
"""
gcs_ip_address, gcs_port = redis_address.split(":")
redis_password = redis_password or ""
config = config or {}
config_str = ",".join(["{},{}".format(*kv) for kv in config.items()])
command = [
RAYLET_MONITOR_EXECUTABLE,
"--redis_address={}".format(gcs_ip_address),
"--redis_port={}".format(gcs_port),
"--config_list={}".format(config_str),
]
if redis_password:
command += [redis_password]
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET_MONITOR,
stdout_file=stdout_file,
stderr_file=stderr_file)
return process_info |
Unpacks Dict and Tuple space observations into their original form.
This is needed since we flatten Dict and Tuple observations in transit.
Before sending them to the model though, we should unflatten them into
Dicts or Tuples of tensors.
Arguments:
obs: The flattened observation tensor.
obs_space: The flattened obs space. If this has the `original_space`
attribute, we will unflatten the tensor to that shape.
tensorlib: The library used to unflatten (reshape) the array/tensor.
Returns:
single tensor or dict / tuple of tensors matching the original
observation space.
def restore_original_dimensions(obs, obs_space, tensorlib=tf):
"""Unpacks Dict and Tuple space observations into their original form.
This is needed since we flatten Dict and Tuple observations in transit.
Before sending them to the model though, we should unflatten them into
Dicts or Tuples of tensors.
Arguments:
obs: The flattened observation tensor.
obs_space: The flattened obs space. If this has the `original_space`
attribute, we will unflatten the tensor to that shape.
tensorlib: The library used to unflatten (reshape) the array/tensor.
Returns:
single tensor or dict / tuple of tensors matching the original
observation space.
"""
if hasattr(obs_space, "original_space"):
return _unpack_obs(obs, obs_space.original_space, tensorlib=tensorlib)
else:
return obs |
Unpack a flattened Dict or Tuple observation array/tensor.
Arguments:
obs: The flattened observation tensor
space: The original space prior to flattening
tensorlib: The library used to unflatten (reshape) the array/tensor
def _unpack_obs(obs, space, tensorlib=tf):
"""Unpack a flattened Dict or Tuple observation array/tensor.
Arguments:
obs: The flattened observation tensor
space: The original space prior to flattening
tensorlib: The library used to unflatten (reshape) the array/tensor
"""
if (isinstance(space, gym.spaces.Dict)
or isinstance(space, gym.spaces.Tuple)):
prep = get_preprocessor(space)(space)
if len(obs.shape) != 2 or obs.shape[1] != prep.shape[0]:
raise ValueError(
"Expected flattened obs shape of [None, {}], got {}".format(
prep.shape[0], obs.shape))
assert len(prep.preprocessors) == len(space.spaces), \
(len(prep.preprocessors) == len(space.spaces))
offset = 0
if isinstance(space, gym.spaces.Tuple):
u = []
for p, v in zip(prep.preprocessors, space.spaces):
obs_slice = obs[:, offset:offset + p.size]
offset += p.size
u.append(
_unpack_obs(
tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
v,
tensorlib=tensorlib))
else:
u = OrderedDict()
for p, (k, v) in zip(prep.preprocessors, space.spaces.items()):
obs_slice = obs[:, offset:offset + p.size]
offset += p.size
u[k] = _unpack_obs(
tensorlib.reshape(obs_slice, [-1] + list(p.shape)),
v,
tensorlib=tensorlib)
return u
else:
return obs |
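A minimal, self-contained sketch of the slicing idea above for a Tuple of two flat components (sizes 3 and 2), using plain NumPy instead of the RLlib preprocessors; all names and sizes here are illustrative.
import numpy as np

# Two flattened components of sizes 3 and 2, concatenated along axis 1.
batch = 4
part_a = np.arange(batch * 3, dtype=np.float32).reshape(batch, 3)
part_b = np.arange(batch * 2, dtype=np.float32).reshape(batch, 2)
obs = np.concatenate([part_a, part_b], axis=1)     # shape (4, 5)

# Unpack by walking an offset over the component sizes, as _unpack_obs does.
sizes, shapes = [3, 2], [(3, ), (2, )]
offset, unpacked = 0, []
for size, shape in zip(sizes, shapes):
    obs_slice = obs[:, offset:offset + size]
    offset += size
    unpacked.append(obs_slice.reshape((-1, ) + shape))

assert np.array_equal(unpacked[0], part_a)
assert np.array_equal(unpacked[1], part_b)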
Convert the Ray node name tag to the AWS-specific 'Name' tag.
def to_aws_format(tags):
"""Convert the Ray node name tag to the AWS-specific 'Name' tag."""
if TAG_RAY_NODE_NAME in tags:
tags["Name"] = tags[TAG_RAY_NODE_NAME]
del tags[TAG_RAY_NODE_NAME]
return tags |
Update the AWS tags for a cluster periodically.
The purpose of this loop is to avoid excessive EC2 calls when a large
number of nodes are being launched simultaneously.
def _node_tag_update_loop(self):
""" Update the AWS tags for a cluster periodically.
The purpose of this loop is to avoid excessive EC2 calls when a large
number of nodes are being launched simultaneously.
"""
while True:
self.tag_cache_update_event.wait()
self.tag_cache_update_event.clear()
batch_updates = defaultdict(list)
with self.tag_cache_lock:
for node_id, tags in self.tag_cache_pending.items():
for x in tags.items():
batch_updates[x].append(node_id)
self.tag_cache[node_id].update(tags)
self.tag_cache_pending = {}
for (k, v), node_ids in batch_updates.items():
m = "Set tag {}={} on {}".format(k, v, node_ids)
with LogTimer("AWSNodeProvider: {}".format(m)):
if k == TAG_RAY_NODE_NAME:
k = "Name"
self.ec2.meta.client.create_tags(
Resources=node_ids,
Tags=[{
"Key": k,
"Value": v
}],
)
self.tag_cache_kill_event.wait(timeout=5)
if self.tag_cache_kill_event.is_set():
return |
Refresh and get info for this node, updating the cache.
def _get_node(self, node_id):
"""Refresh and get info for this node, updating the cache."""
self.non_terminated_nodes({}) # Side effect: updates cache
if node_id in self.cached_nodes:
return self.cached_nodes[node_id]
# Node not in {pending, running} -- retry with a point query. This
# usually means the node was recently preempted or terminated.
matches = list(self.ec2.instances.filter(InstanceIds=[node_id]))
assert len(matches) == 1, "Invalid instance id {}".format(node_id)
return matches[0] |
Validates export_formats.
Raises:
ValueError if the format is unknown.
def validate(export_formats):
"""Validates export_formats.
Raises:
ValueError if the format is unknown.
"""
for i in range(len(export_formats)):
export_formats[i] = export_formats[i].strip().lower()
if export_formats[i] not in [
ExportFormat.CHECKPOINT, ExportFormat.MODEL
]:
raise TuneError("Unsupported export format: " +
export_formats[i]) |
Init logger.
def init_logger(self):
"""Init logger."""
if not self.result_logger:
if not os.path.exists(self.local_dir):
os.makedirs(self.local_dir)
if not self.logdir:
self.logdir = tempfile.mkdtemp(
prefix="{}_{}".format(
str(self)[:MAX_LEN_IDENTIFIER], date_str()),
dir=self.local_dir)
elif not os.path.exists(self.logdir):
os.makedirs(self.logdir)
self.result_logger = UnifiedLogger(
self.config,
self.logdir,
upload_uri=self.upload_dir,
loggers=self.loggers,
sync_function=self.sync_function) |
EXPERIMENTAL: Updates the resource requirements.
Should only be called when the trial is not running.
Raises:
ValueError if trial status is running.
def update_resources(self, cpu, gpu, **kwargs):
"""EXPERIMENTAL: Updates the resource requirements.
Should only be called when the trial is not running.
Raises:
ValueError if trial status is running.
"""
if self.status is Trial.RUNNING:
raise ValueError("Cannot update resources while Trial is running.")
self.resources = Resources(cpu, gpu, **kwargs) |
Whether the given result meets this trial's stopping criteria.
def should_stop(self, result):
"""Whether the given result meets this trial's stopping criteria."""
if result.get(DONE):
return True
for criteria, stop_value in self.stopping_criterion.items():
if criteria not in result:
raise TuneError(
"Stopping criteria {} not provided in result {}.".format(
criteria, result))
if result[criteria] >= stop_value:
return True
return False |
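As an illustration of the stopping check above, a small self-contained sketch with a hypothetical stopping_criterion and result dict (the key names follow Tune's result conventions):
# Hypothetical stopping criteria and a result dict for one iteration.
stopping_criterion = {"training_iteration": 100, "episode_reward_mean": 200}
result = {"done": False, "training_iteration": 37,
          "episode_reward_mean": 201.5}

should_stop = result.get("done") or any(
    result[criteria] >= stop_value
    for criteria, stop_value in stopping_criterion.items())
assert should_stop  # episode_reward_mean crossed its threshold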
Whether this trial is due for checkpointing.
def should_checkpoint(self):
"""Whether this trial is due for checkpointing."""
result = self.last_result or {}
if result.get(DONE) and self.checkpoint_at_end:
return True
if self.checkpoint_freq:
return result.get(TRAINING_ITERATION,
0) % self.checkpoint_freq == 0
else:
return False |
Returns a progress message for printing out to the console.
def progress_string(self):
"""Returns a progress message for printing out to the console."""
if not self.last_result:
return self._status_string()
def location_string(hostname, pid):
if hostname == os.uname()[1]:
return "pid={}".format(pid)
else:
return "{} pid={}".format(hostname, pid)
pieces = [
"{}".format(self._status_string()), "[{}]".format(
self.resources.summary_string()), "[{}]".format(
location_string(
self.last_result.get(HOSTNAME),
self.last_result.get(PID))), "{} s".format(
int(self.last_result.get(TIME_TOTAL_S)))
]
if self.last_result.get(TRAINING_ITERATION) is not None:
pieces.append("{} iter".format(
self.last_result[TRAINING_ITERATION]))
if self.last_result.get(TIMESTEPS_TOTAL) is not None:
pieces.append("{} ts".format(self.last_result[TIMESTEPS_TOTAL]))
if self.last_result.get(EPISODE_REWARD_MEAN) is not None:
pieces.append("{} rew".format(
format(self.last_result[EPISODE_REWARD_MEAN], ".3g")))
if self.last_result.get(MEAN_LOSS) is not None:
pieces.append("{} loss".format(
format(self.last_result[MEAN_LOSS], ".3g")))
if self.last_result.get(MEAN_ACCURACY) is not None:
pieces.append("{} acc".format(
format(self.last_result[MEAN_ACCURACY], ".3g")))
return ", ".join(pieces) |
Returns whether the trial qualifies for restoring.
This is the case if a checkpoint frequency is set and the trial has not
failed more than max_failures times. Note that this may return True even
when there is not yet a checkpoint.
def should_recover(self):
"""Returns whether the trial qualifies for restoring.
This is the case if a checkpoint frequency is set and the trial has not
failed more than max_failures times. Note that this may return True even
when there is not yet a checkpoint.
"""
return (self.checkpoint_freq > 0
and (self.num_failures < self.max_failures
or self.max_failures < 0)) |
Compares the current checkpoint's attr_mean against the best checkpoint
seen so far. Greater-than comparison is used by default. If the
command-line parameter checkpoint_score_attr starts with "min-",
less-than comparison is used instead.
Arguments:
attr_mean: mean of the attribute value for the current checkpoint
Returns:
True: when attr_mean is greater than the previous best and the
greater-than comparison is selected, or when attr_mean is less than
the previous best and the less-than comparison is selected
False: when attr_mean does not satisfy the selected comparison
def compare_checkpoints(self, attr_mean):
"""Compares two checkpoints based on the attribute attr_mean param.
Greater than is used by default. If command-line parameter
checkpoint_score_attr starts with "min-" less than is used.
Arguments:
attr_mean: mean of attribute value for the current checkpoint
Returns:
True: when attr_mean is greater than previous checkpoint attr_mean
and greater than function is selected
when attr_mean is less than previous checkpoint attr_mean and
less than function is selected
False: when attr_mean is not in alignment with selected cmp fn
"""
if self._cmp_greater and attr_mean > self.best_checkpoint_attr_value:
return True
elif (not self._cmp_greater
and attr_mean < self.best_checkpoint_attr_value):
return True
return False |
Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.
def preprocess(img):
"""Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector."""
# Crop the image.
img = img[35:195]
# Downsample by factor of 2.
img = img[::2, ::2, 0]
# Erase background (background type 1).
img[img == 144] = 0
# Erase background (background type 2).
img[img == 109] = 0
# Set everything else (paddles, ball) to 1.
img[img != 0] = 1
return img.astype(np.float).ravel() |
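A quick shape check of the cropping and downsampling above; a random frame stands in for a real Pong observation.
import numpy as np

# 210x160x3 uint8 frame -> crop rows 35:195 (160 rows) -> take every 2nd
# row/column of channel 0 (80x80) -> flatten to 6400 floats.
frame = np.random.randint(0, 256, size=(210, 160, 3), dtype=np.uint8)
cropped = frame[35:195]             # (160, 160, 3)
downsampled = cropped[::2, ::2, 0]  # (80, 80)
assert downsampled.shape == (80, 80)
assert downsampled.ravel().shape == (6400, )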
Take a 1D float array of rewards and compute the discounted rewards.
def discount_rewards(r):
"""take 1D float array of rewards and compute discounted reward"""
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
# Reset the sum, since this was a game boundary (pong specific!).
if r[t] != 0:
running_add = 0
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r |
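A worked example of the discounting loop above with gamma = 0.99. The original relies on a module-level gamma; here it is passed explicitly so the sketch is self-contained.
import numpy as np

def discount(r, gamma=0.99):
    # Same loop as discount_rewards, with gamma as an explicit argument.
    discounted_r = np.zeros_like(r)
    running_add = 0
    for t in reversed(range(0, r.size)):
        if r[t] != 0:
            running_add = 0  # game boundary (Pong-specific)
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r

r = np.array([0.0, 0.0, 1.0, 0.0, 0.0, -1.0])
print(discount(r))
# values: [0.9801, 0.99, 1.0, -0.9801, -0.99, -1.0]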
backward pass. (eph is array of intermediate hidden states)
def policy_backward(eph, epx, epdlogp, model):
"""backward pass. (eph is array of intermediate hidden states)"""
dW2 = np.dot(eph.T, epdlogp).ravel()
dh = np.outer(epdlogp, model["W2"])
# Backprop relu.
dh[eph <= 0] = 0
dW1 = np.dot(dh.T, epx)
return {"W1": dW1, "W2": dW2} |
Load a class at runtime given a full path.
Example of the path: mypkg.mysubpkg.myclass
def load_class(path):
"""
Load a class at runtime given a full path.
Example of the path: mypkg.mysubpkg.myclass
"""
class_data = path.split(".")
if len(class_data) < 2:
raise ValueError(
"You need to pass a valid path like mymodule.provider_class")
module_path = ".".join(class_data[:-1])
class_str = class_data[-1]
module = importlib.import_module(module_path)
return getattr(module, class_str) |
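A standalone sketch of the same dotted-path lookup, runnable on its own against a standard-library class.
import importlib

def load_class_sketch(path):
    # Split "pkg.module.ClassName" into module path and class name.
    module_path, _, class_str = path.rpartition(".")
    if not module_path:
        raise ValueError(
            "You need to pass a valid path like mymodule.provider_class")
    return getattr(importlib.import_module(module_path), class_str)

print(load_class_sketch("collections.OrderedDict"))
# <class 'collections.OrderedDict'>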
Terminates a set of nodes. May be overridden with a batch method.
def terminate_nodes(self, node_ids):
"""Terminates a set of nodes. May be overridden with a batch method."""
for node_id in node_ids:
logger.info("NodeProvider: "
"{}: Terminating node".format(node_id))
self.terminate_node(node_id) |
Passes the result to BayesOpt unless early terminated or errored
def on_trial_complete(self,
trial_id,
result=None,
error=False,
early_terminated=False):
"""Passes the result to BayesOpt unless early terminated or errored"""
if result:
self.optimizer.register(
params=self._live_trial_mapping[trial_id],
target=result[self._reward_attr])
del self._live_trial_mapping[trial_id] |
Execute method with arg and return the result.
If the method fails, return a RayTaskError so it can be sealed in the
resultOID and retried by user.
def _execute_and_seal_error(method, arg, method_name):
"""Execute method with arg and return the result.
If the method fails, return a RayTaskError so it can be sealed in the
resultOID and retried by user.
"""
try:
return method(arg)
except Exception:
return ray.worker.RayTaskError(method_name, traceback.format_exc()) |
Helper method to dispatch a batch of input to self.serve_method.
def _dispatch(self, input_batch: List[SingleQuery]):
"""Helper method to dispatch a batch of input to self.serve_method."""
method = getattr(self, self.serve_method)
if hasattr(method, "ray_serve_batched_input"):
batch = [inp.data for inp in input_batch]
result = _execute_and_seal_error(method, batch, self.serve_method)
for res, inp in zip(result, input_batch):
ray.worker.global_worker.put_object(inp.result_object_id, res)
else:
for inp in input_batch:
result = _execute_and_seal_error(method, inp.data,
self.serve_method)
ray.worker.global_worker.put_object(inp.result_object_id,
result) |
Returns the gym env wrapper of the given class, or None.
def get_wrapper_by_cls(env, cls):
"""Returns the gym env wrapper of the given class, or None."""
currentenv = env
while True:
if isinstance(currentenv, cls):
return currentenv
elif isinstance(currentenv, gym.Wrapper):
currentenv = currentenv.env
else:
return None |
Configure environment for DeepMind-style Atari.
Note that we assume reward clipping is done outside the wrapper.
Args:
dim (int): Dimension to resize observations to (dim x dim).
framestack (bool): Whether to framestack observations.
def wrap_deepmind(env, dim=84, framestack=True):
"""Configure environment for DeepMind-style Atari.
Note that we assume reward clipping is done outside the wrapper.
Args:
dim (int): Dimension to resize observations to (dim x dim).
framestack (bool): Whether to framestack observations.
"""
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
if "NoFrameskip" in env.spec.id:
env = MaxAndSkipEnv(env, skip=4)
env = EpisodicLifeEnv(env)
if "FIRE" in env.unwrapped.get_action_meanings():
env = FireResetEnv(env)
env = WarpFrame(env, dim)
# env = ScaledFloatFrame(env) # TODO: use for dqn?
# env = ClipRewardEnv(env) # reward clipping is handled by policy eval
if framestack:
env = FrameStack(env, 4)
return env |
Note: Padding is added to match TF conv2d `same` padding. See
www.tensorflow.org/versions/r0.12/api_docs/python/nn/convolution
Params:
in_size (tuple): Rows (Height), Column (Width) for input
filter_size (tuple): Rows (Height), Column (Width) for filter
stride_size (tuple): Rows (Height), Column (Width) for stride
Output:
padding (tuple): For input into torch.nn.ZeroPad2d
output (tuple): Output shape after padding and convolution
def valid_padding(in_size, filter_size, stride_size):
"""Note: Padding is added to match TF conv2d `same` padding. See
www.tensorflow.org/versions/r0.12/api_docs/python/nn/convolution
Params:
in_size (tuple): Rows (Height), Column (Width) for input
filter_size (tuple): Rows (Height), Column (Width) for filter
stride_size (tuple): Rows (Height), Column (Width) for stride
Output:
padding (tuple): For input into torch.nn.ZeroPad2d
output (tuple): Output shape after padding and convolution
"""
in_height, in_width = in_size
filter_height, filter_width = filter_size
stride_height, stride_width = stride_size
out_height = np.ceil(float(in_height) / float(stride_height))
out_width = np.ceil(float(in_width) / float(stride_width))
pad_along_height = int(
((out_height - 1) * stride_height + filter_height - in_height))
pad_along_width = int(
((out_width - 1) * stride_width + filter_width - in_width))
pad_top = pad_along_height // 2
pad_bottom = pad_along_height - pad_top
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
padding = (pad_left, pad_right, pad_top, pad_bottom)
output = (out_height, out_width)
return padding, output |
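A worked example for the common first Atari convolution (84x84 input, 8x8 filter, stride 4):

padding, output = valid_padding((84, 84), (8, 8), (4, 4))
# out = ceil(84 / 4) = 21; pad = (21 - 1) * 4 + 8 - 84 = 4, split as 2 / 2 per side
print(padding)  # (2, 2, 2, 2) -> (left, right, top, bottom) for torch.nn.ZeroPad2d
print(output)   # (21.0, 21.0)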
Call ray.get and then queue the object ids for deletion.
This function should be used whenever possible in RLlib, to optimize
memory usage. The only exception is when an object_id is shared among
multiple readers.
Args:
object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.
Returns:
The result of ray.get(object_ids).
def ray_get_and_free(object_ids):
    """Call ray.get and then queue the object ids for deletion.
    This function should be used whenever possible in RLlib, to optimize
    memory usage. The only exception is when an object_id is shared among
    multiple readers.
    Args:
        object_ids (ObjectID|List[ObjectID]): Object ids to fetch and free.
    Returns:
        The result of ray.get(object_ids).
    """
    global _last_free_time
    global _to_free
    result = ray.get(object_ids)
    if type(object_ids) is not list:
        object_ids = [object_ids]
    _to_free.extend(object_ids)
    # batch calls to free to reduce overheads
    now = time.time()
    if (len(_to_free) > MAX_FREE_QUEUE_SIZE
            or now - _last_free_time > FREE_DELAY_S):
        ray.internal.free(_to_free)
        _to_free = []
        _last_free_time = now
    return result |
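A minimal usage sketch, assuming Ray is initialized and the module-level _to_free / _last_free_time globals above are in scope:

import ray

ray.init()

@ray.remote
def rollout(i):
    # placeholder task standing in for a sample-collection call
    return [i] * 1000

ids = [rollout.remote(i) for i in range(4)]
batches = ray_get_and_free(ids)  # same values as ray.get(ids), plus deferred freeing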
Returns an array of a given size that is 64-byte aligned.
The returned array can be efficiently copied into GPU memory by TensorFlow.
def aligned_array(size, dtype, align=64):
    """Returns an array of a given size that is 64-byte aligned.
    The returned array can be efficiently copied into GPU memory by TensorFlow.
    """
    n = size * dtype.itemsize
    empty = np.empty(n + (align - 1), dtype=np.uint8)
    data_align = empty.ctypes.data % align
    offset = 0 if data_align == 0 else (align - data_align)
    output = empty[offset:offset + n].view(dtype)
    assert len(output) == size, len(output)
    assert output.ctypes.data % align == 0, output.ctypes.data
    return output |
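For illustration, the returned array behaves like any other NumPy array but starts on a 64-byte boundary:

import numpy as np

arr = aligned_array(1024, np.dtype(np.float32))
assert arr.ctypes.data % 64 == 0  # start address is 64-byte aligned
arr[:] = 0.0                      # normal float32 array of length 1024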
Concatenate arrays, ensuring the output is 64-byte aligned.
We only align float arrays; other arrays are concatenated as normal.
This should be used instead of np.concatenate() to improve performance
when the output array is likely to be fed into TensorFlow.
def concat_aligned(items):
    """Concatenate arrays, ensuring the output is 64-byte aligned.
    We only align float arrays; other arrays are concatenated as normal.
    This should be used instead of np.concatenate() to improve performance
    when the output array is likely to be fed into TensorFlow.
    """
    if len(items) == 0:
        return []
    elif len(items) == 1:
        # we assume the input is aligned. In any case, it doesn't help
        # performance to force align it since that incurs a needless copy.
        return items[0]
    elif (isinstance(items[0], np.ndarray)
          and items[0].dtype in [np.float32, np.float64, np.uint8]):
        dtype = items[0].dtype
        flat = aligned_array(sum(s.size for s in items), dtype)
        batch_dim = sum(s.shape[0] for s in items)
        new_shape = (batch_dim, ) + items[0].shape[1:]
        output = flat.reshape(new_shape)
        assert output.ctypes.data % 64 == 0, output.ctypes.data
        np.concatenate(items, out=output)
        return output
    else:
        return np.concatenate(items) |
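A short sketch showing the aligned fast path for float32 batches:

import numpy as np

parts = [np.ones((32, 84, 84, 4), dtype=np.float32) for _ in range(3)]
batch = concat_aligned(parts)
print(batch.shape)             # (96, 84, 84, 4)
print(batch.ctypes.data % 64)  # 0 -> aligned, cheap to copy into TensorFlow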
Adds an item to the queue.
Uses polling if block=True, so there is no guarantee of order if
multiple producers put to the same full queue.
Raises:
Full if the queue is full and blocking is False.
def put(self, item, block=True, timeout=None):
    """Adds an item to the queue.
    Uses polling if block=True, so there is no guarantee of order if
    multiple producers put to the same full queue.
    Raises:
        Full if the queue is full and blocking is False.
    """
    if self.maxsize <= 0:
        self.actor.put.remote(item)
    elif not block:
        if not ray.get(self.actor.put.remote(item)):
            raise Full
    elif timeout is None:
        # Polling
        # Use a not_full condition variable or promise?
        while not ray.get(self.actor.put.remote(item)):
            # Consider adding time.sleep here
            pass
    elif timeout < 0:
        raise ValueError("'timeout' must be a non-negative number")
    else:
        endtime = time.time() + timeout
        # Polling
        # Use a condition variable or switch to promise?
        success = False
        while not success and time.time() < endtime:
            success = ray.get(self.actor.put.remote(item))
        if not success:
            raise Full |
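A hedged usage sketch; it assumes Ray is initialized and that the surrounding Queue class and its Full exception are in scope:

q = Queue(maxsize=1)
q.put("a")                   # fits immediately
try:
    q.put("b", block=False)  # non-blocking put on a full queue
except Full:
    print("queue is full")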
Gets an item from the queue.
Uses polling if block=True, so there is no guarantee of order if
multiple consumers get from the same empty queue.
Returns:
The next item in the queue.
Raises:
Empty if the queue is empty and blocking is False.
def get(self, block=True, timeout=None):
    """Gets an item from the queue.
    Uses polling if block=True, so there is no guarantee of order if
    multiple consumers get from the same empty queue.
    Returns:
        The next item in the queue.
    Raises:
        Empty if the queue is empty and blocking is False.
    """
    if not block:
        success, item = ray.get(self.actor.get.remote())
        if not success:
            raise Empty
    elif timeout is None:
        # Polling
        # Use a not_empty condition variable or return a promise?
        success, item = ray.get(self.actor.get.remote())
        while not success:
            # Consider adding time.sleep here
            success, item = ray.get(self.actor.get.remote())
    elif timeout < 0:
        raise ValueError("'timeout' must be a non-negative number")
    else:
        endtime = time.time() + timeout
        # Polling
        # Use a not_full condition variable or return a promise?
        success = False
        while not success and time.time() < endtime:
            success, item = ray.get(self.actor.get.remote())
        if not success:
            raise Empty
    return item |
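Continuing the sketch above, get() mirrors put(): blocking with a timeout polls the backing actor until the deadline passes:

q = Queue(maxsize=2)
q.put(42)
print(q.get())          # 42
try:
    q.get(timeout=0.1)  # queue is now empty
except Empty:
    print("timed out waiting for an item")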
Annotation for documenting method overrides.
Arguments:
cls (type): The superclass that provides the overridden method. If this
cls does not actually have the method, an error is raised.
def override(cls):
    """Annotation for documenting method overrides.
    Arguments:
        cls (type): The superclass that provides the overridden method. If
            this cls does not actually have the method, an error is raised.
    """
    def check_override(method):
        if method.__name__ not in dir(cls):
            raise NameError("{} does not override any method of {}".format(
                method, cls))
        return method
    return check_override |
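A short example of the decorator in use; the classes are illustrative only:

class Policy:
    def compute_actions(self, obs_batch):
        raise NotImplementedError

class MyPolicy(Policy):
    @override(Policy)
    def compute_actions(self, obs_batch):  # OK: Policy defines this method
        return [0 for _ in obs_batch]

# Decorating a method whose name does not exist on Policy (e.g. a typo such as
# compute_action) would raise NameError at class definition time.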
Adds new trial.
On a new trial add, if current bracket is not filled,
add to current bracket. Else, if current band is not filled,
create new bracket, add to current bracket.
Else, create new iteration, create new bracket, add to bracket.
def on_trial_add(self, trial_runner, trial):
    """Adds new trial.
    On a new trial add, if current bracket is not filled,
    add to current bracket. Else, if current band is not filled,
    create new bracket, add to current bracket.
    Else, create new iteration, create new bracket, add to bracket."""
    cur_bracket = self._state["bracket"]
    cur_band = self._hyperbands[self._state["band_idx"]]
    if cur_bracket is None or cur_bracket.filled():
        retry = True
        while retry:
            # if current iteration is filled, create new iteration
            if self._cur_band_filled():
                cur_band = []
                self._hyperbands.append(cur_band)
                self._state["band_idx"] += 1
            # cur_band will always be less than s_max_1 or else filled
            s = len(cur_band)
            assert s < self._s_max_1, "Current band is filled!"
            if self._get_r0(s) == 0:
                logger.info("Bracket too small - Retrying...")
                cur_bracket = None
            else:
                retry = False
                cur_bracket = Bracket(self._time_attr, self._get_n0(s),
                                      self._get_r0(s), self._max_t_attr,
                                      self._eta, s)
            cur_band.append(cur_bracket)
            self._state["bracket"] = cur_bracket
    self._state["bracket"].add_trial(trial)
    self._trial_info[trial] = cur_bracket, self._state["band_idx"] |
Checks if the current band is filled.
The size of the current band should be equal to s_max_1
def _cur_band_filled(self):
    """Checks if the current band is filled.
    The size of the current band should be equal to s_max_1"""
    cur_band = self._hyperbands[self._state["band_idx"]]
    return len(cur_band) == self._s_max_1 |
If bracket is finished, all trials will be stopped.
If a given trial finishes and bracket iteration is not done,
the trial will be paused and resources will be given up.
This scheduler will not start trials but will stop trials.
The current running trial will not be handled,
as the trialrunner will be given control to handle it.
def on_trial_result(self, trial_runner, trial, result):
    """If bracket is finished, all trials will be stopped.
    If a given trial finishes and bracket iteration is not done,
    the trial will be paused and resources will be given up.
    This scheduler will not start trials but will stop trials.
    The current running trial will not be handled,
    as the trialrunner will be given control to handle it."""
    bracket, _ = self._trial_info[trial]
    bracket.update_trial_stats(trial, result)
    if bracket.continue_trial(trial):
        return TrialScheduler.CONTINUE
    action = self._process_bracket(trial_runner, bracket, trial)
    return action |
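A rough sketch of wiring this scheduler into Tune; the constructor keywords (e.g. reward_attr vs. metric/mode) have changed across Ray versions, so treat the exact argument names as assumptions, and my_trainable is a placeholder for a user-defined trainable:

from ray import tune
from ray.tune.schedulers import HyperBandScheduler

hyperband = HyperBandScheduler(
    time_attr="training_iteration",
    reward_attr="episode_reward_mean",
    max_t=100)

tune.run(my_trainable, num_samples=20, scheduler=hyperband)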