after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def run_rsync_down(self, source, target, redirect=None):
    """Copy `source` from the remote node to the local `target` via rsync.

    Resolves the node's SSH IP first, then shells out to rsync using this
    command runner's default SSH options (120s connect timeout).
    stdout/stderr of the subprocess are sent to `redirect` if given.
    """
    self.set_ssh_ip_if_required()
    # rsync needs the full ssh invocation (with our options) as one string.
    ssh_command = " ".join(["ssh"] + self.get_default_ssh_options(120))
    remote_source = "{}@{}:{}".format(self.ssh_user, self.ssh_ip, source)
    rsync_cmd = ["rsync", "--rsh", ssh_command, "-avz", remote_source, target]
    self.process_runner.check_call(rsync_cmd, stdout=redirect, stderr=redirect)
|
def run_rsync_down(self, source, target, redirect=None):
    """Copy `source` from the pod down to the local `target`.

    Tries the kubectl-based rsync helper first; if that fails for any
    reason, falls back to a plain `kubectl cp`. Paths under `~` are
    rewritten to `/root` since the pod user is root.
    """
    if target.startswith("~"):
        target = "/root" + target[1:]
    remote_source = "{}@{}:{}".format(self.node_id, self.namespace, source)
    try:
        self.process_runner.check_call(
            [KUBECTL_RSYNC, "-avz", remote_source, target],
            stdout=redirect,
            stderr=redirect,
        )
    except Exception as e:
        # Best-effort fallback: kubectl cp is slower but more portable.
        logger.warning(
            self.log_prefix
            + "rsync failed: '{}'. Falling back to 'kubectl cp'".format(e)
        )
        cp_source = "{}/{}:{}".format(self.namespace, self.node_id, source)
        self.process_runner.check_call(
            self.kubectl + ["cp", cp_source, target],
            stdout=redirect,
            stderr=redirect,
        )
|
https://github.com/ray-project/ray/issues/5862
|
Traceback (most recent call last):
File "/Users/swang/ray/python/ray/worker.py", line 2121, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(UnreconstructableError): <exception str() failed>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "error.py", line 11, in <module>
ray.get(x[0])
File "/Users/swang/ray/python/ray/worker.py", line 2131, in get
return values
File "python/ray/includes/libcoreworker.pxi", line 32, in ray._raylet.ProfileEvent.__exit__
"value": str(value),
File "/Users/swang/ray/python/ray/exceptions.py", line 155, in __str__
self.object_id.hex(),
AttributeError: 'RayTaskError(UnreconstructableError)' object has no attribute 'object_id'
2019-10-08 10:35:18,456 WARNING worker.py:1619 -- The task with ID ffffffffffffffffffff01000000 is a driver task and so the object created by ray.put could not be reconstructed.
|
AttributeError
|
def as_instanceof_cause(self):
    """Returns copy that is an instance of the cause's Python class.
    The returned exception will inherit from both RayTaskError and the
    cause class.
    """
    cause_cls = self.cause_cls
    # Nothing to do if RayTaskError already satisfies isinstance checks
    # against the cause class.
    if issubclass(RayTaskError, cause_cls):
        return self
    # Don't try to wrap ray internal errors.
    if issubclass(cause_cls, RayError):
        return self

    class cls(RayTaskError, cause_cls):
        def __init__(self, function_name, traceback_str, cause_cls, proctitle, pid, ip):
            RayTaskError.__init__(
                self, function_name, traceback_str, cause_cls, proctitle, pid, ip
            )

    # Give the dynamic class a readable name for tracebacks/repr.
    name = "RayTaskError({})".format(cause_cls.__name__)
    cls.__name__ = name
    cls.__qualname__ = name
    return cls(
        self.function_name,
        self.traceback_str,
        cause_cls,
        self.proctitle,
        self.pid,
        self.ip,
    )
|
def as_instanceof_cause(self):
    """Returns copy that is an instance of the cause's Python class.
    The returned exception will inherit from both RayTaskError and the
    cause class.
    """
    if issubclass(RayTaskError, self.cause_cls):
        return self  # already satisfied
    # Bug fix: never wrap ray internal errors. Wrapping e.g.
    # UnreconstructableError built a subclass whose inherited __str__
    # reads attributes (object_id, ...) that RayTaskError.__init__ never
    # sets, so stringifying the exception raised
    # "AttributeError: ... has no attribute 'object_id'".
    if issubclass(self.cause_cls, RayError):
        return self

    class cls(RayTaskError, self.cause_cls):
        def __init__(self, function_name, traceback_str, cause_cls, proctitle, pid, ip):
            RayTaskError.__init__(
                self, function_name, traceback_str, cause_cls, proctitle, pid, ip
            )

    name = "RayTaskError({})".format(self.cause_cls.__name__)
    cls.__name__ = name
    cls.__qualname__ = name
    return cls(
        self.function_name,
        self.traceback_str,
        self.cause_cls,
        self.proctitle,
        self.pid,
        self.ip,
    )
|
https://github.com/ray-project/ray/issues/5862
|
Traceback (most recent call last):
File "/Users/swang/ray/python/ray/worker.py", line 2121, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(UnreconstructableError): <exception str() failed>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "error.py", line 11, in <module>
ray.get(x[0])
File "/Users/swang/ray/python/ray/worker.py", line 2131, in get
return values
File "python/ray/includes/libcoreworker.pxi", line 32, in ray._raylet.ProfileEvent.__exit__
"value": str(value),
File "/Users/swang/ray/python/ray/exceptions.py", line 155, in __str__
self.object_id.hex(),
AttributeError: 'RayTaskError(UnreconstructableError)' object has no attribute 'object_id'
2019-10-08 10:35:18,456 WARNING worker.py:1619 -- The task with ID ffffffffffffffffffff01000000 is a driver task and so the object created by ray.put could not be reconstructed.
|
AttributeError
|
def open_closed_files(self):
    """Open some closed files if they may have new lines.
    Opening more files may require us to close some of the already open
    files.
    """
    if not self.can_open_more_files:
        # If we can't open any more files. Close all of the files.
        self.close_all_files()
    files_with_no_updates = []
    while len(self.closed_file_infos) > 0:
        if len(self.open_file_infos) >= ray_constants.LOG_MONITOR_MAX_OPEN_FILES:
            self.can_open_more_files = False
            break
        file_info = self.closed_file_infos.pop(0)
        assert file_info.file_handle is None
        # Get the file size to see if it has gotten bigger since we last
        # opened it.
        try:
            file_size = os.path.getsize(file_info.filename)
        except (IOError, OSError) as e:
            # Catch "file not found" errors.
            if e.errno == errno.ENOENT:
                logger.warning(
                    "Warning: The file {} was not found.".format(file_info.filename)
                )
                self.log_filenames.remove(file_info.filename)
                continue
            raise e
        # If some new lines have been added to this file, try to reopen the
        # file.
        if file_size > file_info.size_when_last_opened:
            try:
                f = open(file_info.filename, "rb")
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    logger.warning(
                        "Warning: The file {} was not found.".format(file_info.filename)
                    )
                    self.log_filenames.remove(file_info.filename)
                    continue
                else:
                    raise e
            f.seek(file_info.file_position)
            # Bug fix: this previously assigned `filesize_when_last_opened`,
            # an attribute nothing reads -- the size comparison above reads
            # `size_when_last_opened`, so the recorded size was never
            # updated. Write the attribute that is actually read.
            file_info.size_when_last_opened = file_size
            file_info.file_handle = f
            self.open_file_infos.append(file_info)
        else:
            files_with_no_updates.append(file_info)
    # Add the files with no changes back to the list of closed files.
    self.closed_file_infos += files_with_no_updates
|
def open_closed_files(self):
    """Open some closed files if they may have new lines.
    Opening more files may require us to close some of the already open
    files.
    """
    if not self.can_open_more_files:
        # If we can't open any more files. Close all of the files.
        self.close_all_files()
    files_with_no_updates = []
    while len(self.closed_file_infos) > 0:
        if len(self.open_file_infos) >= ray_constants.LOG_MONITOR_MAX_OPEN_FILES:
            self.can_open_more_files = False
            break
        file_info = self.closed_file_infos.pop(0)
        assert file_info.file_handle is None
        # Get the file size to see if it has gotten bigger since we last
        # opened it.
        try:
            file_size = os.path.getsize(file_info.filename)
        except (IOError, OSError) as e:
            # Catch "file not found" errors.
            if e.errno == errno.ENOENT:
                logger.warning(
                    "Warning: The file {} was not found.".format(file_info.filename)
                )
                self.log_filenames.remove(file_info.filename)
                continue
            raise e
        # If some new lines have been added to this file, try to reopen the
        # file.
        if file_size > file_info.size_when_last_opened:
            try:
                # Bug fix: strict utf-8 decoding crashed the log monitor on
                # partially-written multi-byte characters
                # (UnicodeDecodeError: can't decode byte 0xc2 ...).
                # Substitute undecodable bytes instead of raising.
                f = open(file_info.filename, "r", errors="replace")
            except (IOError, OSError) as e:
                if e.errno == errno.ENOENT:
                    logger.warning(
                        "Warning: The file {} was not found.".format(file_info.filename)
                    )
                    self.log_filenames.remove(file_info.filename)
                    continue
                else:
                    raise e
            f.seek(file_info.file_position)
            # Bug fix: previously assigned `filesize_when_last_opened`,
            # which nothing reads; the comparison above reads
            # `size_when_last_opened`, so the stored size never updated.
            file_info.size_when_last_opened = file_size
            file_info.file_handle = f
            self.open_file_infos.append(file_info)
        else:
            files_with_no_updates.append(file_info)
    # Add the files with no changes back to the list of closed files.
    self.closed_file_infos += files_with_no_updates
|
https://github.com/ray-project/ray/issues/4382
|
2019-03-15 15:48:49,381 ERROR worker.py:1752 -- The log monitor on node ip-10-2-247-245 failed with the following error:
Traceback (most recent call last):
File "/home/ubuntu/longshotsyndicate/ray/python/ray/log_monitor.py", line 268, in <module>
log_monitor.run()
File "/home/ubuntu/longshotsyndicate/ray/python/ray/log_monitor.py", line 219, in run
anything_published = self.check_log_files_and_publish_updates()
File "/home/ubuntu/longshotsyndicate/ray/python/ray/log_monitor.py", line 175, in check_log_files_and_publish_updates
next_line = file_info.file_handle.readline()
File "/home/ubuntu/conda/envs/venv/lib/python3.7/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc2 in position 0: unexpected end of data
|
UnicodeDecodeError
|
def check_log_files_and_publish_updates(self):
    """Get any changes to the log files and push updates to Redis.
    Returns:
        True if anything was published and false otherwise.
    """
    anything_published = False
    for file_info in self.open_file_infos:
        assert not file_info.file_handle.closed
        lines_to_publish = []
        # Cap work per file per pass so one chatty file can't starve others.
        max_num_lines_to_read = 100
        for _ in range(max_num_lines_to_read):
            try:
                next_line = file_info.file_handle.readline()
                # Replace any characters not in UTF-8 with
                # a replacement character, see
                # https://stackoverflow.com/a/38565489/10891801
                next_line = next_line.decode("utf-8", "replace")
                if next_line == "":
                    break
                if next_line[-1] == "\n":
                    next_line = next_line[:-1]
                lines_to_publish.append(next_line)
            except Exception:
                # Bug fix: this read `file_info.file_info.file_handle`,
                # a nonexistent attribute, so reporting a read failure
                # itself raised AttributeError and masked the real error.
                logger.error(
                    "Error: Reading file: {}, position: {} failed.".format(
                        file_info.full_path, file_info.file_handle.tell()
                    )
                )
                raise
        if file_info.file_position == 0:
            # First read of this file: the first line may identify the
            # worker pid; raylet logs are tagged specially.
            if len(lines_to_publish) > 0 and lines_to_publish[0].startswith(
                "Ray worker pid: "
            ):
                file_info.worker_pid = int(lines_to_publish[0].split(" ")[-1])
                lines_to_publish = lines_to_publish[1:]
            elif "/raylet" in file_info.filename:
                file_info.worker_pid = "raylet"
        # Record the current position in the file.
        file_info.file_position = file_info.file_handle.tell()
        if len(lines_to_publish) > 0:
            self.redis_client.publish(
                ray.gcs_utils.LOG_FILE_CHANNEL,
                json.dumps(
                    {
                        "ip": self.ip,
                        "pid": file_info.worker_pid,
                        "lines": lines_to_publish,
                    }
                ),
            )
            anything_published = True
    return anything_published
|
def check_log_files_and_publish_updates(self):
    """Get any changes to the log files and push updates to Redis.
    Returns:
        True if anything was published and false otherwise.
    """
    anything_published = False
    for file_info in self.open_file_infos:
        assert not file_info.file_handle.closed
        lines_to_publish = []
        # Cap work per file per pass so one chatty file can't starve others.
        max_num_lines_to_read = 100
        for _ in range(max_num_lines_to_read):
            try:
                next_line = file_info.file_handle.readline()
                if next_line == "":
                    break
                if next_line[-1] == "\n":
                    next_line = next_line[:-1]
                lines_to_publish.append(next_line)
            except Exception:
                # Bug fix: this read `file_info.file_info.file_handle`,
                # a nonexistent attribute, so reporting a read failure
                # itself raised AttributeError and masked the real error.
                logger.error(
                    "Error: Reading file: {}, position: {} failed.".format(
                        file_info.full_path, file_info.file_handle.tell()
                    )
                )
                raise
        if file_info.file_position == 0:
            # First read of this file: the first line may identify the
            # worker pid; raylet logs are tagged specially.
            if len(lines_to_publish) > 0 and lines_to_publish[0].startswith(
                "Ray worker pid: "
            ):
                file_info.worker_pid = int(lines_to_publish[0].split(" ")[-1])
                lines_to_publish = lines_to_publish[1:]
            elif "/raylet" in file_info.filename:
                file_info.worker_pid = "raylet"
        # Record the current position in the file.
        file_info.file_position = file_info.file_handle.tell()
        if len(lines_to_publish) > 0:
            self.redis_client.publish(
                ray.gcs_utils.LOG_FILE_CHANNEL,
                json.dumps(
                    {
                        "ip": self.ip,
                        "pid": file_info.worker_pid,
                        "lines": lines_to_publish,
                    }
                ),
            )
            anything_published = True
    return anything_published
|
https://github.com/ray-project/ray/issues/4382
|
2019-03-15 15:48:49,381 ERROR worker.py:1752 -- The log monitor on node ip-10-2-247-245 failed with the following error:
Traceback (most recent call last):
File "/home/ubuntu/longshotsyndicate/ray/python/ray/log_monitor.py", line 268, in <module>
log_monitor.run()
File "/home/ubuntu/longshotsyndicate/ray/python/ray/log_monitor.py", line 219, in run
anything_published = self.check_log_files_and_publish_updates()
File "/home/ubuntu/longshotsyndicate/ray/python/ray/log_monitor.py", line 175, in check_log_files_and_publish_updates
next_line = file_info.file_handle.readline()
File "/home/ubuntu/conda/envs/venv/lib/python3.7/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xc2 in position 0: unexpected end of data
|
UnicodeDecodeError
|
def __init__(self, observation_space, action_space, config):
    """Build the DQN TF graph for this policy.

    Constructs the online Q network, the target Q network, action
    sampling ops, the TD loss, and the target-network sync op, then
    initializes the TFPolicyGraph base class and the TF variables.

    Args:
        observation_space: Observation space; its `.shape` sizes the
            observation placeholders.
        action_space: Must be a Discrete space (raises
            UnsupportedSpaceException otherwise).
        config: Policy config, merged over
            ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG.
    """
    config = dict(ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG, **config)
    if not isinstance(action_space, Discrete):
        raise UnsupportedSpaceException(
            "Action space {} is not supported for DQN.".format(action_space)
        )
    self.config = config
    self.cur_epsilon = 1.0
    self.num_actions = action_space.n
    # Action inputs
    self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
    self.eps = tf.placeholder(tf.float32, (), name="eps")
    self.cur_observations = tf.placeholder(
        tf.float32, shape=(None,) + observation_space.shape
    )
    # Action Q network
    with tf.variable_scope(Q_SCOPE) as scope:
        q_values, q_logits, q_dist, _ = self._build_q_network(
            self.cur_observations, observation_space, action_space
        )
        self.q_values = q_values
        self.q_func_vars = _scope_vars(scope.name)
    # Noise vars for Q network except for layer normalization vars
    if self.config["parameter_noise"]:
        self._build_parameter_noise(
            [var for var in self.q_func_vars if "LayerNorm" not in var.name]
        )
        self.action_probs = tf.nn.softmax(self.q_values)
    # Action outputs
    self.output_actions, self.action_prob = self._build_q_value_policy(q_values)
    # Replay inputs
    self.obs_t = tf.placeholder(tf.float32, shape=(None,) + observation_space.shape)
    self.act_t = tf.placeholder(tf.int32, [None], name="action")
    self.rew_t = tf.placeholder(tf.float32, [None], name="reward")
    self.obs_tp1 = tf.placeholder(tf.float32, shape=(None,) + observation_space.shape)
    self.done_mask = tf.placeholder(tf.float32, [None], name="done")
    self.importance_weights = tf.placeholder(tf.float32, [None], name="weight")
    # q network evaluation (reuses the online-network variables)
    with tf.variable_scope(Q_SCOPE, reuse=True):
        prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
        q_t, q_logits_t, q_dist_t, model = self._build_q_network(
            self.obs_t, observation_space, action_space
        )
        # Only the batchnorm update ops created by THIS network build.
        q_batchnorm_update_ops = list(
            set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) - prev_update_ops
        )
    # target q network evalution
    with tf.variable_scope(Q_TARGET_SCOPE) as scope:
        q_tp1, q_logits_tp1, q_dist_tp1, _ = self._build_q_network(
            self.obs_tp1, observation_space, action_space
        )
        self.target_q_func_vars = _scope_vars(scope.name)
    # q scores for actions which we know were selected in the given state.
    one_hot_selection = tf.one_hot(self.act_t, self.num_actions)
    q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
    q_logits_t_selected = tf.reduce_sum(
        q_logits_t * tf.expand_dims(one_hot_selection, -1), 1
    )
    # compute estimate of best possible value starting from state at t + 1
    if config["double_q"]:
        # Double DQN: select the argmax action with the online net, but
        # evaluate it with the target net.
        with tf.variable_scope(Q_SCOPE, reuse=True):
            (
                q_tp1_using_online_net,
                q_logits_tp1_using_online_net,
                q_dist_tp1_using_online_net,
                _,
            ) = self._build_q_network(self.obs_tp1, observation_space, action_space)
        q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
        q_tp1_best_one_hot_selection = tf.one_hot(
            q_tp1_best_using_online_net, self.num_actions
        )
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
        )
    else:
        q_tp1_best_one_hot_selection = tf.one_hot(tf.argmax(q_tp1, 1), self.num_actions)
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
        )
    self.loss = self._build_q_loss(
        q_t_selected, q_logits_t_selected, q_tp1_best, q_dist_tp1_best
    )
    # update_target_fn will be called periodically to copy Q network to
    # target Q network
    update_target_expr = []
    # Both scopes build the same network via _build_q_network, so the
    # variables should be created in matching order; pair by creation
    # order once the counts agree.
    assert len(self.q_func_vars) == len(self.target_q_func_vars), (
        self.q_func_vars,
        self.target_q_func_vars,
    )
    for var, var_target in zip(self.q_func_vars, self.target_q_func_vars):
        update_target_expr.append(var_target.assign(var))
    self.update_target_expr = tf.group(*update_target_expr)
    # initialize TFPolicyGraph
    self.sess = tf.get_default_session()
    self.loss_inputs = [
        (SampleBatch.CUR_OBS, self.obs_t),
        (SampleBatch.ACTIONS, self.act_t),
        (SampleBatch.REWARDS, self.rew_t),
        (SampleBatch.NEXT_OBS, self.obs_tp1),
        (SampleBatch.DONES, self.done_mask),
        (PRIO_WEIGHTS, self.importance_weights),
    ]
    TFPolicyGraph.__init__(
        self,
        observation_space,
        action_space,
        self.sess,
        obs_input=self.cur_observations,
        action_sampler=self.output_actions,
        action_prob=self.action_prob,
        loss=self.loss.loss,
        model=model,
        loss_inputs=self.loss_inputs,
        update_ops=q_batchnorm_update_ops,
    )
    self.sess.run(tf.global_variables_initializer())
|
def __init__(self, observation_space, action_space, config):
    """Build the DQN TF graph for this policy.

    Constructs the online Q network, the target Q network, action
    sampling ops, the TD loss, and the target-network sync op, then
    initializes the TFPolicyGraph base class and the TF variables.

    Args:
        observation_space: Observation space; its `.shape` sizes the
            observation placeholders.
        action_space: Must be a Discrete space (raises
            UnsupportedSpaceException otherwise).
        config: Policy config, merged over
            ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG.
    """
    config = dict(ray.rllib.agents.dqn.dqn.DEFAULT_CONFIG, **config)
    if not isinstance(action_space, Discrete):
        raise UnsupportedSpaceException(
            "Action space {} is not supported for DQN.".format(action_space)
        )
    self.config = config
    self.cur_epsilon = 1.0
    self.num_actions = action_space.n
    # Action inputs
    self.stochastic = tf.placeholder(tf.bool, (), name="stochastic")
    self.eps = tf.placeholder(tf.float32, (), name="eps")
    self.cur_observations = tf.placeholder(
        tf.float32, shape=(None,) + observation_space.shape
    )
    # Action Q network
    with tf.variable_scope(Q_SCOPE) as scope:
        q_values, q_logits, q_dist, _ = self._build_q_network(
            self.cur_observations, observation_space, action_space
        )
        self.q_values = q_values
        self.q_func_vars = _scope_vars(scope.name)
    # Noise vars for Q network except for layer normalization vars
    if self.config["parameter_noise"]:
        self._build_parameter_noise(
            [var for var in self.q_func_vars if "LayerNorm" not in var.name]
        )
        self.action_probs = tf.nn.softmax(self.q_values)
    # Action outputs
    self.output_actions, self.action_prob = self._build_q_value_policy(q_values)
    # Replay inputs
    self.obs_t = tf.placeholder(tf.float32, shape=(None,) + observation_space.shape)
    self.act_t = tf.placeholder(tf.int32, [None], name="action")
    self.rew_t = tf.placeholder(tf.float32, [None], name="reward")
    self.obs_tp1 = tf.placeholder(tf.float32, shape=(None,) + observation_space.shape)
    self.done_mask = tf.placeholder(tf.float32, [None], name="done")
    self.importance_weights = tf.placeholder(tf.float32, [None], name="weight")
    # q network evaluation (reuses the online-network variables)
    with tf.variable_scope(Q_SCOPE, reuse=True):
        prev_update_ops = set(tf.get_collection(tf.GraphKeys.UPDATE_OPS))
        q_t, q_logits_t, q_dist_t, model = self._build_q_network(
            self.obs_t, observation_space, action_space
        )
        # Only the batchnorm update ops created by THIS network build.
        q_batchnorm_update_ops = list(
            set(tf.get_collection(tf.GraphKeys.UPDATE_OPS)) - prev_update_ops
        )
    # target q network evalution
    with tf.variable_scope(Q_TARGET_SCOPE) as scope:
        q_tp1, q_logits_tp1, q_dist_tp1, _ = self._build_q_network(
            self.obs_tp1, observation_space, action_space
        )
        self.target_q_func_vars = _scope_vars(scope.name)
    # q scores for actions which we know were selected in the given state.
    one_hot_selection = tf.one_hot(self.act_t, self.num_actions)
    q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
    q_logits_t_selected = tf.reduce_sum(
        q_logits_t * tf.expand_dims(one_hot_selection, -1), 1
    )
    # compute estimate of best possible value starting from state at t + 1
    if config["double_q"]:
        # Double DQN: select the argmax action with the online net, but
        # evaluate it with the target net.
        with tf.variable_scope(Q_SCOPE, reuse=True):
            (
                q_tp1_using_online_net,
                q_logits_tp1_using_online_net,
                q_dist_tp1_using_online_net,
                _,
            ) = self._build_q_network(self.obs_tp1, observation_space, action_space)
        q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
        q_tp1_best_one_hot_selection = tf.one_hot(
            q_tp1_best_using_online_net, self.num_actions
        )
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
        )
    else:
        q_tp1_best_one_hot_selection = tf.one_hot(tf.argmax(q_tp1, 1), self.num_actions)
        q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
        q_dist_tp1_best = tf.reduce_sum(
            q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
        )
    self.loss = self._build_q_loss(
        q_t_selected, q_logits_t_selected, q_tp1_best, q_dist_tp1_best
    )
    # update_target_fn will be called periodically to copy Q network to
    # target Q network
    update_target_expr = []
    # Bug fix: the online and target variables were previously paired by
    # sorting each list by variable NAME. The two scopes produce different
    # name prefixes/suffixes, so lexicographic order can pair variables
    # from different layers, yielding shape-incompatible assigns
    # ("ValueError: Shapes (3,) and (11,) are incompatible"). Both scopes
    # run the same _build_q_network, so variables are created in matching
    # order -- pair by creation order and assert the counts agree.
    assert len(self.q_func_vars) == len(self.target_q_func_vars), (
        self.q_func_vars,
        self.target_q_func_vars,
    )
    for var, var_target in zip(self.q_func_vars, self.target_q_func_vars):
        update_target_expr.append(var_target.assign(var))
    self.update_target_expr = tf.group(*update_target_expr)
    # initialize TFPolicyGraph
    self.sess = tf.get_default_session()
    self.loss_inputs = [
        (SampleBatch.CUR_OBS, self.obs_t),
        (SampleBatch.ACTIONS, self.act_t),
        (SampleBatch.REWARDS, self.rew_t),
        (SampleBatch.NEXT_OBS, self.obs_tp1),
        (SampleBatch.DONES, self.done_mask),
        (PRIO_WEIGHTS, self.importance_weights),
    ]
    TFPolicyGraph.__init__(
        self,
        observation_space,
        action_space,
        self.sess,
        obs_input=self.cur_observations,
        action_sampler=self.output_actions,
        action_prob=self.action_prob,
        loss=self.loss.loss,
        model=model,
        loss_inputs=self.loss_inputs,
        update_ops=q_batchnorm_update_ops,
    )
    self.sess.run(tf.global_variables_initializer())
|
https://github.com/ray-project/ray/issues/4502
|
2019-03-28 18:34:31,402 WARNING worker.py:1397 -- WARNING: Not updating worker name since `setproctitle` is not installed. Install this with `pip install setproctitle` (or ray[debug]) to enable monitoring of worker processes.
2019-03-28 18:34:31.415440: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
WARNING:tensorflow:From /Users/ristovuorio/miniconda3/envs/ray_fiddle/lib/python3.6/site-packages/tensorflow/python/util/decorator_utils.py:127: GraphKeys.VARIABLES (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.
Traceback (most recent call last):
File "dqn_fail_demonstrator.py", line 37, in <module>
trainer = DQNAgent(env="CartPole-v0", config=config)
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/agents/agent.py", line 280, in __init__
Trainable.__init__(self, config, logger_creator)
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/tune/trainable.py", line 88, in __init__
self._setup(copy.deepcopy(self.config))
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/agents/agent.py", line 377, in _setup
self._init()
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/agents/dqn/dqn.py", line 207, in _init
self.env_creator, self._policy_graph)
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/agents/agent.py", line 510, in make_local_evaluator
extra_config or {}))
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/agents/agent.py", line 727, in _make_evaluator
async_remote_worker_envs=config["async_remote_worker_envs"])
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/evaluation/policy_evaluator.py", line 296, in __init__
self._build_policy_map(policy_dict, policy_config) File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/evaluation/policy_evaluator.py", line 692, in _build_policy_map
policy_map[name] = cls(obs_space, act_space, merged_conf)
File "/Users/ristovuorio/projects/ray_doodle/ray/python/ray/rllib/agents/dqn/dqn_policy_graph.py", line 394, in __init__
update_target_expr.append(var_target.assign(var))
File "/Users/ristovuorio/miniconda3/envs/ray_fiddle/lib/python3.6/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 951, in assign
self._shape.assert_is_compatible_with(value_tensor.shape)
File "/Users/ristovuorio/miniconda3/envs/ray_fiddle/lib/python3.6/site-packages/tensorflow/python/framework/tensor_shape.py", line 848, in assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (3,) and (11,) are incompatible
|
ValueError
|
def train(self):
    """Runs one logical iteration of training.
    Subclasses should override ``_train()`` instead to return results.
    This class automatically fills the following fields in the result:
    `done` (bool): training is terminated. Filled only if not provided.
    `time_this_iter_s` (float): Time in seconds this iteration
        took to run. This may be overriden in order to override the
        system-computed time difference.
    `time_total_s` (float): Accumulated time in seconds for this
        entire experiment.
    `experiment_id` (str): Unique string identifier
        for this experiment. This id is preserved
        across checkpoint / restore calls.
    `training_iteration` (int): The index of this
        training iteration, e.g. call to train().
    `pid` (str): The pid of the training process.
    `date` (str): A formatted date of when the result was processed.
    `timestamp` (str): A UNIX timestamp of when the result
        was processed.
    `hostname` (str): Hostname of the machine hosting the training
        process.
    `node_ip` (str): Node ip of the machine hosting the training
        process.
    Returns:
        A dict that describes training progress.
    """
    start = time.time()
    result = self._train()
    assert isinstance(result, dict), "_train() needs to return a dict."
    result = result.copy()
    self._iteration += 1
    self._iterations_since_restore += 1
    if result.get(TIME_THIS_ITER_S) is not None:
        time_this_iter = result[TIME_THIS_ITER_S]
    else:
        time_this_iter = time.time() - start
    self._time_total += time_this_iter
    self._time_since_restore += time_this_iter
    result.setdefault(DONE, False)
    # self._timesteps_total should only be tracked if increments provided
    if result.get(TIMESTEPS_THIS_ITER) is not None:
        if self._timesteps_total is None:
            self._timesteps_total = 0
        self._timesteps_total += result[TIMESTEPS_THIS_ITER]
        self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
    # self._episodes_total should only be tracked if increments provided
    if result.get(EPISODES_THIS_ITER) is not None:
        if self._episodes_total is None:
            self._episodes_total = 0
        self._episodes_total += result[EPISODES_THIS_ITER]
    # self._timesteps_total should not override user-provided total
    result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
    result.setdefault(EPISODES_TOTAL, self._episodes_total)
    # Provides auto-filled neg_mean_loss for avoiding regressions
    # Bug fix: use an explicit None check so a mean_loss of exactly 0.0
    # still yields neg_mean_loss, matching the `is not None` handling of
    # the other result fields above.
    if result.get("mean_loss") is not None:
        result.setdefault("neg_mean_loss", -result["mean_loss"])
    now = datetime.today()
    result.update(
        experiment_id=self._experiment_id,
        date=now.strftime("%Y-%m-%d_%H-%M-%S"),
        timestamp=int(time.mktime(now.timetuple())),
        training_iteration=self._iteration,
        time_this_iter_s=time_this_iter,
        time_total_s=self._time_total,
        pid=os.getpid(),
        hostname=os.uname()[1],
        node_ip=self._local_ip,
        config=self.config,
        time_since_restore=self._time_since_restore,
        timesteps_since_restore=self._timesteps_since_restore,
        iterations_since_restore=self._iterations_since_restore,
    )
    self._result_logger.on_result(result)
    return result
|
def train(self):
    """Runs one logical iteration of training.
    Subclasses should override ``_train()`` instead to return results.
    This class automatically fills the following fields in the result:
    `done` (bool): training is terminated. Filled only if not provided.
    `time_this_iter_s` (float): Time in seconds this iteration
        took to run. This may be overriden in order to override the
        system-computed time difference.
    `time_total_s` (float): Accumulated time in seconds for this
        entire experiment.
    `experiment_id` (str): Unique string identifier
        for this experiment. This id is preserved
        across checkpoint / restore calls.
    `training_iteration` (int): The index of this
        training iteration, e.g. call to train().
    `pid` (str): The pid of the training process.
    `date` (str): A formatted date of when the result was processed.
    `timestamp` (str): A UNIX timestamp of when the result
        was processed.
    `hostname` (str): Hostname of the machine hosting the training
        process.
    `node_ip` (str): Node ip of the machine hosting the training
        process.
    Returns:
        A dict that describes training progress.
    """
    start = time.time()
    result = self._train()
    assert isinstance(result, dict), "_train() needs to return a dict."
    result = result.copy()
    self._iteration += 1
    self._iterations_since_restore += 1
    if result.get(TIME_THIS_ITER_S) is not None:
        time_this_iter = result[TIME_THIS_ITER_S]
    else:
        time_this_iter = time.time() - start
    self._time_total += time_this_iter
    self._time_since_restore += time_this_iter
    result.setdefault(DONE, False)
    # self._timesteps_total should only be tracked if increments provided
    # Bug fix: the previous truthiness check skipped iterations reporting
    # 0 timesteps, leaving self._timesteps_total as None; stop criteria
    # comparing the auto-filled total then failed with
    # "TypeError: unorderable types: NoneType() >= int()".
    if result.get(TIMESTEPS_THIS_ITER) is not None:
        if self._timesteps_total is None:
            self._timesteps_total = 0
        self._timesteps_total += result[TIMESTEPS_THIS_ITER]
        self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]
    # self._episodes_total should only be tracked if increments provided
    if result.get(EPISODES_THIS_ITER) is not None:
        if self._episodes_total is None:
            self._episodes_total = 0
        self._episodes_total += result[EPISODES_THIS_ITER]
    # self._timesteps_total should not override user-provided total
    result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
    result.setdefault(EPISODES_TOTAL, self._episodes_total)
    # Provides auto-filled neg_mean_loss for avoiding regressions
    # Explicit None check so a mean_loss of exactly 0.0 is still mirrored.
    if result.get("mean_loss") is not None:
        result.setdefault("neg_mean_loss", -result["mean_loss"])
    now = datetime.today()
    result.update(
        experiment_id=self._experiment_id,
        date=now.strftime("%Y-%m-%d_%H-%M-%S"),
        timestamp=int(time.mktime(now.timetuple())),
        training_iteration=self._iteration,
        time_this_iter_s=time_this_iter,
        time_total_s=self._time_total,
        pid=os.getpid(),
        hostname=os.uname()[1],
        node_ip=self._local_ip,
        config=self.config,
        time_since_restore=self._time_since_restore,
        timesteps_since_restore=self._timesteps_since_restore,
        iterations_since_restore=self._iterations_since_restore,
    )
    self._result_logger.on_result(result)
    return result
|
https://github.com/ray-project/ray/issues/3057
|
Traceback (most recent call last):
File "/home/eric/Desktop/ray-private/python/ray/tune/trial_runner.py", line 242, in _process_events
if trial.should_stop(result):
File "/home/eric/Desktop/ray-private/python/ray/tune/trial.py", line 213, in should_stop
if result[criteria] >= stop_value:
TypeError: unorderable types: NoneType() >= int()
Worker ip unknown, skipping log sync for /home/eric/ray_results/test/IMPALA_cartpole_stateless_4_2018-10-14_00-11-54wk3lun7w
== Status ==
Using FIFO scheduling algorithm.
Resources requested: 0/4 CPUs, 0/0 GPUs
Result logdir: /home/eric/ray_results/test
ERROR trials:
- IMPALA_cartpole_stateless_1: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_1_2018-10-14_00-11-08bzsn9bjz/error_2018-10-14_00-11-23.txt
- IMPALA_cartpole_stateless_2: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_2_2018-10-14_00-11-23zv6jbrbr/error_2018-10-14_00-11-38.txt
- IMPALA_cartpole_stateless_3: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_3_2018-10-14_00-11-38p18gjmul/error_2018-10-14_00-11-54.txt
- IMPALA_cartpole_stateless_4: ERROR, 1 failures: /home/eric/ray_results/test/IMPALA_cartpole_stateless_4_2018-10-14_00-11-54wk3lun7w/error_2018-10-14_00-12-09.txt
TERMINATED trials:
- IMPALA_cartpole_stateless_0: TERMINATED [pid=19362], 173 s, 17 iter, 143900 ts, 221 rew
|
TypeError
|
def compute_function_id(function):
    """Compute an function ID for a function.
    Args:
        func: The actual function.
    Returns:
        This returns the function ID.
    """
    hasher = hashlib.sha1()
    # The module and qualified name always contribute to the hash.
    hasher.update(function.__module__.encode("ascii"))
    hasher.update(function.__name__.encode("ascii"))
    try:
        # If we are running a script or are in IPython, include the source code
        # in the hash.
        hasher.update(inspect.getsource(function).encode("ascii"))
    except (IOError, OSError, TypeError):
        # Source code may not be available: e.g. Cython or Python interpreter.
        pass
    # Compute the function ID.
    digest = hasher.digest()
    assert len(digest) == 20
    return ray.ObjectID(digest)
|
def compute_function_id(function):
    """Compute a function ID for a function.
    Args:
        function: The actual function.
    Returns:
        A 20-byte ``ray.ObjectID`` derived from a SHA-1 hash of the
        function's module, name and (when available) source code.
    """
    function_id_hash = hashlib.sha1()
    # Include the function module and name in the hash.
    function_id_hash.update(function.__module__.encode("ascii"))
    function_id_hash.update(function.__name__.encode("ascii"))
    try:
        # If we are running a script or are in IPython, include the source
        # code in the hash.
        source = inspect.getsource(function).encode("ascii")
        function_id_hash.update(source)
    except (IOError, OSError, TypeError):
        # Source code may not be available, e.g. for Cython functions or in
        # a plain interactive interpreter.  The previous LBYL check
        # (``hasattr(main, "__file__")``) still let ``inspect.getsource``
        # raise for such functions and skipped hashing in legitimate cases;
        # catching the exceptions covers both.
        pass
    # Compute the function ID.
    function_id = function_id_hash.digest()
    assert len(function_id) == 20
    return ray.ObjectID(function_id)
|
https://github.com/ray-project/ray/issues/1446
|
$ python train.py --run=ES --env=CartPole-v0 --redis-address=172.31.5.255:6379
/home/ubuntu/anaconda3/lib/python3.6/importlib/_bootstrap.py:205: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
return f(*args, **kwds)
== Status ==
Using FIFO scheduling algorithm.
Result logdir: /home/ubuntu/ray_results/default
- ES_CartPole-v0_0: PENDING
Unified logger created with logdir '/home/ubuntu/ray_results/default/ES_CartPole-v0_0_2018-01-19_01-37-30wdhanz66'
== Status ==
Using FIFO scheduling algorithm.
Resources used: 1/8 CPUs, 0/0 GPUs
Result logdir: /home/ubuntu/ray_results/default
- ES_CartPole-v0_0: RUNNING
Remote function __init__ failed with:
Traceback (most recent call last):
File "/home/ubuntu/ray3/python/ray/worker.py", line 771, in _process_task
*arguments)
File "/home/ubuntu/ray3/python/ray/actor.py", line 196, in actor_method_executor
return method(actor, *args)
File "/home/ubuntu/ray3/python/ray/rllib/agent.py", line 127, in __init__
self._init()
File "/home/ubuntu/ray3/python/ray/rllib/es/es.py", line 157, in _init
noise_id = create_shared_noise.remote()
File "/home/ubuntu/ray3/python/ray/worker.py", line 2509, in func_call
objectids = _submit_task(function_id, args)
File "/home/ubuntu/ray3/python/ray/worker.py", line 2364, in _submit_task
return worker.submit_task(function_id, args)
File "/home/ubuntu/ray3/python/ray/worker.py", line 543, in submit_task
self.task_driver_id.id()][function_id.id()]
KeyError: b'Z`\xd9\xd5?/\x88\x04>\xa4Xph\xb9\xe3\xca\xf4\xa1\x1b\x13'
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="172.31.5.255:6379")
Remote function train failed with:
Traceback (most recent call last):
File "/home/ubuntu/ray3/python/ray/worker.py", line 771, in _process_task
*arguments)
File "/home/ubuntu/ray3/python/ray/actor.py", line 196, in actor_method_executor
return method(actor, *args)
File "/home/ubuntu/ray3/python/ray/rllib/agent.py", line 145, in train
"Agent initialization failed, see previous errors")
ValueError: Agent initialization failed, see previous errors
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="172.31.5.255:6379")
Error processing event: Traceback (most recent call last):
File "/home/ubuntu/ray3/python/ray/tune/trial_runner.py", line 162, in _process_events
result = ray.get(result_id)
File "/home/ubuntu/ray3/python/ray/worker.py", line 2240, in get
raise RayGetError(object_ids, value)
ray.worker.RayGetError: Could not get objectid ObjectID(a87f1adc2ec2e19f0199e246b9f733c6ea16750c). It was created by remote function train which failed with:
Remote function train failed with:
Traceback (most recent call last):
File "/home/ubuntu/ray3/python/ray/worker.py", line 771, in _process_task
*arguments)
File "/home/ubuntu/ray3/python/ray/actor.py", line 196, in actor_method_executor
return method(actor, *args)
File "/home/ubuntu/ray3/python/ray/rllib/agent.py", line 145, in train
"Agent initialization failed, see previous errors")
ValueError: Agent initialization failed, see previous errors
Stopping ES_CartPole-v0_0 Actor timed out, but moving on...
== Status ==
Using FIFO scheduling algorithm.
Resources used: 0/8 CPUs, 0/0 GPUs
Result logdir: /home/ubuntu/ray_results/default
- ES_CartPole-v0_0: ERROR
Traceback (most recent call last):
File "train.py", line 82, in <module>
num_cpus=args.num_cpus, num_gpus=args.num_gpus)
File "/home/ubuntu/ray3/python/ray/tune/tune.py", line 82, in run_experiments
raise TuneError("Trial did not complete", trial)
ray.tune.error.TuneError: ('Trial did not complete', <ray.tune.trial.Trial object at 0x7f30baab6c18>)
|
KeyError
|
def _serialization_helper(self, ray_forking):
"""This is defined in order to make pickling work.
Args:
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
Returns:
A dictionary of the information needed to reconstruct the object.
"""
state = {
"actor_id": self._ray_actor_id.id(),
"class_name": self._ray_class_name,
"actor_forks": self._ray_actor_forks,
"actor_cursor": self._ray_actor_cursor.id()
if self._ray_actor_cursor is not None
else None,
"actor_counter": 0, # Reset the actor counter.
"actor_method_names": self._ray_actor_method_names,
"method_signatures": self._ray_method_signatures,
"method_num_return_vals": self._ray_method_num_return_vals,
"actor_creation_dummy_object_id": self._ray_actor_creation_dummy_object_id.id()
if self._ray_actor_creation_dummy_object_id is not None
else None,
"actor_method_cpus": self._ray_actor_method_cpus,
"actor_driver_id": self._ray_actor_driver_id.id(),
"previous_actor_handle_id": self._ray_actor_handle_id.id()
if self._ray_actor_handle_id
else None,
"ray_forking": ray_forking,
}
if ray_forking:
self._ray_actor_forks += 1
return state
|
def _serialization_helper(self, ray_forking):
"""This is defined in order to make pickling work.
Args:
ray_forking: True if this is being called because Ray is forking
the actor handle and false if it is being called by pickling.
Returns:
A dictionary of the information needed to reconstruct the object.
"""
state = {
"actor_id": self._ray_actor_id.id(),
"class_name": self._ray_class_name,
"actor_forks": self._ray_actor_forks,
"actor_cursor": self._ray_actor_cursor.id(),
"actor_counter": 0, # Reset the actor counter.
"actor_method_names": self._ray_actor_method_names,
"method_signatures": self._ray_method_signatures,
"method_num_return_vals": self._ray_method_num_return_vals,
"actor_creation_dummy_object_id": self._ray_actor_creation_dummy_object_id.id(),
"actor_method_cpus": self._ray_actor_method_cpus,
"actor_driver_id": self._ray_actor_driver_id.id(),
"previous_actor_handle_id": self._ray_actor_handle_id.id()
if self._ray_actor_handle_id
else None,
"ray_forking": ray_forking,
}
if ray_forking:
self._ray_actor_forks += 1
return state
|
https://github.com/ray-project/ray/issues/2253
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-8b7c665c4619> in <module>()
16
17 a1 = Actor1.remote()
---> 18 a2 = Actor2.remote(a1)
19 ray.get(a2.method.remote())
~/Workspace/ray/python/ray/actor.py in remote(self, *args, **kwargs)
556 A handle to the newly created actor.
557 """
--> 558 return self._submit(args=args, kwargs=kwargs)
559
560 def _submit(self,
~/Workspace/ray/python/ray/actor.py in _submit(self, args, kwargs, num_cpus, num_gpus, resources)
632 # Call __init__ as a remote function.
633 if "__init__" in actor_handle._ray_actor_method_names:
--> 634 actor_handle.__init__.remote(*args, **kwargs)
635 else:
636 if len(args) != 0 or len(kwargs) != 0:
~/Workspace/ray/python/ray/actor.py in remote(self, *args, **kwargs)
454
455 def remote(self, *args, **kwargs):
--> 456 return self._submit(args, kwargs)
457
458 def _submit(self, args, kwargs, num_return_vals=None):
~/Workspace/ray/python/ray/actor.py in _submit(self, args, kwargs, num_return_vals)
465 kwargs=kwargs,
466 num_return_vals=num_return_vals,
--> 467 dependency=self._actor._ray_actor_cursor)
468
469
~/Workspace/ray/python/ray/actor.py in _actor_method_call(self, method_name, args, kwargs, num_return_vals, dependency)
774 if worker.mode == ray.PYTHON_MODE:
775 return getattr(worker.actors[self._ray_actor_id],
--> 776 method_name)(*copy.deepcopy(args))
777
778 # Add the execution dependency.
/anaconda3/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
/anaconda3/lib/python3.6/copy.py in _deepcopy_list(x, memo, deepcopy)
213 append = y.append
214 for a in x:
--> 215 append(deepcopy(a, memo))
216 return y
217 d[list] = _deepcopy_list
/anaconda3/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
167 reductor = getattr(x, "__reduce_ex__", None)
168 if reductor:
--> 169 rv = reductor(4)
170 else:
171 reductor = getattr(x, "__reduce__", None)
~/Workspace/ray/python/ray/actor.py in __getstate__(self)
944 def __getstate__(self):
945 """This code path is used by pickling but not by Ray forking."""
--> 946 return self._serialization_helper(False)
947
948 def __setstate__(self, state):
~/Workspace/ray/python/ray/actor.py in _serialization_helper(self, ray_forking)
885 "class_name": self._ray_class_name,
886 "actor_forks": self._ray_actor_forks,
--> 887 "actor_cursor": self._ray_actor_cursor.id(),
888 "actor_counter": 0, # Reset the actor counter.
889 "actor_method_names": self._ray_actor_method_names,
AttributeError: 'NoneType' object has no attribute 'id'
|
AttributeError
|
def _deserialization_helper(self, state, ray_forking):
    """Rebuild the actor handle from a serialized *state* dictionary.
    Args:
        state: The serialized state of the actor handle.
        ray_forking: True if this is being called because Ray is forking
            the actor handle and false if it is being called by pickling.
    """
    worker = ray.worker.get_global_worker()
    worker.check_connected()
    ray.worker.check_main_thread()

    def _opt_object_id(raw):
        # Cursor/dummy fields may have been serialized as None.
        return ray.ObjectID(raw) if raw is not None else None

    if state["ray_forking"]:
        handle_id = compute_actor_handle_id(
            ray.ObjectID(state["previous_actor_handle_id"]),
            state["actor_forks"])
    else:
        handle_id = None
    # This is the driver ID of the driver that owns the actor, not
    # necessarily the driver that owns this actor handle.
    owner_driver_id = ray.ObjectID(state["actor_driver_id"])
    self.__init__(
        ray.ObjectID(state["actor_id"]),
        state["class_name"],
        _opt_object_id(state["actor_cursor"]),
        state["actor_counter"],
        state["actor_method_names"],
        state["method_signatures"],
        state["method_num_return_vals"],
        _opt_object_id(state["actor_creation_dummy_object_id"]),
        state["actor_method_cpus"],
        owner_driver_id,
        actor_handle_id=handle_id,
        previous_actor_handle_id=ray.ObjectID(
            state["previous_actor_handle_id"]),
    )
|
def _deserialization_helper(self, state, ray_forking):
    """This is defined in order to make pickling work.
    Args:
        state: The serialized state of the actor handle.
        ray_forking: True if this is being called because Ray is forking
            the actor handle and false if it is being called by pickling.
    """
    worker = ray.worker.get_global_worker()
    worker.check_connected()
    ray.worker.check_main_thread()
    if state["ray_forking"]:
        actor_handle_id = compute_actor_handle_id(
            ray.ObjectID(state["previous_actor_handle_id"]), state["actor_forks"]
        )
    else:
        actor_handle_id = None
    # This is the driver ID of the driver that owns the actor, not
    # necessarily the driver that owns this actor handle.
    actor_driver_id = ray.ObjectID(state["actor_driver_id"])
    self.__init__(
        ray.ObjectID(state["actor_id"]),
        state["class_name"],
        # The cursor and the creation dummy object ID are serialized as
        # None for a handle whose actor has not been used yet; mirror that
        # here instead of calling ray.ObjectID(None).
        ray.ObjectID(state["actor_cursor"])
        if state["actor_cursor"] is not None
        else None,
        state["actor_counter"],
        state["actor_method_names"],
        state["method_signatures"],
        state["method_num_return_vals"],
        ray.ObjectID(state["actor_creation_dummy_object_id"])
        if state["actor_creation_dummy_object_id"] is not None
        else None,
        state["actor_method_cpus"],
        actor_driver_id,
        actor_handle_id=actor_handle_id,
        previous_actor_handle_id=ray.ObjectID(state["previous_actor_handle_id"]),
    )
|
https://github.com/ray-project/ray/issues/2253
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-8b7c665c4619> in <module>()
16
17 a1 = Actor1.remote()
---> 18 a2 = Actor2.remote(a1)
19 ray.get(a2.method.remote())
~/Workspace/ray/python/ray/actor.py in remote(self, *args, **kwargs)
556 A handle to the newly created actor.
557 """
--> 558 return self._submit(args=args, kwargs=kwargs)
559
560 def _submit(self,
~/Workspace/ray/python/ray/actor.py in _submit(self, args, kwargs, num_cpus, num_gpus, resources)
632 # Call __init__ as a remote function.
633 if "__init__" in actor_handle._ray_actor_method_names:
--> 634 actor_handle.__init__.remote(*args, **kwargs)
635 else:
636 if len(args) != 0 or len(kwargs) != 0:
~/Workspace/ray/python/ray/actor.py in remote(self, *args, **kwargs)
454
455 def remote(self, *args, **kwargs):
--> 456 return self._submit(args, kwargs)
457
458 def _submit(self, args, kwargs, num_return_vals=None):
~/Workspace/ray/python/ray/actor.py in _submit(self, args, kwargs, num_return_vals)
465 kwargs=kwargs,
466 num_return_vals=num_return_vals,
--> 467 dependency=self._actor._ray_actor_cursor)
468
469
~/Workspace/ray/python/ray/actor.py in _actor_method_call(self, method_name, args, kwargs, num_return_vals, dependency)
774 if worker.mode == ray.PYTHON_MODE:
775 return getattr(worker.actors[self._ray_actor_id],
--> 776 method_name)(*copy.deepcopy(args))
777
778 # Add the execution dependency.
/anaconda3/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
148 copier = _deepcopy_dispatch.get(cls)
149 if copier:
--> 150 y = copier(x, memo)
151 else:
152 try:
/anaconda3/lib/python3.6/copy.py in _deepcopy_list(x, memo, deepcopy)
213 append = y.append
214 for a in x:
--> 215 append(deepcopy(a, memo))
216 return y
217 d[list] = _deepcopy_list
/anaconda3/lib/python3.6/copy.py in deepcopy(x, memo, _nil)
167 reductor = getattr(x, "__reduce_ex__", None)
168 if reductor:
--> 169 rv = reductor(4)
170 else:
171 reductor = getattr(x, "__reduce__", None)
~/Workspace/ray/python/ray/actor.py in __getstate__(self)
944 def __getstate__(self):
945 """This code path is used by pickling but not by Ray forking."""
--> 946 return self._serialization_helper(False)
947
948 def __setstate__(self, state):
~/Workspace/ray/python/ray/actor.py in _serialization_helper(self, ray_forking)
885 "class_name": self._ray_class_name,
886 "actor_forks": self._ray_actor_forks,
--> 887 "actor_cursor": self._ray_actor_cursor.id(),
888 "actor_counter": 0, # Reset the actor counter.
889 "actor_method_names": self._ray_actor_method_names,
AttributeError: 'NoneType' object has no attribute 'id'
|
AttributeError
|
def __delitem__(self, key):
    """Delete a column by key (``del df[key]``), in place.
    The deletion is applied to every block partition that stores a piece
    of the matching column(s), and the column metadata is then repaired;
    no full rebuild of the partition grid is performed.
    Args:
        key: Column label to delete.  Duplicate labels delete every
            matching column.
    """
    # Create helper method for deleting column(s) in row partition.
    def del_helper(df, to_delete):
        cols = df.columns[to_delete]  # either int or an array of ints
        if not is_list_like(cols):
            cols = [cols]
        for col in cols:
            df.__delitem__(col)
        # Reset the column index to conserve space
        df.columns = pd.RangeIndex(0, len(df.columns))
        return df
    # This structure is used to get the correct index inside the partition.
    del_df = self._col_metadata[key]
    # We need to standardize between multiple and single occurrences in the
    # columns. Putting single occurrences in a pd.DataFrame and transposing
    # results in the same structure as multiple with 'loc'.
    if isinstance(del_df, pd.Series):
        del_df = pd.DataFrame(del_df).T
    # Cast cols as pd.Series as duplicate columns mean result may be
    # np.int64 or pd.Series.  Copy before the metadata rows are dropped
    # below so the partition numbers stay valid.
    col_parts_to_del = pd.Series(del_df["partition"].copy()).unique()
    self._col_metadata.drop(key)
    for i in col_parts_to_del:
        # Compute the correct index inside the partition to delete.
        to_delete_in_partition = del_df[del_df["partition"] == i][
            "index_within_partition"
        ]
        # Apply the deletion to every row-block of this column partition so
        # the whole (row x column) block grid stays consistent.
        for j in range(self._block_partitions.shape[0]):
            self._block_partitions[j, i] = _deploy_func.remote(
                del_helper, self._block_partitions[j, i], to_delete_in_partition
            )
    # Partition-local positions shifted left; renumber the metadata.
    self._col_metadata.reset_partition_coords(col_parts_to_del)
|
def __delitem__(self, key):
    """Delete a column by key (``del df[key]``), in place.
    Args:
        key: Column label to delete.  Duplicate labels delete every
            matching column.
    """
    # Create helper method for deleting column(s) in row partition.
    def del_helper(df, to_delete):
        cols = df.columns[to_delete]  # either int or an array of ints
        if not is_list_like(cols):
            cols = [cols]
        for col in cols:
            df.__delitem__(col)
        # Reset the column index to conserve space
        df.columns = pd.RangeIndex(0, len(df.columns))
        return df
    # This structure is used to get the correct index inside the partition.
    del_df = self._col_metadata[key]
    # We need to standardize between multiple and single occurrences in the
    # columns. Putting single occurrences in a pd.DataFrame and transposing
    # results in the same structure as multiple with 'loc'.
    if isinstance(del_df, pd.Series):
        del_df = pd.DataFrame(del_df).T
    # Cast cols as pd.Series as duplicate columns mean result may be
    # np.int64 or pd.Series.  Copy before the metadata rows are dropped
    # below so the partition numbers stay valid.
    col_parts_to_del = pd.Series(del_df["partition"].copy()).unique()
    self._col_metadata.drop(key)
    for i in col_parts_to_del:
        # Compute the correct index inside the partition to delete.
        to_delete_in_partition = del_df[del_df["partition"] == i][
            "index_within_partition"
        ]
        # Delete from every row-block of the affected column partition.
        # Previously the columns were ALSO deleted from the row partitions
        # via _map_partitions, so the same column was removed twice and the
        # second pass indexed past the now-shorter column axis, raising
        # "IndexError: index 0 is out of bounds for axis 1 with size 0".
        for j in range(self._block_partitions.shape[0]):
            self._block_partitions[j, i] = _deploy_func.remote(
                del_helper, self._block_partitions[j, i], to_delete_in_partition
            )
    self._col_metadata.reset_partition_coords(col_parts_to_del)
|
https://github.com/ray-project/ray/issues/2027
|
Remote function ray.dataframe.utils._deploy_func failed with:
Traceback (most recent call last):
File "/home/peter/workspace/ray/python/ray/dataframe/utils.py", line 132, in _deploy_func
return func(dataframe, *args)
File "/home/peter/workspace/ray/python/ray/dataframe/dataframe.py", line 4728, in del_helper
cols = df.columns[to_delete] # either int or an array of ints
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/core/indexes/range.py", line 544, in __getitem__
return super_getitem(key)
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 1754, in __getitem__
result = getitem(key)
IndexError: index 0 is out of bounds for axis 1 with size 0
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="192.168.0.114:49682")
|
IndexError
|
def reset_partition_coords(self, partitions=None):
    """Renumber ``index_within_partition`` for each listed partition.
    After columns are removed inside a partition, the partition-local
    positions are compacted back to ``0..n-1``.
    Args:
        partitions: Iterable of partition numbers to renumber.
    """
    for part in np.array(partitions):
        mask = self._coord_df["partition"] == part
        fresh_coords = np.arange(sum(mask)).astype(int)
        # Since we are replacing columns with RangeIndex inside the
        # partition, our reference to it must be updated as well.
        try:
            self._coord_df.loc[mask, "index_within_partition"] = fresh_coords
        except ValueError:
            # The backing frame may be an immutable (arrow-sealed) one; copy
            # it once so it becomes writable, then retry the assignment.
            self._coord_df = self._coord_df.copy()
            self._coord_df.loc[mask, "index_within_partition"] = fresh_coords
|
def reset_partition_coords(self, partitions=None):
    """Renumber ``index_within_partition`` to ``0..n-1`` for each listed
    partition (used after columns are removed inside a partition).
    Args:
        partitions: Iterable of partition numbers to renumber.
    """
    partitions = np.array(partitions)
    for partition in partitions:
        partition_mask = self._coord_df["partition"] == partition
        # Since we are replacing columns with RangeIndex inside the
        # partition, we have to make sure that our reference to it is
        # updated as well.  Use a vectorized np.arange(...).astype(int)
        # instead of building a temporary Python list for the replacement
        # values: it is faster and keeps the column's integer dtype.
        try:
            self._coord_df.loc[partition_mask, "index_within_partition"] = np.arange(
                sum(partition_mask)
            ).astype(int)
        except ValueError:
            # Copy the arrow sealed dataframe so we can mutate it.
            # We only do this the first time we try to mutate the sealed.
            self._coord_df = self._coord_df.copy()
            self._coord_df.loc[partition_mask, "index_within_partition"] = np.arange(
                sum(partition_mask)
            ).astype(int)
|
https://github.com/ray-project/ray/issues/2027
|
Remote function ray.dataframe.utils._deploy_func failed with:
Traceback (most recent call last):
File "/home/peter/workspace/ray/python/ray/dataframe/utils.py", line 132, in _deploy_func
return func(dataframe, *args)
File "/home/peter/workspace/ray/python/ray/dataframe/dataframe.py", line 4728, in del_helper
cols = df.columns[to_delete] # either int or an array of ints
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/core/indexes/range.py", line 544, in __getitem__
return super_getitem(key)
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 1754, in __getitem__
result = getitem(key)
IndexError: index 0 is out of bounds for axis 1 with size 0
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="192.168.0.114:49682")
|
IndexError
|
def drop(self, labels, errors="raise"):
    """Drop the specified labels from the IndexMetadata.
    Args:
        labels (scalar or list-like): The labels to drop.
        errors ('raise' or 'ignore'): If 'ignore', suppress errors for
            when labels don't exist.
    Returns:
        DataFrame with coordinates of dropped labels
    """
    dropped = self.coords_of(labels)

    def _per_partition_counts(frame):
        # Number of dropped rows per partition, with zeros for untouched
        # partitions so the subtraction below lines up with _lengths.
        return (frame.groupby(["partition"])
                     .size()
                     .reindex(index=pd.RangeIndex(len(self._lengths)),
                              fill_value=0))

    # Update lengths first to prevent possible length inconsistencies.
    if isinstance(dropped, pd.DataFrame):
        try:
            drop_per_part = _per_partition_counts(dropped)
        except ValueError:
            # Copy the arrow-sealed frame so it becomes mutable, then retry.
            dropped = dropped.copy()
            drop_per_part = _per_partition_counts(dropped)
    elif isinstance(dropped, pd.Series):
        drop_per_part = np.zeros_like(self._lengths)
        drop_per_part[dropped["partition"]] = 1
    else:
        raise AssertionError("Unrecognized result from `coords_of`")
    self._lengths = self._lengths - drop_per_part
    self._coord_df = self._coord_df.drop(labels, errors=errors)
    return dropped
|
def drop(self, labels, errors="raise"):
    """Drop the specified labels from the IndexMetadata
    Args:
        labels (scalar or list-like):
            The labels to drop
        errors ('raise' or 'ignore'):
            If 'ignore', suppress errors for when labels don't exist
    Returns:
        DataFrame with coordinates of dropped labels
    """
    dropped = self.coords_of(labels)
    # Update first lengths to prevent possible length inconsistencies
    if isinstance(dropped, pd.DataFrame):
        try:
            drop_per_part = (
                dropped.groupby(["partition"])
                .size()
                .reindex(index=pd.RangeIndex(len(self._lengths)), fill_value=0)
            )
        except ValueError:
            # The coordinate frame can be backed by an immutable
            # (arrow-sealed) buffer, which makes the groupby raise
            # ValueError.  Copy it so we operate on a mutable frame, then
            # retry; the copy is only paid on that first failure.
            dropped = dropped.copy()
            drop_per_part = (
                dropped.groupby(["partition"])
                .size()
                .reindex(index=pd.RangeIndex(len(self._lengths)), fill_value=0)
            )
    elif isinstance(dropped, pd.Series):
        drop_per_part = np.zeros_like(self._lengths)
        drop_per_part[dropped["partition"]] = 1
    else:
        raise AssertionError("Unrecognized result from `coords_of`")
    self._lengths = self._lengths - drop_per_part
    self._coord_df = self._coord_df.drop(labels, errors=errors)
    return dropped
|
https://github.com/ray-project/ray/issues/2027
|
Remote function ray.dataframe.utils._deploy_func failed with:
Traceback (most recent call last):
File "/home/peter/workspace/ray/python/ray/dataframe/utils.py", line 132, in _deploy_func
return func(dataframe, *args)
File "/home/peter/workspace/ray/python/ray/dataframe/dataframe.py", line 4728, in del_helper
cols = df.columns[to_delete] # either int or an array of ints
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/core/indexes/range.py", line 544, in __getitem__
return super_getitem(key)
File "/home/peter/workspace/ray_env/lib/python3.6/site-packages/pandas/core/indexes/base.py", line 1754, in __getitem__
result = getitem(key)
IndexError: index 0 is out of bounds for axis 1 with size 0
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="192.168.0.114:49682")
|
IndexError
|
def dump_catapult_trace(
    self, path, task_info, breakdowns=True, task_dep=True, obj_dep=True
):
    """Dump task profiling information to a file.
    This information can be viewed as a timeline of profiling information
    by going to chrome://tracing in the chrome web browser and loading the
    appropriate file.
    Args:
        path: The filepath to dump the profiling information to.
        task_info: The task info to use to generate the trace. Should be
            the output of ray.global_state.task_profiles().
        breakdowns: Boolean indicating whether to break down the tasks into
            more fine-grained segments.
        task_dep: Boolean indicating whether or not task submission edges
            should be included in the trace.
        obj_dep: Boolean indicating whether or not object dependency edges
            should be included in the trace.
    """
    workers = self.workers()
    task_table = {}
    # TODO(ekl) reduce the number of RPCs here with MGET
    for task_id, _ in task_info.items():
        try:
            # TODO (hme): do something to correct slider here,
            # slider should be correct to begin with, though.
            task_table[task_id] = self.task_table(task_id)
            # repr() the raw arguments up front so the trace stays
            # JSON-serializable when json.dump is called at the end.
            task_table[task_id]["TaskSpec"]["Args"] = [
                repr(arg) for arg in task_table[task_id]["TaskSpec"]["Args"]
            ]
        except Exception as e:
            # Best-effort: tasks no longer retrievable are left out.
            print("Could not find task {}".format(task_id))
    # filter out tasks not in task_table
    task_info = {k: v for k, v in task_info.items() if k in task_table}
    # The earliest event timestamp becomes time zero of the timeline.
    start_time = None
    for info in task_info.values():
        task_start = min(self._get_times(info))
        if not start_time or task_start < start_time:
            start_time = task_start
    def micros(ts):
        # Seconds -> integer microseconds (catapult's time unit).
        return int(1e6 * ts)
    def micros_rel(ts):
        # Microseconds relative to the earliest event in the trace.
        return micros(ts - start_time)
    seen_obj = {}
    full_trace = []
    for task_id, info in task_info.items():
        worker = workers[info["worker_id"]]
        task_t_info = task_table[task_id]
        # The total_info dictionary is what is displayed when selecting a
        # task in the timeline. We copy the task spec so that we don't
        # modify it in place since we will use the original values later.
        total_info = copy.copy(task_table[task_id]["TaskSpec"])
        # NOTE(review): the Args were repr()-stringified above, so this
        # isinstance check may never match here -- verify with real input.
        total_info["Args"] = [
            oid.hex() if isinstance(oid, ray.local_scheduler.ObjectID) else oid
            for oid in task_t_info["TaskSpec"]["Args"]
        ]
        total_info["ReturnObjectIDs"] = [
            oid.hex() for oid in task_t_info["TaskSpec"]["ReturnObjectIDs"]
        ]
        total_info["LocalSchedulerID"] = task_t_info["LocalSchedulerID"]
        total_info["get_arguments"] = (
            info["get_arguments_end"] - info["get_arguments_start"]
        )
        total_info["execute"] = info["execute_end"] - info["execute_start"]
        total_info["store_outputs"] = (
            info["store_outputs_end"] - info["store_outputs_start"]
        )
        total_info["function_name"] = info["function_name"]
        total_info["worker_id"] = info["worker_id"]
        parent_info = task_info.get(task_table[task_id]["TaskSpec"]["ParentTaskID"])
        worker = workers[info["worker_id"]]
        # The catapult trace format documentation can be found here:
        # https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview # noqa: E501
        if breakdowns:
            # One "X" (complete) event per phase of the task.
            if "get_arguments_end" in info:
                get_args_trace = {
                    "cat": "get_arguments",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":get_arguments",
                    "args": total_info,
                    "dur": micros(
                        info["get_arguments_end"] - info["get_arguments_start"]
                    ),
                    "cname": "rail_idle",
                }
                full_trace.append(get_args_trace)
            if "store_outputs_end" in info:
                outputs_trace = {
                    "cat": "store_outputs",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["store_outputs_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":store_outputs",
                    "args": total_info,
                    "dur": micros(
                        info["store_outputs_end"] - info["store_outputs_start"]
                    ),
                    "cname": "thread_state_runnable",
                }
                full_trace.append(outputs_trace)
            if "execute_end" in info:
                execute_trace = {
                    "cat": "execute",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["execute_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":execute",
                    "args": total_info,
                    "dur": micros(info["execute_end"] - info["execute_start"]),
                    "cname": "rail_animation",
                }
                full_trace.append(execute_trace)
        else:
            # One bar per task, plus "s"/"f" flow edges from the parent
            # task's submission when the parent is present in the trace.
            if parent_info:
                parent_worker = workers[parent_info["worker_id"]]
                parent_times = self._get_times(parent_info)
                parent_profile = task_info.get(
                    task_table[task_id]["TaskSpec"]["ParentTaskID"]
                )
                parent = {
                    "cat": "submit_task",
                    "pid": "Node " + parent_worker["node_ip_address"],
                    "tid": parent_info["worker_id"],
                    "ts": micros_rel(
                        parent_profile
                        and parent_profile["get_arguments_start"]
                        or start_time
                    ),
                    "ph": "s",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
                }
                full_trace.append(parent)
                task_trace = {
                    "cat": "submit_task",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "f",
                    "name": "SubmitTask",
                    "args": {},
                    # NOTE(review): flow events are paired by "id"; confirm
                    # this id (built from info["worker_id"]) matches the
                    # "s" event's id built from the parent's worker_id.
                    "id": (info["worker_id"] + str(micros(min(parent_times)))),
                    "bp": "e",
                    "cname": "olive",
                }
                full_trace.append(task_trace)
            task = {
                "cat": "task",
                "pid": "Node " + worker["node_ip_address"],
                "tid": info["worker_id"],
                "id": task_id,
                "ts": micros_rel(info["get_arguments_start"]),
                "ph": "X",
                "name": info["function_name"],
                "args": total_info,
                "dur": micros(info["store_outputs_end"] - info["get_arguments_start"]),
                "cname": "thread_state_runnable",
            }
            full_trace.append(task)
        if task_dep:
            # Submission edges, independent of the breakdowns setting.
            if parent_info:
                parent_worker = workers[parent_info["worker_id"]]
                parent_times = self._get_times(parent_info)
                parent_profile = task_info.get(
                    task_table[task_id]["TaskSpec"]["ParentTaskID"]
                )
                parent = {
                    "cat": "submit_task",
                    "pid": "Node " + parent_worker["node_ip_address"],
                    "tid": parent_info["worker_id"],
                    "ts": micros_rel(
                        parent_profile
                        and parent_profile["get_arguments_start"]
                        or start_time
                    ),
                    "ph": "s",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
                }
                full_trace.append(parent)
                task_trace = {
                    "cat": "submit_task",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "f",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (info["worker_id"] + str(micros(min(parent_times)))),
                    "bp": "e",
                }
                full_trace.append(task_trace)
        if obj_dep:
            args = task_table[task_id]["TaskSpec"]["Args"]
            for arg in args:
                # Don't visualize arguments that are not object IDs.
                if isinstance(arg, ray.local_scheduler.ObjectID):
                    object_info = self._object_table(arg)
                    # Don't visualize objects that were created by calls to
                    # put.
                    if not object_info["IsPut"]:
                        if arg not in seen_obj:
                            seen_obj[arg] = 0
                        seen_obj[arg] += 1
                        owner_task = self._object_table(arg)["TaskID"]
                        if owner_task in task_info:
                            owner_worker = workers[task_info[owner_task]["worker_id"]]
                            # Adding/subtracting 2 to the time associated
                            # with the beginning/ending of the flow event
                            # is necessary to make the flow events show up
                            # reliably. When these times are exact, this is
                            # presumably an edge case, and catapult doesn't
                            # recognize that there is a duration event at
                            # that exact point in time that the flow event
                            # should be bound to. This issue is solved by
                            # adding the 2 ms to the start/end time of the
                            # flow event, which guarantees overlap with the
                            # duration event that it's associated with, and
                            # the flow event therefore always gets drawn.
                            owner = {
                                "cat": "obj_dependency",
                                "pid": ("Node " + owner_worker["node_ip_address"]),
                                "tid": task_info[owner_task]["worker_id"],
                                "ts": micros_rel(
                                    task_info[owner_task]["store_outputs_end"]
                                )
                                - 2,
                                "ph": "s",
                                "name": "ObjectDependency",
                                "args": {},
                                "bp": "e",
                                "cname": "cq_build_attempt_failed",
                                "id": "obj" + str(arg) + str(seen_obj[arg]),
                            }
                            full_trace.append(owner)
                            dependent = {
                                "cat": "obj_dependency",
                                "pid": "Node " + worker["node_ip_address"],
                                "tid": info["worker_id"],
                                "ts": micros_rel(info["get_arguments_start"]) + 2,
                                "ph": "f",
                                "name": "ObjectDependency",
                                "args": {},
                                "cname": "cq_build_attempt_failed",
                                "bp": "e",
                                "id": "obj" + str(arg) + str(seen_obj[arg]),
                            }
                            full_trace.append(dependent)
    print("Creating JSON {}/{}".format(len(full_trace), len(task_info)))
    with open(path, "w") as outfile:
        json.dump(full_trace, outfile)
|
def dump_catapult_trace(
self, path, task_info, breakdowns=True, task_dep=True, obj_dep=True
):
"""Dump task profiling information to a file.
This information can be viewed as a timeline of profiling information
by going to chrome://tracing in the chrome web browser and loading the
appropriate file.
Args:
path: The filepath to dump the profiling information to.
task_info: The task info to use to generate the trace. Should be
the output of ray.global_state.task_profiles().
breakdowns: Boolean indicating whether to break down the tasks into
more fine-grained segments.
task_dep: Boolean indicating whether or not task submission edges
should be included in the trace.
obj_dep: Boolean indicating whether or not object dependency edges
should be included in the trace.
"""
workers = self.workers()
task_table = {}
# TODO(ekl) reduce the number of RPCs here with MGET
for task_id, _ in task_info.items():
try:
# TODO (hme): do something to correct slider here,
# slider should be correct to begin with, though.
task_table[task_id] = self.task_table(task_id)
except Exception as e:
# NOTE(review): 'e' is unused; a failed lookup is only printed and the
# task is silently dropped from the trace by the filter below.
print("Could not find task {}".format(task_id))
# filter out tasks not in task_table
task_info = {k: v for k, v in task_info.items() if k in task_table}
# Establish the earliest timestamp so all event times can be made
# relative to the start of the trace.
start_time = None
for info in task_info.values():
task_start = min(self._get_times(info))
if not start_time or task_start < start_time:
start_time = task_start
# Convert a timestamp in seconds to integer microseconds (catapult units).
def micros(ts):
return int(1e6 * ts)
# Microseconds relative to the trace start.
def micros_rel(ts):
return micros(ts - start_time)
seen_obj = {}
full_trace = []
for task_id, info in task_info.items():
worker = workers[info["worker_id"]]
task_t_info = task_table[task_id]
# The total_info dictionary is what is displayed when selecting a
# task in the timeline. We copy the task spec so that we don't
# modify it in place since we will use the original values later.
total_info = copy.copy(task_table[task_id]["TaskSpec"])
total_info["Args"] = [
oid.hex() if isinstance(oid, ray.local_scheduler.ObjectID) else oid
for oid in task_t_info["TaskSpec"]["Args"]
]
total_info["ReturnObjectIDs"] = [
oid.hex() for oid in task_t_info["TaskSpec"]["ReturnObjectIDs"]
]
total_info["LocalSchedulerID"] = task_t_info["LocalSchedulerID"]
total_info["get_arguments"] = (
info["get_arguments_end"] - info["get_arguments_start"]
)
total_info["execute"] = info["execute_end"] - info["execute_start"]
total_info["store_outputs"] = (
info["store_outputs_end"] - info["store_outputs_start"]
)
total_info["function_name"] = info["function_name"]
total_info["worker_id"] = info["worker_id"]
parent_info = task_info.get(task_table[task_id]["TaskSpec"]["ParentTaskID"])
worker = workers[info["worker_id"]]
# The catapult trace format documentation can be found here:
# https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview # noqa: E501
# Emit one complete ("X") event per profiling phase of the task.
if breakdowns:
if "get_arguments_end" in info:
get_args_trace = {
"cat": "get_arguments",
"pid": "Node " + worker["node_ip_address"],
"tid": info["worker_id"],
"id": task_id,
"ts": micros_rel(info["get_arguments_start"]),
"ph": "X",
"name": info["function_name"] + ":get_arguments",
"args": total_info,
"dur": micros(
info["get_arguments_end"] - info["get_arguments_start"]
),
"cname": "rail_idle",
}
full_trace.append(get_args_trace)
if "store_outputs_end" in info:
outputs_trace = {
"cat": "store_outputs",
"pid": "Node " + worker["node_ip_address"],
"tid": info["worker_id"],
"id": task_id,
"ts": micros_rel(info["store_outputs_start"]),
"ph": "X",
"name": info["function_name"] + ":store_outputs",
"args": total_info,
"dur": micros(
info["store_outputs_end"] - info["store_outputs_start"]
),
"cname": "thread_state_runnable",
}
full_trace.append(outputs_trace)
if "execute_end" in info:
execute_trace = {
"cat": "execute",
"pid": "Node " + worker["node_ip_address"],
"tid": info["worker_id"],
"id": task_id,
"ts": micros_rel(info["execute_start"]),
"ph": "X",
"name": info["function_name"] + ":execute",
"args": total_info,
"dur": micros(info["execute_end"] - info["execute_start"]),
"cname": "rail_animation",
}
full_trace.append(execute_trace)
else:
# No breakdowns: emit a single flow edge from the parent task's
# submission to this task, plus one event for the whole task.
if parent_info:
parent_worker = workers[parent_info["worker_id"]]
parent_times = self._get_times(parent_info)
parent_profile = task_info.get(
task_table[task_id]["TaskSpec"]["ParentTaskID"]
)
parent = {
"cat": "submit_task",
"pid": "Node " + parent_worker["node_ip_address"],
"tid": parent_info["worker_id"],
"ts": micros_rel(
parent_profile
and parent_profile["get_arguments_start"]
or start_time
),
"ph": "s",
"name": "SubmitTask",
"args": {},
"id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
}
full_trace.append(parent)
task_trace = {
"cat": "submit_task",
"pid": "Node " + worker["node_ip_address"],
"tid": info["worker_id"],
"ts": micros_rel(info["get_arguments_start"]),
"ph": "f",
"name": "SubmitTask",
"args": {},
"id": (info["worker_id"] + str(micros(min(parent_times)))),
"bp": "e",
"cname": "olive",
}
full_trace.append(task_trace)
task = {
"cat": "task",
"pid": "Node " + worker["node_ip_address"],
"tid": info["worker_id"],
"id": task_id,
"ts": micros_rel(info["get_arguments_start"]),
"ph": "X",
"name": info["function_name"],
"args": total_info,
"dur": micros(info["store_outputs_end"] - info["get_arguments_start"]),
"cname": "thread_state_runnable",
}
full_trace.append(task)
# Optionally draw submit edges (parent task -> child task).
if task_dep:
if parent_info:
parent_worker = workers[parent_info["worker_id"]]
parent_times = self._get_times(parent_info)
parent_profile = task_info.get(
task_table[task_id]["TaskSpec"]["ParentTaskID"]
)
parent = {
"cat": "submit_task",
"pid": "Node " + parent_worker["node_ip_address"],
"tid": parent_info["worker_id"],
"ts": micros_rel(
parent_profile
and parent_profile["get_arguments_start"]
or start_time
),
"ph": "s",
"name": "SubmitTask",
"args": {},
"id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
}
full_trace.append(parent)
task_trace = {
"cat": "submit_task",
"pid": "Node " + worker["node_ip_address"],
"tid": info["worker_id"],
"ts": micros_rel(info["get_arguments_start"]),
"ph": "f",
"name": "SubmitTask",
"args": {},
"id": (info["worker_id"] + str(micros(min(parent_times)))),
"bp": "e",
}
full_trace.append(task_trace)
# Optionally draw object-dependency edges (producer task -> consumer).
if obj_dep:
args = task_table[task_id]["TaskSpec"]["Args"]
for arg in args:
# Don't visualize arguments that are not object IDs.
if isinstance(arg, ray.local_scheduler.ObjectID):
object_info = self._object_table(arg)
# Don't visualize objects that were created by calls to
# put.
if not object_info["IsPut"]:
if arg not in seen_obj:
seen_obj[arg] = 0
seen_obj[arg] += 1
owner_task = self._object_table(arg)["TaskID"]
if owner_task in task_info:
owner_worker = workers[task_info[owner_task]["worker_id"]]
# Adding/subtracting 2 to the time associated
# with the beginning/ending of the flow event
# is necessary to make the flow events show up
# reliably. When these times are exact, this is
# presumably an edge case, and catapult doesn't
# recognize that there is a duration event at
# that exact point in time that the flow event
# should be bound to. This issue is solved by
# adding the 2 ms to the start/end time of the
# flow event, which guarantees overlap with the
# duration event that it's associated with, and
# the flow event therefore always gets drawn.
owner = {
"cat": "obj_dependency",
"pid": ("Node " + owner_worker["node_ip_address"]),
"tid": task_info[owner_task]["worker_id"],
"ts": micros_rel(
task_info[owner_task]["store_outputs_end"]
)
- 2,
"ph": "s",
"name": "ObjectDependency",
"args": {},
"bp": "e",
"cname": "cq_build_attempt_failed",
"id": "obj" + str(arg) + str(seen_obj[arg]),
}
full_trace.append(owner)
dependent = {
"cat": "obj_dependency",
"pid": "Node " + worker["node_ip_address"],
"tid": info["worker_id"],
"ts": micros_rel(info["get_arguments_start"]) + 2,
"ph": "f",
"name": "ObjectDependency",
"args": {},
"cname": "cq_build_attempt_failed",
"bp": "e",
"id": "obj" + str(arg) + str(seen_obj[arg]),
}
full_trace.append(dependent)
print("Creating JSON {}/{}".format(len(full_trace), len(task_info)))
# NOTE(review): the trace entries embed raw ids (e.g. task_id, worker_id)
# whose types are not visible here; if any of them are bytes, json.dump
# raises "Object of type 'bytes' is not JSON serializable" — confirm the
# ids are hex strings before dumping.
with open(path, "w") as outfile:
json.dump(full_trace, outfile)
|
https://github.com/ray-project/ray/issues/1878
|
Collected profiles for 2 tasks.
Dumping task profile data to /var/folders/15/54jf68993rd7753c5fms424r0000gn/T/tmpm91862cq.json, this might take a while...
Creating JSON 6/2
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/Workspace/ray/python/ray/experimental/ui.py in handle_submit(sender)
390 breakdowns=breakdown,
391 obj_dep=obj_dep.value,
--> 392 task_dep=task_dep.value)
393 print("Opening html file in browser...")
394
~/Workspace/ray/python/ray/experimental/state.py in dump_catapult_trace(self, path, task_info, breakdowns, task_dep, obj_dep)
823 print("Creating JSON {}/{}".format(len(full_trace), len(task_info)))
824 with open(path, "w") as outfile:
--> 825 json.dump(full_trace, outfile)
826
827 def _get_times(self, data):
~/anaconda3/lib/python3.6/json/__init__.py in dump(obj, fp, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, default, sort_keys, **kw)
177 # could accelerate with writelines in some versions of Python, at
178 # a debuggability cost
--> 179 for chunk in iterable:
180 fp.write(chunk)
181
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode(o, _current_indent_level)
426 yield _floatstr(o)
427 elif isinstance(o, (list, tuple)):
--> 428 yield from _iterencode_list(o, _current_indent_level)
429 elif isinstance(o, dict):
430 yield from _iterencode_dict(o, _current_indent_level)
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_list(lst, _current_indent_level)
323 else:
324 chunks = _iterencode(value, _current_indent_level)
--> 325 yield from chunks
326 if newline_indent is not None:
327 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_dict(dct, _current_indent_level)
402 else:
403 chunks = _iterencode(value, _current_indent_level)
--> 404 yield from chunks
405 if newline_indent is not None:
406 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_dict(dct, _current_indent_level)
402 else:
403 chunks = _iterencode(value, _current_indent_level)
--> 404 yield from chunks
405 if newline_indent is not None:
406 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode_list(lst, _current_indent_level)
323 else:
324 chunks = _iterencode(value, _current_indent_level)
--> 325 yield from chunks
326 if newline_indent is not None:
327 _current_indent_level -= 1
~/anaconda3/lib/python3.6/json/encoder.py in _iterencode(o, _current_indent_level)
435 raise ValueError("Circular reference detected")
436 markers[markerid] = o
--> 437 o = _default(o)
438 yield from _iterencode(o, _current_indent_level)
439 if markers is not None:
~/anaconda3/lib/python3.6/json/encoder.py in default(self, o)
178 """
179 raise TypeError("Object of type '%s' is not JSON serializable" %
--> 180 o.__class__.__name__)
181
182 def encode(self, o):
TypeError: Object of type 'bytes' is not JSON serializable
|
TypeError
|
def get_agent_class(alg):
    """Look up the agent class registered under the name ``alg``.

    Imports are done lazily inside each branch so that selecting one
    algorithm does not pull in the dependencies of all the others.
    """
    # Built-in test doubles first: they need no imports at all.
    if alg == "__fake":
        return _MockAgent
    if alg == "__sigmoid_fake_data":
        return _SigmoidFakeData
    if alg == "__parameter_tuning":
        return _ParameterTuningAgent
    # Real algorithms, each loaded on demand.
    if alg == "PPO":
        from ray.rllib import ppo
        return ppo.PPOAgent
    if alg == "ES":
        from ray.rllib import es
        return es.ESAgent
    if alg == "DQN":
        from ray.rllib import dqn
        return dqn.DQNAgent
    if alg == "APEX":
        from ray.rllib import dqn
        return dqn.ApexAgent
    if alg == "A3C":
        from ray.rllib import a3c
        return a3c.A3CAgent
    if alg == "BC":
        from ray.rllib import bc
        return bc.BCAgent
    if alg == "PG":
        from ray.rllib import pg
        return pg.PGAgent
    if alg == "script":
        from ray.tune import script_runner
        return script_runner.ScriptRunner
    raise Exception(("Unknown algorithm {}.").format(alg))
|
def get_agent_class(alg):
    """Returns the class of a known agent given its name.

    Args:
        alg: Name of the algorithm, e.g. "PPO", "DQN", or one of the
            "__"-prefixed mock agents used for testing.

    Returns:
        The Agent class registered under ``alg``.

    Raises:
        Exception: If ``alg`` does not name a known algorithm.
    """
    # Imports are performed lazily inside each branch so that selecting
    # one algorithm does not eagerly import all of the others.
    if alg == "PPO":
        from ray.rllib import ppo
        return ppo.PPOAgent
    elif alg == "ES":
        from ray.rllib import es
        return es.ESAgent
    elif alg == "DQN":
        from ray.rllib import dqn
        return dqn.DQNAgent
    elif alg == "APEX":
        from ray.rllib import dqn
        return dqn.ApexAgent
    elif alg == "A3C":
        from ray.rllib import a3c
        return a3c.A3CAgent
    elif alg == "BC":
        from ray.rllib import bc
        return bc.BCAgent
    elif alg == "PG":
        from ray.rllib import pg
        return pg.PGAgent
    elif alg == "script":
        from ray.tune import script_runner
        return script_runner.ScriptRunner
    elif alg == "__fake":
        return _MockAgent
    elif alg == "__sigmoid_fake_data":
        return _SigmoidFakeData
    elif alg == "__parameter_tuning":
        return _ParameterTuningAgent
    else:
        raise Exception(("Unknown algorithm {}.").format(alg))
|
https://github.com/ray-project/ray/issues/1773
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1720, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 17, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 14, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 229, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 19, in <module>
from ray.rllib.es import policies
File "<frozen importlib._bootstrap>", line 968, in _find_and_load
File "<frozen importlib._bootstrap>", line 168, in __enter__
File "<frozen importlib._bootstrap>", line 110, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.es.policies') at 139937598221224
|
_frozen_importlib._DeadlockError
|
def __init__(
self, registry, config, policy_params, env_creator, noise, min_task_runtime=0.2
):
"""Initialize an ES rollout worker.

Args:
    registry: Object/model registry used to resolve the preprocessor.
    config: Algorithm configuration dict (reads "env_config" and
        "observation_filter" here).
    policy_params: Extra keyword arguments forwarded to GenericPolicy.
    env_creator: Callable that builds the environment from env_config.
    noise: Shared noise buffer wrapped in a SharedNoiseTable.
    min_task_runtime: Minimum wall-clock time (seconds) a rollout task
        should run for.
"""
self.min_task_runtime = min_task_runtime
self.config = config
self.policy_params = policy_params
self.noise = SharedNoiseTable(noise)
self.env = env_creator(config["env_config"])
# Local import of ray.rllib.models — presumably deferred to avoid
# circular-import/module-lock problems at module load time; confirm.
from ray.rllib import models
self.preprocessor = models.ModelCatalog.get_preprocessor(registry, self.env)
# The session must exist before the policy is constructed.
self.sess = utils.make_session(single_threaded=True)
self.policy = policies.GenericPolicy(
registry,
self.sess,
self.env.action_space,
self.preprocessor,
config["observation_filter"],
**policy_params,
)
|
def __init__(
self, registry, config, policy_params, env_creator, noise, min_task_runtime=0.2
):
"""Initialize an ES rollout worker.

Args:
    registry: Object/model registry used to resolve the preprocessor.
    config: Algorithm configuration dict (reads "env_config" and
        "observation_filter" here).
    policy_params: Extra keyword arguments forwarded to GenericPolicy.
    env_creator: Callable that builds the environment from env_config.
    noise: Shared noise buffer wrapped in a SharedNoiseTable.
    min_task_runtime: Minimum wall-clock time (seconds) a rollout task
        should run for.
"""
self.min_task_runtime = min_task_runtime
self.config = config
self.policy_params = policy_params
self.noise = SharedNoiseTable(noise)
self.env = env_creator(config["env_config"])
# NOTE(review): ModelCatalog is referenced from module scope here, so
# ray.rllib.models must already be imported when this class loads.
self.preprocessor = ModelCatalog.get_preprocessor(registry, self.env)
# The session must exist before the policy is constructed.
self.sess = utils.make_session(single_threaded=True)
self.policy = policies.GenericPolicy(
registry,
self.sess,
self.env.action_space,
self.preprocessor,
config["observation_filter"],
**policy_params,
)
|
https://github.com/ray-project/ray/issues/1773
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1720, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 17, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 14, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 229, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 19, in <module>
from ray.rllib.es import policies
File "<frozen importlib._bootstrap>", line 968, in _find_and_load
File "<frozen importlib._bootstrap>", line 168, in __enter__
File "<frozen importlib._bootstrap>", line 110, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.es.policies') at 139937598221224
|
_frozen_importlib._DeadlockError
|
def _init(self):
"""Set up the ES agent: policy, optimizer, shared noise, and workers."""
policy_params = {"action_noise_std": 0.01}
env = self.env_creator(self.config["env_config"])
# Local import of ray.rllib.models — presumably deferred to avoid
# circular-import/module-lock problems at module load time; confirm.
from ray.rllib import models
preprocessor = models.ModelCatalog.get_preprocessor(self.registry, env)
# The session must exist before the policy is constructed.
self.sess = utils.make_session(single_threaded=False)
self.policy = policies.GenericPolicy(
self.registry,
self.sess,
env.action_space,
preprocessor,
self.config["observation_filter"],
**policy_params,
)
self.optimizer = optimizers.Adam(self.policy, self.config["stepsize"])
# Create the shared noise table.
print("Creating shared noise table.")
noise_id = create_shared_noise.remote(self.config["noise_size"])
self.noise = SharedNoiseTable(ray.get(noise_id))
# Create the actors.
print("Creating actors.")
self.workers = [
Worker.remote(
self.registry, self.config, policy_params, self.env_creator, noise_id
)
for _ in range(self.config["num_workers"])
]
# Running counters and start time for progress reporting.
self.episodes_so_far = 0
self.timesteps_so_far = 0
self.tstart = time.time()
|
def _init(self):
"""Set up the ES agent: policy, optimizer, shared noise, and workers."""
policy_params = {"action_noise_std": 0.01}
env = self.env_creator(self.config["env_config"])
# NOTE(review): ModelCatalog is referenced from module scope here, so
# ray.rllib.models must already be imported when this module loads.
preprocessor = ModelCatalog.get_preprocessor(self.registry, env)
# The session must exist before the policy is constructed.
self.sess = utils.make_session(single_threaded=False)
self.policy = policies.GenericPolicy(
self.registry,
self.sess,
env.action_space,
preprocessor,
self.config["observation_filter"],
**policy_params,
)
self.optimizer = optimizers.Adam(self.policy, self.config["stepsize"])
# Create the shared noise table.
print("Creating shared noise table.")
noise_id = create_shared_noise.remote(self.config["noise_size"])
self.noise = SharedNoiseTable(ray.get(noise_id))
# Create the actors.
print("Creating actors.")
self.workers = [
Worker.remote(
self.registry, self.config, policy_params, self.env_creator, noise_id
)
for _ in range(self.config["num_workers"])
]
# Running counters and start time for progress reporting.
self.episodes_so_far = 0
self.timesteps_so_far = 0
self.tstart = time.time()
|
https://github.com/ray-project/ray/issues/1773
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1720, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 17, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 14, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 229, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 19, in <module>
from ray.rllib.es import policies
File "<frozen importlib._bootstrap>", line 968, in _find_and_load
File "<frozen importlib._bootstrap>", line 168, in __enter__
File "<frozen importlib._bootstrap>", line 110, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.es.policies') at 139937598221224
|
_frozen_importlib._DeadlockError
|
def _train(self):
"""Run one ES training iteration and return a TrainingResult.

Broadcasts the current policy weights, collects noisy rollouts from the
worker actors, estimates the gradient from the centered-rank-processed
returns, and applies one optimizer step.
"""
config = self.config
step_tstart = time.time()
theta = self.policy.get_weights()
assert theta.dtype == np.float32
# Put the current policy weights in the object store.
theta_id = ray.put(theta)
# Use the actors to do rollouts, note that we pass in the ID of the
# policy weights.
results, num_episodes, num_timesteps = self._collect_results(
theta_id, config["episodes_per_batch"], config["timesteps_per_batch"]
)
all_noise_indices = []
all_training_returns = []
all_training_lengths = []
all_eval_returns = []
all_eval_lengths = []
# Loop over the results.
for result in results:
all_eval_returns += result.eval_returns
all_eval_lengths += result.eval_lengths
all_noise_indices += result.noise_indices
all_training_returns += result.noisy_returns
all_training_lengths += result.noisy_lengths
assert len(all_eval_returns) == len(all_eval_lengths)
assert (
len(all_noise_indices) == len(all_training_returns) == len(all_training_lengths)
)
self.episodes_so_far += num_episodes
self.timesteps_so_far += num_timesteps
# Assemble the results.
eval_returns = np.array(all_eval_returns)
eval_lengths = np.array(all_eval_lengths)
noise_indices = np.array(all_noise_indices)
noisy_returns = np.array(all_training_returns)
noisy_lengths = np.array(all_training_lengths)
# Process the returns.
if config["return_proc_mode"] == "centered_rank":
proc_noisy_returns = utils.compute_centered_ranks(noisy_returns)
else:
raise NotImplementedError(config["return_proc_mode"])
# Compute and take a step.
# The generator streams noise vectors so they are never all in memory.
g, count = utils.batched_weighted_sum(
proc_noisy_returns[:, 0] - proc_noisy_returns[:, 1],
(self.noise.get(index, self.policy.num_params) for index in noise_indices),
batch_size=500,
)
g /= noisy_returns.size
assert (
g.shape == (self.policy.num_params,)
and g.dtype == np.float32
and count == len(noise_indices)
)
# Compute the new weights theta.
# Negative gradient plus L2 regularization toward zero weights.
theta, update_ratio = self.optimizer.update(-g + config["l2_coeff"] * theta)
# Set the new weights in the local copy of the policy.
self.policy.set_weights(theta)
step_tend = time.time()
tlogger.record_tabular("EvalEpRewMean", eval_returns.mean())
tlogger.record_tabular("EvalEpRewStd", eval_returns.std())
tlogger.record_tabular("EvalEpLenMean", eval_lengths.mean())
tlogger.record_tabular("EpRewMean", noisy_returns.mean())
tlogger.record_tabular("EpRewStd", noisy_returns.std())
tlogger.record_tabular("EpLenMean", noisy_lengths.mean())
tlogger.record_tabular("Norm", float(np.square(theta).sum()))
tlogger.record_tabular("GradNorm", float(np.square(g).sum()))
tlogger.record_tabular("UpdateRatio", float(update_ratio))
tlogger.record_tabular("EpisodesThisIter", noisy_lengths.size)
tlogger.record_tabular("EpisodesSoFar", self.episodes_so_far)
tlogger.record_tabular("TimestepsThisIter", noisy_lengths.sum())
tlogger.record_tabular("TimestepsSoFar", self.timesteps_so_far)
tlogger.record_tabular("TimeElapsedThisIter", step_tend - step_tstart)
tlogger.record_tabular("TimeElapsed", step_tend - self.tstart)
tlogger.dump_tabular()
info = {
"weights_norm": np.square(theta).sum(),
"grad_norm": np.square(g).sum(),
"update_ratio": update_ratio,
"episodes_this_iter": noisy_lengths.size,
"episodes_so_far": self.episodes_so_far,
"timesteps_this_iter": noisy_lengths.sum(),
"timesteps_so_far": self.timesteps_so_far,
"time_elapsed_this_iter": step_tend - step_tstart,
"time_elapsed": step_tend - self.tstart,
}
# TrainingResult is referenced via its full module path here.
result = ray.tune.result.TrainingResult(
episode_reward_mean=eval_returns.mean(),
episode_len_mean=eval_lengths.mean(),
timesteps_this_iter=noisy_lengths.sum(),
info=info,
)
return result
|
def _train(self):
"""Run one ES training iteration and return a TrainingResult.

Broadcasts the current policy weights, collects noisy rollouts from the
worker actors, estimates the gradient from the centered-rank-processed
returns, and applies one optimizer step.
"""
config = self.config
step_tstart = time.time()
theta = self.policy.get_weights()
assert theta.dtype == np.float32
# Put the current policy weights in the object store.
theta_id = ray.put(theta)
# Use the actors to do rollouts, note that we pass in the ID of the
# policy weights.
results, num_episodes, num_timesteps = self._collect_results(
theta_id, config["episodes_per_batch"], config["timesteps_per_batch"]
)
all_noise_indices = []
all_training_returns = []
all_training_lengths = []
all_eval_returns = []
all_eval_lengths = []
# Loop over the results.
for result in results:
all_eval_returns += result.eval_returns
all_eval_lengths += result.eval_lengths
all_noise_indices += result.noise_indices
all_training_returns += result.noisy_returns
all_training_lengths += result.noisy_lengths
assert len(all_eval_returns) == len(all_eval_lengths)
assert (
len(all_noise_indices) == len(all_training_returns) == len(all_training_lengths)
)
self.episodes_so_far += num_episodes
self.timesteps_so_far += num_timesteps
# Assemble the results.
eval_returns = np.array(all_eval_returns)
eval_lengths = np.array(all_eval_lengths)
noise_indices = np.array(all_noise_indices)
noisy_returns = np.array(all_training_returns)
noisy_lengths = np.array(all_training_lengths)
# Process the returns.
if config["return_proc_mode"] == "centered_rank":
proc_noisy_returns = utils.compute_centered_ranks(noisy_returns)
else:
raise NotImplementedError(config["return_proc_mode"])
# Compute and take a step.
# The generator streams noise vectors so they are never all in memory.
g, count = utils.batched_weighted_sum(
proc_noisy_returns[:, 0] - proc_noisy_returns[:, 1],
(self.noise.get(index, self.policy.num_params) for index in noise_indices),
batch_size=500,
)
g /= noisy_returns.size
assert (
g.shape == (self.policy.num_params,)
and g.dtype == np.float32
and count == len(noise_indices)
)
# Compute the new weights theta.
# Negative gradient plus L2 regularization toward zero weights.
theta, update_ratio = self.optimizer.update(-g + config["l2_coeff"] * theta)
# Set the new weights in the local copy of the policy.
self.policy.set_weights(theta)
step_tend = time.time()
tlogger.record_tabular("EvalEpRewMean", eval_returns.mean())
tlogger.record_tabular("EvalEpRewStd", eval_returns.std())
tlogger.record_tabular("EvalEpLenMean", eval_lengths.mean())
tlogger.record_tabular("EpRewMean", noisy_returns.mean())
tlogger.record_tabular("EpRewStd", noisy_returns.std())
tlogger.record_tabular("EpLenMean", noisy_lengths.mean())
tlogger.record_tabular("Norm", float(np.square(theta).sum()))
tlogger.record_tabular("GradNorm", float(np.square(g).sum()))
tlogger.record_tabular("UpdateRatio", float(update_ratio))
tlogger.record_tabular("EpisodesThisIter", noisy_lengths.size)
tlogger.record_tabular("EpisodesSoFar", self.episodes_so_far)
tlogger.record_tabular("TimestepsThisIter", noisy_lengths.sum())
tlogger.record_tabular("TimestepsSoFar", self.timesteps_so_far)
tlogger.record_tabular("TimeElapsedThisIter", step_tend - step_tstart)
tlogger.record_tabular("TimeElapsed", step_tend - self.tstart)
tlogger.dump_tabular()
info = {
"weights_norm": np.square(theta).sum(),
"grad_norm": np.square(g).sum(),
"update_ratio": update_ratio,
"episodes_this_iter": noisy_lengths.size,
"episodes_so_far": self.episodes_so_far,
"timesteps_this_iter": noisy_lengths.sum(),
"timesteps_so_far": self.timesteps_so_far,
"time_elapsed_this_iter": step_tend - step_tstart,
"time_elapsed": step_tend - self.tstart,
}
# NOTE(review): TrainingResult is referenced as a bare name here, so it
# must be imported at module scope.
result = TrainingResult(
episode_reward_mean=eval_returns.mean(),
episode_len_mean=eval_lengths.mean(),
timesteps_this_iter=noisy_lengths.sum(),
info=info,
)
return result
|
https://github.com/ray-project/ray/issues/1773
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1720, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 17, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 14, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 229, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 19, in <module>
from ray.rllib.es import policies
File "<frozen importlib._bootstrap>", line 968, in _find_and_load
File "<frozen importlib._bootstrap>", line 168, in __enter__
File "<frozen importlib._bootstrap>", line 110, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.es.policies') at 139937598221224
|
_frozen_importlib._DeadlockError
|
def _become_actor(self, task):
"""Turn this worker into an actor.
Args:
task: The actor creation task.
"""
# A worker may only become an actor once.
assert self.actor_id == NIL_ACTOR_ID
arguments = task.arguments()
# The creation task carries exactly one argument: the actor class id.
assert len(arguments) == 1
self.actor_id = task.actor_creation_id().id()
class_id = arguments[0]
key = b"ActorClass:" + class_id
# Wait for the actor class key to have been imported by the import
# thread. TODO(rkn): It shouldn't be possible to end up in an infinite
# loop here, but we should push an error to the driver if too much time
# is spent here.
while key not in self.imported_actor_classes:
time.sleep(0.001)
# Registration is done under the worker lock — presumably to serialize
# against the import thread; confirm the locking protocol.
with self.lock:
self.fetch_and_register_actor(key, task.required_resources(), self)
|
def _become_actor(self, task):
"""Turn this worker into an actor.
Args:
task: The actor creation task.
"""
# A worker may only become an actor once.
assert self.actor_id == NIL_ACTOR_ID
arguments = task.arguments()
# The creation task carries exactly one argument: the actor class id.
assert len(arguments) == 1
self.actor_id = task.actor_creation_id().id()
class_id = arguments[0]
key = b"ActorClass:" + class_id
# Wait for the actor class key to have been imported by the import
# thread. TODO(rkn): It shouldn't be possible to end up in an infinite
# loop here, but we should push an error to the driver if too much time
# is spent here.
while key not in self.imported_actor_classes:
time.sleep(0.001)
# NOTE(review): registration happens without holding the worker lock
# here — confirm this cannot race with the import thread.
self.fetch_and_register_actor(key, task.required_resources(), self)
|
https://github.com/ray-project/ray/issues/1773
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1720, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 17, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 14, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 229, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 19, in <module>
from ray.rllib.es import policies
File "<frozen importlib._bootstrap>", line 968, in _find_and_load
File "<frozen importlib._bootstrap>", line 168, in __enter__
File "<frozen importlib._bootstrap>", line 110, in acquire
_frozen_importlib._DeadlockError: deadlock detected by _ModuleLock('ray.rllib.es.policies') at 139937598221224
|
_frozen_importlib._DeadlockError
|
def fetch_and_register_actor(actor_class_key, resources, worker):
    """Import an actor.

    This will be called by the worker's import thread when the worker receives
    the actor_class export, assuming that the worker is an actor for that
    class.

    Args:
        actor_class_key: The key in Redis to use to fetch the actor.
        resources: The resources required for this actor's lifetime.
        worker: The worker to use.
    """
    # NOTE(review): `resources` is not referenced anywhere in this body —
    # confirm it is still required by callers.
    actor_id_str = worker.actor_id
    # Fetch all of the exported class metadata with a single Redis HMGET.
    (
        driver_id,
        class_id,
        class_name,
        module,
        pickled_class,
        checkpoint_interval,
        actor_method_names,
        actor_method_num_return_vals,
    ) = worker.redis_client.hmget(
        actor_class_key,
        [
            "driver_id",
            "class_id",
            "class_name",
            "module",
            "class",
            "checkpoint_interval",
            "actor_method_names",
            "actor_method_num_return_vals",
        ],
    )

    # Redis returns bytes: decode scalar fields and JSON-encoded lists.
    actor_name = class_name.decode("ascii")
    module = module.decode("ascii")
    checkpoint_interval = int(checkpoint_interval)
    actor_method_names = json.loads(actor_method_names.decode("ascii"))
    actor_method_num_return_vals = json.loads(
        actor_method_num_return_vals.decode("ascii")
    )

    # Create a temporary actor with some temporary methods so that if the actor
    # fails to be unpickled, the temporary actor can be used (just to produce
    # error messages and to prevent the driver from hanging).
    class TemporaryActor(object):
        pass

    worker.actors[actor_id_str] = TemporaryActor()
    worker.actor_checkpoint_interval = checkpoint_interval

    def temporary_actor_method(*xs):
        # Placeholder executor; only invoked if the real class failed to
        # unpickle below.
        raise Exception(
            "The actor with name {} failed to be imported, and so "
            "cannot execute this method".format(actor_name)
        )

    # Register the actor method signatures.
    register_actor_signatures(
        worker,
        driver_id,
        class_id,
        class_name,
        actor_method_names,
        actor_method_num_return_vals,
    )
    # Register the actor method executors.
    for actor_method_name in actor_method_names:
        function_id = compute_actor_method_function_id(
            class_name, actor_method_name
        ).id()
        temporary_executor = make_actor_method_executor(
            worker, actor_method_name, temporary_actor_method, actor_imported=False
        )
        worker.functions[driver_id][function_id] = (
            actor_method_name,
            temporary_executor,
        )
        worker.num_task_executions[driver_id][function_id] = 0

    try:
        unpickled_class = pickle.loads(pickled_class)
        worker.actor_class = unpickled_class
    except Exception:
        # If an exception was thrown when the actor was imported, we record the
        # traceback and notify the scheduler of the failure.
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        # Log the error message.
        push_error_to_driver(
            worker.redis_client,
            "register_actor_signatures",
            traceback_str,
            driver_id,
            data={"actor_id": actor_id_str},
        )
        # TODO(rkn): In the future, it might make sense to have the worker exit
        # here. However, currently that would lead to hanging if someone calls
        # ray.get on a method invoked on the actor.
    else:
        # TODO(pcm): Why is the below line necessary?
        unpickled_class.__module__ = module
        worker.actors[actor_id_str] = unpickled_class.__new__(unpickled_class)
        # Replace every temporary executor with the real method implementation
        # found on the unpickled class.
        actor_methods = inspect.getmembers(
            unpickled_class,
            predicate=(
                lambda x: (inspect.isfunction(x) or inspect.ismethod(x) or is_cython(x))
            ),
        )
        for actor_method_name, actor_method in actor_methods:
            function_id = compute_actor_method_function_id(
                class_name, actor_method_name
            ).id()
            executor = make_actor_method_executor(
                worker, actor_method_name, actor_method, actor_imported=True
            )
            worker.functions[driver_id][function_id] = (actor_method_name, executor)
|
def fetch_and_register_actor(actor_class_key, worker):
    """Import an actor.

    This will be called by the worker's import thread when the worker receives
    the actor_class export, assuming that the worker is an actor for that
    class.

    Args:
        actor_class_key: The key in Redis to use to fetch the actor.
        worker: The worker to use.
    """
    actor_id_str = worker.actor_id
    # Fetch all of the exported class metadata with a single Redis HMGET.
    # NOTE(review): class_id is fetched but not used in this body.
    (
        driver_id,
        class_id,
        class_name,
        module,
        pickled_class,
        checkpoint_interval,
        actor_method_names,
        actor_method_num_return_vals,
    ) = worker.redis_client.hmget(
        actor_class_key,
        [
            "driver_id",
            "class_id",
            "class_name",
            "module",
            "class",
            "checkpoint_interval",
            "actor_method_names",
            "actor_method_num_return_vals",
        ],
    )

    # Redis returns bytes: decode scalar fields and JSON-encoded lists.
    actor_name = class_name.decode("ascii")
    module = module.decode("ascii")
    checkpoint_interval = int(checkpoint_interval)
    actor_method_names = json.loads(actor_method_names.decode("ascii"))
    actor_method_num_return_vals = json.loads(
        actor_method_num_return_vals.decode("ascii")
    )

    # Create a temporary actor with some temporary methods so that if the actor
    # fails to be unpickled, the temporary actor can be used (just to produce
    # error messages and to prevent the driver from hanging).
    class TemporaryActor(object):
        pass

    worker.actors[actor_id_str] = TemporaryActor()
    worker.actor_checkpoint_interval = checkpoint_interval

    def temporary_actor_method(*xs):
        # Placeholder executor; only invoked if the real class failed to
        # unpickle below.
        raise Exception(
            "The actor with name {} failed to be imported, and so "
            "cannot execute this method".format(actor_name)
        )

    # Register the actor method signatures.
    register_actor_signatures(
        worker, driver_id, class_name, actor_method_names, actor_method_num_return_vals
    )
    # Register the actor method executors.
    for actor_method_name in actor_method_names:
        function_id = compute_actor_method_function_id(
            class_name, actor_method_name
        ).id()
        temporary_executor = make_actor_method_executor(
            worker, actor_method_name, temporary_actor_method, actor_imported=False
        )
        worker.functions[driver_id][function_id] = (
            actor_method_name,
            temporary_executor,
        )
        worker.num_task_executions[driver_id][function_id] = 0

    try:
        unpickled_class = pickle.loads(pickled_class)
        worker.actor_class = unpickled_class
    except Exception:
        # If an exception was thrown when the actor was imported, we record the
        # traceback and notify the scheduler of the failure.
        traceback_str = ray.utils.format_error_message(traceback.format_exc())
        # Log the error message.
        push_error_to_driver(
            worker.redis_client,
            "register_actor_signatures",
            traceback_str,
            driver_id,
            data={"actor_id": actor_id_str},
        )
        # TODO(rkn): In the future, it might make sense to have the worker exit
        # here. However, currently that would lead to hanging if someone calls
        # ray.get on a method invoked on the actor.
    else:
        # TODO(pcm): Why is the below line necessary?
        unpickled_class.__module__ = module
        worker.actors[actor_id_str] = unpickled_class.__new__(unpickled_class)
        # Replace every temporary executor with the real method implementation
        # found on the unpickled class.
        actor_methods = inspect.getmembers(
            unpickled_class,
            predicate=(
                lambda x: (inspect.isfunction(x) or inspect.ismethod(x) or is_cython(x))
            ),
        )
        for actor_method_name, actor_method in actor_methods:
            function_id = compute_actor_method_function_id(
                class_name, actor_method_name
            ).id()
            executor = make_actor_method_executor(
                worker, actor_method_name, actor_method, actor_imported=True
            )
            worker.functions[driver_id][function_id] = (actor_method_name, executor)
            # We do not set worker.function_properties[driver_id][function_id]
            # because we currently do need the actor worker to submit new tasks
            # for the actor.

    # Store some extra information that will be used when the actor exits
    # to release GPU resources.
    worker.driver_id = binary_to_hex(driver_id)
    local_scheduler_id = worker.redis_client.hget(
        b"Actor:" + actor_id_str, "local_scheduler_id"
    )
    worker.local_scheduler_id = binary_to_hex(local_scheduler_id)
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def register_actor_signatures(
    worker,
    driver_id,
    class_id,
    class_name,
    actor_method_names,
    actor_method_num_return_vals,
    actor_creation_resources=None,
    actor_method_cpus=None,
):
    """Record an actor class's method signatures on the given worker.

    Each actor method — and, when creation resources are supplied, the actor
    creation task itself — gets a FunctionProperties entry in
    ``worker.function_properties`` under the owning driver.

    Args:
        worker: The worker to register the signatures on.
        driver_id: The ID of the driver that this actor is associated with.
        class_id: The ID of the actor class.
        class_name: The name of the actor class.
        actor_method_names: The names of the methods to register.
        actor_method_num_return_vals: A list of the number of return values
            for each of the actor's methods.
        actor_creation_resources: The resources required by the actor
            creation task.
        actor_method_cpus: The number of CPUs required by each actor method.
    """
    assert len(actor_method_names) == len(actor_method_num_return_vals)
    properties_by_function = worker.function_properties[driver_id]
    for method_name, method_num_returns in zip(
        actor_method_names, actor_method_num_return_vals
    ):
        # TODO(rkn): When we create a second actor, we are probably
        # overwriting the values from the first actor here. This may or may
        # not be a problem.
        method_function_id = compute_actor_method_function_id(
            class_name, method_name
        ).id()
        # One extra return value is reserved for the actor dummy object. In
        # the cases where actor_method_cpus is None, that value should never
        # be used.
        properties_by_function[method_function_id] = FunctionProperties(
            num_return_vals=method_num_returns + 1,
            resources={"CPU": actor_method_cpus},
            max_calls=0,
        )

    if actor_creation_resources is None:
        return
    # Also register the actor creation task; its single extra return value is
    # the actor dummy object.
    creation_function_id = compute_actor_creation_function_id(class_id)
    properties_by_function[creation_function_id.id()] = FunctionProperties(
        num_return_vals=0 + 1,
        resources=actor_creation_resources,
        max_calls=0,
    )
|
def register_actor_signatures(
    worker, driver_id, class_name, actor_method_names, actor_method_num_return_vals
):
    """Record an actor class's method signatures on the given worker.

    Every actor method gets a FunctionProperties entry in
    ``worker.function_properties`` under the owning driver.

    Args:
        worker: The worker to register the signatures on.
        driver_id: The ID of the driver that this actor is associated with.
        class_name: The name of the actor class.
        actor_method_names: The names of the methods to register.
        actor_method_num_return_vals: A list of the number of return values
            for each of the actor's methods.
    """
    assert len(actor_method_names) == len(actor_method_num_return_vals)
    properties_by_function = worker.function_properties[driver_id]
    for method_name, method_num_returns in zip(
        actor_method_names, actor_method_num_return_vals
    ):
        # TODO(rkn): When we create a second actor, we are probably
        # overwriting the values from the first actor here. This may or may
        # not be a problem.
        method_function_id = compute_actor_method_function_id(
            class_name, method_name
        ).id()
        # One extra return value is reserved for the actor dummy object.
        properties_by_function[method_function_id] = FunctionProperties(
            num_return_vals=method_num_returns + 1, resources={"CPU": 1}, max_calls=0
        )
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def export_actor(
    actor_id,
    class_id,
    class_name,
    actor_method_names,
    actor_method_num_return_vals,
    actor_creation_resources,
    actor_method_cpus,
    worker,
):
    """Export an actor to redis.

    Registers the actor's method signatures on this worker, then submits the
    actor creation task.

    Args:
        actor_id (common.ObjectID): The ID of the actor.
        class_id (str): A random ID for the actor class.
        class_name (str): The actor class name.
        actor_method_names (list): A list of the names of this actor's methods.
        actor_method_num_return_vals: A list of the number of return values for
            each of the actor's methods.
        actor_creation_resources: A dictionary mapping resource name to the
            quantity of that resource required by the actor.
        actor_method_cpus: The number of CPUs required by actor methods.
        worker: The worker used to submit the creation task.

    Returns:
        The first (dummy) object ID returned by the actor creation task.
    """
    ray.worker.check_main_thread()
    if worker.mode is None:
        raise Exception(
            "Actors cannot be created before Ray has been "
            "started. You can start Ray with 'ray.init()'."
        )

    task_driver_id = worker.task_driver_id.id()
    register_actor_signatures(
        worker,
        task_driver_id,
        class_id,
        class_name,
        actor_method_names,
        actor_method_num_return_vals,
        actor_creation_resources=actor_creation_resources,
        actor_method_cpus=actor_method_cpus,
    )

    # Submit the actor creation task. Its only argument is the class id, and
    # its single return value is the dummy object for subsequent methods.
    creation_function_id = compute_actor_creation_function_id(class_id)
    return worker.submit_task(
        creation_function_id, [class_id], actor_creation_id=actor_id
    )[0]
|
def export_actor(
    actor_id,
    class_id,
    class_name,
    actor_method_names,
    actor_method_num_return_vals,
    resources,
    worker,
):
    """Export an actor to redis.

    Registers the actor's method signatures locally, chooses a local
    scheduler, records the actor's metadata in Redis, and then publishes the
    creation notification.

    Args:
        actor_id (common.ObjectID): The ID of the actor.
        class_id (str): A random ID for the actor class.
        class_name (str): The actor class name.
        actor_method_names (list): A list of the names of this actor's methods.
        actor_method_num_return_vals: A list of the number of return values for
            each of the actor's methods.
        resources: A dictionary mapping resource name to the quantity of that
            resource required by the actor.
        worker: The worker used to export the actor.
    """
    ray.worker.check_main_thread()
    if worker.mode is None:
        raise Exception(
            "Actors cannot be created before Ray has been "
            "started. You can start Ray with 'ray.init()'."
        )
    driver_id = worker.task_driver_id.id()
    register_actor_signatures(
        worker, driver_id, class_name, actor_method_names, actor_method_num_return_vals
    )

    # Select a local scheduler for the actor.
    key = b"Actor:" + actor_id.id()
    local_scheduler_id = select_local_scheduler(
        worker.task_driver_id.id(),
        ray.global_state.local_schedulers(),
        resources.get("GPU", 0),
        worker.redis_client,
    )
    assert local_scheduler_id is not None

    # We must put the actor information in Redis before publishing the actor
    # notification so that when the newly created actor attempts to fetch the
    # information from Redis, it is already there.
    # NOTE(review): driver_id is recomputed here; it has the same value as the
    # one computed above.
    driver_id = worker.task_driver_id.id()
    worker.redis_client.hmset(
        key,
        {
            "class_id": class_id,
            "driver_id": driver_id,
            "local_scheduler_id": local_scheduler_id,
            "num_gpus": resources.get("GPU", 0),
            "removed": False,
        },
    )

    # TODO(rkn): There is actually no guarantee that the local scheduler that
    # we are publishing to has already subscribed to the actor_notifications
    # channel. Therefore, this message may be missed and the workload will
    # hang. This is a bug.
    ray.utils.publish_actor_creation(
        actor_id.id(), driver_id, local_scheduler_id, False, worker.redis_client
    )
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def __init__(
    self,
    actor_id,
    class_id,
    actor_handle_id,
    actor_cursor,
    actor_counter,
    actor_method_names,
    actor_method_num_return_vals,
    method_signatures,
    checkpoint_interval,
    class_name,
    actor_creation_dummy_object_id,
    actor_creation_resources,
    actor_method_cpus,
):
    """Store an actor handle's state field-for-field.

    TODO(rkn): Some of these fields are probably not necessary. We should
    strip out the unnecessary fields to keep actor handles lightweight.
    """
    # Identity of the actor, its class, and this particular handle.
    self.actor_id = actor_id
    self.class_id = class_id
    self.actor_handle_id = actor_handle_id
    # Cursor and counter track the most recent method invocation.
    self.actor_cursor = actor_cursor
    self.actor_counter = actor_counter
    # Method metadata.
    self.actor_method_names = actor_method_names
    self.actor_method_num_return_vals = actor_method_num_return_vals
    # TODO(swang): Fetch this information from Redis so that we don't have
    # to fall back to pickle.
    self.method_signatures = method_signatures
    self.checkpoint_interval = checkpoint_interval
    self.class_name = class_name
    # Creation-task bookkeeping.
    self.actor_creation_dummy_object_id = actor_creation_dummy_object_id
    self.actor_creation_resources = actor_creation_resources
    self.actor_method_cpus = actor_method_cpus
|
def __init__(
    self,
    actor_id,
    actor_handle_id,
    actor_cursor,
    actor_counter,
    actor_method_names,
    actor_method_num_return_vals,
    method_signatures,
    checkpoint_interval,
    class_name,
):
    """Store an actor handle's state field-for-field."""
    # Identity of the actor and this particular handle.
    self.actor_id = actor_id
    self.actor_handle_id = actor_handle_id
    # Cursor and counter track the most recent method invocation.
    self.actor_cursor = actor_cursor
    self.actor_counter = actor_counter
    # Method metadata.
    self.actor_method_names = actor_method_names
    self.actor_method_num_return_vals = actor_method_num_return_vals
    # TODO(swang): Fetch this information from Redis so that we don't have
    # to fall back to pickle.
    self.method_signatures = method_signatures
    self.checkpoint_interval = checkpoint_interval
    self.class_name = class_name
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def wrap_actor_handle(actor_handle):
    """Capture an ActorHandle's fields in a serializable wrapper.

    Args:
        actor_handle: The ActorHandle instance to wrap.

    Returns:
        An ActorHandleWrapper instance that stores the ActorHandle's fields.
    """
    # The forked handle gets its own handle id, derived from this handle's id
    # and the number of forks taken from it so far.
    forked_handle_id = compute_actor_handle_id(
        actor_handle._ray_actor_handle_id, actor_handle._ray_actor_forks
    )
    state = (
        actor_handle._ray_actor_id,
        actor_handle._ray_class_id,
        forked_handle_id,
        actor_handle._ray_actor_cursor,
        0,  # Reset the actor counter.
        actor_handle._ray_actor_method_names,
        actor_handle._ray_actor_method_num_return_vals,
        actor_handle._ray_method_signatures,
        actor_handle._ray_checkpoint_interval,
        actor_handle._ray_class_name,
        actor_handle._ray_actor_creation_dummy_object_id,
        actor_handle._ray_actor_creation_resources,
        actor_handle._ray_actor_method_cpus,
    )
    wrapper = ActorHandleWrapper(*state)
    actor_handle._ray_actor_forks += 1
    return wrapper
|
def wrap_actor_handle(actor_handle):
    """Capture an ActorHandle's fields in a serializable wrapper.

    Args:
        actor_handle: The ActorHandle instance to wrap.

    Returns:
        An ActorHandleWrapper instance that stores the ActorHandle's fields.
    """
    # The forked handle gets its own handle id, derived from this handle's id
    # and the number of forks taken from it so far.
    forked_handle_id = compute_actor_handle_id(
        actor_handle._ray_actor_handle_id, actor_handle._ray_actor_forks
    )
    state = (
        actor_handle._ray_actor_id,
        forked_handle_id,
        actor_handle._ray_actor_cursor,
        0,  # Reset the actor counter.
        actor_handle._ray_actor_method_names,
        actor_handle._ray_actor_method_num_return_vals,
        actor_handle._ray_method_signatures,
        actor_handle._ray_checkpoint_interval,
        actor_handle._ray_class_name,
    )
    wrapper = ActorHandleWrapper(*state)
    actor_handle._ray_actor_forks += 1
    return wrapper
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def unwrap_actor_handle(worker, wrapper):
    """Reconstruct an ActorHandle from a wrapper's stored fields.

    Args:
        worker: The worker that is unwrapping the actor handle.
        wrapper: An ActorHandleWrapper instance to unwrap.

    Returns:
        The unwrapped ActorHandle instance.
    """
    # Make this worker aware of the actor's method signatures before any
    # method can be invoked through the reconstructed handle.
    register_actor_signatures(
        worker,
        worker.task_driver_id.id(),
        wrapper.class_id,
        wrapper.class_name,
        wrapper.actor_method_names,
        wrapper.actor_method_num_return_vals,
        wrapper.actor_creation_resources,
        wrapper.actor_method_cpus,
    )

    # Instantiate the handle without running __init__ (which deliberately
    # raises), then populate its state from the wrapper.
    handle_class = make_actor_handle_class(wrapper.class_name)
    handle = handle_class.__new__(handle_class)
    handle._manual_init(
        wrapper.actor_id,
        wrapper.class_id,
        wrapper.actor_handle_id,
        wrapper.actor_cursor,
        wrapper.actor_counter,
        wrapper.actor_method_names,
        wrapper.actor_method_num_return_vals,
        wrapper.method_signatures,
        wrapper.checkpoint_interval,
        wrapper.actor_creation_dummy_object_id,
        wrapper.actor_creation_resources,
        wrapper.actor_method_cpus,
    )
    return handle
|
def unwrap_actor_handle(worker, wrapper):
    """Reconstruct an ActorHandle from a wrapper's stored fields.

    Args:
        worker: The worker that is unwrapping the actor handle.
        wrapper: An ActorHandleWrapper instance to unwrap.

    Returns:
        The unwrapped ActorHandle instance.
    """
    # Make this worker aware of the actor's method signatures before any
    # method can be invoked through the reconstructed handle.
    register_actor_signatures(
        worker,
        worker.task_driver_id.id(),
        wrapper.class_name,
        wrapper.actor_method_names,
        wrapper.actor_method_num_return_vals,
    )

    # Instantiate the handle without running __init__ (which deliberately
    # raises), then populate its state from the wrapper.
    handle_class = make_actor_handle_class(wrapper.class_name)
    handle = handle_class.__new__(handle_class)
    handle._manual_init(
        wrapper.actor_id,
        wrapper.actor_handle_id,
        wrapper.actor_cursor,
        wrapper.actor_counter,
        wrapper.actor_method_names,
        wrapper.actor_method_num_return_vals,
        wrapper.method_signatures,
        wrapper.checkpoint_interval,
    )
    return handle
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def make_actor_handle_class(class_name):
    """Create an ActorHandle class specialized for the given actor class name.

    `class_name` is captured by closure and used both for error messages and
    to compute the function ids of the actor's methods.
    """
    class ActorHandle(ActorHandleParent):
        def __init__(self, *args, **kwargs):
            # Handles are only ever built via __new__ + _manual_init; direct
            # construction is an error.
            raise Exception(
                "Actor classes cannot be instantiated directly. "
                "Instead of running '{}()', try '{}.remote()'.".format(
                    class_name, class_name
                )
            )

        @classmethod
        def remote(cls, *args, **kwargs):
            raise NotImplementedError(
                "The classmethod remote() can only be called on the original Class."
            )

        def _manual_init(
            self,
            actor_id,
            class_id,
            actor_handle_id,
            actor_cursor,
            actor_counter,
            actor_method_names,
            actor_method_num_return_vals,
            method_signatures,
            checkpoint_interval,
            actor_creation_dummy_object_id,
            actor_creation_resources,
            actor_method_cpus,
        ):
            # Populate the handle's state without going through __init__
            # (which deliberately raises).
            self._ray_actor_id = actor_id
            self._ray_class_id = class_id
            self._ray_actor_handle_id = actor_handle_id
            self._ray_actor_cursor = actor_cursor
            self._ray_actor_counter = actor_counter
            self._ray_actor_method_names = actor_method_names
            self._ray_actor_method_num_return_vals = actor_method_num_return_vals
            self._ray_method_signatures = method_signatures
            self._ray_checkpoint_interval = checkpoint_interval
            self._ray_class_name = class_name
            self._ray_actor_forks = 0
            self._ray_actor_creation_dummy_object_id = actor_creation_dummy_object_id
            self._ray_actor_creation_resources = actor_creation_resources
            self._ray_actor_method_cpus = actor_method_cpus

        def _actor_method_call(
            self, method_name, args=None, kwargs=None, dependency=None
        ):
            """Method execution stub for an actor handle.

            This is the function that executes when
            `actor.method_name.remote(*args, **kwargs)` is called. Instead of
            executing locally, the method is packaged as a task and scheduled
            to the remote actor instance.

            Args:
                self: The local actor handle.
                method_name: The name of the actor method to execute.
                args: A list of arguments for the actor method.
                kwargs: A dictionary of keyword arguments for the actor method.
                dependency: The object ID that this method is dependent on.
                    Defaults to None, for no dependencies. Most tasks should
                    pass in the dummy object returned by the preceding task.
                    Some tasks, such as checkpoint and terminate methods, have
                    no dependencies.

            Returns:
                object_ids: A list of object IDs returned by the remote actor
                    method.
            """
            ray.worker.check_connected()
            ray.worker.check_main_thread()
            function_signature = self._ray_method_signatures[method_name]
            if args is None:
                args = []
            if kwargs is None:
                kwargs = {}
            args = signature.extend_args(function_signature, args, kwargs)

            # Execute functions locally if Ray is run in PYTHON_MODE
            # Copy args to prevent the function from mutating them.
            if ray.worker.global_worker.mode == ray.PYTHON_MODE:
                return getattr(
                    ray.worker.global_worker.actors[self._ray_actor_id], method_name
                )(*copy.deepcopy(args))

            # Add the execution dependency.
            if dependency is None:
                execution_dependencies = []
            else:
                execution_dependencies = [dependency]

            is_actor_checkpoint_method = method_name == "__ray_checkpoint__"

            function_id = compute_actor_method_function_id(
                self._ray_class_name, method_name
            )
            object_ids = ray.worker.global_worker.submit_task(
                function_id,
                args,
                actor_id=self._ray_actor_id,
                actor_handle_id=self._ray_actor_handle_id,
                actor_counter=self._ray_actor_counter,
                is_actor_checkpoint_method=is_actor_checkpoint_method,
                actor_creation_dummy_object_id=(
                    self._ray_actor_creation_dummy_object_id
                ),
                execution_dependencies=execution_dependencies,
            )
            # Update the actor counter and cursor to reflect the most recent
            # invocation.
            self._ray_actor_counter += 1
            self._ray_actor_cursor = object_ids.pop()

            # The last object returned is the dummy object that should be
            # passed in to the next actor method. Do not return it to the user.
            if len(object_ids) == 1:
                return object_ids[0]
            elif len(object_ids) > 1:
                return object_ids

        # Make tab completion work.
        def __dir__(self):
            return self._ray_actor_method_names

        def __getattribute__(self, attr):
            try:
                # Check whether this is an actor method.
                actor_method_names = object.__getattribute__(
                    self, "_ray_actor_method_names"
                )
                if attr in actor_method_names:
                    # We create the ActorMethod on the fly here so that the
                    # ActorHandle doesn't need a reference to the ActorMethod.
                    # The ActorMethod has a reference to the ActorHandle and
                    # this was causing cyclic references which were preventing
                    # object deallocation from behaving in a predictable
                    # manner.
                    actor_method_cls = ActorMethod
                    return actor_method_cls(self, attr)
            except AttributeError:
                # _ray_actor_method_names may not be set yet (e.g. during
                # _manual_init); fall through to the default lookup.
                pass

            # If the requested attribute is not a registered method, fall back
            # to default __getattribute__.
            return object.__getattribute__(self, attr)

        def __repr__(self):
            return "Actor(" + self._ray_actor_id.hex() + ")"

        def __reduce__(self):
            raise Exception("Actor objects cannot be pickled.")

        def __del__(self):
            """Kill the worker that is running this actor."""
            # TODO(swang): Also clean up forked actor handles.
            # Kill the worker if this is the original actor handle, created
            # with Class.remote().
            if (
                ray.worker.global_worker.connected
                and self._ray_actor_handle_id.id() == ray.worker.NIL_ACTOR_ID
            ):
                # TODO(rkn): Should we be passing in the actor cursor as a
                # dependency here?
                self._actor_method_call(
                    "__ray_terminate__", args=[self._ray_actor_id.id()]
                )

    return ActorHandle
|
def make_actor_handle_class(class_name):
class ActorHandle(ActorHandleParent):
def __init__(self, *args, **kwargs):
raise Exception(
"Actor classes cannot be instantiated directly. "
"Instead of running '{}()', try '{}.remote()'.".format(
class_name, class_name
)
)
@classmethod
def remote(cls, *args, **kwargs):
raise NotImplementedError(
"The classmethod remote() can only be called on the original Class."
)
def _manual_init(
self,
actor_id,
actor_handle_id,
actor_cursor,
actor_counter,
actor_method_names,
actor_method_num_return_vals,
method_signatures,
checkpoint_interval,
):
self._ray_actor_id = actor_id
self._ray_actor_handle_id = actor_handle_id
self._ray_actor_cursor = actor_cursor
self._ray_actor_counter = actor_counter
self._ray_actor_method_names = actor_method_names
self._ray_actor_method_num_return_vals = actor_method_num_return_vals
self._ray_method_signatures = method_signatures
self._ray_checkpoint_interval = checkpoint_interval
self._ray_class_name = class_name
self._ray_actor_forks = 0
def _actor_method_call(
self, method_name, args=None, kwargs=None, dependency=None
):
"""Method execution stub for an actor handle.
This is the function that executes when
`actor.method_name.remote(*args, **kwargs)` is called. Instead of
executing locally, the method is packaged as a task and scheduled
to the remote actor instance.
Args:
self: The local actor handle.
method_name: The name of the actor method to execute.
args: A list of arguments for the actor method.
kwargs: A dictionary of keyword arguments for the actor method.
dependency: The object ID that this method is dependent on.
Defaults to None, for no dependencies. Most tasks should
pass in the dummy object returned by the preceding task.
Some tasks, such as checkpoint and terminate methods, have
no dependencies.
Returns:
object_ids: A list of object IDs returned by the remote actor
method.
"""
ray.worker.check_connected()
ray.worker.check_main_thread()
function_signature = self._ray_method_signatures[method_name]
if args is None:
args = []
if kwargs is None:
kwargs = {}
args = signature.extend_args(function_signature, args, kwargs)
# Execute functions locally if Ray is run in PYTHON_MODE
# Copy args to prevent the function from mutating them.
if ray.worker.global_worker.mode == ray.PYTHON_MODE:
return getattr(
ray.worker.global_worker.actors[self._ray_actor_id], method_name
)(*copy.deepcopy(args))
# Add the execution dependency.
if dependency is None:
execution_dependencies = []
else:
execution_dependencies = [dependency]
is_actor_checkpoint_method = method_name == "__ray_checkpoint__"
function_id = compute_actor_method_function_id(
self._ray_class_name, method_name
)
object_ids = ray.worker.global_worker.submit_task(
function_id,
args,
actor_id=self._ray_actor_id,
actor_handle_id=self._ray_actor_handle_id,
actor_counter=self._ray_actor_counter,
is_actor_checkpoint_method=is_actor_checkpoint_method,
execution_dependencies=execution_dependencies,
)
# Update the actor counter and cursor to reflect the most recent
# invocation.
self._ray_actor_counter += 1
self._ray_actor_cursor = object_ids.pop()
# The last object returned is the dummy object that should be
# passed in to the next actor method. Do not return it to the user.
if len(object_ids) == 1:
return object_ids[0]
elif len(object_ids) > 1:
return object_ids
# Make tab completion work.
def __dir__(self):
return self._ray_actor_method_names
def __getattribute__(self, attr):
    """Route attribute access so registered actor method names return stubs.

    If ``attr`` is one of the actor's registered remote method names, a
    fresh ``ActorMethod`` wrapper is built and returned; every other
    attribute falls through to the default lookup.

    Args:
        attr: Name of the attribute being accessed.

    Returns:
        An ``ActorMethod`` stub for registered method names, otherwise
        the result of the default ``object.__getattribute__``.
    """
    try:
        # Check whether this is an actor method.  Use
        # object.__getattribute__ directly to avoid re-entering this
        # override recursively.
        actor_method_names = object.__getattribute__(
            self, "_ray_actor_method_names"
        )
        if attr in actor_method_names:
            # We create the ActorMethod on the fly here so that the
            # ActorHandle doesn't need a reference to the ActorMethod.
            # The ActorMethod has a reference to the ActorHandle and
            # this was causing cyclic references which prevented
            # object deallocation from behaving in a predictable
            # manner.
            actor_method_cls = ActorMethod
            return actor_method_cls(self, attr)
    except AttributeError:
        # _ray_actor_method_names is not set yet (e.g. during
        # construction); fall through to the default lookup.
        pass
    # If the requested attribute is not a registered method, fall back
    # to default __getattribute__.
    return object.__getattribute__(self, attr)
def __repr__(self):
return "Actor(" + self._ray_actor_id.hex() + ")"
def __reduce__(self):
raise Exception("Actor objects cannot be pickled.")
def __del__(self):
    """Kill the worker that is running this actor."""
    # TODO(swang): Also clean up forked actor handles.
    # Kill the worker if this is the original actor handle, created
    # with Class.remote().  Forked handles carry a non-nil handle ID
    # and must not terminate the shared actor process.
    if (
        ray.worker.global_worker.connected
        and self._ray_actor_handle_id.id() == ray.worker.NIL_ACTOR_ID
    ):
        self._actor_method_call(
            "__ray_terminate__", args=[self._ray_actor_id.id()]
        )
return ActorHandle
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def remote(cls, *args, **kwargs):
    """Create a new actor process and return a handle to it.

    Exports the actor class (once per handle class, tracked by the
    ``exported`` closure list) and the actor instance itself, then
    schedules ``__init__`` as the actor's first remote task.

    Args:
        *args: Positional arguments forwarded to the actor's __init__.
        **kwargs: Keyword arguments forwarded to the actor's __init__.

    Returns:
        A new handle object wrapping the created actor.

    Raises:
        Exception: If ray.init() has not been called yet.
    """
    if ray.worker.global_worker.mode is None:
        raise Exception("Actors cannot be created before ray.init() has been called.")
    actor_id = random_actor_id()
    # The ID for this instance of ActorHandle. These should be unique
    # across instances with the same _ray_actor_id.
    actor_handle_id = ray.local_scheduler.ObjectID(ray.worker.NIL_ACTOR_ID)
    # The actor cursor is a dummy object representing the most recent
    # actor method invocation. For each subsequent method invocation,
    # the current cursor should be added as a dependency, and then
    # updated to reflect the new invocation.
    actor_cursor = None
    # The number of actor method invocations that we've called so far.
    actor_counter = 0
    # Get the actor methods of the given class.
    actor_methods = inspect.getmembers(
        Class,
        predicate=(
            lambda x: (inspect.isfunction(x) or inspect.ismethod(x) or is_cython(x))
        ),
    )
    # Extract the signatures of each of the methods. This will be used
    # to catch some errors if the methods are called with inappropriate
    # arguments.
    method_signatures = dict()
    for k, v in actor_methods:
        # Print a warning message if the method signature is not
        # supported. We don't raise an exception because if the actor
        # inherits from a class that has a method whose signature we
        # don't support, there may not be much the user can do about
        # it.
        signature.check_signature_supported(v, warn=True)
        method_signatures[k] = signature.extract_signature(v, ignore_first=True)
    actor_method_names = [method_name for method_name, _ in actor_methods]
    # Per-method return-value counts; default is a single return value
    # unless the method was decorated with __ray_num_return_vals__.
    actor_method_num_return_vals = []
    for _, method in actor_methods:
        if hasattr(method, "__ray_num_return_vals__"):
            actor_method_num_return_vals.append(method.__ray_num_return_vals__)
        else:
            actor_method_num_return_vals.append(1)
    # Do not export the actor class or the actor if run in PYTHON_MODE
    # Instead, instantiate the actor locally and add it to
    # global_worker's dictionary
    if ray.worker.global_worker.mode == ray.PYTHON_MODE:
        ray.worker.global_worker.actors[actor_id] = Class.__new__(Class)
    else:
        # Export the actor class only once per handle class.
        if not exported:
            export_actor_class(
                class_id,
                Class,
                actor_method_names,
                actor_method_num_return_vals,
                checkpoint_interval,
                ray.worker.global_worker,
            )
            exported.append(0)
        # Exporting the actor returns the initial cursor (the dummy
        # object for the actor-creation task).
        actor_cursor = export_actor(
            actor_id,
            class_id,
            class_name,
            actor_method_names,
            actor_method_num_return_vals,
            actor_creation_resources,
            actor_method_cpus,
            ray.worker.global_worker,
        )
    # Instantiate the actor handle.  __new__ is used so that the
    # handle's own __init__ (if any) is bypassed.
    actor_object = cls.__new__(cls)
    actor_object._manual_init(
        actor_id,
        class_id,
        actor_handle_id,
        actor_cursor,
        actor_counter,
        actor_method_names,
        actor_method_num_return_vals,
        method_signatures,
        checkpoint_interval,
        actor_cursor,
        actor_creation_resources,
        actor_method_cpus,
    )
    # Call __init__ as a remote function.
    if "__init__" in actor_object._ray_actor_method_names:
        actor_object._actor_method_call(
            "__init__", args=args, kwargs=kwargs, dependency=actor_cursor
        )
    else:
        print("WARNING: this object has no __init__ method.")
    return actor_object
|
def remote(cls, *args, **kwargs):
    """Create a new actor process and return a handle to it.

    Exports the actor class (once per handle class, tracked by the
    ``exported`` closure list) and the actor instance itself, then
    schedules ``__init__`` as the actor's first remote task.

    Args:
        *args: Positional arguments forwarded to the actor's __init__.
        **kwargs: Keyword arguments forwarded to the actor's __init__.

    Returns:
        A new handle object wrapping the created actor.

    Raises:
        Exception: If ray.init() has not been called yet.
    """
    if ray.worker.global_worker.mode is None:
        raise Exception("Actors cannot be created before ray.init() has been called.")
    actor_id = random_actor_id()
    # The ID for this instance of ActorHandle. These should be unique
    # across instances with the same _ray_actor_id.
    actor_handle_id = ray.local_scheduler.ObjectID(ray.worker.NIL_ACTOR_ID)
    # The actor cursor is a dummy object representing the most recent
    # actor method invocation. For each subsequent method invocation,
    # the current cursor should be added as a dependency, and then
    # updated to reflect the new invocation.
    actor_cursor = None
    # The number of actor method invocations that we've called so far.
    actor_counter = 0
    # Get the actor methods of the given class.
    actor_methods = inspect.getmembers(
        Class,
        predicate=(
            lambda x: (inspect.isfunction(x) or inspect.ismethod(x) or is_cython(x))
        ),
    )
    # Extract the signatures of each of the methods. This will be used
    # to catch some errors if the methods are called with inappropriate
    # arguments.
    method_signatures = dict()
    for k, v in actor_methods:
        # Print a warning message if the method signature is not
        # supported. We don't raise an exception because if the actor
        # inherits from a class that has a method whose signature we
        # don't support, there may not be much the user can do about
        # it.
        signature.check_signature_supported(v, warn=True)
        method_signatures[k] = signature.extract_signature(v, ignore_first=True)
    actor_method_names = [method_name for method_name, _ in actor_methods]
    # Per-method return-value counts; default is a single return value
    # unless the method was decorated with __ray_num_return_vals__.
    actor_method_num_return_vals = []
    for _, method in actor_methods:
        if hasattr(method, "__ray_num_return_vals__"):
            actor_method_num_return_vals.append(method.__ray_num_return_vals__)
        else:
            actor_method_num_return_vals.append(1)
    # Do not export the actor class or the actor if run in PYTHON_MODE
    # Instead, instantiate the actor locally and add it to
    # global_worker's dictionary
    if ray.worker.global_worker.mode == ray.PYTHON_MODE:
        ray.worker.global_worker.actors[actor_id] = Class.__new__(Class)
    else:
        # Export the actor class only once per handle class.
        if not exported:
            export_actor_class(
                class_id,
                Class,
                actor_method_names,
                actor_method_num_return_vals,
                checkpoint_interval,
                ray.worker.global_worker,
            )
            exported.append(0)
        export_actor(
            actor_id,
            class_id,
            class_name,
            actor_method_names,
            actor_method_num_return_vals,
            resources,
            ray.worker.global_worker,
        )
    # Instantiate the actor handle.  __new__ is used so that the
    # handle's own __init__ (if any) is bypassed.
    actor_object = cls.__new__(cls)
    actor_object._manual_init(
        actor_id,
        actor_handle_id,
        actor_cursor,
        actor_counter,
        actor_method_names,
        actor_method_num_return_vals,
        method_signatures,
        checkpoint_interval,
    )
    # Call __init__ as a remote function.
    if "__init__" in actor_object._ray_actor_method_names:
        actor_object._actor_method_call("__init__", args=args, kwargs=kwargs)
    else:
        print("WARNING: this object has no __init__ method.")
    return actor_object
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def _manual_init(
    self,
    actor_id,
    class_id,
    actor_handle_id,
    actor_cursor,
    actor_counter,
    actor_method_names,
    actor_method_num_return_vals,
    method_signatures,
    checkpoint_interval,
    actor_creation_dummy_object_id,
    actor_creation_resources,
    actor_method_cpus,
):
    """Populate the handle's per-actor bookkeeping fields in one pass."""
    state = {
        # Identity of the actor, its class, and this particular handle.
        "_ray_actor_id": actor_id,
        "_ray_class_id": class_id,
        "_ray_actor_handle_id": actor_handle_id,
        # Cursor/counter track the most recent method invocation.
        "_ray_actor_cursor": actor_cursor,
        "_ray_actor_counter": actor_counter,
        # Method metadata used when packaging remote calls.
        "_ray_actor_method_names": actor_method_names,
        "_ray_actor_method_num_return_vals": actor_method_num_return_vals,
        "_ray_method_signatures": method_signatures,
        "_ray_checkpoint_interval": checkpoint_interval,
        # class_name is captured from the enclosing factory's closure.
        "_ray_class_name": class_name,
        "_ray_actor_forks": 0,
        # Actor-creation bookkeeping.
        "_ray_actor_creation_dummy_object_id": actor_creation_dummy_object_id,
        "_ray_actor_creation_resources": actor_creation_resources,
        "_ray_actor_method_cpus": actor_method_cpus,
    }
    for attr_name, value in state.items():
        setattr(self, attr_name, value)
|
def _manual_init(
    self,
    actor_id,
    actor_handle_id,
    actor_cursor,
    actor_counter,
    actor_method_names,
    actor_method_num_return_vals,
    method_signatures,
    checkpoint_interval,
):
    """Populate the handle's per-actor bookkeeping fields in one pass."""
    state = {
        # Identity of the actor and of this particular handle.
        "_ray_actor_id": actor_id,
        "_ray_actor_handle_id": actor_handle_id,
        # Cursor/counter track the most recent method invocation.
        "_ray_actor_cursor": actor_cursor,
        "_ray_actor_counter": actor_counter,
        # Method metadata used when packaging remote calls.
        "_ray_actor_method_names": actor_method_names,
        "_ray_actor_method_num_return_vals": actor_method_num_return_vals,
        "_ray_method_signatures": method_signatures,
        "_ray_checkpoint_interval": checkpoint_interval,
        # class_name is captured from the enclosing factory's closure.
        "_ray_class_name": class_name,
        "_ray_actor_forks": 0,
    }
    for attr_name, value in state.items():
        setattr(self, attr_name, value)
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def _actor_method_call(self, method_name, args=None, kwargs=None, dependency=None):
    """Method execution stub for an actor handle.

    This is the function that executes when
    `actor.method_name.remote(*args, **kwargs)` is called. Instead of
    executing locally, the method is packaged as a task and scheduled
    to the remote actor instance.

    Args:
        self: The local actor handle.
        method_name: The name of the actor method to execute.
        args: A list of arguments for the actor method.
        kwargs: A dictionary of keyword arguments for the actor method.
        dependency: The object ID that this method is dependent on.
            Defaults to None, for no dependencies. Most tasks should
            pass in the dummy object returned by the preceding task.
            Some tasks, such as checkpoint and terminate methods, have
            no dependencies.

    Returns:
        object_ids: A list of object IDs returned by the remote actor
            method (a single ID when there is one return value; None
            when there are none).
    """
    ray.worker.check_connected()
    ray.worker.check_main_thread()
    function_signature = self._ray_method_signatures[method_name]
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    args = signature.extend_args(function_signature, args, kwargs)
    # Execute functions locally if Ray is run in PYTHON_MODE
    # Copy args to prevent the function from mutating them.
    if ray.worker.global_worker.mode == ray.PYTHON_MODE:
        return getattr(
            ray.worker.global_worker.actors[self._ray_actor_id], method_name
        )(*copy.deepcopy(args))
    # Add the execution dependency.
    if dependency is None:
        execution_dependencies = []
    else:
        execution_dependencies = [dependency]
    is_actor_checkpoint_method = method_name == "__ray_checkpoint__"
    function_id = compute_actor_method_function_id(self._ray_class_name, method_name)
    object_ids = ray.worker.global_worker.submit_task(
        function_id,
        args,
        actor_id=self._ray_actor_id,
        actor_handle_id=self._ray_actor_handle_id,
        actor_counter=self._ray_actor_counter,
        is_actor_checkpoint_method=is_actor_checkpoint_method,
        actor_creation_dummy_object_id=(self._ray_actor_creation_dummy_object_id),
        execution_dependencies=execution_dependencies,
    )
    # Update the actor counter and cursor to reflect the most recent
    # invocation.
    self._ray_actor_counter += 1
    self._ray_actor_cursor = object_ids.pop()
    # The last object returned is the dummy object that should be
    # passed in to the next actor method. Do not return it to the user.
    if len(object_ids) == 1:
        return object_ids[0]
    elif len(object_ids) > 1:
        return object_ids
|
def _actor_method_call(self, method_name, args=None, kwargs=None, dependency=None):
    """Method execution stub for an actor handle.

    This is the function that executes when
    `actor.method_name.remote(*args, **kwargs)` is called. Instead of
    executing locally, the method is packaged as a task and scheduled
    to the remote actor instance.

    Args:
        self: The local actor handle.
        method_name: The name of the actor method to execute.
        args: A list of arguments for the actor method.
        kwargs: A dictionary of keyword arguments for the actor method.
        dependency: The object ID that this method is dependent on.
            Defaults to None, for no dependencies. Most tasks should
            pass in the dummy object returned by the preceding task.
            Some tasks, such as checkpoint and terminate methods, have
            no dependencies.

    Returns:
        object_ids: A list of object IDs returned by the remote actor
            method (a single ID when there is one return value; None
            when there are none).
    """
    ray.worker.check_connected()
    ray.worker.check_main_thread()
    function_signature = self._ray_method_signatures[method_name]
    if args is None:
        args = []
    if kwargs is None:
        kwargs = {}
    args = signature.extend_args(function_signature, args, kwargs)
    # Execute functions locally if Ray is run in PYTHON_MODE
    # Copy args to prevent the function from mutating them.
    if ray.worker.global_worker.mode == ray.PYTHON_MODE:
        return getattr(
            ray.worker.global_worker.actors[self._ray_actor_id], method_name
        )(*copy.deepcopy(args))
    # Add the execution dependency.
    if dependency is None:
        execution_dependencies = []
    else:
        execution_dependencies = [dependency]
    is_actor_checkpoint_method = method_name == "__ray_checkpoint__"
    function_id = compute_actor_method_function_id(self._ray_class_name, method_name)
    object_ids = ray.worker.global_worker.submit_task(
        function_id,
        args,
        actor_id=self._ray_actor_id,
        actor_handle_id=self._ray_actor_handle_id,
        actor_counter=self._ray_actor_counter,
        is_actor_checkpoint_method=is_actor_checkpoint_method,
        execution_dependencies=execution_dependencies,
    )
    # Update the actor counter and cursor to reflect the most recent
    # invocation.
    self._ray_actor_counter += 1
    self._ray_actor_cursor = object_ids.pop()
    # The last object returned is the dummy object that should be
    # passed in to the next actor method. Do not return it to the user.
    if len(object_ids) == 1:
        return object_ids[0]
    elif len(object_ids) > 1:
        return object_ids
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def __del__(self):
    """Kill the worker that is running this actor."""
    # TODO(swang): Also clean up forked actor handles.
    # Kill the worker if this is the original actor handle, created
    # with Class.remote().  Forked handles carry a non-nil handle ID
    # and must not terminate the shared actor process.
    if (
        ray.worker.global_worker.connected
        and self._ray_actor_handle_id.id() == ray.worker.NIL_ACTOR_ID
    ):
        # TODO(rkn): Should we be passing in the actor cursor as a
        # dependency here?
        self._actor_method_call("__ray_terminate__", args=[self._ray_actor_id.id()])
|
def __del__(self):
    """Kill the worker that is running this actor."""
    # TODO(swang): Also clean up forked actor handles.
    if not ray.worker.global_worker.connected:
        return
    # Only the original handle (created with Class.remote()) terminates
    # the actor; forked handles carry a non-nil handle ID.
    if self._ray_actor_handle_id.id() != ray.worker.NIL_ACTOR_ID:
        return
    self._actor_method_call("__ray_terminate__", args=[self._ray_actor_id.id()])
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def actor_handle_from_class(
    Class, class_id, actor_creation_resources, checkpoint_interval, actor_method_cpus
):
    """Build an ActorHandle class for the given actor class.

    The returned handle class closes over the actor class and its
    export state; calling ``ActorHandle.remote(...)`` creates a new
    actor process and returns a handle instance for it.

    Args:
        Class: The (already wrapped) actor class.
        class_id: Unique ID for the actor class.
        actor_creation_resources: Resources required to place the actor.
        checkpoint_interval: Number of tasks between checkpoints.
        actor_method_cpus: CPUs required per actor method invocation.

    Returns:
        The ActorHandle class specialized for ``Class``.
    """
    class_name = Class.__name__.encode("ascii")
    actor_handle_class = make_actor_handle_class(class_name)
    # Mutable closure flag: non-empty once the actor class has been
    # exported, so it is only exported once.
    exported = []

    class ActorHandle(actor_handle_class):
        @classmethod
        def remote(cls, *args, **kwargs):
            """Create a new actor and return a handle to it."""
            if ray.worker.global_worker.mode is None:
                raise Exception(
                    "Actors cannot be created before ray.init() has been called."
                )
            actor_id = random_actor_id()
            # The ID for this instance of ActorHandle. These should be unique
            # across instances with the same _ray_actor_id.
            actor_handle_id = ray.local_scheduler.ObjectID(ray.worker.NIL_ACTOR_ID)
            # The actor cursor is a dummy object representing the most recent
            # actor method invocation. For each subsequent method invocation,
            # the current cursor should be added as a dependency, and then
            # updated to reflect the new invocation.
            actor_cursor = None
            # The number of actor method invocations that we've called so far.
            actor_counter = 0
            # Get the actor methods of the given class.
            actor_methods = inspect.getmembers(
                Class,
                predicate=(
                    lambda x: (
                        inspect.isfunction(x) or inspect.ismethod(x) or is_cython(x)
                    )
                ),
            )
            # Extract the signatures of each of the methods. This will be used
            # to catch some errors if the methods are called with inappropriate
            # arguments.
            method_signatures = dict()
            for k, v in actor_methods:
                # Print a warning message if the method signature is not
                # supported. We don't raise an exception because if the actor
                # inherits from a class that has a method whose signature we
                # don't support, there may not be much the user can do about
                # it.
                signature.check_signature_supported(v, warn=True)
                method_signatures[k] = signature.extract_signature(v, ignore_first=True)
            actor_method_names = [method_name for method_name, _ in actor_methods]
            # Per-method return-value counts; defaults to 1 unless the
            # method was decorated with __ray_num_return_vals__.
            actor_method_num_return_vals = []
            for _, method in actor_methods:
                if hasattr(method, "__ray_num_return_vals__"):
                    actor_method_num_return_vals.append(method.__ray_num_return_vals__)
                else:
                    actor_method_num_return_vals.append(1)
            # Do not export the actor class or the actor if run in PYTHON_MODE
            # Instead, instantiate the actor locally and add it to
            # global_worker's dictionary
            if ray.worker.global_worker.mode == ray.PYTHON_MODE:
                ray.worker.global_worker.actors[actor_id] = Class.__new__(Class)
            else:
                # Export the actor class once.
                if not exported:
                    export_actor_class(
                        class_id,
                        Class,
                        actor_method_names,
                        actor_method_num_return_vals,
                        checkpoint_interval,
                        ray.worker.global_worker,
                    )
                    exported.append(0)
                # Exporting the actor returns the initial cursor (the
                # dummy object for the actor-creation task).
                actor_cursor = export_actor(
                    actor_id,
                    class_id,
                    class_name,
                    actor_method_names,
                    actor_method_num_return_vals,
                    actor_creation_resources,
                    actor_method_cpus,
                    ray.worker.global_worker,
                )
            # Instantiate the actor handle, bypassing __init__.
            actor_object = cls.__new__(cls)
            actor_object._manual_init(
                actor_id,
                class_id,
                actor_handle_id,
                actor_cursor,
                actor_counter,
                actor_method_names,
                actor_method_num_return_vals,
                method_signatures,
                checkpoint_interval,
                actor_cursor,
                actor_creation_resources,
                actor_method_cpus,
            )
            # Call __init__ as a remote function.
            if "__init__" in actor_object._ray_actor_method_names:
                actor_object._actor_method_call(
                    "__init__", args=args, kwargs=kwargs, dependency=actor_cursor
                )
            else:
                print("WARNING: this object has no __init__ method.")
            return actor_object

    return ActorHandle
|
def actor_handle_from_class(Class, class_id, resources, checkpoint_interval):
    """Build an ActorHandle class for the given actor class.

    The returned handle class closes over the actor class and its
    export state; calling ``ActorHandle.remote(...)`` creates a new
    actor process and returns a handle instance for it.

    Args:
        Class: The (already wrapped) actor class.
        class_id: Unique ID for the actor class.
        resources: Resources required to place the actor.
        checkpoint_interval: Number of tasks between checkpoints.

    Returns:
        The ActorHandle class specialized for ``Class``.
    """
    class_name = Class.__name__.encode("ascii")
    actor_handle_class = make_actor_handle_class(class_name)
    # Mutable closure flag: non-empty once the actor class has been
    # exported, so it is only exported once.
    exported = []

    class ActorHandle(actor_handle_class):
        @classmethod
        def remote(cls, *args, **kwargs):
            """Create a new actor and return a handle to it."""
            if ray.worker.global_worker.mode is None:
                raise Exception(
                    "Actors cannot be created before ray.init() has been called."
                )
            actor_id = random_actor_id()
            # The ID for this instance of ActorHandle. These should be unique
            # across instances with the same _ray_actor_id.
            actor_handle_id = ray.local_scheduler.ObjectID(ray.worker.NIL_ACTOR_ID)
            # The actor cursor is a dummy object representing the most recent
            # actor method invocation. For each subsequent method invocation,
            # the current cursor should be added as a dependency, and then
            # updated to reflect the new invocation.
            actor_cursor = None
            # The number of actor method invocations that we've called so far.
            actor_counter = 0
            # Get the actor methods of the given class.
            actor_methods = inspect.getmembers(
                Class,
                predicate=(
                    lambda x: (
                        inspect.isfunction(x) or inspect.ismethod(x) or is_cython(x)
                    )
                ),
            )
            # Extract the signatures of each of the methods. This will be used
            # to catch some errors if the methods are called with inappropriate
            # arguments.
            method_signatures = dict()
            for k, v in actor_methods:
                # Print a warning message if the method signature is not
                # supported. We don't raise an exception because if the actor
                # inherits from a class that has a method whose signature we
                # don't support, there may not be much the user can do about
                # it.
                signature.check_signature_supported(v, warn=True)
                method_signatures[k] = signature.extract_signature(v, ignore_first=True)
            actor_method_names = [method_name for method_name, _ in actor_methods]
            # Per-method return-value counts; defaults to 1 unless the
            # method was decorated with __ray_num_return_vals__.
            actor_method_num_return_vals = []
            for _, method in actor_methods:
                if hasattr(method, "__ray_num_return_vals__"):
                    actor_method_num_return_vals.append(method.__ray_num_return_vals__)
                else:
                    actor_method_num_return_vals.append(1)
            # Do not export the actor class or the actor if run in PYTHON_MODE
            # Instead, instantiate the actor locally and add it to
            # global_worker's dictionary
            if ray.worker.global_worker.mode == ray.PYTHON_MODE:
                ray.worker.global_worker.actors[actor_id] = Class.__new__(Class)
            else:
                # Export the actor class once.
                if not exported:
                    export_actor_class(
                        class_id,
                        Class,
                        actor_method_names,
                        actor_method_num_return_vals,
                        checkpoint_interval,
                        ray.worker.global_worker,
                    )
                    exported.append(0)
                export_actor(
                    actor_id,
                    class_id,
                    class_name,
                    actor_method_names,
                    actor_method_num_return_vals,
                    resources,
                    ray.worker.global_worker,
                )
            # Instantiate the actor handle, bypassing __init__.
            actor_object = cls.__new__(cls)
            actor_object._manual_init(
                actor_id,
                actor_handle_id,
                actor_cursor,
                actor_counter,
                actor_method_names,
                actor_method_num_return_vals,
                method_signatures,
                checkpoint_interval,
            )
            # Call __init__ as a remote function.
            if "__init__" in actor_object._ray_actor_method_names:
                actor_object._actor_method_call("__init__", args=args, kwargs=kwargs)
            else:
                print("WARNING: this object has no __init__ method.")
            return actor_object

    return ActorHandle
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def make_actor(cls, resources, checkpoint_interval, actor_method_cpus):
    """Wrap a user class as a Ray actor class and return its handle class.

    Subclasses the user class to add the internal lifecycle methods
    (terminate, checkpoint save/restore), then builds and returns the
    corresponding ActorHandle class.

    Args:
        cls: The user-defined class to turn into an actor.
        resources: Resources required to place the actor.
        checkpoint_interval: Number of tasks between checkpoints.
        actor_method_cpus: CPUs required per actor method invocation.

    Returns:
        The ActorHandle class for the wrapped actor class.

    Raises:
        Exception: If checkpoint_interval is 0.
    """
    if checkpoint_interval == 0:
        raise Exception("checkpoint_interval must be greater than 0.")

    # Modify the class to have an additional method that will be used for
    # terminating the worker.
    class Class(cls):
        def __ray_terminate__(self, actor_id):
            """Mark the actor removed and kill this worker process."""
            # Record that this actor has been removed so that if this node
            # dies later, the actor won't be recreated. Alternatively, we could
            # remove the actor key from Redis here.
            ray.worker.global_worker.redis_client.hset(
                b"Actor:" + actor_id, "removed", True
            )
            # Disconnect the worker from the local scheduler. The point of this
            # is so that when the worker kills itself below, the local
            # scheduler won't push an error message to the driver.
            ray.worker.global_worker.local_scheduler_client.disconnect()
            import os

            os._exit(0)

        def __ray_save_checkpoint__(self):
            """Serialize this actor's state (via __ray_save__ if defined)."""
            if hasattr(self, "__ray_save__"):
                object_to_serialize = self.__ray_save__()
            else:
                object_to_serialize = self
            return pickle.dumps(object_to_serialize)

        @classmethod
        def __ray_restore_from_checkpoint__(cls, pickled_checkpoint):
            """Rebuild an actor instance from a pickled checkpoint."""
            checkpoint = pickle.loads(pickled_checkpoint)
            if hasattr(cls, "__ray_restore__"):
                actor_object = cls.__new__(cls)
                actor_object.__ray_restore__(checkpoint)
            else:
                # TODO(rkn): It's possible that this will cause problems. When
                # you unpickle the same object twice, the two objects will not
                # have the same class.
                actor_object = checkpoint
            return actor_object

        def __ray_checkpoint__(self):
            """Save a checkpoint.

            This task saves the current state of the actor, the current task
            frontier according to the local scheduler, and the checkpoint index
            (number of tasks executed so far).
            """
            worker = ray.worker.global_worker
            checkpoint_index = worker.actor_task_counter
            # Get the state to save.
            checkpoint = self.__ray_save_checkpoint__()
            # Get the current task frontier, per actor handle.
            # NOTE(swang): This only includes actor handles that the local
            # scheduler has seen. Handle IDs for which no task has yet reached
            # the local scheduler will not be included, and may not be runnable
            # on checkpoint resumption.
            actor_id = ray.local_scheduler.ObjectID(worker.actor_id)
            frontier = worker.local_scheduler_client.get_actor_frontier(actor_id)
            # Save the checkpoint in Redis. TODO(rkn): Checkpoints
            # should not be stored in Redis. Fix this.
            set_actor_checkpoint(
                worker, worker.actor_id, checkpoint_index, checkpoint, frontier
            )

        def __ray_checkpoint_restore__(self):
            """Restore a checkpoint.

            This task looks for a saved checkpoint and if found, restores the
            state of the actor, the task frontier in the local scheduler, and
            the checkpoint index (number of tasks executed so far).

            Returns:
                A bool indicating whether a checkpoint was resumed.
            """
            worker = ray.worker.global_worker
            # Get the most recent checkpoint stored, if any.
            checkpoint_index, checkpoint, frontier = get_actor_checkpoint(
                worker, worker.actor_id
            )
            # Try to resume from the checkpoint.
            checkpoint_resumed = False
            if checkpoint_index is not None:
                # Load the actor state from the checkpoint.
                worker.actors[worker.actor_id] = (
                    worker.actor_class.__ray_restore_from_checkpoint__(checkpoint)
                )
                # Set the number of tasks executed so far.
                worker.actor_task_counter = checkpoint_index
                # Set the actor frontier in the local scheduler.
                worker.local_scheduler_client.set_actor_frontier(frontier)
                checkpoint_resumed = True
            return checkpoint_resumed

    # Preserve the user class's identity for error messages and pickling.
    Class.__module__ = cls.__module__
    Class.__name__ = cls.__name__
    class_id = random_actor_class_id()
    return actor_handle_from_class(
        Class, class_id, resources, checkpoint_interval, actor_method_cpus
    )
|
def make_actor(cls, resources, checkpoint_interval):
    """Convert the user class ``cls`` into a Ray actor class and return a handle.

    The class is subclassed to add the internal ``__ray_*`` lifecycle methods
    (termination, checkpoint save/restore) that the worker machinery invokes.

    Args:
        cls: The Python class to convert into an actor class.
        resources: Dict mapping resource names to required quantities; only
            "CPU" and "GPU" keys are accepted here.
        checkpoint_interval: Number of tasks between checkpoints; must not
            be 0.

    Returns:
        An actor handle constructed from the augmented class.

    Raises:
        Exception: If a non-CPU/GPU resource is requested, or if
            checkpoint_interval is 0.
    """
    # Print warning if this actor requires custom resources.
    for resource_name in resources:
        if resource_name not in ["CPU", "GPU"]:
            raise Exception(
                "Currently only GPU resources can be used for actor placement."
            )
    if checkpoint_interval == 0:
        raise Exception("checkpoint_interval must be greater than 0.")

    # Modify the class to have an additional method that will be used for
    # terminating the worker.
    class Class(cls):
        def __ray_terminate__(self, actor_id):
            """Mark this actor removed, release its GPUs, and kill the worker."""
            # Record that this actor has been removed so that if this node
            # dies later, the actor won't be recreated. Alternatively, we could
            # remove the actor key from Redis here.
            ray.worker.global_worker.redis_client.hset(
                b"Actor:" + actor_id, "removed", True
            )
            # Release the GPUs that this worker was using.
            if len(ray.get_gpu_ids()) > 0:
                release_gpus_in_use(
                    ray.worker.global_worker.driver_id,
                    ray.worker.global_worker.local_scheduler_id,
                    ray.get_gpu_ids(),
                    ray.worker.global_worker.redis_client,
                )
            # Disconnect the worker from the local scheduler. The point of this
            # is so that when the worker kills itself below, the local
            # scheduler won't push an error message to the driver.
            ray.worker.global_worker.local_scheduler_client.disconnect()
            import os
            os._exit(0)

        def __ray_save_checkpoint__(self):
            """Serialize the actor state (via __ray_save__ when user-defined)."""
            if hasattr(self, "__ray_save__"):
                object_to_serialize = self.__ray_save__()
            else:
                object_to_serialize = self
            return pickle.dumps(object_to_serialize)

        @classmethod
        def __ray_restore_from_checkpoint__(cls, pickled_checkpoint):
            """Reconstruct an actor instance from a pickled checkpoint blob."""
            checkpoint = pickle.loads(pickled_checkpoint)
            if hasattr(cls, "__ray_restore__"):
                # __new__ skips __init__ so user restore logic fully controls
                # the instance's state.
                actor_object = cls.__new__(cls)
                actor_object.__ray_restore__(checkpoint)
            else:
                # TODO(rkn): It's possible that this will cause problems. When
                # you unpickle the same object twice, the two objects will not
                # have the same class.
                actor_object = checkpoint
            return actor_object

        def __ray_checkpoint__(self):
            """Save a checkpoint.

            This task saves the current state of the actor, the current task
            frontier according to the local scheduler, and the checkpoint index
            (number of tasks executed so far).
            """
            worker = ray.worker.global_worker
            checkpoint_index = worker.actor_task_counter
            # Get the state to save.
            checkpoint = self.__ray_save_checkpoint__()
            # Get the current task frontier, per actor handle.
            # NOTE(swang): This only includes actor handles that the local
            # scheduler has seen. Handle IDs for which no task has yet reached
            # the local scheduler will not be included, and may not be runnable
            # on checkpoint resumption.
            actor_id = ray.local_scheduler.ObjectID(worker.actor_id)
            frontier = worker.local_scheduler_client.get_actor_frontier(actor_id)
            # Save the checkpoint in Redis. TODO(rkn): Checkpoints
            # should not be stored in Redis. Fix this.
            set_actor_checkpoint(
                worker, worker.actor_id, checkpoint_index, checkpoint, frontier
            )

        def __ray_checkpoint_restore__(self):
            """Restore a checkpoint.

            This task looks for a saved checkpoint and if found, restores the
            state of the actor, the task frontier in the local scheduler, and
            the checkpoint index (number of tasks executed so far).

            Returns:
                A bool indicating whether a checkpoint was resumed.
            """
            worker = ray.worker.global_worker
            # Get the most recent checkpoint stored, if any.
            checkpoint_index, checkpoint, frontier = get_actor_checkpoint(
                worker, worker.actor_id
            )
            # Try to resume from the checkpoint.
            checkpoint_resumed = False
            if checkpoint_index is not None:
                # Load the actor state from the checkpoint.
                worker.actors[worker.actor_id] = (
                    worker.actor_class.__ray_restore_from_checkpoint__(checkpoint)
                )
                # Set the number of tasks executed so far.
                worker.actor_task_counter = checkpoint_index
                # Set the actor frontier in the local scheduler.
                worker.local_scheduler_client.set_actor_frontier(frontier)
                checkpoint_resumed = True
            return checkpoint_resumed

    # Preserve the user class's identity for pickling/repr purposes.
    Class.__module__ = cls.__module__
    Class.__name__ = cls.__name__
    class_id = random_actor_class_id()
    return actor_handle_from_class(Class, class_id, resources, checkpoint_interval)
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def __ray_terminate__(self, actor_id):
    """Flag this actor as removed and terminate its worker process.

    The removal flag in Redis prevents the actor from being recreated if
    this node dies later; the scheduler is detached first so that the
    process exit below is not reported to the driver as an error.
    """
    import os

    worker = ray.worker.global_worker
    # Record that this actor has been removed so that if this node dies
    # later, the actor won't be recreated. (An alternative would be to
    # delete the actor key from Redis entirely.)
    worker.redis_client.hset(b"Actor:" + actor_id, "removed", True)
    # Detach from the local scheduler before exiting, so that the kill
    # below does not push an error message to the driver.
    worker.local_scheduler_client.disconnect()
    os._exit(0)
|
def __ray_terminate__(self, actor_id):
    """Flag this actor as removed, hand back its GPUs, and kill the worker.

    The removal flag in Redis prevents recreation of the actor if this
    node dies later. Any GPUs held by the worker are released, and the
    local scheduler is detached before the process exits so the exit is
    not surfaced to the driver as an error.
    """
    import os

    worker = ray.worker.global_worker
    # Record that this actor has been removed so that if this node dies
    # later, the actor won't be recreated. (An alternative would be to
    # delete the actor key from Redis entirely.)
    worker.redis_client.hset(b"Actor:" + actor_id, "removed", True)
    # Hand any GPUs this worker held back to the pool.
    gpu_ids = ray.get_gpu_ids()
    if len(gpu_ids) > 0:
        release_gpus_in_use(
            worker.driver_id,
            worker.local_scheduler_id,
            gpu_ids,
            worker.redis_client,
        )
    # Detach from the local scheduler before exiting, so that the kill
    # below does not push an error message to the driver.
    worker.local_scheduler_client.disconnect()
    os._exit(0)
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def _task_table(self, task_id):
    """Fetch and parse the task table information for a single task ID.

    Args:
        task_id: The task ID to get information about.

    Returns:
        A dictionary with information about the task ID in question.
        TASK_STATUS_MAPPING should be used to parse the "State" field
        into a human-readable string.

    Raises:
        Exception: If the task table has no entry for ``task_id``.
    """
    task_table_response = self._execute_command(
        task_id, "RAY.TASK_TABLE_GET", task_id.id()
    )
    if task_table_response is None:
        raise Exception(
            "There is no entry for task ID {} in the task table.".format(
                binary_to_hex(task_id.id())
            )
        )
    # Decode the flatbuffer reply, then re-parse the embedded task spec
    # into a Python-level task object.
    task_table_message = TaskReply.GetRootAsTaskReply(task_table_response, 0)
    task_spec = task_table_message.TaskSpec()
    task_spec = ray.local_scheduler.task_from_string(task_spec)
    task_spec_info = {
        "DriverID": binary_to_hex(task_spec.driver_id().id()),
        "TaskID": binary_to_hex(task_spec.task_id().id()),
        "ParentTaskID": binary_to_hex(task_spec.parent_task_id().id()),
        "ParentCounter": task_spec.parent_counter(),
        "ActorID": binary_to_hex(task_spec.actor_id().id()),
        "ActorCreationID": binary_to_hex(task_spec.actor_creation_id().id()),
        "ActorCreationDummyObjectID": binary_to_hex(
            task_spec.actor_creation_dummy_object_id().id()
        ),
        "ActorCounter": task_spec.actor_counter(),
        "FunctionID": binary_to_hex(task_spec.function_id().id()),
        "Args": task_spec.arguments(),
        "ReturnObjectIDs": task_spec.returns(),
        "RequiredResources": task_spec.required_resources(),
    }
    # Decode the execution dependencies (a nested flatbuffer) into a list
    # of ObjectIDs.
    execution_dependencies_message = (
        TaskExecutionDependencies.GetRootAsTaskExecutionDependencies(
            task_table_message.ExecutionDependencies(), 0
        )
    )
    execution_dependencies = [
        ray.local_scheduler.ObjectID(
            execution_dependencies_message.ExecutionDependencies(i)
        )
        for i in range(execution_dependencies_message.ExecutionDependenciesLength())
    ]
    # TODO(rkn): The return fields ExecutionDependenciesString and
    # ExecutionDependencies are redundant, so we should remove
    # ExecutionDependencies. However, it is currently used in monitor.py.
    return {
        "State": task_table_message.State(),
        "LocalSchedulerID": binary_to_hex(task_table_message.LocalSchedulerId()),
        "ExecutionDependenciesString": task_table_message.ExecutionDependencies(),
        "ExecutionDependencies": execution_dependencies,
        "SpillbackCount": task_table_message.SpillbackCount(),
        "TaskSpec": task_spec_info,
    }
|
def _task_table(self, task_id):
    """Fetch and parse the task table information for a single task ID.

    Args:
        task_id: The task ID to get information about.

    Returns:
        A dictionary with information about the task ID in question.
        TASK_STATUS_MAPPING should be used to parse the "State" field
        into a human-readable string.

    Raises:
        Exception: If the task table has no entry for ``task_id``.
    """
    task_table_response = self._execute_command(
        task_id, "RAY.TASK_TABLE_GET", task_id.id()
    )
    if task_table_response is None:
        raise Exception(
            "There is no entry for task ID {} in the task table.".format(
                binary_to_hex(task_id.id())
            )
        )
    # Decode the flatbuffer reply, then re-parse the embedded task spec
    # into a Python-level task object.
    task_table_message = TaskReply.GetRootAsTaskReply(task_table_response, 0)
    task_spec = task_table_message.TaskSpec()
    task_spec = ray.local_scheduler.task_from_string(task_spec)
    task_spec_info = {
        "DriverID": binary_to_hex(task_spec.driver_id().id()),
        "TaskID": binary_to_hex(task_spec.task_id().id()),
        "ParentTaskID": binary_to_hex(task_spec.parent_task_id().id()),
        "ParentCounter": task_spec.parent_counter(),
        "ActorID": binary_to_hex(task_spec.actor_id().id()),
        "ActorCounter": task_spec.actor_counter(),
        "FunctionID": binary_to_hex(task_spec.function_id().id()),
        "Args": task_spec.arguments(),
        "ReturnObjectIDs": task_spec.returns(),
        "RequiredResources": task_spec.required_resources(),
    }
    return {
        "State": task_table_message.State(),
        "LocalSchedulerID": binary_to_hex(task_table_message.LocalSchedulerId()),
        "ExecutionDependenciesString": task_table_message.ExecutionDependencies(),
        "SpillbackCount": task_table_message.SpillbackCount(),
        "TaskSpec": task_spec_info,
    }
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def driver_removed_handler(self, unused_channel, data):
    """React to a driver-removed notification.

    Parses the notification payload and cleans up the Redis entries that
    were associated with the departed driver.
    """
    notification = DriverTableMessage.GetRootAsDriverTableMessage(data, 0)
    removed_driver_id = notification.DriverId()
    log.info("Driver {} has been removed.".format(binary_to_hex(removed_driver_id)))
    self._clean_up_entries_for_driver(removed_driver_id)
|
def driver_removed_handler(self, unused_channel, data):
    """Handle a notification that a driver has been removed.

    This releases any GPU resources that were reserved for that driver in
    Redis.
    """
    message = DriverTableMessage.GetRootAsDriverTableMessage(data, 0)
    driver_id = message.DriverId()
    log.info("Driver {} has been removed.".format(binary_to_hex(driver_id)))
    # Get a list of the local schedulers that have not been deleted.
    # NOTE: the snapshot is taken before the cleanup below runs.
    local_schedulers = ray.global_state.local_schedulers()
    self._clean_up_entries_for_driver(driver_id)
    # Release any GPU resources that have been reserved for this driver in
    # Redis.
    for local_scheduler in local_schedulers:
        if local_scheduler.get("GPU", 0) > 0:
            local_scheduler_id = local_scheduler["DBClientID"]
            num_gpus_returned = 0
            # Perform a transaction to return the GPUs.
            with self.redis.pipeline() as pipe:
                while True:
                    try:
                        # If this key is changed before the transaction
                        # below (the multi/exec block), then the
                        # transaction will not take place.
                        pipe.watch(local_scheduler_id)
                        result = pipe.hget(local_scheduler_id, "gpus_in_use")
                        gpus_in_use = (
                            dict()
                            if result is None
                            else json.loads(result.decode("ascii"))
                        )
                        driver_id_hex = binary_to_hex(driver_id)
                        if driver_id_hex in gpus_in_use:
                            num_gpus_returned = gpus_in_use.pop(driver_id_hex)
                        pipe.multi()
                        pipe.hset(
                            local_scheduler_id, "gpus_in_use", json.dumps(gpus_in_use)
                        )
                        pipe.execute()
                        # If a WatchError is not raised, then the operations
                        # should have gone through atomically.
                        break
                    except redis.WatchError:
                        # Another client must have changed the watched key
                        # between the time we started WATCHing it and the
                        # pipeline's execution. We should just retry.
                        continue
            log.info(
                "Driver {} is returning GPU IDs {} to local scheduler {}.".format(
                    binary_to_hex(driver_id), num_gpus_returned, local_scheduler_id
                )
            )
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def run(self):
    """Run the monitor.

    This function loops forever, checking for messages about dead database
    clients and cleaning up state accordingly. It never returns.
    """
    # Initialize the subscription channel.
    self.subscribe(DB_CLIENT_TABLE_NAME)
    self.subscribe(LOCAL_SCHEDULER_INFO_CHANNEL)
    self.subscribe(PLASMA_MANAGER_HEARTBEAT_CHANNEL)
    self.subscribe(DRIVER_DEATH_CHANNEL)

    # Scan the database table for dead database clients. NOTE: This must be
    # called before reading any messages from the subscription channel.
    # This ensures that we start in a consistent state, since we may have
    # missed notifications that were sent before we connected to the
    # subscription channel.
    self.scan_db_client_table()
    # If there were any dead clients at startup, clean up the associated
    # state in the state tables.
    if len(self.dead_local_schedulers) > 0:
        self.cleanup_task_table()
    if len(self.dead_plasma_managers) > 0:
        self.cleanup_object_table()
    log.debug(
        "{} dead local schedulers, {} plasma managers total, {} "
        "dead plasma managers".format(
            len(self.dead_local_schedulers),
            (len(self.live_plasma_managers) + len(self.dead_plasma_managers)),
            len(self.dead_plasma_managers),
        )
    )

    # Handle messages from the subscription channels.
    while True:
        # Process autoscaling actions
        if self.autoscaler:
            self.autoscaler.update()
        # Record how many dead local schedulers and plasma managers we had
        # at the beginning of this round.
        num_dead_local_schedulers = len(self.dead_local_schedulers)
        num_dead_plasma_managers = len(self.dead_plasma_managers)
        # Process a round of messages.
        self.process_messages()
        # If any new local schedulers or plasma managers were marked as
        # dead in this round, clean up the associated state.
        if len(self.dead_local_schedulers) > num_dead_local_schedulers:
            self.cleanup_task_table()
        if len(self.dead_plasma_managers) > num_dead_plasma_managers:
            self.cleanup_object_table()
        # Handle plasma managers that timed out during this round.
        plasma_manager_ids = list(self.live_plasma_managers.keys())
        for plasma_manager_id in plasma_manager_ids:
            if (
                (self.live_plasma_managers[plasma_manager_id])
                >= ray._config.num_heartbeats_timeout()
            ):
                log.warn("Timed out {}".format(PLASMA_MANAGER_CLIENT_TYPE))
                # Remove the plasma manager from the managers whose
                # heartbeats we're tracking.
                del self.live_plasma_managers[plasma_manager_id]
                # Remove the plasma manager from the db_client table. The
                # corresponding state in the object table will be cleaned
                # up once we receive the notification for this db_client
                # deletion.
                self.redis.execute_command("RAY.DISCONNECT", plasma_manager_id)
        # Increment the number of heartbeats that we've missed from each
        # plasma manager.
        for plasma_manager_id in self.live_plasma_managers:
            self.live_plasma_managers[plasma_manager_id] += 1
        # Wait for a heartbeat interval before processing the next round of
        # messages.
        time.sleep(ray._config.heartbeat_timeout_milliseconds() * 1e-3)
|
def run(self):
    """Run the monitor.

    This function loops forever, checking for messages about dead database
    clients and cleaning up state accordingly. It never returns.
    """
    # Initialize the subscription channel.
    self.subscribe(DB_CLIENT_TABLE_NAME)
    self.subscribe(LOCAL_SCHEDULER_INFO_CHANNEL)
    self.subscribe(PLASMA_MANAGER_HEARTBEAT_CHANNEL)
    self.subscribe(DRIVER_DEATH_CHANNEL)

    # Scan the database table for dead database clients. NOTE: This must be
    # called before reading any messages from the subscription channel.
    # This ensures that we start in a consistent state, since we may have
    # missed notifications that were sent before we connected to the
    # subscription channel.
    self.scan_db_client_table()
    # If there were any dead clients at startup, clean up the associated
    # state in the state tables.
    if len(self.dead_local_schedulers) > 0:
        self.cleanup_task_table()
        self.cleanup_actors()
    if len(self.dead_plasma_managers) > 0:
        self.cleanup_object_table()
    log.debug(
        "{} dead local schedulers, {} plasma managers total, {} "
        "dead plasma managers".format(
            len(self.dead_local_schedulers),
            (len(self.live_plasma_managers) + len(self.dead_plasma_managers)),
            len(self.dead_plasma_managers),
        )
    )

    # Handle messages from the subscription channels.
    while True:
        # Process autoscaling actions
        if self.autoscaler:
            self.autoscaler.update()
        # Record how many dead local schedulers and plasma managers we had
        # at the beginning of this round.
        num_dead_local_schedulers = len(self.dead_local_schedulers)
        num_dead_plasma_managers = len(self.dead_plasma_managers)
        # Process a round of messages.
        self.process_messages()
        # If any new local schedulers or plasma managers were marked as
        # dead in this round, clean up the associated state.
        if len(self.dead_local_schedulers) > num_dead_local_schedulers:
            self.cleanup_task_table()
            self.cleanup_actors()
        if len(self.dead_plasma_managers) > num_dead_plasma_managers:
            self.cleanup_object_table()
        # Handle plasma managers that timed out during this round.
        plasma_manager_ids = list(self.live_plasma_managers.keys())
        for plasma_manager_id in plasma_manager_ids:
            if (
                (self.live_plasma_managers[plasma_manager_id])
                >= ray._config.num_heartbeats_timeout()
            ):
                log.warn("Timed out {}".format(PLASMA_MANAGER_CLIENT_TYPE))
                # Remove the plasma manager from the managers whose
                # heartbeats we're tracking.
                del self.live_plasma_managers[plasma_manager_id]
                # Remove the plasma manager from the db_client table. The
                # corresponding state in the object table will be cleaned
                # up once we receive the notification for this db_client
                # deletion.
                self.redis.execute_command("RAY.DISCONNECT", plasma_manager_id)
        # Increment the number of heartbeats that we've missed from each
        # plasma manager.
        for plasma_manager_id in self.live_plasma_managers:
            self.live_plasma_managers[plasma_manager_id] += 1
        # Wait for a heartbeat interval before processing the next round of
        # messages.
        time.sleep(ray._config.heartbeat_timeout_milliseconds() * 1e-3)
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def register_trainable(name, trainable):
    """Register a trainable function or class.

    Args:
        name (str): Name to register.
        trainable (obj): Function or tune.Trainable class. Functions must
            take (config, status_reporter) as arguments and will be
            automatically converted into a class during registration.

    Raises:
        TypeError: If ``trainable`` is neither such a function nor a
            Trainable subclass.
    """
    # Imported lazily to avoid a circular import at module load time.
    from ray.tune.trainable import Trainable, wrap_function

    if isinstance(trainable, FunctionType):
        trainable = wrap_function(trainable)
    # issubclass() raises its own TypeError on non-class arguments (e.g. an
    # instance or None), so verify we have a class first; this way callers
    # always see the intended error message below.
    if not isinstance(trainable, type) or not issubclass(trainable, Trainable):
        raise TypeError("Second argument must be convertable to Trainable", trainable)
    _default_registry.register(TRAINABLE_CLASS, name, trainable)
|
def register_trainable(name, trainable):
    """Register a trainable under ``name``.

    Args:
        name (str): Name to register.
        trainable (obj): Function or tune.Trainable class. Functions must
            take (config, status_reporter) as arguments; they are wrapped
            into a Trainable subclass during registration.
    """
    entry = trainable
    # Plain functions are wrapped so everything stored is a Trainable class.
    if isinstance(entry, FunctionType):
        entry = wrap_function(entry)
    if not issubclass(entry, Trainable):
        raise TypeError("Second argument must be convertable to Trainable", entry)
    _default_registry.register(TRAINABLE_CLASS, name, entry)
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def register(self, category, key, value):
    """Store ``value`` in the registry under ``(category, key)``.

    Raises a TuneError when ``category`` is not one of the known registry
    categories.
    """
    if category in KNOWN_CATEGORIES:
        self._all_objects[(category, key)] = value
        return
    # Imported lazily to avoid a circular import with ray.tune.
    from ray.tune import TuneError
    raise TuneError(
        "Unknown category {} not among {}".format(category, KNOWN_CATEGORIES)
    )
|
def register(self, category, key, value):
    """Record ``value`` in the registry keyed by ``(category, key)``.

    Raises a TuneError when ``category`` is not a known registry category.
    """
    if category not in KNOWN_CATEGORIES:
        message = "Unknown category {} not among {}".format(category, KNOWN_CATEGORIES)
        raise TuneError(message)
    self._all_objects[(category, key)] = value
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def __init__(
    self,
    trainable_name,
    config=None,
    local_dir=DEFAULT_RESULTS_DIR,
    experiment_tag=None,
    resources=Resources(cpu=1, gpu=0),
    stopping_criterion=None,
    checkpoint_freq=0,
    restore_path=None,
    upload_dir=None,
    max_failures=0,
):
    """Initialize a new trial.

    The args here take the same meaning as the command line flags defined
    in ray.tune.config_parser.

    Raises:
        TuneError: If ``trainable_name`` is not registered, or if a
            stopping-criterion key is not a TrainingResult field.
    """
    # The registry is referenced through its module (rather than importing
    # the names directly) — presumably to avoid an import cycle; see the
    # related ImportError history. TODO(review): confirm.
    if not ray.tune.registry._default_registry.contains(
        ray.tune.registry.TRAINABLE_CLASS, trainable_name
    ):
        raise TuneError("Unknown trainable: " + trainable_name)
    if stopping_criterion:
        for k in stopping_criterion:
            if k not in TrainingResult._fields:
                raise TuneError(
                    "Stopping condition key `{}` must be one of {}".format(
                        k, TrainingResult._fields
                    )
                )
    # Trial config
    self.trainable_name = trainable_name
    self.config = config or {}
    self.local_dir = local_dir
    self.experiment_tag = experiment_tag
    self.resources = resources
    self.stopping_criterion = stopping_criterion or {}
    self.checkpoint_freq = checkpoint_freq
    self.upload_dir = upload_dir
    self.verbose = True
    self.max_failures = max_failures
    # Local trial state that is updated during the run
    self.last_result = None
    self._checkpoint_path = restore_path
    self._checkpoint_obj = None
    self.runner = None
    self.status = Trial.PENDING
    self.location = None
    self.logdir = None
    self.result_logger = None
    self.last_debug = 0
    # Short random hex identifier for this trial.
    self.trial_id = binary_to_hex(random_string())[:8]
    self.error_file = None
    self.num_failures = 0
|
def __init__(
    self,
    trainable_name,
    config=None,
    local_dir=DEFAULT_RESULTS_DIR,
    experiment_tag=None,
    resources=Resources(cpu=1, gpu=0),
    stopping_criterion=None,
    checkpoint_freq=0,
    restore_path=None,
    upload_dir=None,
    max_failures=0,
):
    """Initialize a new trial.

    The args here take the same meaning as the command line flags defined
    in ray.tune.config_parser.

    Raises:
        TuneError: If ``trainable_name`` is not registered, or if a
            stopping-criterion key is not a TrainingResult field.
    """
    # Reject trainables that were never registered.
    if not _default_registry.contains(TRAINABLE_CLASS, trainable_name):
        raise TuneError("Unknown trainable: " + trainable_name)
    if stopping_criterion:
        for k in stopping_criterion:
            if k not in TrainingResult._fields:
                raise TuneError(
                    "Stopping condition key `{}` must be one of {}".format(
                        k, TrainingResult._fields
                    )
                )
    # Trial config
    self.trainable_name = trainable_name
    self.config = config or {}
    self.local_dir = local_dir
    self.experiment_tag = experiment_tag
    self.resources = resources
    self.stopping_criterion = stopping_criterion or {}
    self.checkpoint_freq = checkpoint_freq
    self.upload_dir = upload_dir
    self.verbose = True
    self.max_failures = max_failures
    # Local trial state that is updated during the run
    self.last_result = None
    self._checkpoint_path = restore_path
    self._checkpoint_obj = None
    self.runner = None
    self.status = Trial.PENDING
    self.location = None
    self.logdir = None
    self.result_logger = None
    self.last_debug = 0
    # Short random hex identifier for this trial.
    self.trial_id = binary_to_hex(random_string())[:8]
    self.error_file = None
    self.num_failures = 0
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def _setup_runner(self):
    """Create the remote Trainable actor for this trial and mark it RUNNING.

    Looks up the registered trainable class, wraps it as a Ray remote
    class with this trial's driver resource limits, lazily creates the
    local log directory / result logger, and starts the remote runner.
    """
    self.status = Trial.RUNNING
    # Registry names are accessed through the module — presumably to
    # avoid a circular import; see the related ImportError history.
    trainable_cls = ray.tune.registry.get_registry().get(
        ray.tune.registry.TRAINABLE_CLASS, self.trainable_name
    )
    cls = ray.remote(
        num_cpus=self.resources.driver_cpu_limit,
        num_gpus=self.resources.driver_gpu_limit,
    )(trainable_cls)
    if not self.result_logger:
        if not os.path.exists(self.local_dir):
            os.makedirs(self.local_dir)
        # Unique per-trial directory under local_dir, named after the
        # (truncated) trial string and the current date.
        self.logdir = tempfile.mkdtemp(
            prefix="{}_{}".format(str(self)[:MAX_LEN_IDENTIFIER], date_str()),
            dir=self.local_dir,
        )
        self.result_logger = UnifiedLogger(self.config, self.logdir, self.upload_dir)
    # Captured by the closure below so the remote process writes here too.
    remote_logdir = self.logdir

    def logger_creator(config):
        # Set the working dir in the remote process, for user file writes
        if not os.path.exists(remote_logdir):
            os.makedirs(remote_logdir)
        os.chdir(remote_logdir)
        return NoopLogger(config, remote_logdir)

    # Logging for trials is handled centrally by TrialRunner, so
    # configure the remote runner to use a noop-logger.
    self.runner = cls.remote(
        config=self.config,
        registry=ray.tune.registry.get_registry(),
        logger_creator=logger_creator,
    )
|
def _setup_runner(self):
    """Create the remote Trainable actor for this trial and mark it RUNNING.

    Looks up the registered trainable class, wraps it as a Ray remote
    class with this trial's driver resource limits, lazily creates the
    local log directory / result logger, and starts the remote runner.
    """
    self.status = Trial.RUNNING
    trainable_cls = get_registry().get(TRAINABLE_CLASS, self.trainable_name)
    cls = ray.remote(
        num_cpus=self.resources.driver_cpu_limit,
        num_gpus=self.resources.driver_gpu_limit,
    )(trainable_cls)
    if not self.result_logger:
        if not os.path.exists(self.local_dir):
            os.makedirs(self.local_dir)
        # Unique per-trial directory under local_dir, named after the
        # (truncated) trial string and the current date.
        self.logdir = tempfile.mkdtemp(
            prefix="{}_{}".format(str(self)[:MAX_LEN_IDENTIFIER], date_str()),
            dir=self.local_dir,
        )
        self.result_logger = UnifiedLogger(self.config, self.logdir, self.upload_dir)
    # Captured by the closure below so the remote process writes here too.
    remote_logdir = self.logdir

    def logger_creator(config):
        # Set the working dir in the remote process, for user file writes
        if not os.path.exists(remote_logdir):
            os.makedirs(remote_logdir)
        os.chdir(remote_logdir)
        return NoopLogger(config, remote_logdir)

    # Logging for trials is handled centrally by TrialRunner, so
    # configure the remote runner to use a noop-logger.
    self.runner = cls.remote(
        config=self.config, registry=get_registry(), logger_creator=logger_creator
    )
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def __init__(self):
    """Initialize a Worker object.

    A Worker holds the per-process state needed to execute remote
    functions and actor tasks: the per-driver function registries,
    execution counters, definitions cached before connecting, and
    actor bookkeeping.
    """
    # Maps a driver ID to a dictionary of functions that have been
    # registered for that driver (the inner dictionary maps function IDs
    # to a tuple of the function name and the function itself). This
    # should only be used on workers that execute remote functions.
    # NOTE: defaultdict(dict) is the idiomatic, equivalent form of
    # defaultdict(lambda: {}).
    self.functions = collections.defaultdict(dict)
    # Maps a driver ID to a dictionary of function properties registered
    # for that driver (the inner dictionary maps function IDs to a tuple
    # of the number of values returned by that function, the number of
    # CPUs required, and the number of GPUs required). This is used when
    # submitting a function (which can be done both on workers and on
    # drivers).
    self.function_properties = collections.defaultdict(dict)
    # Maps a driver ID to a dictionary that maps remote function IDs for
    # that driver to a counter of the number of times that remote
    # function has been executed on this worker. The counter is
    # incremented every time the function is executed here. When the
    # counter reaches the maximum number of executions allowed for a
    # particular function, the worker is killed.
    self.num_task_executions = collections.defaultdict(dict)
    self.connected = False
    self.mode = None
    # Definitions received before the worker connects are cached in
    # these lists and exported once a connection is established.
    self.cached_remote_functions_and_actors = []
    self.cached_functions_to_run = []
    self.fetch_and_register_actor = None
    self.make_actor = None
    self.actors = {}
    self.actor_task_counter = 0
    # A set of all of the actor class keys that have been imported by
    # the import thread. It is safe to convert this worker into an actor
    # of these types.
    self.imported_actor_classes = set()
    # The number of threads Plasma should use when putting an object in
    # the object store.
    self.memcopy_threads = 12
    # When the worker is constructed, record the original value of the
    # CUDA_VISIBLE_DEVICES environment variable so later GPU assignment
    # can be expressed relative to it.
    self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
|
def __init__(self):
    """Initialize a Worker object."""
    # Per-driver registry of executable functions: driver ID ->
    # {function ID: (function name, function)}. Populated only on
    # workers that actually execute remote functions.
    self.functions = collections.defaultdict(dict)
    # Per-driver function metadata: driver ID -> {function ID: a tuple
    # holding the number of return values and the CPU/GPU requirements
    # of the function}. Consulted when submitting tasks, which happens
    # on both workers and drivers.
    self.function_properties = collections.defaultdict(dict)
    # Per-driver execution counters: driver ID -> {function ID: count}.
    # Each execution on this worker bumps the count; once a function
    # reaches its maximum allowed executions, the worker is killed.
    self.num_task_executions = collections.defaultdict(dict)
    self.connected = False
    self.mode = None
    # Definitions that arrive before this worker is connected are
    # buffered here until a connection exists.
    self.cached_remote_functions_and_actors = []
    self.cached_functions_to_run = []
    self.fetch_and_register_actor = None
    self.make_actor = None
    self.actors = {}
    self.actor_task_counter = 0
    # Thread count Plasma uses for memcopy when storing an object.
    self.memcopy_threads = 12
    # Snapshot the CUDA_VISIBLE_DEVICES environment variable as it was
    # when this worker was constructed.
    self.original_gpu_ids = ray.utils.get_cuda_visible_devices()
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def put_object(self, object_id, value):
    """Put value in the local object store with object id objectid.

    This assumes that the value for objectid has not yet been placed in
    the local object store.

    Args:
        object_id (object_id.ObjectID): The object ID of the value to be
            put.
        value: The value to put in the object store.

    Raises:
        Exception: An exception is raised if the attempt to store the
            object fails. This can happen if there is already an object
            with the same ID in the object store or if the object store
            is full.
    """
    # Putting raw ObjectIDs or actor handles would break the task
    # dependency model, so reject them explicitly up front.
    if isinstance(value, ray.local_scheduler.ObjectID):
        raise Exception(
            "Calling 'put' on an ObjectID is not allowed "
            "(similarly, returning an ObjectID from a remote "
            "function is not allowed). If you really want to "
            "do this, you can wrap the ObjectID in a list and "
            "call 'put' on it (or return it)."
        )
    if isinstance(value, ray.actor.ActorHandleParent):
        raise Exception(
            "Calling 'put' on an actor handle is currently "
            "not allowed (similarly, returning an actor "
            "handle from a remote function is not allowed)."
        )
    # Serialize and put the object in the object store.
    try:
        self.store_and_register(object_id, value)
    # The exception object itself is unused, so no 'as e' binding.
    except pyarrow.PlasmaObjectExists:
        # The object already exists in the object store, so there is no
        # need to add it again. TODO(rkn): We need to compare the hashes
        # and make sure that the objects are in fact the same. We also
        # should return an error code to the caller instead of printing
        # a message.
        print(
            "The object with ID {} already exists in the object store.".format(
                object_id
            )
        )
|
def put_object(self, object_id, value):
    """Store ``value`` in the local object store under ``object_id``.

    Assumes nothing has been placed under ``object_id`` yet.

    Args:
        object_id (object_id.ObjectID): The ID under which to store the
            value.
        value: The value to put in the object store.

    Raises:
        Exception: Raised when the value is a type that may not be put
            (an ObjectID or an actor handle), or if storing the object
            fails (for example, an object with the same ID already
            exists, or the store is full).
    """
    # Reject value types that are never allowed to be put.
    if isinstance(value, ray.local_scheduler.ObjectID):
        raise Exception(
            "Calling 'put' on an ObjectID is not allowed "
            "(similarly, returning an ObjectID from a remote "
            "function is not allowed). If you really want to "
            "do this, you can wrap the ObjectID in a list and "
            "call 'put' on it (or return it)."
        )
    elif isinstance(value, ray.actor.ActorHandleParent):
        raise Exception(
            "Calling 'put' on an actor handle is currently "
            "not allowed (similarly, returning an actor "
            "handle from a remote function is not allowed)."
        )
    # Serialize the value and hand it to the object store.
    try:
        self.store_and_register(object_id, value)
    except pyarrow.PlasmaObjectExists:
        # Already present; nothing more to do. TODO(rkn): compare hashes
        # to confirm the objects really match, and surface an error code
        # to the caller instead of printing.
        print("This object already exists in the object store.")
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def submit_task(
    self,
    function_id,
    args,
    actor_id=None,
    actor_handle_id=None,
    actor_counter=0,
    is_actor_checkpoint_method=False,
    actor_creation_id=None,
    actor_creation_dummy_object_id=None,
    execution_dependencies=None,
):
    """Submit a remote task to the scheduler.
    Tell the scheduler to schedule the execution of the function with ID
    function_id with arguments args. Retrieve object IDs for the outputs of
    the function from the scheduler and immediately return them.
    Args:
        function_id: The ID of the function to execute.
        args: The arguments to pass into the function. Arguments can be
            object IDs or they can be values. If they are values, they must
            be serializable objecs.
        actor_id: The ID of the actor that this task is for.
        actor_handle_id: The ID of the handle through which this actor
            task is submitted (must be provided iff actor_id is).
        actor_counter: The counter of the actor task.
        is_actor_checkpoint_method: True if this is an actor checkpoint
            task and false otherwise.
        actor_creation_id: The ID of the actor to create, if this is an
            actor creation task.
        actor_creation_dummy_object_id: If this task is an actor method,
            then this argument is the dummy object ID associated with the
            actor creation task for the corresponding actor.
        execution_dependencies: The execution dependencies for this task.
    Returns:
        The return object IDs for this task.
    """
    with log_span("ray:submit_task", worker=self):
        check_main_thread()
        # Non-actor tasks use NIL placeholders for the actor IDs;
        # actor_id and actor_handle_id must be provided together.
        if actor_id is None:
            assert actor_handle_id is None
            actor_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)
            actor_handle_id = ray.local_scheduler.ObjectID(NIL_ACTOR_HANDLE_ID)
        else:
            assert actor_handle_id is not None
        # Default the actor-creation fields to NIL placeholders as well.
        if actor_creation_id is None:
            actor_creation_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)
        if actor_creation_dummy_object_id is None:
            actor_creation_dummy_object_id = ray.local_scheduler.ObjectID(NIL_ID)
        # Put large or complex arguments that are passed by value in the
        # object store first.
        args_for_local_scheduler = []
        for arg in args:
            if isinstance(arg, ray.local_scheduler.ObjectID):
                # Object IDs are passed through unchanged.
                args_for_local_scheduler.append(arg)
            elif isinstance(arg, ray.actor.ActorHandleParent):
                # Actor handles must be wrapped before being stored.
                args_for_local_scheduler.append(put(ray.actor.wrap_actor_handle(arg)))
            elif ray.local_scheduler.check_simple_value(arg):
                # Simple values are embedded directly in the task spec.
                args_for_local_scheduler.append(arg)
            else:
                # Everything else is put in the object store and passed
                # by reference.
                args_for_local_scheduler.append(put(arg))
        # By default, there are no execution dependencies.
        if execution_dependencies is None:
            execution_dependencies = []
        # Look up the various function properties.
        function_properties = self.function_properties[self.task_driver_id.id()][
            function_id.id()
        ]
        # Submit the task to local scheduler.
        task = ray.local_scheduler.Task(
            self.task_driver_id,
            ray.local_scheduler.ObjectID(function_id.id()),
            args_for_local_scheduler,
            function_properties.num_return_vals,
            self.current_task_id,
            self.task_index,
            actor_creation_id,
            actor_creation_dummy_object_id,
            actor_id,
            actor_handle_id,
            actor_counter,
            is_actor_checkpoint_method,
            execution_dependencies,
            function_properties.resources,
        )
        # Increment the worker's task index to track how many tasks have
        # been submitted by the current task so far.
        self.task_index += 1
        self.local_scheduler_client.submit(task)
        return task.returns()
|
def submit_task(
    self,
    function_id,
    args,
    actor_id=None,
    actor_handle_id=None,
    actor_counter=0,
    is_actor_checkpoint_method=False,
    execution_dependencies=None,
):
    """Submit a remote task to the scheduler.
    Tell the scheduler to schedule the execution of the function with ID
    function_id with arguments args. Retrieve object IDs for the outputs of
    the function from the scheduler and immediately return them.
    Args:
        function_id: The ID of the function to execute.
        args: The arguments to pass into the function. Arguments can be
            object IDs or they can be values. If they are values, they must
            be serializable objecs.
        actor_id: The ID of the actor that this task is for.
        actor_counter: The counter of the actor task.
        is_actor_checkpoint_method: True if this is an actor checkpoint
            task and false otherwise.
    """
    with log_span("ray:submit_task", worker=self):
        check_main_thread()
        # Non-actor tasks use NIL placeholders; actor_id and
        # actor_handle_id must be provided together.
        if actor_id is None:
            assert actor_handle_id is None
            actor_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)
            actor_handle_id = ray.local_scheduler.ObjectID(NIL_ACTOR_ID)
        else:
            assert actor_handle_id is not None
        # Put large or complex arguments that are passed by value in the
        # object store first.
        args_for_local_scheduler = []
        for arg in args:
            if isinstance(arg, ray.local_scheduler.ObjectID):
                # Object IDs are passed through unchanged.
                args_for_local_scheduler.append(arg)
            elif isinstance(arg, ray.actor.ActorHandleParent):
                # Actor handles must be wrapped before being stored.
                args_for_local_scheduler.append(put(ray.actor.wrap_actor_handle(arg)))
            elif ray.local_scheduler.check_simple_value(arg):
                # Simple values are embedded directly in the task spec.
                args_for_local_scheduler.append(arg)
            else:
                # Everything else is put in the object store and passed
                # by reference.
                args_for_local_scheduler.append(put(arg))
        # By default, there are no execution dependencies.
        if execution_dependencies is None:
            execution_dependencies = []
        # Look up the various function properties.
        function_properties = self.function_properties[self.task_driver_id.id()][
            function_id.id()
        ]
        # Submit the task to local scheduler.
        task = ray.local_scheduler.Task(
            self.task_driver_id,
            ray.local_scheduler.ObjectID(function_id.id()),
            args_for_local_scheduler,
            function_properties.num_return_vals,
            self.current_task_id,
            self.task_index,
            actor_id,
            actor_handle_id,
            actor_counter,
            is_actor_checkpoint_method,
            execution_dependencies,
            function_properties.resources,
        )
        # Increment the worker's task index to track how many tasks have
        # been submitted by the current task so far.
        self.task_index += 1
        self.local_scheduler_client.submit(task)
        return task.returns()
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def _wait_for_and_process_task(self, task):
    """Wait for a task to be ready and process the task.
    Args:
        task: The task to execute.
    """
    function_id = task.function_id()
    # TODO(rkn): It would be preferable for actor creation tasks to share
    # more of the code path with regular task execution.
    if task.actor_creation_id() != ray.local_scheduler.ObjectID(NIL_ACTOR_ID):
        # This is an actor creation task: convert this worker into the
        # actor instead of executing a regular function.
        self._become_actor(task)
        return
    # Wait until the function to be executed has actually been registered
    # on this worker. We will push warnings to the user if we spend too
    # long in this loop.
    with log_span("ray:wait_for_function", worker=self):
        self._wait_for_function(function_id, task.driver_id().id())
    # Execute the task.
    # TODO(rkn): Consider acquiring this lock with a timeout and pushing a
    # warning to the user if we are waiting too long to acquire the lock
    # because that may indicate that the system is hanging, and it'd be
    # good to know where the system is hanging.
    log(event_type="ray:acquire_lock", kind=LOG_SPAN_START, worker=self)
    with self.lock:
        log(event_type="ray:acquire_lock", kind=LOG_SPAN_END, worker=self)
        function_name, _ = self.functions[task.driver_id().id()][function_id.id()]
        contents = {
            "function_name": function_name,
            "task_id": task.task_id().hex(),
            "worker_id": binary_to_hex(self.worker_id),
        }
        with log_span("ray:task", contents=contents, worker=self):
            self._process_task(task)
    # Push all of the log events to the global state store.
    flush_log()
    # Increase the task execution counter.
    (self.num_task_executions[task.driver_id().id()][function_id.id()]) += 1
    # If this function has hit its max_calls limit, this worker must not
    # execute it again: disconnect from the local scheduler and exit so
    # a fresh worker replaces this one.
    reached_max_executions = (
        self.num_task_executions[task.driver_id().id()][function_id.id()]
        == self.function_properties[task.driver_id().id()][function_id.id()].max_calls
    )
    if reached_max_executions:
        ray.worker.global_worker.local_scheduler_client.disconnect()
        os._exit(0)
|
def _wait_for_and_process_task(self, task):
    """Wait for a task to be ready and process the task.
    Args:
        task: The task to execute.
    """
    function_id = task.function_id()
    # Wait until the function to be executed has actually been registered
    # on this worker. We will push warnings to the user if we spend too
    # long in this loop.
    with log_span("ray:wait_for_function", worker=self):
        self._wait_for_function(function_id, task.driver_id().id())
    # Execute the task.
    # TODO(rkn): Consider acquiring this lock with a timeout and pushing a
    # warning to the user if we are waiting too long to acquire the lock
    # because that may indicate that the system is hanging, and it'd be
    # good to know where the system is hanging.
    log(event_type="ray:acquire_lock", kind=LOG_SPAN_START, worker=self)
    with self.lock:
        log(event_type="ray:acquire_lock", kind=LOG_SPAN_END, worker=self)
        function_name, _ = self.functions[task.driver_id().id()][function_id.id()]
        contents = {
            "function_name": function_name,
            "task_id": task.task_id().hex(),
            "worker_id": binary_to_hex(self.worker_id),
        }
        with log_span("ray:task", contents=contents, worker=self):
            self._process_task(task)
    # Push all of the log events to the global state store.
    flush_log()
    # Increase the task execution counter.
    (self.num_task_executions[task.driver_id().id()][function_id.id()]) += 1
    # If this function has hit its max_calls limit, this worker must not
    # execute it again: disconnect from the local scheduler and exit so
    # a fresh worker replaces this one.
    reached_max_executions = (
        self.num_task_executions[task.driver_id().id()][function_id.id()]
        == self.function_properties[task.driver_id().id()][function_id.id()].max_calls
    )
    if reached_max_executions:
        ray.worker.global_worker.local_scheduler_client.disconnect()
        os._exit(0)
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def _init(
    address_info=None,
    start_ray_local=False,
    object_id_seed=None,
    num_workers=None,
    num_local_schedulers=None,
    object_store_memory=None,
    driver_mode=SCRIPT_MODE,
    redirect_output=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
    resources=None,
    num_redis_shards=None,
    redis_max_clients=None,
    plasma_directory=None,
    huge_pages=False,
    include_webui=True,
):
    """Helper method to connect to an existing Ray cluster or start a new one.
    This method handles two cases. Either a Ray cluster already exists and we
    just attach this driver to it, or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.
    Args:
        address_info (dict): A dictionary with address information for
            processes in a partially-started Ray cluster. If
            start_ray_local=True, any processes not in this dictionary will be
            started. If provided, an updated address_info dictionary will be
            returned to include processes that are newly started.
        start_ray_local (bool): If True then this will start any processes not
            already in address_info, including Redis, a global scheduler, local
            scheduler(s), object store(s), and worker(s). It will also kill
            these processes when Python exits. If False, this will attach to an
            existing Ray cluster.
        object_id_seed (int): Used to seed the deterministic generation of
            object IDs. The same value can be used across multiple runs of the
            same job in order to generate the object IDs in a consistent
            manner. However, the same ID should not be used for different jobs.
        num_workers (int): The number of workers to start. This is only
            provided if start_ray_local is True.
        num_local_schedulers (int): The number of local schedulers to start.
            This is only provided if start_ray_local is True.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        driver_mode (bool): The mode in which to start the driver. This should
            be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.
        redirect_output (bool): True if stdout and stderr for all the processes
            should be redirected to files and false otherwise.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python. The latter case is for debugging purposes only.
        num_cpus (int): Number of cpus the user wishes all local schedulers to
            be configured with.
        num_gpus (int): Number of gpus the user wishes all local schedulers to
            be configured with. If unspecified, Ray will attempt to autodetect
            the number of GPUs available on the node (note that autodetection
            currently only works for Nvidia GPUs).
        resources: A dictionary mapping resource names to the quantity of that
            resource available.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_webui: Boolean flag indicating whether to start the web
            UI, which is a Jupyter notebook.
    Returns:
        Address information about the started processes.
    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    check_main_thread()
    # Validate the requested driver mode before doing any other work.
    if driver_mode not in [SCRIPT_MODE, PYTHON_MODE, SILENT_MODE]:
        raise Exception(
            "Driver_mode must be in [ray.SCRIPT_MODE, "
            "ray.PYTHON_MODE, ray.SILENT_MODE]."
        )
    # Get addresses of existing services.
    if address_info is None:
        address_info = {}
    else:
        assert isinstance(address_info, dict)
    node_ip_address = address_info.get("node_ip_address")
    redis_address = address_info.get("redis_address")
    # Start any services that do not yet exist.
    if driver_mode == PYTHON_MODE:
        # If starting Ray in PYTHON_MODE, don't start any other processes.
        pass
    elif start_ray_local:
        # In this case, we launch a scheduler, a new object store, and some
        # workers, and we connect to them. We do not launch any processes that
        # are already registered in address_info.
        # Use the address 127.0.0.1 in local mode.
        node_ip_address = "127.0.0.1" if node_ip_address is None else node_ip_address
        # Use 1 local scheduler if num_local_schedulers is not provided. If
        # existing local schedulers are provided, use that count as
        # num_local_schedulers.
        local_schedulers = address_info.get("local_scheduler_socket_names", [])
        if num_local_schedulers is None:
            if len(local_schedulers) > 0:
                num_local_schedulers = len(local_schedulers)
            else:
                num_local_schedulers = 1
        # Use 1 additional redis shard if num_redis_shards is not provided.
        num_redis_shards = 1 if num_redis_shards is None else num_redis_shards
        # Stick the CPU and GPU resources into the resource dictionary.
        resources = _normalize_resource_arguments(
            num_cpus, num_gpus, resources, num_local_schedulers
        )
        # Start the scheduler, object store, and some workers. These will be
        # killed by the call to cleanup(), which happens when the Python script
        # exits.
        address_info = services.start_ray_head(
            address_info=address_info,
            node_ip_address=node_ip_address,
            num_workers=num_workers,
            num_local_schedulers=num_local_schedulers,
            object_store_memory=object_store_memory,
            redirect_output=redirect_output,
            start_workers_from_local_scheduler=(start_workers_from_local_scheduler),
            resources=resources,
            num_redis_shards=num_redis_shards,
            redis_max_clients=redis_max_clients,
            plasma_directory=plasma_directory,
            huge_pages=huge_pages,
            include_webui=include_webui,
        )
    else:
        # Attaching to an existing cluster: arguments that shape the
        # cluster must not be supplied, so reject each one explicitly.
        if redis_address is None:
            raise Exception(
                "When connecting to an existing cluster, "
                "redis_address must be provided."
            )
        if num_workers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_workers must not be provided."
            )
        if num_local_schedulers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_local_schedulers must not be provided."
            )
        if num_cpus is not None or num_gpus is not None:
            raise Exception(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided."
            )
        if resources is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "resources must not be provided."
            )
        if num_redis_shards is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_redis_shards must not be provided."
            )
        if redis_max_clients is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "redis_max_clients must not be provided."
            )
        if object_store_memory is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "object_store_memory must not be provided."
            )
        if plasma_directory is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "plasma_directory must not be provided."
            )
        if huge_pages:
            raise Exception(
                "When connecting to an existing cluster, "
                "huge_pages must not be provided."
            )
        # Get the node IP address if one is not provided.
        if node_ip_address is None:
            node_ip_address = services.get_node_ip_address(redis_address)
        # Get the address info of the processes to connect to from Redis.
        address_info = get_address_info_from_redis(redis_address, node_ip_address)
    # Connect this driver to Redis, the object store, and the local scheduler.
    # Choose the first object store and local scheduler if there are multiple.
    # The corresponding call to disconnect will happen in the call to cleanup()
    # when the Python script exits.
    if driver_mode == PYTHON_MODE:
        # PYTHON_MODE runs everything in-process, so no addresses are needed.
        driver_address_info = {}
    else:
        driver_address_info = {
            "node_ip_address": node_ip_address,
            "redis_address": address_info["redis_address"],
            "store_socket_name": (address_info["object_store_addresses"][0].name),
            "manager_socket_name": (
                address_info["object_store_addresses"][0].manager_name
            ),
            "local_scheduler_socket_name": (
                address_info["local_scheduler_socket_names"][0]
            ),
            "webui_url": address_info["webui_url"],
        }
    connect(
        driver_address_info,
        object_id_seed=object_id_seed,
        mode=driver_mode,
        worker=global_worker,
    )
    return address_info
|
def _init(
    address_info=None,
    start_ray_local=False,
    object_id_seed=None,
    num_workers=None,
    num_local_schedulers=None,
    object_store_memory=None,
    driver_mode=SCRIPT_MODE,
    redirect_output=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
    resources=None,
    num_redis_shards=None,
    redis_max_clients=None,
    plasma_directory=None,
    huge_pages=False,
    include_webui=True,
):
    """Helper method to connect to an existing Ray cluster or start a new one.
    This method handles two cases. Either a Ray cluster already exists and we
    just attach this driver to it, or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.
    Args:
        address_info (dict): A dictionary with address information for
            processes in a partially-started Ray cluster. If
            start_ray_local=True, any processes not in this dictionary will be
            started. If provided, an updated address_info dictionary will be
            returned to include processes that are newly started.
        start_ray_local (bool): If True then this will start any processes not
            already in address_info, including Redis, a global scheduler, local
            scheduler(s), object store(s), and worker(s). It will also kill
            these processes when Python exits. If False, this will attach to an
            existing Ray cluster.
        object_id_seed (int): Used to seed the deterministic generation of
            object IDs. The same value can be used across multiple runs of the
            same job in order to generate the object IDs in a consistent
            manner. However, the same ID should not be used for different jobs.
        num_workers (int): The number of workers to start. This is only
            provided if start_ray_local is True.
        num_local_schedulers (int): The number of local schedulers to start.
            This is only provided if start_ray_local is True.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        driver_mode (bool): The mode in which to start the driver. This should
            be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.
        redirect_output (bool): True if stdout and stderr for all the processes
            should be redirected to files and false otherwise.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python. The latter case is for debugging purposes only.
        num_cpus (int): Number of cpus the user wishes all local schedulers to
            be configured with.
        num_gpus (int): Number of gpus the user wishes all local schedulers to
            be configured with. If unspecified, Ray will attempt to autodetect
            the number of GPUs available on the node (note that autodetection
            currently only works for Nvidia GPUs).
        resources: A dictionary mapping resource names to the quantity of that
            resource available.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        redis_max_clients: If provided, attempt to configure Redis with this
            maxclients number.
        plasma_directory: A directory where the Plasma memory mapped files will
            be created.
        huge_pages: Boolean flag indicating whether to start the Object
            Store with hugetlbfs support. Requires plasma_directory.
        include_webui: Boolean flag indicating whether to start the web
            UI, which is a Jupyter notebook.
    Returns:
        Address information about the started processes.
    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    check_main_thread()
    if driver_mode not in [SCRIPT_MODE, PYTHON_MODE, SILENT_MODE]:
        raise Exception(
            "Driver_mode must be in [ray.SCRIPT_MODE, "
            "ray.PYTHON_MODE, ray.SILENT_MODE]."
        )
    # Get addresses of existing services.
    if address_info is None:
        address_info = {}
    else:
        assert isinstance(address_info, dict)
    node_ip_address = address_info.get("node_ip_address")
    redis_address = address_info.get("redis_address")
    # Start any services that do not yet exist.
    if driver_mode == PYTHON_MODE:
        # If starting Ray in PYTHON_MODE, don't start any other processes.
        pass
    elif start_ray_local:
        # In this case, we launch a scheduler, a new object store, and some
        # workers, and we connect to them. We do not launch any processes that
        # are already registered in address_info.
        # Use the address 127.0.0.1 in local mode.
        node_ip_address = "127.0.0.1" if node_ip_address is None else node_ip_address
        # Use 1 local scheduler if num_local_schedulers is not provided. If
        # existing local schedulers are provided, use that count as
        # num_local_schedulers.
        local_schedulers = address_info.get("local_scheduler_socket_names", [])
        if num_local_schedulers is None:
            if len(local_schedulers) > 0:
                num_local_schedulers = len(local_schedulers)
            else:
                num_local_schedulers = 1
        # Use 1 additional redis shard if num_redis_shards is not provided.
        num_redis_shards = 1 if num_redis_shards is None else num_redis_shards
        # Stick the CPU and GPU resources into the resource dictionary.
        resources = _normalize_resource_arguments(
            num_cpus, num_gpus, resources, num_local_schedulers
        )
        # Start the scheduler, object store, and some workers. These will be
        # killed by the call to cleanup(), which happens when the Python script
        # exits.
        address_info = services.start_ray_head(
            address_info=address_info,
            node_ip_address=node_ip_address,
            num_workers=num_workers,
            num_local_schedulers=num_local_schedulers,
            object_store_memory=object_store_memory,
            redirect_output=redirect_output,
            start_workers_from_local_scheduler=(start_workers_from_local_scheduler),
            resources=resources,
            num_redis_shards=num_redis_shards,
            redis_max_clients=redis_max_clients,
            plasma_directory=plasma_directory,
            huge_pages=huge_pages,
            include_webui=include_webui,
        )
    else:
        # Attaching to an already-running cluster: none of the
        # cluster-configuration arguments may be supplied, since the cluster
        # was configured when it was started. Reject each one explicitly so
        # the error message names the offending argument.
        if redis_address is None:
            raise Exception(
                "When connecting to an existing cluster, "
                "redis_address must be provided."
            )
        if num_workers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_workers must not be provided."
            )
        if num_local_schedulers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_local_schedulers must not be provided."
            )
        if num_cpus is not None or num_gpus is not None:
            raise Exception(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided."
            )
        if resources is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "resources must not be provided."
            )
        if num_redis_shards is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_redis_shards must not be provided."
            )
        if redis_max_clients is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "redis_max_clients must not be provided."
            )
        if object_store_memory is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "object_store_memory must not be provided."
            )
        if plasma_directory is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "plasma_directory must not be provided."
            )
        if huge_pages:
            raise Exception(
                "When connecting to an existing cluster, "
                "huge_pages must not be provided."
            )
        # Get the node IP address if one is not provided.
        if node_ip_address is None:
            node_ip_address = services.get_node_ip_address(redis_address)
        # Get the address info of the processes to connect to from Redis.
        address_info = get_address_info_from_redis(redis_address, node_ip_address)
    # Connect this driver to Redis, the object store, and the local scheduler.
    # Choose the first object store and local scheduler if there are multiple.
    # The corresponding call to disconnect will happen in the call to cleanup()
    # when the Python script exits.
    if driver_mode == PYTHON_MODE:
        # PYTHON_MODE runs everything in-process, so there are no service
        # addresses to pass along.
        driver_address_info = {}
    else:
        driver_address_info = {
            "node_ip_address": node_ip_address,
            "redis_address": address_info["redis_address"],
            "store_socket_name": (address_info["object_store_addresses"][0].name),
            "manager_socket_name": (
                address_info["object_store_addresses"][0].manager_name
            ),
            "local_scheduler_socket_name": (
                address_info["local_scheduler_socket_names"][0]
            ),
            "webui_url": address_info["webui_url"],
        }
    connect(
        driver_address_info,
        object_id_seed=object_id_seed,
        mode=driver_mode,
        worker=global_worker,
        actor_id=NIL_ACTOR_ID,
    )
    return address_info
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def import_thread(worker, mode):
    """Import exported definitions from Redis into this process.

    The thread first replays every entry already present in the Redis
    "Exports" list, then subscribes to keyspace notifications so that new
    exports are imported as they are pushed. It runs until the Redis
    connection is closed.

    Args:
        worker: The worker object whose Redis client, lock, and import state
            are used.
        mode: The mode of this process. Drivers (mode != WORKER_MODE) only
            import FunctionsToRun.
    """
    worker.import_pubsub_client = worker.redis_client.pubsub()
    # Exports that are published after the call to
    # import_pubsub_client.subscribe and before the call to
    # import_pubsub_client.listen will still be processed in the loop.
    worker.import_pubsub_client.subscribe("__keyspace@0__:Exports")
    # Keep track of the number of imports that we've imported.
    num_imported = 0
    # Get the exports that occurred before the call to subscribe.
    with worker.lock:
        export_keys = worker.redis_client.lrange("Exports", 0, -1)
        for key in export_keys:
            num_imported += 1
            _process_export_key(key, worker, mode, log_spans=False)
    try:
        for msg in worker.import_pubsub_client.listen():
            with worker.lock:
                if msg["type"] == "subscribe":
                    continue
                assert msg["data"] == b"rpush"
                num_imports = worker.redis_client.llen("Exports")
                assert num_imports >= num_imported
                # Only process the entries that have not been imported yet;
                # a single notification may cover several pushes.
                for i in range(num_imported, num_imports):
                    num_imported += 1
                    key = worker.redis_client.lindex("Exports", i)
                    _process_export_key(key, worker, mode, log_spans=True)
    except redis.ConnectionError:
        # When Redis terminates the listen call will throw a ConnectionError,
        # which we catch here.
        pass


def _process_export_key(key, worker, mode, log_spans):
    """Dispatch one entry of the Redis "Exports" list by its key prefix.

    This is the single dispatch path shared by the pre-subscribe replay loop
    and the pubsub listen loop in import_thread (the two loops previously
    duplicated this logic inline).

    Args:
        key: The export key (bytes), whose prefix encodes the export type.
        worker: The worker object to import the export into.
        mode: The mode of this process. Drivers (mode != WORKER_MODE) only
            run FunctionsToRun and ignore everything else.
        log_spans: If True, wrap each import in a log_span so it is recorded
            for the web UI (used for imports that arrive via pubsub).

    Raises:
        Exception: If a worker receives a key with an unrecognized prefix.
    """
    # Handle the driver case first.
    if mode != WORKER_MODE:
        if key.startswith(b"FunctionsToRun"):
            if log_spans:
                with log_span("ray:import_function_to_run", worker=worker):
                    fetch_and_execute_function_to_run(key, worker=worker)
            else:
                fetch_and_execute_function_to_run(key, worker=worker)
        # FunctionsToRun are the only things that the driver should import.
        return
    if key.startswith(b"RemoteFunction"):
        if log_spans:
            with log_span("ray:import_remote_function", worker=worker):
                fetch_and_register_remote_function(key, worker=worker)
        else:
            fetch_and_register_remote_function(key, worker=worker)
    elif key.startswith(b"FunctionsToRun"):
        if log_spans:
            with log_span("ray:import_function_to_run", worker=worker):
                fetch_and_execute_function_to_run(key, worker=worker)
        else:
            fetch_and_execute_function_to_run(key, worker=worker)
    elif key.startswith(b"ActorClass"):
        # Keep track of the fact that this actor class has been exported so
        # that we know it is safe to turn this worker into an actor of that
        # class.
        worker.imported_actor_classes.add(key)
    else:
        raise Exception("This code should be unreachable.")
|
def import_thread(worker, mode):
    """Import exported definitions (functions, actors) from Redis.

    First replays the entries already in the Redis "Exports" list, then
    subscribes to keyspace notifications and imports new entries as they are
    pushed. Runs until the Redis connection is closed.

    Args:
        worker: The worker object to import things into.
        mode: The mode of this process. Drivers (mode != WORKER_MODE) only
            import FunctionsToRun.
    """
    worker.import_pubsub_client = worker.redis_client.pubsub()
    # Exports that are published after the call to
    # import_pubsub_client.subscribe and before the call to
    # import_pubsub_client.listen will still be processed in the loop.
    worker.import_pubsub_client.subscribe("__keyspace@0__:Exports")
    # Keep track of the number of imports that we've imported.
    num_imported = 0
    # Get the exports that occurred before the call to subscribe.
    with worker.lock:
        export_keys = worker.redis_client.lrange("Exports", 0, -1)
        for key in export_keys:
            num_imported += 1
            # Handle the driver case first.
            if mode != WORKER_MODE:
                if key.startswith(b"FunctionsToRun"):
                    fetch_and_execute_function_to_run(key, worker=worker)
                # Continue because FunctionsToRun are the only things that the
                # driver should import.
                continue
            if key.startswith(b"RemoteFunction"):
                fetch_and_register_remote_function(key, worker=worker)
            elif key.startswith(b"FunctionsToRun"):
                fetch_and_execute_function_to_run(key, worker=worker)
            elif key.startswith(b"ActorClass"):
                # If this worker is an actor that is supposed to construct this
                # class, fetch the actor and class information and construct
                # the class.
                class_id = key.split(b":", 1)[1]
                if worker.actor_id != NIL_ACTOR_ID and worker.class_id == class_id:
                    # NOTE(review): this path calls
                    # worker.fetch_and_register_actor directly, while the
                    # listen loop below uses worker.fetch_and_register["Actor"]
                    # for what looks like the same job — confirm which
                    # attribute actually exists on the worker object.
                    worker.fetch_and_register_actor(key, worker)
            else:
                raise Exception("This code should be unreachable.")
    try:
        for msg in worker.import_pubsub_client.listen():
            with worker.lock:
                if msg["type"] == "subscribe":
                    continue
                assert msg["data"] == b"rpush"
                num_imports = worker.redis_client.llen("Exports")
                assert num_imports >= num_imported
                # Only process entries that have not been imported yet.
                for i in range(num_imported, num_imports):
                    num_imported += 1
                    key = worker.redis_client.lindex("Exports", i)
                    # Handle the driver case first.
                    if mode != WORKER_MODE:
                        if key.startswith(b"FunctionsToRun"):
                            with log_span("ray:import_function_to_run", worker=worker):
                                fetch_and_execute_function_to_run(key, worker=worker)
                        # Continue because FunctionsToRun are the only things
                        # that the driver should import.
                        continue
                    if key.startswith(b"RemoteFunction"):
                        with log_span("ray:import_remote_function", worker=worker):
                            fetch_and_register_remote_function(key, worker=worker)
                    elif key.startswith(b"FunctionsToRun"):
                        with log_span("ray:import_function_to_run", worker=worker):
                            fetch_and_execute_function_to_run(key, worker=worker)
                    elif key.startswith(b"Actor"):
                        # NOTE(review): this prefix (b"Actor") differs from the
                        # b"ActorClass" prefix handled in the replay loop above,
                        # and the key layout differs too (hash fields here vs.
                        # b"ActorClass:<id>" there) — confirm whether both key
                        # formats are actually published.
                        # Only get the actor if the actor ID matches the actor
                        # ID of this worker.
                        (actor_id,) = worker.redis_client.hmget(key, "actor_id")
                        if worker.actor_id == actor_id:
                            worker.fetch_and_register["Actor"](key, worker)
                    else:
                        raise Exception("This code should be unreachable.")
    except redis.ConnectionError:
        # When Redis terminates the listen call will throw a ConnectionError,
        # which we catch here.
        pass
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def connect(info, object_id_seed=None, mode=WORKER_MODE, worker=global_worker):
    """Connect this worker to the local scheduler, to Plasma, and to Redis.
    Args:
        info (dict): A dictionary with address of the Redis server and the
            sockets of the plasma store, plasma manager, and local scheduler.
        object_id_seed: A seed to use to make the generation of object IDs
            deterministic.
        mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE,
            PYTHON_MODE, and SILENT_MODE.
    Raises:
        Exception: If the worker was already connected, if a version mismatch
            is detected on a driver, or if the mode is unrecognized.
    """
    check_main_thread()
    # Do some basic checking to make sure we didn't call ray.init twice.
    error_message = "Perhaps you called ray.init twice by accident?"
    assert not worker.connected, error_message
    assert worker.cached_functions_to_run is not None, error_message
    assert worker.cached_remote_functions_and_actors is not None, error_message
    # Initialize some fields.
    worker.worker_id = random_string()
    # All workers start out as non-actors. A worker can be turned into an actor
    # after it is created.
    worker.actor_id = NIL_ACTOR_ID
    worker.connected = True
    worker.set_mode(mode)
    # The worker.events field is used to aggregate logging information and
    # display it in the web UI. Note that Python lists protected by the GIL,
    # which is important because we will append to this field from multiple
    # threads.
    worker.events = []
    # If running Ray in PYTHON_MODE, there is no need to create call
    # create_worker or to start the worker service.
    if mode == PYTHON_MODE:
        return
    # Set the node IP address.
    worker.node_ip_address = info["node_ip_address"]
    worker.redis_address = info["redis_address"]
    # Create a Redis client.
    redis_ip_address, redis_port = info["redis_address"].split(":")
    worker.redis_client = redis.StrictRedis(host=redis_ip_address, port=int(redis_port))
    # For driver's check that the version information matches the version
    # information that the Ray cluster was started with.
    try:
        ray.services.check_version_info(worker.redis_client)
    except Exception as e:
        # Drivers fail hard on a version mismatch; workers only report the
        # error back to the driver and keep going.
        if mode in [SCRIPT_MODE, SILENT_MODE]:
            raise e
        elif mode == WORKER_MODE:
            traceback_str = traceback.format_exc()
            ray.utils.push_error_to_driver(
                worker.redis_client, "version_mismatch", traceback_str, driver_id=None
            )
    worker.lock = threading.Lock()
    # Check the RedirectOutput key in Redis and based on its value redirect
    # worker output and error to their own files.
    if mode == WORKER_MODE:
        # This key is set in services.py when Redis is started.
        redirect_worker_output_val = worker.redis_client.get("RedirectOutput")
        if (
            redirect_worker_output_val is not None
            and int(redirect_worker_output_val) == 1
        ):
            redirect_worker_output = 1
        else:
            redirect_worker_output = 0
        if redirect_worker_output:
            log_stdout_file, log_stderr_file = services.new_log_files("worker", True)
            sys.stdout = log_stdout_file
            sys.stderr = log_stderr_file
            services.record_log_files_in_redis(
                info["redis_address"],
                info["node_ip_address"],
                [log_stdout_file, log_stderr_file],
            )
    # Create an object for interfacing with the global state.
    global_state._initialize_global_state(redis_ip_address, int(redis_port))
    # Register the worker with Redis.
    if mode in [SCRIPT_MODE, SILENT_MODE]:
        # The concept of a driver is the same as the concept of a "job".
        # Register the driver/job with Redis here.
        import __main__ as main
        driver_info = {
            "node_ip_address": worker.node_ip_address,
            "driver_id": worker.worker_id,
            "start_time": time.time(),
            "plasma_store_socket": info["store_socket_name"],
            "plasma_manager_socket": info["manager_socket_name"],
            "local_scheduler_socket": info["local_scheduler_socket_name"],
        }
        driver_info["name"] = (
            main.__file__ if hasattr(main, "__file__") else "INTERACTIVE MODE"
        )
        worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info)
        if not worker.redis_client.exists("webui"):
            worker.redis_client.hmset("webui", {"url": info["webui_url"]})
        is_worker = False
    elif mode == WORKER_MODE:
        # Register the worker with Redis.
        worker_dict = {
            "node_ip_address": worker.node_ip_address,
            "plasma_store_socket": info["store_socket_name"],
            "plasma_manager_socket": info["manager_socket_name"],
            "local_scheduler_socket": info["local_scheduler_socket_name"],
        }
        if redirect_worker_output:
            worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name)
            worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name)
        worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict)
        is_worker = True
    else:
        raise Exception("This code should be unreachable.")
    # Create an object store client.
    # NOTE(review): the third argument (64) is presumably the plasma client's
    # release delay — confirm against plasma.connect's signature.
    worker.plasma_client = plasma.connect(
        info["store_socket_name"], info["manager_socket_name"], 64
    )
    worker.local_scheduler_client = ray.local_scheduler.LocalSchedulerClient(
        info["local_scheduler_socket_name"], worker.worker_id, is_worker
    )
    # If this is a driver, set the current task ID, the task driver ID, and set
    # the task index to 0.
    if mode in [SCRIPT_MODE, SILENT_MODE]:
        # If the user provided an object_id_seed, then set the current task ID
        # deterministically based on that seed (without altering the state of
        # the user's random number generator). Otherwise, set the current task
        # ID randomly to avoid object ID collisions.
        numpy_state = np.random.get_state()
        if object_id_seed is not None:
            np.random.seed(object_id_seed)
        else:
            # Try to use true randomness.
            np.random.seed(None)
        worker.current_task_id = ray.local_scheduler.ObjectID(np.random.bytes(20))
        # When tasks are executed on remote workers in the context of multiple
        # drivers, the task driver ID is used to keep track of which driver is
        # responsible for the task so that error messages will be propagated to
        # the correct driver.
        worker.task_driver_id = ray.local_scheduler.ObjectID(worker.worker_id)
        # Reset the state of the numpy random number generator.
        np.random.set_state(numpy_state)
        # Set other fields needed for computing task IDs.
        worker.task_index = 0
        worker.put_index = 0
        # Create an entry for the driver task in the task table. This task is
        # added immediately with status RUNNING. This allows us to push errors
        # related to this driver task back to the driver. For example, if the
        # driver creates an object that is later evicted, we should notify the
        # user that we're unable to reconstruct the object, since we cannot
        # rerun the driver.
        nil_actor_counter = 0
        driver_task = ray.local_scheduler.Task(
            worker.task_driver_id,
            ray.local_scheduler.ObjectID(NIL_FUNCTION_ID),
            [],
            0,
            worker.current_task_id,
            worker.task_index,
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
            nil_actor_counter,
            False,
            [],
            {"CPU": 0},
        )
        global_state._execute_command(
            driver_task.task_id(),
            "RAY.TASK_TABLE_ADD",
            driver_task.task_id().id(),
            TASK_STATUS_RUNNING,
            NIL_LOCAL_SCHEDULER_ID,
            driver_task.execution_dependencies_string(),
            0,
            ray.local_scheduler.task_to_string(driver_task),
        )
        # Set the driver's current task ID to the task ID assigned to the
        # driver task.
        worker.current_task_id = driver_task.task_id()
    # Initialize the serialization library. This registers some classes, and so
    # it must be run before we export all of the cached remote functions.
    _initialize_serialization()
    # Start a thread to import exports from the driver or from other workers.
    # Note that the driver also has an import thread, which is used only to
    # import custom class definitions from calls to register_custom_serializer
    # that happen under the hood on workers.
    t = threading.Thread(target=import_thread, args=(worker, mode))
    # Making the thread a daemon causes it to exit when the main thread exits.
    t.daemon = True
    t.start()
    # If this is a driver running in SCRIPT_MODE, start a thread to print error
    # messages asynchronously in the background. Ideally the scheduler would
    # push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shutdown the driver's worker service, so we are
    # temporarily using this implementation which constantly queries the
    # scheduler for new error messages.
    if mode == SCRIPT_MODE:
        t = threading.Thread(target=print_error_messages, args=(worker,))
        # Making the thread a daemon causes it to exit when the main thread
        # exits.
        t.daemon = True
        t.start()
    if mode in [SCRIPT_MODE, SILENT_MODE]:
        # Add the directory containing the script that is running to the Python
        # paths of the workers. Also add the current directory. Note that this
        # assumes that the directory structures on the machines in the clusters
        # are the same.
        script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
        current_directory = os.path.abspath(os.path.curdir)
        worker.run_function_on_all_workers(
            lambda worker_info: sys.path.insert(1, script_directory)
        )
        worker.run_function_on_all_workers(
            lambda worker_info: sys.path.insert(1, current_directory)
        )
        # TODO(rkn): Here we first export functions to run, then remote
        # functions. The order matters. For example, one of the functions to
        # run may set the Python path, which is needed to import a module used
        # to define a remote function. We may want to change the order to
        # simply be the order in which the exports were defined on the driver.
        # In addition, we will need to retain the ability to decide what the
        # first few exports are (mostly to set the Python path). Additionally,
        # note that the first exports to be defined on the driver will be the
        # ones defined in separate modules that are imported by the driver.
        # Export cached functions_to_run.
        for function in worker.cached_functions_to_run:
            worker.run_function_on_all_workers(function)
        # Export cached remote functions to the workers.
        for cached_type, info in worker.cached_remote_functions_and_actors:
            if cached_type == "remote_function":
                (function_id, func_name, func, func_invoker, function_properties) = info
                export_remote_function(
                    function_id,
                    func_name,
                    func,
                    func_invoker,
                    function_properties,
                    worker,
                )
            elif cached_type == "actor":
                (key, actor_class_info) = info
                ray.actor.publish_actor_class_to_key(key, actor_class_info, worker)
            else:
                assert False, "This code should be unreachable."
    # Clearing the caches marks the worker as fully connected; new definitions
    # are exported directly from now on.
    worker.cached_functions_to_run = None
    worker.cached_remote_functions_and_actors = None
|
def connect(
info,
object_id_seed=None,
mode=WORKER_MODE,
worker=global_worker,
actor_id=NIL_ACTOR_ID,
):
"""Connect this worker to the local scheduler, to Plasma, and to Redis.
Args:
info (dict): A dictionary with address of the Redis server and the
sockets of the plasma store, plasma manager, and local scheduler.
object_id_seed: A seed to use to make the generation of object IDs
deterministic.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE,
PYTHON_MODE, and SILENT_MODE.
actor_id: The ID of the actor running on this worker. If this worker is
not an actor, then this is NIL_ACTOR_ID.
"""
check_main_thread()
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
assert worker.cached_remote_functions_and_actors is not None, error_message
# Initialize some fields.
worker.worker_id = random_string()
worker.actor_id = actor_id
worker.connected = True
worker.set_mode(mode)
# The worker.events field is used to aggregate logging information and
# display it in the web UI. Note that Python lists protected by the GIL,
# which is important because we will append to this field from multiple
# threads.
worker.events = []
# If running Ray in PYTHON_MODE, there is no need to create call
# create_worker or to start the worker service.
if mode == PYTHON_MODE:
return
# Set the node IP address.
worker.node_ip_address = info["node_ip_address"]
worker.redis_address = info["redis_address"]
# Create a Redis client.
redis_ip_address, redis_port = info["redis_address"].split(":")
worker.redis_client = redis.StrictRedis(host=redis_ip_address, port=int(redis_port))
# For driver's check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray.services.check_version_info(worker.redis_client)
except Exception as e:
if mode in [SCRIPT_MODE, SILENT_MODE]:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray.utils.push_error_to_driver(
worker.redis_client, "version_mismatch", traceback_str, driver_id=None
)
worker.lock = threading.Lock()
# Check the RedirectOutput key in Redis and based on its value redirect
# worker output and error to their own files.
if mode == WORKER_MODE:
# This key is set in services.py when Redis is started.
redirect_worker_output_val = worker.redis_client.get("RedirectOutput")
if (
redirect_worker_output_val is not None
and int(redirect_worker_output_val) == 1
):
redirect_worker_output = 1
else:
redirect_worker_output = 0
if redirect_worker_output:
log_stdout_file, log_stderr_file = services.new_log_files("worker", True)
sys.stdout = log_stdout_file
sys.stderr = log_stderr_file
services.record_log_files_in_redis(
info["redis_address"],
info["node_ip_address"],
[log_stdout_file, log_stderr_file],
)
# Create an object for interfacing with the global state.
global_state._initialize_global_state(redis_ip_address, int(redis_port))
# Register the worker with Redis.
if mode in [SCRIPT_MODE, SILENT_MODE]:
# The concept of a driver is the same as the concept of a "job".
# Register the driver/job with Redis here.
import __main__ as main
driver_info = {
"node_ip_address": worker.node_ip_address,
"driver_id": worker.worker_id,
"start_time": time.time(),
"plasma_store_socket": info["store_socket_name"],
"plasma_manager_socket": info["manager_socket_name"],
"local_scheduler_socket": info["local_scheduler_socket_name"],
}
driver_info["name"] = (
main.__file__ if hasattr(main, "__file__") else "INTERACTIVE MODE"
)
worker.redis_client.hmset(b"Drivers:" + worker.worker_id, driver_info)
if not worker.redis_client.exists("webui"):
worker.redis_client.hmset("webui", {"url": info["webui_url"]})
is_worker = False
elif mode == WORKER_MODE:
# Register the worker with Redis.
worker_dict = {
"node_ip_address": worker.node_ip_address,
"plasma_store_socket": info["store_socket_name"],
"plasma_manager_socket": info["manager_socket_name"],
"local_scheduler_socket": info["local_scheduler_socket_name"],
}
if redirect_worker_output:
worker_dict["stdout_file"] = os.path.abspath(log_stdout_file.name)
worker_dict["stderr_file"] = os.path.abspath(log_stderr_file.name)
worker.redis_client.hmset(b"Workers:" + worker.worker_id, worker_dict)
is_worker = True
else:
raise Exception("This code should be unreachable.")
# Create an object store client.
worker.plasma_client = plasma.connect(
info["store_socket_name"], info["manager_socket_name"], 64
)
# Create the local scheduler client.
if worker.actor_id != NIL_ACTOR_ID:
num_gpus = int(worker.redis_client.hget(b"Actor:" + actor_id, "num_gpus"))
else:
num_gpus = 0
worker.local_scheduler_client = ray.local_scheduler.LocalSchedulerClient(
info["local_scheduler_socket_name"],
worker.worker_id,
worker.actor_id,
is_worker,
num_gpus,
)
# If this is a driver, set the current task ID, the task driver ID, and set
# the task index to 0.
if mode in [SCRIPT_MODE, SILENT_MODE]:
# If the user provided an object_id_seed, then set the current task ID
# deterministically based on that seed (without altering the state of
# the user's random number generator). Otherwise, set the current task
# ID randomly to avoid object ID collisions.
numpy_state = np.random.get_state()
if object_id_seed is not None:
np.random.seed(object_id_seed)
else:
# Try to use true randomness.
np.random.seed(None)
worker.current_task_id = ray.local_scheduler.ObjectID(np.random.bytes(20))
# When tasks are executed on remote workers in the context of multiple
# drivers, the task driver ID is used to keep track of which driver is
# responsible for the task so that error messages will be propagated to
# the correct driver.
worker.task_driver_id = ray.local_scheduler.ObjectID(worker.worker_id)
# Reset the state of the numpy random number generator.
np.random.set_state(numpy_state)
# Set other fields needed for computing task IDs.
worker.task_index = 0
worker.put_index = 0
# Create an entry for the driver task in the task table. This task is
# added immediately with status RUNNING. This allows us to push errors
# related to this driver task back to the driver. For example, if the
# driver creates an object that is later evicted, we should notify the
# user that we're unable to reconstruct the object, since we cannot
# rerun the driver.
nil_actor_counter = 0
driver_task = ray.local_scheduler.Task(
worker.task_driver_id,
ray.local_scheduler.ObjectID(NIL_FUNCTION_ID),
[],
0,
worker.current_task_id,
worker.task_index,
ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
ray.local_scheduler.ObjectID(NIL_ACTOR_ID),
nil_actor_counter,
False,
[],
{"CPU": 0},
)
global_state._execute_command(
driver_task.task_id(),
"RAY.TASK_TABLE_ADD",
driver_task.task_id().id(),
TASK_STATUS_RUNNING,
NIL_LOCAL_SCHEDULER_ID,
driver_task.execution_dependencies_string(),
0,
ray.local_scheduler.task_to_string(driver_task),
)
# Set the driver's current task ID to the task ID assigned to the
# driver task.
worker.current_task_id = driver_task.task_id()
# If this is an actor, get the ID of the corresponding class for the actor.
if worker.actor_id != NIL_ACTOR_ID:
actor_key = b"Actor:" + worker.actor_id
class_id = worker.redis_client.hget(actor_key, "class_id")
worker.class_id = class_id
# Initialize the serialization library. This registers some classes, and so
# it must be run before we export all of the cached remote functions.
_initialize_serialization()
# Start a thread to import exports from the driver or from other workers.
# Note that the driver also has an import thread, which is used only to
# import custom class definitions from calls to register_custom_serializer
# that happen under the hood on workers.
t = threading.Thread(target=import_thread, args=(worker, mode))
# Making the thread a daemon causes it to exit when the main thread exits.
t.daemon = True
t.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
t = threading.Thread(target=print_error_messages, args=(worker,))
# Making the thread a daemon causes it to exit when the main thread
# exits.
t.daemon = True
t.start()
if mode in [SCRIPT_MODE, SILENT_MODE]:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory)
)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory)
)
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
# Export cached remote functions to the workers.
for cached_type, info in worker.cached_remote_functions_and_actors:
if cached_type == "remote_function":
(function_id, func_name, func, func_invoker, function_properties) = info
export_remote_function(
function_id,
func_name,
func,
func_invoker,
function_properties,
worker,
)
elif cached_type == "actor":
(key, actor_class_info) = info
ray.actor.publish_actor_class_to_key(key, actor_class_info, worker)
else:
assert False, "This code should be unreachable."
worker.cached_functions_to_run = None
worker.cached_remote_functions_and_actors = None
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def remote(*args, **kwargs):
    """This decorator is used to define remote functions and to define actors.

    It supports both the bare form (``@ray.remote``) and the parameterized
    form (``@ray.remote(num_gpus=1)``); the two cases are distinguished at
    the bottom of this function by inspecting *args*/*kwargs*.

    Args:
        num_return_vals (int): The number of object IDs that a call to this
            function should return.
        num_cpus (int): The number of CPUs needed to execute this function.
        num_gpus (int): The number of GPUs needed to execute this function.
        resources: A dictionary mapping resource name to the required quantity
            of that resource.
        max_calls (int): The maximum number of tasks of this kind that can be
            run on a worker before the worker needs to be restarted.
        checkpoint_interval (int): The number of tasks to run between
            checkpoints of the actor state.
    """
    worker = global_worker
    def make_remote_decorator(
        num_return_vals,
        num_cpus,
        num_gpus,
        resources,
        max_calls,
        checkpoint_interval,
        func_id=None,
    ):
        """Return a decorator closing over the resolved resource arguments."""
        def remote_decorator(func_or_class):
            # Functions (including Cython functions) become remote functions;
            # classes become actors; anything else is an error.
            if inspect.isfunction(func_or_class) or is_cython(func_or_class):
                # Set the remote function default resources.
                resources["CPU"] = (
                    DEFAULT_REMOTE_FUNCTION_CPUS if num_cpus is None else num_cpus
                )
                resources["GPU"] = (
                    DEFAULT_REMOTE_FUNCTION_GPUS if num_gpus is None else num_gpus
                )
                function_properties = FunctionProperties(
                    num_return_vals=num_return_vals,
                    resources=resources,
                    max_calls=max_calls,
                )
                return remote_function_decorator(func_or_class, function_properties)
            if inspect.isclass(func_or_class):
                # Set the actor default resources.
                if num_cpus is None and num_gpus is None and resources == {}:
                    # In the default case, actors acquire no resources for
                    # their lifetime, and actor methods will require 1 CPU.
                    resources["CPU"] = DEFAULT_ACTOR_CREATION_CPUS_SIMPLE_CASE
                    actor_method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SIMPLE_CASE
                else:
                    # If any resources are specified, then all resources are
                    # acquired for the actor's lifetime and no resources are
                    # associated with methods.
                    resources["CPU"] = (
                        DEFAULT_ACTOR_CREATION_CPUS_SPECIFIED_CASE
                        if num_cpus is None
                        else num_cpus
                    )
                    resources["GPU"] = (
                        DEFAULT_ACTOR_CREATION_GPUS_SPECIFIED_CASE
                        if num_gpus is None
                        else num_gpus
                    )
                    actor_method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SPECIFIED_CASE
                return worker.make_actor(
                    func_or_class, resources, checkpoint_interval, actor_method_cpus
                )
            raise Exception(
                "The @ray.remote decorator must be applied to "
                "either a function or to a class."
            )
        def remote_function_decorator(func, function_properties):
            """Wrap *func* in an invoker object and export it to the cluster."""
            func_name = "{}.{}".format(func.__module__, func.__name__)
            # func_id is only provided when re-creating a function on a worker.
            if func_id is None:
                function_id = compute_function_id(func_name, func)
            else:
                function_id = func_id
            def func_call(*args, **kwargs):
                """This runs immediately when a remote function is called."""
                check_connected()
                check_main_thread()
                args = signature.extend_args(function_signature, args, kwargs)
                if _mode() == PYTHON_MODE:
                    # In PYTHON_MODE, remote calls simply execute the function.
                    # We copy the arguments to prevent the function call from
                    # mutating them and to match the usual behavior of
                    # immutable remote objects.
                    result = func(*copy.deepcopy(args))
                    return result
                objectids = _submit_task(function_id, args)
                # NOTE(review): returns None when the task produces zero
                # object IDs — confirm callers expect that.
                if len(objectids) == 1:
                    return objectids[0]
                elif len(objectids) > 1:
                    return objectids
            def func_executor(arguments):
                """This gets run when the remote function is executed."""
                result = func(*arguments)
                return result
            def func_invoker(*args, **kwargs):
                """This is used to invoke the function."""
                raise Exception(
                    "Remote functions cannot be called directly. "
                    "Instead of running '{}()', try '{}.remote()'.".format(
                        func_name, func_name
                    )
                )
            # Expose the remote-call entry points as attributes on the invoker,
            # which is what the decorator ultimately returns to the caller.
            func_invoker.remote = func_call
            func_invoker.executor = func_executor
            func_invoker.is_remote = True
            # (func_name was already computed above; this recomputation is
            # redundant but harmless.)
            func_name = "{}.{}".format(func.__module__, func.__name__)
            func_invoker.func_name = func_name
            if sys.version_info >= (3, 0) or is_cython(func):
                func_invoker.__doc__ = func.__doc__
            else:
                # Python 2 plain functions use the legacy func_doc attribute.
                func_invoker.func_doc = func.func_doc
            signature.check_signature_supported(func)
            function_signature = signature.extract_signature(func)
            # Everything ready - export the function
            if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
                export_remote_function(
                    function_id, func_name, func, func_invoker, function_properties
                )
            elif worker.mode is None:
                # Not connected yet: cache the definition so connect() can
                # export it later.
                worker.cached_remote_functions_and_actors.append(
                    (
                        "remote_function",
                        (
                            function_id,
                            func_name,
                            func,
                            func_invoker,
                            function_properties,
                        ),
                    )
                )
            return func_invoker
        return remote_decorator
    # Handle resource arguments
    num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else None
    num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else None
    resources = kwargs.get("resources", {})
    if not isinstance(resources, dict):
        raise Exception(
            "The 'resources' keyword argument must be a "
            "dictionary, but received type {}.".format(type(resources))
        )
    # CPU/GPU must come through the dedicated keyword arguments, not the dict.
    assert "CPU" not in resources, "Use the 'num_cpus' argument."
    assert "GPU" not in resources, "Use the 'num_gpus' argument."
    # Handle other arguments.
    num_return_vals = kwargs["num_return_vals"] if "num_return_vals" in kwargs else 1
    max_calls = kwargs["max_calls"] if "max_calls" in kwargs else 0
    checkpoint_interval = (
        kwargs["checkpoint_interval"] if "checkpoint_interval" in kwargs else -1
    )
    if _mode() == WORKER_MODE:
        if "function_id" in kwargs:
            function_id = kwargs["function_id"]
            return make_remote_decorator(
                num_return_vals,
                num_cpus,
                num_gpus,
                resources,
                max_calls,
                checkpoint_interval,
                function_id,
            )
        # NOTE(review): in WORKER_MODE without a "function_id" we fall
        # through to the normal handling below.
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_remote_decorator(
            num_return_vals,
            num_cpus,
            num_gpus,
            resources,
            max_calls,
            checkpoint_interval,
        )(args[0])
    else:
        # This is the case where the decorator is something like
        # @ray.remote(num_return_vals=2).
        error_string = (
            "The @ray.remote decorator must be applied either "
            "with no arguments and no parentheses, for example "
            "'@ray.remote', or it must be applied using some of "
            "the arguments 'num_return_vals', 'resources', "
            "or 'max_calls', like "
            "'@ray.remote(num_return_vals=2, "
            'resources={"GPU": 1})\'.'
        )
        assert len(args) == 0 and len(kwargs) > 0, error_string
        for key in kwargs:
            assert key in [
                "num_return_vals",
                "num_cpus",
                "num_gpus",
                "resources",
                "max_calls",
                "checkpoint_interval",
            ], error_string
        assert "function_id" not in kwargs
        return make_remote_decorator(
            num_return_vals,
            num_cpus,
            num_gpus,
            resources,
            max_calls,
            checkpoint_interval,
        )
|
def remote(*args, **kwargs):
    """This decorator is used to define remote functions and to define actors.

    Supports both the bare form (``@ray.remote``) and the parameterized form
    (``@ray.remote(num_gpus=1)``); the cases are distinguished at the bottom
    of this function by inspecting *args*/*kwargs*.

    Args:
        num_return_vals (int): The number of object IDs that a call to this
            function should return.
        num_cpus (int): The number of CPUs needed to execute this function.
        num_gpus (int): The number of GPUs needed to execute this function.
        resources: A dictionary mapping resource name to the required quantity
            of that resource.
        max_calls (int): The maximum number of tasks of this kind that can be
            run on a worker before the worker needs to be restarted.
        checkpoint_interval (int): The number of tasks to run between
            checkpoints of the actor state.
    """
    worker = global_worker
    def make_remote_decorator(
        num_return_vals, resources, max_calls, checkpoint_interval, func_id=None
    ):
        """Return a decorator closing over the resolved resource arguments."""
        def remote_decorator(func_or_class):
            # Functions (including Cython functions) become remote functions;
            # classes become actors; anything else is an error.
            if inspect.isfunction(func_or_class) or is_cython(func_or_class):
                function_properties = FunctionProperties(
                    num_return_vals=num_return_vals,
                    resources=resources,
                    max_calls=max_calls,
                )
                return remote_function_decorator(func_or_class, function_properties)
            if inspect.isclass(func_or_class):
                return worker.make_actor(func_or_class, resources, checkpoint_interval)
            raise Exception(
                "The @ray.remote decorator must be applied to "
                "either a function or to a class."
            )
        def remote_function_decorator(func, function_properties):
            """Wrap *func* in an invoker object and export it to the cluster."""
            func_name = "{}.{}".format(func.__module__, func.__name__)
            # func_id is only provided when re-creating a function on a worker.
            if func_id is None:
                function_id = compute_function_id(func_name, func)
            else:
                function_id = func_id
            def func_call(*args, **kwargs):
                """This runs immediately when a remote function is called."""
                check_connected()
                check_main_thread()
                args = signature.extend_args(function_signature, args, kwargs)
                if _mode() == PYTHON_MODE:
                    # In PYTHON_MODE, remote calls simply execute the function.
                    # We copy the arguments to prevent the function call from
                    # mutating them and to match the usual behavior of
                    # immutable remote objects.
                    result = func(*copy.deepcopy(args))
                    return result
                objectids = _submit_task(function_id, args)
                # NOTE(review): returns None when the task produces zero
                # object IDs — confirm callers expect that.
                if len(objectids) == 1:
                    return objectids[0]
                elif len(objectids) > 1:
                    return objectids
            def func_executor(arguments):
                """This gets run when the remote function is executed."""
                result = func(*arguments)
                return result
            def func_invoker(*args, **kwargs):
                """This is used to invoke the function."""
                raise Exception(
                    "Remote functions cannot be called directly. "
                    "Instead of running '{}()', try '{}.remote()'.".format(
                        func_name, func_name
                    )
                )
            # Expose the remote-call entry points as attributes on the invoker,
            # which is what the decorator ultimately returns to the caller.
            func_invoker.remote = func_call
            func_invoker.executor = func_executor
            func_invoker.is_remote = True
            # (func_name was already computed above; this recomputation is
            # redundant but harmless.)
            func_name = "{}.{}".format(func.__module__, func.__name__)
            func_invoker.func_name = func_name
            if sys.version_info >= (3, 0) or is_cython(func):
                func_invoker.__doc__ = func.__doc__
            else:
                # Python 2 plain functions use the legacy func_doc attribute.
                func_invoker.func_doc = func.func_doc
            signature.check_signature_supported(func)
            function_signature = signature.extract_signature(func)
            # Everything ready - export the function
            if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
                export_remote_function(
                    function_id, func_name, func, func_invoker, function_properties
                )
            elif worker.mode is None:
                # Not connected yet: cache the definition so connect() can
                # export it later.
                worker.cached_remote_functions_and_actors.append(
                    (
                        "remote_function",
                        (
                            function_id,
                            func_name,
                            func,
                            func_invoker,
                            function_properties,
                        ),
                    )
                )
            return func_invoker
        return remote_decorator
    # Handle resource arguments
    num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs else 1
    num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs else 0
    resources = kwargs.get("resources", {})
    if not isinstance(resources, dict):
        raise Exception(
            "The 'resources' keyword argument must be a "
            "dictionary, but received type {}.".format(type(resources))
        )
    # CPU/GPU must come through the dedicated keyword arguments, not the dict.
    assert "CPU" not in resources, "Use the 'num_cpus' argument."
    assert "GPU" not in resources, "Use the 'num_gpus' argument."
    resources["CPU"] = num_cpus
    resources["GPU"] = num_gpus
    # Handle other arguments.
    num_return_vals = kwargs["num_return_vals"] if "num_return_vals" in kwargs else 1
    max_calls = kwargs["max_calls"] if "max_calls" in kwargs else 0
    checkpoint_interval = (
        kwargs["checkpoint_interval"] if "checkpoint_interval" in kwargs else -1
    )
    if _mode() == WORKER_MODE:
        if "function_id" in kwargs:
            function_id = kwargs["function_id"]
            return make_remote_decorator(
                num_return_vals, resources, max_calls, checkpoint_interval, function_id
            )
        # NOTE(review): in WORKER_MODE without a "function_id" we fall
        # through to the normal handling below.
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_remote_decorator(
            num_return_vals, resources, max_calls, checkpoint_interval
        )(args[0])
    else:
        # This is the case where the decorator is something like
        # @ray.remote(num_return_vals=2).
        error_string = (
            "The @ray.remote decorator must be applied either "
            "with no arguments and no parentheses, for example "
            "'@ray.remote', or it must be applied using some of "
            "the arguments 'num_return_vals', 'resources', "
            "or 'max_calls', like "
            "'@ray.remote(num_return_vals=2, "
            'resources={"GPU": 1})\'.'
        )
        assert len(args) == 0 and len(kwargs) > 0, error_string
        for key in kwargs:
            assert key in [
                "num_return_vals",
                "num_cpus",
                "num_gpus",
                "resources",
                "max_calls",
                "checkpoint_interval",
            ], error_string
        assert "function_id" not in kwargs
        return make_remote_decorator(
            num_return_vals, resources, max_calls, checkpoint_interval
        )
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def make_remote_decorator(
    num_return_vals,
    num_cpus,
    num_gpus,
    resources,
    max_calls,
    checkpoint_interval,
    func_id=None,
):
    """Build the decorator returned by ``ray.remote``.

    Closes over the already-resolved resource arguments and returns a
    decorator that turns functions into remote functions and classes into
    actors.

    NOTE(review): relies on a ``worker`` object being in scope in the
    enclosing module/function — confirm where it is bound.
    """
    def remote_decorator(func_or_class):
        # Functions (including Cython functions) become remote functions;
        # classes become actors; anything else is an error.
        if inspect.isfunction(func_or_class) or is_cython(func_or_class):
            # Set the remote function default resources.
            resources["CPU"] = (
                DEFAULT_REMOTE_FUNCTION_CPUS if num_cpus is None else num_cpus
            )
            resources["GPU"] = (
                DEFAULT_REMOTE_FUNCTION_GPUS if num_gpus is None else num_gpus
            )
            function_properties = FunctionProperties(
                num_return_vals=num_return_vals,
                resources=resources,
                max_calls=max_calls,
            )
            return remote_function_decorator(func_or_class, function_properties)
        if inspect.isclass(func_or_class):
            # Set the actor default resources.
            if num_cpus is None and num_gpus is None and resources == {}:
                # In the default case, actors acquire no resources for
                # their lifetime, and actor methods will require 1 CPU.
                resources["CPU"] = DEFAULT_ACTOR_CREATION_CPUS_SIMPLE_CASE
                actor_method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SIMPLE_CASE
            else:
                # If any resources are specified, then all resources are
                # acquired for the actor's lifetime and no resources are
                # associated with methods.
                resources["CPU"] = (
                    DEFAULT_ACTOR_CREATION_CPUS_SPECIFIED_CASE
                    if num_cpus is None
                    else num_cpus
                )
                resources["GPU"] = (
                    DEFAULT_ACTOR_CREATION_GPUS_SPECIFIED_CASE
                    if num_gpus is None
                    else num_gpus
                )
                actor_method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SPECIFIED_CASE
            return worker.make_actor(
                func_or_class, resources, checkpoint_interval, actor_method_cpus
            )
        raise Exception(
            "The @ray.remote decorator must be applied to "
            "either a function or to a class."
        )
    def remote_function_decorator(func, function_properties):
        """Wrap *func* in an invoker object and export it to the cluster."""
        func_name = "{}.{}".format(func.__module__, func.__name__)
        # func_id is only provided when re-creating a function on a worker.
        if func_id is None:
            function_id = compute_function_id(func_name, func)
        else:
            function_id = func_id
        def func_call(*args, **kwargs):
            """This runs immediately when a remote function is called."""
            check_connected()
            check_main_thread()
            args = signature.extend_args(function_signature, args, kwargs)
            if _mode() == PYTHON_MODE:
                # In PYTHON_MODE, remote calls simply execute the function.
                # We copy the arguments to prevent the function call from
                # mutating them and to match the usual behavior of
                # immutable remote objects.
                result = func(*copy.deepcopy(args))
                return result
            objectids = _submit_task(function_id, args)
            # NOTE(review): returns None when the task produces zero object
            # IDs — confirm callers expect that.
            if len(objectids) == 1:
                return objectids[0]
            elif len(objectids) > 1:
                return objectids
        def func_executor(arguments):
            """This gets run when the remote function is executed."""
            result = func(*arguments)
            return result
        def func_invoker(*args, **kwargs):
            """This is used to invoke the function."""
            raise Exception(
                "Remote functions cannot be called directly. "
                "Instead of running '{}()', try '{}.remote()'.".format(
                    func_name, func_name
                )
            )
        # Expose the remote-call entry points as attributes on the invoker,
        # which is what the decorator ultimately returns to the caller.
        func_invoker.remote = func_call
        func_invoker.executor = func_executor
        func_invoker.is_remote = True
        # (func_name was already computed above; this recomputation is
        # redundant but harmless.)
        func_name = "{}.{}".format(func.__module__, func.__name__)
        func_invoker.func_name = func_name
        if sys.version_info >= (3, 0) or is_cython(func):
            func_invoker.__doc__ = func.__doc__
        else:
            # Python 2 plain functions use the legacy func_doc attribute.
            func_invoker.func_doc = func.func_doc
        signature.check_signature_supported(func)
        function_signature = signature.extract_signature(func)
        # Everything ready - export the function
        if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
            export_remote_function(
                function_id, func_name, func, func_invoker, function_properties
            )
        elif worker.mode is None:
            # Not connected yet: cache the definition so connect() can
            # export it later.
            worker.cached_remote_functions_and_actors.append(
                (
                    "remote_function",
                    (function_id, func_name, func, func_invoker, function_properties),
                )
            )
        return func_invoker
    return remote_decorator
|
def make_remote_decorator(
    num_return_vals, resources, max_calls, checkpoint_interval, func_id=None
):
    """Build the decorator returned by ``ray.remote``.

    Closes over the already-resolved resource arguments and returns a
    decorator that turns functions into remote functions and classes into
    actors.

    NOTE(review): relies on a ``worker`` object being in scope in the
    enclosing module/function — confirm where it is bound.
    """
    def remote_decorator(func_or_class):
        # Functions (including Cython functions) become remote functions;
        # classes become actors; anything else is an error.
        if inspect.isfunction(func_or_class) or is_cython(func_or_class):
            function_properties = FunctionProperties(
                num_return_vals=num_return_vals,
                resources=resources,
                max_calls=max_calls,
            )
            return remote_function_decorator(func_or_class, function_properties)
        if inspect.isclass(func_or_class):
            return worker.make_actor(func_or_class, resources, checkpoint_interval)
        raise Exception(
            "The @ray.remote decorator must be applied to "
            "either a function or to a class."
        )
    def remote_function_decorator(func, function_properties):
        """Wrap *func* in an invoker object and export it to the cluster."""
        func_name = "{}.{}".format(func.__module__, func.__name__)
        # func_id is only provided when re-creating a function on a worker.
        if func_id is None:
            function_id = compute_function_id(func_name, func)
        else:
            function_id = func_id
        def func_call(*args, **kwargs):
            """This runs immediately when a remote function is called."""
            check_connected()
            check_main_thread()
            args = signature.extend_args(function_signature, args, kwargs)
            if _mode() == PYTHON_MODE:
                # In PYTHON_MODE, remote calls simply execute the function.
                # We copy the arguments to prevent the function call from
                # mutating them and to match the usual behavior of
                # immutable remote objects.
                result = func(*copy.deepcopy(args))
                return result
            objectids = _submit_task(function_id, args)
            # NOTE(review): returns None when the task produces zero object
            # IDs — confirm callers expect that.
            if len(objectids) == 1:
                return objectids[0]
            elif len(objectids) > 1:
                return objectids
        def func_executor(arguments):
            """This gets run when the remote function is executed."""
            result = func(*arguments)
            return result
        def func_invoker(*args, **kwargs):
            """This is used to invoke the function."""
            raise Exception(
                "Remote functions cannot be called directly. "
                "Instead of running '{}()', try '{}.remote()'.".format(
                    func_name, func_name
                )
            )
        # Expose the remote-call entry points as attributes on the invoker,
        # which is what the decorator ultimately returns to the caller.
        func_invoker.remote = func_call
        func_invoker.executor = func_executor
        func_invoker.is_remote = True
        # (func_name was already computed above; this recomputation is
        # redundant but harmless.)
        func_name = "{}.{}".format(func.__module__, func.__name__)
        func_invoker.func_name = func_name
        if sys.version_info >= (3, 0) or is_cython(func):
            func_invoker.__doc__ = func.__doc__
        else:
            # Python 2 plain functions use the legacy func_doc attribute.
            func_invoker.func_doc = func.func_doc
        signature.check_signature_supported(func)
        function_signature = signature.extract_signature(func)
        # Everything ready - export the function
        if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
            export_remote_function(
                function_id, func_name, func, func_invoker, function_properties
            )
        elif worker.mode is None:
            # Not connected yet: cache the definition so connect() can
            # export it later.
            worker.cached_remote_functions_and_actors.append(
                (
                    "remote_function",
                    (function_id, func_name, func, func_invoker, function_properties),
                )
            )
        return func_invoker
    return remote_decorator
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def remote_decorator(func_or_class):
    """Apply the remote machinery to *func_or_class*.

    Plain (or Cython) functions become remote functions; classes become
    actors.  Anything else raises.  Resource defaults are filled into the
    closed-over ``resources`` dict before dispatching.
    """
    if inspect.isfunction(func_or_class) or is_cython(func_or_class):
        # Remote-function case: fill in default CPU/GPU requirements.
        if num_cpus is None:
            resources["CPU"] = DEFAULT_REMOTE_FUNCTION_CPUS
        else:
            resources["CPU"] = num_cpus
        if num_gpus is None:
            resources["GPU"] = DEFAULT_REMOTE_FUNCTION_GPUS
        else:
            resources["GPU"] = num_gpus
        properties = FunctionProperties(
            num_return_vals=num_return_vals,
            resources=resources,
            max_calls=max_calls,
        )
        return remote_function_decorator(func_or_class, properties)
    if inspect.isclass(func_or_class):
        # Actor case: choose between the "no resources requested" default
        # and the explicit-resources configuration.
        if num_cpus is None and num_gpus is None and resources == {}:
            # Default: the actor itself holds no resources for its lifetime,
            # and actor methods require 1 CPU.
            resources["CPU"] = DEFAULT_ACTOR_CREATION_CPUS_SIMPLE_CASE
            method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SIMPLE_CASE
        else:
            # Explicit: resources are held for the actor's lifetime and no
            # resources are associated with its methods.
            resources["CPU"] = (
                num_cpus
                if num_cpus is not None
                else DEFAULT_ACTOR_CREATION_CPUS_SPECIFIED_CASE
            )
            resources["GPU"] = (
                num_gpus
                if num_gpus is not None
                else DEFAULT_ACTOR_CREATION_GPUS_SPECIFIED_CASE
            )
            method_cpus = DEFAULT_ACTOR_METHOD_CPUS_SPECIFIED_CASE
        return worker.make_actor(
            func_or_class, resources, checkpoint_interval, method_cpus
        )
    raise Exception(
        "The @ray.remote decorator must be applied to either a function or to a class."
    )
|
def remote_decorator(func_or_class):
    """Turn *func_or_class* into a remote function (functions, including
    Cython ones) or an actor (classes); reject anything else."""
    wraps_function = inspect.isfunction(func_or_class) or is_cython(func_or_class)
    if wraps_function:
        props = FunctionProperties(
            num_return_vals=num_return_vals,
            resources=resources,
            max_calls=max_calls,
        )
        return remote_function_decorator(func_or_class, props)
    if inspect.isclass(func_or_class):
        return worker.make_actor(func_or_class, resources, checkpoint_interval)
    raise Exception(
        "The @ray.remote decorator must be applied to either a function or to a class."
    )
|
https://github.com/ray-project/ray/issues/1716
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1694, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/registry.py", line 12, in <module>
from ray.tune.trainable import Trainable, wrap_function
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 8, in <module>
from ray.tune.hyperband import HyperBandScheduler
File "/home/ubuntu/ray/python/ray/tune/hyperband.py", line 8, in <module>
from ray.tune.trial_scheduler import FIFOScheduler, TrialScheduler
File "/home/ubuntu/ray/python/ray/tune/trial_scheduler.py", line 5, in <module>
from ray.tune.trial import Trial
File "/home/ubuntu/ray/python/ray/tune/trial.py", line 15, in <module>
from ray.tune.registry import _default_registry, get_registry, TRAINABLE_CLASS
ImportError: cannot import name '_default_registry'
|
ImportError
|
def fetch_and_execute_function_to_run(key, worker=global_worker):
    """Fetch the pickled function stored under *key* in Redis and run it.

    Drivers skip exports that were published by a different driver.  Any
    exception raised while deserializing or running the function is caught
    and pushed back to the owning driver as a "function_to_run" error.
    """
    driver_id, serialized_function = worker.redis_client.hmget(
        key, ["driver_id", "function"]
    )
    is_driver = worker.mode in [SCRIPT_MODE, SILENT_MODE]
    if is_driver and driver_id != worker.task_driver_id.id():
        # Export came from another driver; it is not relevant here.
        return
    try:
        # Deserialize and immediately invoke the function on this worker.
        function = pickle.loads(serialized_function)
        function({"worker": worker})
    except Exception:
        # Record the traceback and report the failure to the driver.
        trace = traceback.format_exc()
        if "function" in locals() and hasattr(function, "__name__"):
            label = function.__name__
        else:
            # pickle.loads itself may have failed, so "function" can be unbound.
            label = ""
        ray.utils.push_error_to_driver(
            worker.redis_client,
            "function_to_run",
            trace,
            driver_id=driver_id,
            data={"name": label},
        )
|
def fetch_and_execute_function_to_run(key, worker=global_worker):
    """Fetch an exported function from Redis and run it on this worker.

    Note that this version applies the export regardless of which driver
    produced it (no driver_id filtering is performed).

    Args:
        key: Redis hash key holding the export (fields "driver_id" and
            "function", where "function" is a pickled callable).
        worker: The worker to run the function on; defaults to the
            process-global worker.
    """
    driver_id, serialized_function = worker.redis_client.hmget(
        key, ["driver_id", "function"]
    )
    try:
        # Deserialize the function.
        function = pickle.loads(serialized_function)
        # Run the function.
        function({"worker": worker})
    except Exception:
        # If an exception was thrown when the function was run, we record the
        # traceback and notify the scheduler of the failure.
        traceback_str = traceback.format_exc()
        # Log the error message.
        # If pickle.loads itself raised, "function" was never bound, so fall
        # back to an empty name.
        name = (
            function.__name__
            if ("function" in locals() and hasattr(function, "__name__"))
            else ""
        )
        ray.utils.push_error_to_driver(
            worker.redis_client,
            "function_to_run",
            traceback_str,
            driver_id=driver_id,
            data={"name": name},
        )
|
https://github.com/ray-project/ray/issues/1409
|
Traceback (most recent call last):
File "/home/ubuntu/ray/python/ray/worker.py", line 1625, in fetch_and_execute_function_to_run
function = pickle.loads(serialized_function)
File "/home/ubuntu/ray/python/ray/tune/__init__.py", line 6, in <module>
from ray.tune.tune import run_experiments
File "/home/ubuntu/ray/python/ray/tune/tune.py", line 88, in <module>
from ray.rllib.shard.shardedagent import ShardedAgent
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 18, in <module>
_register_all()
File "/home/ubuntu/ray/python/ray/rllib/__init__.py", line 13, in _register_all
register_trainable(key, get_agent_class(key))
File "/home/ubuntu/ray/python/ray/rllib/agent.py", line 368, in get_agent_class
from ray.rllib import es
File "/home/ubuntu/ray/python/ray/rllib/es/__init__.py", line 1, in <module>
from ray.rllib.es.es import (ESAgent, DEFAULT_CONFIG)
File "/home/ubuntu/ray/python/ray/rllib/es/es.py", line 43, in <module>
@ray.remote
File "/home/ubuntu/ray/python/ray/worker.py", line 2579, in remote
max_calls, checkpoint_interval)(args[0])
File "/home/ubuntu/ray/python/ray/worker.py", line 2482, in remote_decorator
function_properties)
File "/home/ubuntu/ray/python/ray/worker.py", line 2541, in remote_function_decorator
func_invoker, function_properties)
File "/home/ubuntu/ray/python/ray/worker.py", line 2380, in export_remote_function
check_main_thread()
File "/home/ubuntu/ray/python/ray/worker.py", line 959, in check_main_thread
.format(threading.current_thread().getName()))
Exception: The Ray methods are not thread safe and must be called from the main thread. This method was called from thread Thread-4.
|
Exception
|
def worker_task(ps, worker_index, batch_size=50):
    """Endlessly train against the parameter server *ps*.

    Each iteration pulls the current weights from the server, computes a
    gradient update on one MNIST minibatch, and pushes that update back.

    Args:
        ps: Parameter-server actor handle exposing pull/push.
        worker_index: Index of this worker; used as the MNIST download seed.
        batch_size: Number of examples per training minibatch.
    """
    dataset = model.download_mnist_retry(seed=worker_index)
    network = model.SimpleCNN()
    weight_keys = network.get_weights()[0]
    while True:
        # Sync the local model with the server's latest weights.
        current_weights = ray.get(ps.pull.remote(weight_keys))
        network.set_weights(weight_keys, current_weights)
        # Train on one minibatch and send the resulting update upstream.
        batch_xs, batch_ys = dataset.train.next_batch(batch_size)
        grads = network.compute_update(batch_xs, batch_ys)
        ps.push.remote(weight_keys, grads)
|
def worker_task(ps, batch_size=50):
    """Endlessly train against the parameter server *ps*.

    Each iteration pulls the current weights from the server, computes a
    gradient update on one MNIST minibatch, and pushes that update back.

    Args:
        ps: Parameter-server actor handle exposing pull/push.
        batch_size: Number of examples per training minibatch.
    """
    dataset = input_data.read_data_sets("MNIST_data", one_hot=True)
    network = model.SimpleCNN()
    weight_keys = network.get_weights()[0]
    while True:
        # Sync the local model with the server's latest weights.
        current_weights = ray.get(ps.pull.remote(weight_keys))
        network.set_weights(weight_keys, current_weights)
        # Train on one minibatch and send the resulting update upstream.
        batch_xs, batch_ys = dataset.train.next_batch(batch_size)
        grads = network.compute_update(batch_xs, batch_ys)
        ps.push.remote(weight_keys, grads)
|
https://github.com/ray-project/ray/issues/1398
|
Remote function __main__.worker_task failed with:
Traceback (most recent call last):
File "async_parameter_server.py", line 40, in worker_task
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py", line 245, in read_data_sets
source_url + TRAIN_LABELS)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 209, in maybe_download
gfile.Copy(temp_file_name, filepath)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py", line 385, in copy
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 473, in __exit__
c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.AlreadyExistsError: file already exists
|
tensorflow.python.framework.errors_impl.AlreadyExistsError
|
def __init__(self, worker_index, batch_size=50):
    """Initialize a worker: its MNIST data (seeded per worker) and model.

    Args:
        worker_index: Index identifying this worker; also passed as the
            MNIST download seed.
        batch_size: Number of examples per training minibatch.
    """
    self.worker_index = worker_index
    self.batch_size = batch_size
    # The retrying download helper presumably tolerates several workers
    # fetching MNIST concurrently -- confirm against download_mnist_retry.
    self.mnist = model.download_mnist_retry(seed=worker_index)
    self.net = model.SimpleCNN()
|
def __init__(self, worker_index, batch_size=50):
    """Initialize a worker: its MNIST data and model.

    Args:
        worker_index: Index identifying this worker; passed as the dataset
            seed.
        batch_size: Number of examples per training minibatch.
    """
    self.worker_index = worker_index
    self.batch_size = batch_size
    # NOTE(review): every worker reads/writes the shared "MNIST_data"
    # directory and the download is not retried, so simultaneous fetches
    # may collide.
    self.mnist = input_data.read_data_sets(
        "MNIST_data", one_hot=True, seed=worker_index
    )
    self.net = model.SimpleCNN()
|
https://github.com/ray-project/ray/issues/1398
|
Remote function __main__.worker_task failed with:
Traceback (most recent call last):
File "async_parameter_server.py", line 40, in worker_task
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/mnist.py", line 245, in read_data_sets
source_url + TRAIN_LABELS)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/datasets/base.py", line 209, in maybe_download
gfile.Copy(temp_file_name, filepath)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/lib/io/file_io.py", line 385, in copy
compat.as_bytes(oldpath), compat.as_bytes(newpath), overwrite, status)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py", line 473, in __exit__
c_api.TF_GetCode(self.status.status))
tensorflow.python.framework.errors_impl.AlreadyExistsError: file already exists
|
tensorflow.python.framework.errors_impl.AlreadyExistsError
|
def retrieve_and_deserialize(self, object_ids, timeout, error_timeout=10):
    """Fetch objects from the local plasma store and deserialize them.

    Args:
        object_ids: The plasma IDs of the objects to retrieve.
        timeout: Timeout passed to each underlying plasma get call.
        error_timeout: Seconds to wait for a missing class definition
            before pushing a warning to the driver.

    Returns:
        One entry per requested ID: the deserialized values, or
        RayTaskError placeholders if the stored values were invalid.
    """
    start_time = time.time()
    # Only send the warning once.
    warning_sent = False
    while True:
        try:
            # We divide very large get requests into smaller get requests
            # so that a single get request doesn't block the store for a
            # long time, if the store is blocked, it can block the manager
            # as well as a consequence.
            results = []
            for i in range(0, len(object_ids), ray._config.worker_get_request_size()):
                results += self.plasma_client.get(
                    object_ids[i : (i + ray._config.worker_get_request_size())],
                    timeout,
                    self.serialization_context,
                )
            return results
        except pyarrow.lib.ArrowInvalid as e:
            # TODO(ekl): the local scheduler could include relevant
            # metadata in the task kill case for a better error message
            invalid_error = RayTaskError(
                "<unknown>",
                None,
                "Invalid return value: likely worker died or was killed "
                "while executing the task.",
            )
            return [invalid_error] * len(object_ids)
        except pyarrow.DeserializationCallbackError as e:
            # Wait a little bit for the import thread to import the class.
            # If we currently have the worker lock, we need to release it
            # so that the import thread can acquire it.
            if self.mode == WORKER_MODE:
                self.lock.release()
            time.sleep(0.01)
            if self.mode == WORKER_MODE:
                self.lock.acquire()
            if time.time() - start_time > error_timeout:
                warning_message = (
                    "This worker or driver is waiting to "
                    "receive a class definition so that it "
                    "can deserialize an object from the "
                    "object store. This may be fine, or it "
                    "may be a bug."
                )
                if not warning_sent:
                    self.push_error_to_driver(
                        self.task_driver_id.id(), "wait_for_class", warning_message
                    )
                    warning_sent = True
|
def retrieve_and_deserialize(self, object_ids, timeout, error_timeout=10):
    """Fetch objects from the local plasma store and deserialize them.

    Args:
        object_ids: The plasma IDs of the objects to retrieve.
        timeout: Timeout passed to each underlying plasma get call.
        error_timeout: Seconds to wait for a missing class definition
            before pushing a warning to the driver.

    Returns:
        The deserialized objects, one per requested ID.
    """
    start_time = time.time()
    # Only send the warning once.
    warning_sent = False
    while True:
        try:
            # We divide very large get requests into smaller get requests
            # so that a single get request doesn't block the store for a
            # long time, if the store is blocked, it can block the manager
            # as well as a consequence.
            results = []
            for i in range(0, len(object_ids), ray._config.worker_get_request_size()):
                results += self.plasma_client.get(
                    object_ids[i : (i + ray._config.worker_get_request_size())],
                    timeout,
                    self.serialization_context,
                )
            return results
        except pyarrow.DeserializationCallbackError as e:
            # Wait a little bit for the import thread to import the class.
            # If we currently have the worker lock, we need to release it
            # so that the import thread can acquire it.
            if self.mode == WORKER_MODE:
                self.lock.release()
            time.sleep(0.01)
            if self.mode == WORKER_MODE:
                self.lock.acquire()
            if time.time() - start_time > error_timeout:
                warning_message = (
                    "This worker or driver is waiting to "
                    "receive a class definition so that it "
                    "can deserialize an object from the "
                    "object store. This may be fine, or it "
                    "may be a bug."
                )
                if not warning_sent:
                    self.push_error_to_driver(
                        self.task_driver_id.id(), "wait_for_class", warning_message
                    )
                    warning_sent = True
|
https://github.com/ray-project/ray/issues/1049
|
Disconnecting client on fd 42
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/home/ubuntu/.local/lib/python3.6/site-packages/ray-0.2.0-py3.6-linux-x86_64.egg/ray/worker.py", li
ne 1358, in cleanup
A worker died or was killed while executing a task.
You can inspect errors by running
ray.error_info()
If this driver is hanging, start a new one with
ray.init(redis_address="127.0.0.1:25614")
worker.plasma_client.disconnect()
File "/home/ubuntu/.local/lib/python3.6/site-packages/ray-0.2.0-py3.6-linux-x86_64.egg/ray/worker.py", li
ne 865, in exit
sys.exit(0)
SystemExit: 0
|
ray.error
|
def stop(self, error=False):
    """Stops this trial.

    Stops this trial, releasing all allocating resources. If stopping the
    trial fails, the run will be marked as terminated in error, but no
    exception will be thrown.

    Args:
        error (bool): Whether to mark this trial as terminated in error.
    """
    if error:
        self.status = Trial.ERROR
    else:
        self.status = Trial.TERMINATED
    try:
        if self.agent:
            stop_tasks = []
            stop_tasks.append(self.agent.stop.remote())
            stop_tasks.append(
                self.agent.__ray_terminate__.remote(self.agent._ray_actor_id.id())
            )
            # Block until both the stop call and the actor termination have
            # completed; timeout=10000 is presumably in milliseconds
            # (ray.wait convention) -- TODO confirm.
            _, unfinished = ray.wait(stop_tasks, num_returns=2, timeout=10000)
            if unfinished:
                print(("Stopping %s Actor was unsuccessful, but moving on...") % self)
    except Exception:
        print("Error stopping agent:", traceback.format_exc())
        self.status = Trial.ERROR
    finally:
        # Drop the actor handle regardless of whether shutdown succeeded.
        self.agent = None
|
def stop(self, error=False):
    """Stops this trial.

    Stops this trial, releasing all allocating resources. If stopping the
    trial fails, the run will be marked as terminated in error, but no
    exception will be thrown.

    Args:
        error (bool): Whether to mark this trial as terminated in error.
    """
    if error:
        self.status = Trial.ERROR
    else:
        self.status = Trial.TERMINATED
    try:
        if self.agent:
            # Fire-and-forget: the remote stop/terminate calls are not
            # awaited, so this returns before the actor has actually exited.
            self.agent.stop.remote()
            self.agent.__ray_terminate__.remote(self.agent._ray_actor_id.id())
    except Exception:
        print("Error stopping agent:", traceback.format_exc())
        self.status = Trial.ERROR
    finally:
        # Drop the actor handle regardless of whether shutdown succeeded.
        self.agent = None
|
https://github.com/ray-project/ray/issues/1165
|
======================================================================
ERROR: testPauseThenResume (__main__.TrialRunnerTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/trial_runner_test.py", line 248, in testPauseThenResume
trials[0].resume()
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.2.1-py3.6-linux-x86_64.egg/ray/tune/trial.py", line 120, in resume
self.start()
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.2.1-py3.6-linux-x86_64.egg/ray/tune/trial.py", line 76, in start
self._setup_agent()
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.2.1-py3.6-linux-x86_64.egg/ray/tune/trial.py", line 213, in _setup_agent
experiment_tag=self.experiment_tag)
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.2.1-py3.6-linux-x86_64.egg/ray/actor.py", line 717, in remote
ray.worker.global_worker)
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.2.1-py3.6-linux-x86_64.egg/ray/actor.py", line 381, in export_actor
num_gpus, worker.redis_client)
File "/home/travis/.local/lib/python3.6/site-packages/ray-0.2.1-py3.6-linux-x86_64.egg/ray/utils.py", line 229, in select_local_scheduler
"information is {}.".format(local_schedulers))
Exception: Could not find a node with enough GPUs or other resources to create this actor. The local scheduler information is [ {'ClientType': 'local_scheduler', 'Deleted': False, 'DBClientID': '9557d3251486fc299505385ce6ddeddef27ad895', 'AuxAddress': '127.0.0.1:38753', 'NumCPUs': 1.0, 'NumGPUs': 1.0, 'LocalSchedulerSocketName': '/tmp/scheduler14716168'}].
|
Exception
|
def save(self):
    """Saves the current model state to a checkpoint.

    The subclass-provided _save() writes the model itself; this method
    additionally writes a ".rllib_metadata" pickle next to it containing
    [experiment_id, iteration, timesteps_total, time_total], in the order
    read back by restore().

    Returns:
        Checkpoint path that may be passed to restore().
    """
    checkpoint_path = self._save()
    # Use a context manager so the metadata file handle is closed promptly
    # (previously the handle returned by open() was leaked).
    with open(checkpoint_path + ".rllib_metadata", "wb") as f:
        pickle.dump(
            [self.experiment_id, self.iteration, self.timesteps_total, self.time_total],
            f,
        )
    return checkpoint_path
|
def save(self):
    """Saves the current model state to a checkpoint.

    The subclass-provided _save() writes the model itself; this method
    additionally writes a ".rllib_metadata" pickle next to it containing
    [experiment_id, iteration, timesteps_total, time_total_s].

    NOTE(review): reads self.time_total_s -- the agent must define that
    attribute; confirm against the attributes set in restore().

    Returns:
        Checkpoint path that may be passed to restore().
    """
    checkpoint_path = self._save()
    # Use a context manager so the metadata file handle is closed promptly
    # (previously the handle returned by open() was leaked).
    with open(checkpoint_path + ".rllib_metadata", "wb") as f:
        pickle.dump(
            [self.experiment_id, self.iteration, self.timesteps_total, self.time_total_s],
            f,
        )
    return checkpoint_path
|
https://github.com/ray-project/ray/issues/982
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-4c879b9f1898> in <module>()
5
6 agent = PPOAgent('Pong-v0', DEFAULT_CONFIG)
----> 7 agent.save()
~/Workspace/ray/python/ray/rllib/common.py in save(self)
171 pickle.dump(
172 [self.experiment_id, self.iteration, self.timesteps_total,
--> 173 self.time_total_s],
174 open(checkpoint_path + ".rllib_metadata", "wb"))
175 return checkpoint_path
AttributeError: 'PPOAgent' object has no attribute 'time_total_s'
|
AttributeError
|
def restore(self, checkpoint_path):
    """Restores training state from a given model checkpoint.

    These checkpoints are returned from calls to save(). Reads back the
    ".rllib_metadata" pickle written next to the checkpoint and restores
    experiment_id, iteration, timesteps_total, and time_total.

    Args:
        checkpoint_path: Path previously returned by save().
    """
    self._restore(checkpoint_path)
    # Use a context manager so the metadata file handle is closed promptly
    # (previously the handle returned by open() was leaked).
    with open(checkpoint_path + ".rllib_metadata", "rb") as f:
        metadata = pickle.load(f)
    self.experiment_id = metadata[0]
    self.iteration = metadata[1]
    self.timesteps_total = metadata[2]
    self.time_total = metadata[3]
|
def restore(self, checkpoint_path):
    """Restores training state from a given model checkpoint.

    These checkpoints are returned from calls to save(). Reads back the
    ".rllib_metadata" pickle written next to the checkpoint and restores
    experiment_id, iteration, timesteps_total, and time_total_s.

    Args:
        checkpoint_path: Path previously returned by save().
    """
    self._restore(checkpoint_path)
    # Use a context manager so the metadata file handle is closed promptly
    # (previously the handle returned by open() was leaked).
    with open(checkpoint_path + ".rllib_metadata", "rb") as f:
        metadata = pickle.load(f)
    self.experiment_id = metadata[0]
    self.iteration = metadata[1]
    self.timesteps_total = metadata[2]
    self.time_total_s = metadata[3]
|
https://github.com/ray-project/ray/issues/982
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-4c879b9f1898> in <module>()
5
6 agent = PPOAgent('Pong-v0', DEFAULT_CONFIG)
----> 7 agent.save()
~/Workspace/ray/python/ray/rllib/common.py in save(self)
171 pickle.dump(
172 [self.experiment_id, self.iteration, self.timesteps_total,
--> 173 self.time_total_s],
174 open(checkpoint_path + ".rllib_metadata", "wb"))
175 return checkpoint_path
AttributeError: 'PPOAgent' object has no attribute 'time_total_s'
|
AttributeError
|
def get_sliders(update):
    """Build and display the ipywidgets controls that select a task window.

    The window may be expressed either as a percentage of total elapsed
    time or as a number of tasks, chosen via a dropdown; the widgets keep
    each other in sync through a shared callback.

    Args:
        update: Callback invoked as update(smallest, largest, num_tasks,
            tasks) whenever the selection changes; pass a falsy value to
            build the widgets without querying task profiles.

    Returns:
        Tuple of (start_box, end_box, range_slider, breakdown_opt) widgets.
    """
    # Start_box value indicates the desired start point of queried window.
    start_box = widgets.FloatText(
        description="Start Time:",
        disabled=True,
    )
    # End_box value indicates the desired end point of queried window.
    end_box = widgets.FloatText(
        description="End Time:",
        disabled=True,
    )
    # Percentage slider. Indicates either % of total time or total tasks
    # depending on what breakdown_opt is set to.
    range_slider = widgets.IntRangeSlider(
        value=[0, 100],
        min=0,
        max=100,
        step=1,
        description="%:",
        continuous_update=False,
        orientation="horizontal",
        readout=True,
    )
    # Indicates the number of tasks that the user wants to be returned. Is
    # disabled when the breakdown_opt value is set to total_time_value.
    num_tasks_box = widgets.IntText(description="Num Tasks:", disabled=False)
    # Dropdown bar that lets the user choose between modifying % of total
    # time or total number of tasks.
    breakdown_opt = widgets.Dropdown(
        options=[total_time_value, total_tasks_value],
        value=total_tasks_value,
        description="Selection Options:",
    )
    # Display box for layout.
    total_time_box = widgets.VBox([start_box, end_box])
    # This sets the CSS style display to hide the box.
    total_time_box.layout.display = "none"
    # Initially passed in to the update_wrapper function.
    INIT_EVENT = "INIT"
    # Create instance of context manager to determine whether callback is
    # currently executing
    out_recursion = _EventRecursionContextManager()
    def update_wrapper(event):
        # Feature received a callback, but it shouldn't be executed
        # because the callback was the result of a different feature
        # executing its callback based on user input.
        if not out_recursion.should_recurse:
            return
        # Feature received a callback and it should be executed because
        # the callback was the result of user input.
        with out_recursion:
            smallest, largest, num_tasks = ray.global_state._job_length()
            diff = largest - smallest
            # NOTE(review): identity comparison ("is not") against the int
            # literal 0; works on CPython small ints but "!=" is what is
            # meant -- confirm before relying on it.
            if num_tasks is not 0:
                # Describes the initial values that the slider/text box
                # values should be set to.
                if event == INIT_EVENT:
                    if breakdown_opt.value == total_tasks_value:
                        num_tasks_box.value = -min(10000, num_tasks)
                        range_slider.value = (
                            int(100 - (100.0 * -num_tasks_box.value) / num_tasks),
                            100,
                        )
                    else:
                        low, high = map(lambda x: x / 100.0, range_slider.value)
                        start_box.value = round(diff * low, 2)
                        end_box.value = round(diff * high, 2)
                # Event was triggered by a change in the start_box value.
                elif event["owner"] == start_box:
                    if start_box.value > end_box.value:
                        start_box.value = end_box.value
                    elif start_box.value < 0:
                        start_box.value = 0
                    low, high = range_slider.value
                    range_slider.value = (int((start_box.value * 100.0) / diff), high)
                # Event was triggered by a change in the end_box value.
                elif event["owner"] == end_box:
                    if start_box.value > end_box.value:
                        end_box.value = start_box.value
                    elif end_box.value > diff:
                        end_box.value = diff
                    low, high = range_slider.value
                    range_slider.value = (low, int((end_box.value * 100.0) / diff))
                # Event was triggered by a change in the breakdown options
                # toggle.
                elif event["owner"] == breakdown_opt:
                    if breakdown_opt.value == total_tasks_value:
                        start_box.disabled = True
                        end_box.disabled = True
                        num_tasks_box.disabled = False
                        total_time_box.layout.display = "none"
                        # Make CSS display go back to the default settings.
                        num_tasks_box.layout.display = None
                        num_tasks_box.value = min(10000, num_tasks)
                        range_slider.value = (
                            int(100 - (100.0 * num_tasks_box.value) / num_tasks),
                            100,
                        )
                    else:
                        start_box.disabled = False
                        end_box.disabled = False
                        num_tasks_box.disabled = True
                        # Make CSS display go back to the default settings.
                        total_time_box.layout.display = None
                        num_tasks_box.layout.display = "none"
                        range_slider.value = (
                            int((start_box.value * 100.0) / diff),
                            int((end_box.value * 100.0) / diff),
                        )
                # Event was triggered by a change in the range_slider
                # value.
                elif event["owner"] == range_slider:
                    low, high = map(lambda x: x / 100.0, range_slider.value)
                    if breakdown_opt.value == total_tasks_value:
                        old_low, old_high = event["old"]
                        new_low, new_high = event["new"]
                        if old_low != new_low:
                            range_slider.value = (new_low, 100)
                            num_tasks_box.value = -(100.0 - new_low) / 100.0 * num_tasks
                        else:
                            range_slider.value = (0, new_high)
                            num_tasks_box.value = new_high / 100.0 * num_tasks
                    else:
                        start_box.value = round(diff * low, 2)
                        end_box.value = round(diff * high, 2)
                # Event was triggered by a change in the num_tasks_box
                # value.
                elif event["owner"] == num_tasks_box:
                    if num_tasks_box.value > 0:
                        range_slider.value = (
                            0,
                            int(100 * float(num_tasks_box.value) / num_tasks),
                        )
                    elif num_tasks_box.value < 0:
                        range_slider.value = (
                            100 + int(100 * float(num_tasks_box.value) / num_tasks),
                            100,
                        )
                if not update:
                    return
                diff = largest - smallest
                # Low and high are used to scale the times that are
                # queried to be relative to the absolute time.
                low, high = map(lambda x: x / 100.0, range_slider.value)
                # Queries to task_profiles based on the slider and text
                # box values.
                # (Querying based on the % total amount of time.)
                if breakdown_opt.value == total_time_value:
                    tasks = ray.global_state.task_profiles(
                        start=(smallest + diff * low), end=(smallest + diff * high)
                    )
                # (Querying based on % of total number of tasks that were
                # run.)
                elif breakdown_opt.value == total_tasks_value:
                    if range_slider.value[0] == 0:
                        tasks = ray.global_state.task_profiles(
                            num_tasks=(int(num_tasks * high)), fwd=True
                        )
                    else:
                        tasks = ray.global_state.task_profiles(
                            num_tasks=(int(num_tasks * (high - low))), fwd=False
                        )
                update(smallest, largest, num_tasks, tasks)
    # Get updated values from a slider or text box, and update the rest of
    # them accordingly.
    range_slider.observe(update_wrapper, names="value")
    breakdown_opt.observe(update_wrapper, names="value")
    start_box.observe(update_wrapper, names="value")
    end_box.observe(update_wrapper, names="value")
    num_tasks_box.observe(update_wrapper, names="value")
    # Initializes the sliders
    update_wrapper(INIT_EVENT)
    # Display sliders and search boxes
    display(breakdown_opt, widgets.HBox([range_slider, total_time_box, num_tasks_box]))
    # Return the sliders and text boxes
    return start_box, end_box, range_slider, breakdown_opt
|
def get_sliders(update):
    """Build and display the ipywidgets controls that select a task window.

    The window may be expressed either as a percentage of total elapsed
    time or as a number of tasks, chosen via a dropdown; the widgets keep
    each other in sync through a shared callback.

    Args:
        update: Callback invoked as update(smallest, largest, num_tasks,
            tasks) whenever the selection changes; pass a falsy value to
            build the widgets without querying task profiles.

    Returns:
        Tuple of (start_box, end_box, range_slider, breakdown_opt) widgets.
    """
    # Start_box value indicates the desired start point of queried window.
    start_box = widgets.FloatText(
        description="Start Time:",
        disabled=True,
    )
    # End_box value indicates the desired end point of queried window.
    end_box = widgets.FloatText(
        description="End Time:",
        disabled=True,
    )
    # Percentage slider. Indicates either % of total time or total tasks
    # depending on what breakdown_opt is set to.
    range_slider = widgets.IntRangeSlider(
        value=[70, 100],
        min=0,
        max=100,
        step=1,
        description="%:",
        continuous_update=False,
        orientation="horizontal",
        readout=True,
        # NOTE(review): ".0i%" is not a valid number-format spec for the
        # readout_format trait; ipywidgets raises a TraitError on it (see
        # the traceback recorded for this snippet).
        readout_format=".0i%",
    )
    # Indicates the number of tasks that the user wants to be returned. Is
    # disabled when the breakdown_opt value is set to total_time_value.
    num_tasks_box = widgets.IntText(description="Num Tasks:", disabled=False)
    # Dropdown bar that lets the user choose between modifying % of total
    # time or total number of tasks.
    breakdown_opt = widgets.Dropdown(
        options=[total_time_value, total_tasks_value],
        value=total_tasks_value,
        description="Selection Options:",
    )
    # Display box for layout.
    total_time_box = widgets.VBox([start_box, end_box])
    # This sets the CSS style display to hide the box.
    total_time_box.layout.display = "none"
    # Initially passed in to the update_wrapper function.
    INIT_EVENT = "INIT"
    # Create instance of context manager to determine whether callback is
    # currently executing
    out_recursion = _EventRecursionContextManager()
    def update_wrapper(event):
        # Feature received a callback, but it shouldn't be executed
        # because the callback was the result of a different feature
        # executing its callback based on user input.
        if not out_recursion.should_recurse:
            return
        # Feature received a callback and it should be executed because
        # the callback was the result of user input.
        with out_recursion:
            smallest, largest, num_tasks = ray.global_state._job_length()
            diff = largest - smallest
            # NOTE(review): identity comparison ("is not") against the int
            # literal 0; works on CPython small ints but "!=" is what is
            # meant -- confirm before relying on it.
            if num_tasks is not 0:
                # Describes the initial values that the slider/text box
                # values should be set to.
                if event == INIT_EVENT:
                    if breakdown_opt.value == total_tasks_value:
                        num_tasks_box.value = -min(10000, num_tasks)
                        range_slider.value = (
                            int(100 - (100.0 * -num_tasks_box.value) / num_tasks),
                            100,
                        )
                    else:
                        low, high = map(lambda x: x / 100.0, range_slider.value)
                        start_box.value = round(diff * low, 2)
                        end_box.value = round(diff * high, 2)
                # Event was triggered by a change in the start_box value.
                elif event["owner"] == start_box:
                    if start_box.value > end_box.value:
                        start_box.value = end_box.value
                    elif start_box.value < 0:
                        start_box.value = 0
                    low, high = range_slider.value
                    range_slider.value = (int((start_box.value * 100.0) / diff), high)
                # Event was triggered by a change in the end_box value.
                elif event["owner"] == end_box:
                    if start_box.value > end_box.value:
                        end_box.value = start_box.value
                    elif end_box.value > diff:
                        end_box.value = diff
                    low, high = range_slider.value
                    range_slider.value = (low, int((end_box.value * 100.0) / diff))
                # Event was triggered by a change in the breakdown options
                # toggle.
                elif event["owner"] == breakdown_opt:
                    if breakdown_opt.value == total_tasks_value:
                        start_box.disabled = True
                        end_box.disabled = True
                        num_tasks_box.disabled = False
                        total_time_box.layout.display = "none"
                        # Make CSS display go back to the default settings.
                        num_tasks_box.layout.display = None
                        num_tasks_box.value = min(10000, num_tasks)
                        range_slider.value = (
                            int(100 - (100.0 * num_tasks_box.value) / num_tasks),
                            100,
                        )
                    else:
                        start_box.disabled = False
                        end_box.disabled = False
                        num_tasks_box.disabled = True
                        # Make CSS display go back to the default settings.
                        total_time_box.layout.display = None
                        num_tasks_box.layout.display = "none"
                        range_slider.value = (
                            int((start_box.value * 100.0) / diff),
                            int((end_box.value * 100.0) / diff),
                        )
                # Event was triggered by a change in the range_slider
                # value.
                elif event["owner"] == range_slider:
                    low, high = map(lambda x: x / 100.0, range_slider.value)
                    if breakdown_opt.value == total_tasks_value:
                        old_low, old_high = event["old"]
                        new_low, new_high = event["new"]
                        if old_low != new_low:
                            range_slider.value = (new_low, 100)
                            num_tasks_box.value = -(100.0 - new_low) / 100.0 * num_tasks
                        else:
                            range_slider.value = (0, new_high)
                            num_tasks_box.value = new_high / 100.0 * num_tasks
                    else:
                        start_box.value = round(diff * low, 2)
                        end_box.value = round(diff * high, 2)
                # Event was triggered by a change in the num_tasks_box
                # value.
                elif event["owner"] == num_tasks_box:
                    if num_tasks_box.value > 0:
                        range_slider.value = (
                            0,
                            int(100 * float(num_tasks_box.value) / num_tasks),
                        )
                    elif num_tasks_box.value < 0:
                        range_slider.value = (
                            100 + int(100 * float(num_tasks_box.value) / num_tasks),
                            100,
                        )
                if not update:
                    return
                diff = largest - smallest
                # Low and high are used to scale the times that are
                # queried to be relative to the absolute time.
                low, high = map(lambda x: x / 100.0, range_slider.value)
                # Queries to task_profiles based on the slider and text
                # box values.
                # (Querying based on the % total amount of time.)
                if breakdown_opt.value == total_time_value:
                    tasks = ray.global_state.task_profiles(
                        start=(smallest + diff * low), end=(smallest + diff * high)
                    )
                # (Querying based on % of total number of tasks that were
                # run.)
                elif breakdown_opt.value == total_tasks_value:
                    if range_slider.value[0] == 0:
                        tasks = ray.global_state.task_profiles(
                            num_tasks=(int(num_tasks * high)), fwd=True
                        )
                    else:
                        tasks = ray.global_state.task_profiles(
                            num_tasks=(int(num_tasks * (high - low))), fwd=False
                        )
                update(smallest, largest, num_tasks, tasks)
    # Get updated values from a slider or text box, and update the rest of
    # them accordingly.
    range_slider.observe(update_wrapper, names="value")
    breakdown_opt.observe(update_wrapper, names="value")
    start_box.observe(update_wrapper, names="value")
    end_box.observe(update_wrapper, names="value")
    num_tasks_box.observe(update_wrapper, names="value")
    # Initializes the sliders
    update_wrapper(INIT_EVENT)
    # Display sliders and search boxes
    display(breakdown_opt, widgets.HBox([range_slider, total_time_box, num_tasks_box]))
    # Return the sliders and text boxes
    return start_box, end_box, range_slider, breakdown_opt
https://github.com/ray-project/ray/issues/903
|
---------------------------------------------------------------------------
TraitError Traceback (most recent call last)
<ipython-input-4-bd7fdecbffea> in <module>()
----> 1 ui.task_timeline()
~/Workspace/ray/python/ray/experimental/ui.py in task_timeline()
376 label_options = widgets.Label(value='View options:',
377 layout=widgets.Layout(width='100px'))
--> 378 start_box, end_box, range_slider, time_opt = get_sliders(False)
379 display(widgets.HBox([label_tasks, task_dep, label_objects, obj_dep]))
380 display(widgets.HBox([label_options, breakdown_opt]))
~/Workspace/ray/python/ray/experimental/ui.py in get_sliders(update)
58 orientation="horizontal",
59 readout=True,
---> 60 readout_format=".0i%",
61 )
62
~/anaconda3/lib/python3.6/site-packages/ipywidgets/widgets/widget_int.py in __init__(self, *args, **kwargs)
245 kwargs['value'] = (0.75 * min + 0.25 * max,
246 0.25 * min + 0.75 * max)
--> 247 super(_BoundedIntRange, self).__init__(*args, **kwargs)
248
249 @validate('min', 'max')
~/anaconda3/lib/python3.6/site-packages/ipywidgets/widgets/widget_int.py in __init__(self, value, **kwargs)
73 if value is not None:
74 kwargs['value'] = value
---> 75 super(_Int, self).__init__(**kwargs)
76
77
~/anaconda3/lib/python3.6/site-packages/ipywidgets/widgets/widget.py in __init__(self, **kwargs)
409 """Public constructor"""
410 self._model_id = kwargs.pop('model_id', None)
--> 411 super(Widget, self).__init__(**kwargs)
412
413 Widget._call_widget_constructed(self)
~/anaconda3/lib/python3.6/site-packages/traitlets/traitlets.py in __init__(self, *args, **kwargs)
995 for key, value in kwargs.items():
996 if self.has_trait(key):
--> 997 setattr(self, key, value)
998 else:
999 # passthrough args that don't set traits to super
~/anaconda3/lib/python3.6/site-packages/traitlets/traitlets.py in __set__(self, obj, value)
583 raise TraitError('The "%s" trait is read-only.' % self.name)
584 else:
--> 585 self.set(obj, value)
586
587 def _validate(self, obj, value):
~/anaconda3/lib/python3.6/site-packages/traitlets/traitlets.py in set(self, obj, value)
557
558 def set(self, obj, value):
--> 559 new_value = self._validate(obj, value)
560 try:
561 old_value = obj._trait_values[self.name]
~/anaconda3/lib/python3.6/site-packages/traitlets/traitlets.py in _validate(self, obj, value)
589 return value
590 if hasattr(self, 'validate'):
--> 591 value = self.validate(obj, value)
592 if obj._cross_validation_lock is False:
593 value = self._cross_validate(obj, value)
~/anaconda3/lib/python3.6/site-packages/ipywidgets/widgets/trait_types.py in validate(self, obj, value)
170 re_match = _number_format_re.match(value)
171 if re_match is None:
--> 172 self.error(obj, value)
173 else:
174 format_type = re_match.group(9)
~/anaconda3/lib/python3.6/site-packages/traitlets/traitlets.py in error(self, obj, value)
623 e = "The '%s' trait must be %s, but a value of %r was specified." \
624 % (self.name, self.info(), repr_type(value))
--> 625 raise TraitError(e)
626
627 def get_metadata(self, key, default=None):
TraitError: The 'readout_format' trait of an IntRangeSlider instance must be a valid number format, but a value of '.0i%' <class 'str'> was specified.
|
TraitError
|
def dump_catapult_trace(
    self, path, task_info, breakdowns=True, task_dep=True, obj_dep=True
):
    """Dump task profiling information to a file.
    This information can be viewed as a timeline of profiling information
    by going to chrome://tracing in the chrome web browser and loading the
    appropriate file.
    Args:
        path: The filepath to dump the profiling information to.
        task_info: The task info to use to generate the trace. Should be
            the output of ray.global_state.task_profiles().
        breakdowns: Boolean indicating whether to break down the tasks into
            more fine-grained segments.
        task_dep: Boolean indicating whether or not task submission edges
            should be included in the trace.
        obj_dep: Boolean indicating whether or not object dependency edges
            should be included in the trace.
    """
    workers = self.workers()
    # Find the earliest event time across all tasks; it becomes the trace's
    # time origin so the rendered timeline starts at t=0.
    start_time = None
    for info in task_info.values():
        task_start = min(self._get_times(info))
        if not start_time or task_start < start_time:
            start_time = task_start
    # Convert seconds to the integer microseconds that the catapult trace
    # format expects for "ts" and "dur" fields.
    def micros(ts):
        return int(1e6 * ts)
    # Microseconds relative to the trace's time origin.
    def micros_rel(ts):
        return micros(ts - start_time)
    task_profiles = self.task_profiles(start=0, end=time.time())
    task_table = self.task_table()
    # Counts how many times each object ID has appeared as a dependency so
    # that every flow-event pair gets a unique "id".
    seen_obj = {}
    full_trace = []
    for task_id, info in task_info.items():
        worker = workers[info["worker_id"]]
        task_t_info = task_table[task_id]
        # The total_info dictionary is what is displayed when selecting a
        # task in the timeline. We copy the task spec so that we don't
        # modify it in place since we will use the original values later.
        total_info = copy.copy(task_table[task_id]["TaskSpec"])
        total_info["Args"] = [
            oid.hex() if isinstance(oid, ray.local_scheduler.ObjectID) else oid
            for oid in task_t_info["TaskSpec"]["Args"]
        ]
        total_info["ReturnObjectIDs"] = [
            oid.hex() for oid in task_t_info["TaskSpec"]["ReturnObjectIDs"]
        ]
        total_info["LocalSchedulerID"] = task_t_info["LocalSchedulerID"]
        total_info["get_arguments"] = (
            info["get_arguments_end"] - info["get_arguments_start"]
        )
        total_info["execute"] = info["execute_end"] - info["execute_start"]
        total_info["store_outputs"] = (
            info["store_outputs_end"] - info["store_outputs_start"]
        )
        total_info["function_name"] = info["function_name"]
        total_info["worker_id"] = info["worker_id"]
        parent_info = task_info.get(task_table[task_id]["TaskSpec"]["ParentTaskID"])
        worker = workers[info["worker_id"]]
        # The catapult trace format documentation can be found here:
        # https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview # noqa: E501
        if breakdowns:
            # Emit one complete ("X") event per recorded phase of the task:
            # argument fetch, execution, and output storage.
            if "get_arguments_end" in info:
                get_args_trace = {
                    "cat": "get_arguments",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":get_arguments",
                    "args": total_info,
                    "dur": micros(
                        info["get_arguments_end"] - info["get_arguments_start"]
                    ),
                    "cname": "rail_idle",
                }
                full_trace.append(get_args_trace)
            if "store_outputs_end" in info:
                outputs_trace = {
                    "cat": "store_outputs",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["store_outputs_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":store_outputs",
                    "args": total_info,
                    "dur": micros(
                        info["store_outputs_end"] - info["store_outputs_start"]
                    ),
                    "cname": "thread_state_runnable",
                }
                full_trace.append(outputs_trace)
            if "execute_end" in info:
                execute_trace = {
                    "cat": "execute",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["execute_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":execute",
                    "args": total_info,
                    "dur": micros(info["execute_end"] - info["execute_start"]),
                    "cname": "rail_animation",
                }
                full_trace.append(execute_trace)
        else:
            if parent_info:
                parent_worker = workers[parent_info["worker_id"]]
                parent_times = self._get_times(parent_info)
                # Flow-start ("s") event on the parent task's timeline.
                parent = {
                    "cat": "submit_task",
                    "pid": "Node " + parent_worker["node_ip_address"],
                    "tid": parent_info["worker_id"],
                    "ts": micros_rel(
                        task_profiles[task_table[task_id]["TaskSpec"]["ParentTaskID"]][
                            "get_arguments_start"
                        ]
                    ),
                    "ph": "s",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
                }
                full_trace.append(parent)
                # Matching flow-finish ("f") event on this task's timeline;
                # its id uses parent_times, so it only exists when a parent
                # profile was found.
                task_trace = {
                    "cat": "submit_task",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "f",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (info["worker_id"] + str(micros(min(parent_times)))),
                    "bp": "e",
                    "cname": "olive",
                }
                full_trace.append(task_trace)
        # One complete event spanning the whole task, from argument fetch
        # through output storage.
        task = {
            "cat": "task",
            "pid": "Node " + worker["node_ip_address"],
            "tid": info["worker_id"],
            "id": task_id,
            "ts": micros_rel(info["get_arguments_start"]),
            "ph": "X",
            "name": info["function_name"],
            "args": total_info,
            "dur": micros(info["store_outputs_end"] - info["get_arguments_start"]),
            "cname": "thread_state_runnable",
        }
        full_trace.append(task)
        if task_dep:
            if parent_info:
                parent_worker = workers[parent_info["worker_id"]]
                parent_times = self._get_times(parent_info)
                parent = {
                    "cat": "submit_task",
                    "pid": "Node " + parent_worker["node_ip_address"],
                    "tid": parent_info["worker_id"],
                    "ts": micros_rel(
                        task_profiles[task_table[task_id]["TaskSpec"]["ParentTaskID"]][
                            "get_arguments_start"
                        ]
                    ),
                    "ph": "s",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
                }
                full_trace.append(parent)
                task_trace = {
                    "cat": "submit_task",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "f",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (info["worker_id"] + str(micros(min(parent_times)))),
                    "bp": "e",
                }
                full_trace.append(task_trace)
        if obj_dep:
            args = task_table[task_id]["TaskSpec"]["Args"]
            for arg in args:
                # Don't visualize arguments that are not object IDs.
                if isinstance(arg, ray.local_scheduler.ObjectID):
                    object_info = self._object_table(arg)
                    # Don't visualize objects that were created by calls to
                    # put.
                    if not object_info["IsPut"]:
                        if arg not in seen_obj:
                            seen_obj[arg] = 0
                        seen_obj[arg] += 1
                        # NOTE(review): second _object_table lookup for the
                        # same arg; object_info["TaskID"] would avoid it.
                        owner_task = self._object_table(arg)["TaskID"]
                        owner_worker = workers[task_profiles[owner_task]["worker_id"]]
                        # Adding/subtracting 2 to the time associated with
                        # the beginning/ending of the flow event is
                        # necessary to make the flow events show up
                        # reliably. When these times are exact, this is
                        # presumably an edge case, and catapult doesn't
                        # recognize that there is a duration event at that
                        # exact point in time that the flow event should be
                        # bound to. This issue is solved by adding the 2 ms
                        # to the start/end time of the flow event, which
                        # guarantees overlap with the duration event that
                        # it's associated with, and the flow event
                        # therefore always gets drawn.
                        owner = {
                            "cat": "obj_dependency",
                            "pid": ("Node " + owner_worker["node_ip_address"]),
                            "tid": task_profiles[owner_task]["worker_id"],
                            "ts": micros_rel(
                                task_profiles[owner_task]["store_outputs_end"]
                            )
                            - 2,
                            "ph": "s",
                            "name": "ObjectDependency",
                            "args": {},
                            "bp": "e",
                            "cname": "cq_build_attempt_failed",
                            "id": "obj" + str(arg) + str(seen_obj[arg]),
                        }
                        full_trace.append(owner)
                        dependent = {
                            "cat": "obj_dependency",
                            "pid": "Node " + worker["node_ip_address"],
                            "tid": info["worker_id"],
                            "ts": micros_rel(info["get_arguments_start"]) + 2,
                            "ph": "f",
                            "name": "ObjectDependency",
                            "args": {},
                            "cname": "cq_build_attempt_failed",
                            "bp": "e",
                            "id": "obj" + str(arg) + str(seen_obj[arg]),
                        }
                        full_trace.append(dependent)
    print("Creating JSON {}/{}".format(len(full_trace), len(task_info)))
    with open(path, "w") as outfile:
        json.dump(full_trace, outfile)
|
def dump_catapult_trace(
    self, path, task_info, breakdowns=True, task_dep=True, obj_dep=True
):
    """Dump task profiling information to a file.
    This information can be viewed as a timeline of profiling information
    by going to chrome://tracing in the chrome web browser and loading the
    appropriate file.
    Args:
        path: The filepath to dump the profiling information to.
        task_info: The task info to use to generate the trace. Should be
            the output of ray.global_state.task_profiles().
        breakdowns: Boolean indicating whether to break down the tasks into
            more fine-grained segments.
        task_dep: Boolean indicating whether or not task submission edges
            should be included in the trace.
        obj_dep: Boolean indicating whether or not object dependency edges
            should be included in the trace.
    """
    workers = self.workers()
    # The earliest event time across all tasks becomes the trace's time
    # origin so the rendered timeline starts at t=0.
    start_time = None
    for info in task_info.values():
        task_start = min(self._get_times(info))
        if not start_time or task_start < start_time:
            start_time = task_start

    def micros(ts):
        # Catapult expects integer microsecond timestamps.
        return int(1e6 * ts)

    def micros_rel(ts):
        # Microseconds relative to the trace's time origin.
        return micros(ts - start_time)

    task_profiles = self.task_profiles(start=0, end=time.time())
    task_table = self.task_table()
    # Counts how many times each object ID has appeared as a dependency so
    # that every flow-event pair gets a unique "id".
    seen_obj = {}
    full_trace = []
    for task_id, info in task_info.items():
        worker = workers[info["worker_id"]]
        task_t_info = task_table[task_id]
        # total_info is what is displayed when selecting a task in the
        # timeline. Copy the task spec FIRST and then layer the timing
        # fields on top. (Previously the timing fields were assigned first
        # and then silently clobbered by the copy of the spec.) Copying
        # also avoids mutating the task_table entry in place, which keeps
        # the "Args" list usable as ObjectIDs for the obj_dep pass below.
        total_info = copy.copy(task_t_info["TaskSpec"])
        total_info["Args"] = [
            oid.hex() if isinstance(oid, ray.local_scheduler.ObjectID) else oid
            for oid in task_t_info["TaskSpec"]["Args"]
        ]
        total_info["ReturnObjectIDs"] = [
            oid.hex() for oid in task_t_info["TaskSpec"]["ReturnObjectIDs"]
        ]
        total_info["LocalSchedulerID"] = task_t_info["LocalSchedulerID"]
        total_info["get_arguments"] = (
            info["get_arguments_end"] - info["get_arguments_start"]
        )
        total_info["execute"] = info["execute_end"] - info["execute_start"]
        total_info["store_outputs"] = (
            info["store_outputs_end"] - info["store_outputs_start"]
        )
        total_info["function_name"] = info["function_name"]
        total_info["worker_id"] = info["worker_id"]
        parent_info = task_info.get(task_t_info["TaskSpec"]["ParentTaskID"])
        # The catapult trace format documentation can be found here:
        # https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview # noqa: E501
        if breakdowns:
            # Emit one complete ("X") event per recorded phase of the task.
            if "get_arguments_end" in info:
                get_args_trace = {
                    "cat": "get_arguments",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":get_arguments",
                    "args": total_info,
                    "dur": micros(
                        info["get_arguments_end"] - info["get_arguments_start"]
                    ),
                    "cname": "rail_idle",
                }
                full_trace.append(get_args_trace)
            if "store_outputs_end" in info:
                outputs_trace = {
                    "cat": "store_outputs",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["store_outputs_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":store_outputs",
                    "args": total_info,
                    "dur": micros(
                        info["store_outputs_end"] - info["store_outputs_start"]
                    ),
                    "cname": "thread_state_runnable",
                }
                full_trace.append(outputs_trace)
            if "execute_end" in info:
                execute_trace = {
                    "cat": "execute",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "id": task_id,
                    "ts": micros_rel(info["execute_start"]),
                    "ph": "X",
                    "name": info["function_name"] + ":execute",
                    "args": total_info,
                    "dur": micros(info["execute_end"] - info["execute_start"]),
                    "cname": "rail_animation",
                }
                full_trace.append(execute_trace)
        else:
            if parent_info:
                parent_worker = workers[parent_info["worker_id"]]
                parent_times = self._get_times(parent_info)
                # Flow-start ("s") event on the parent task's timeline.
                parent = {
                    "cat": "submit_task",
                    "pid": "Node " + parent_worker["node_ip_address"],
                    "tid": parent_info["worker_id"],
                    "ts": micros_rel(
                        task_profiles[task_t_info["TaskSpec"]["ParentTaskID"]][
                            "get_arguments_start"
                        ]
                    ),
                    "ph": "s",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
                }
                full_trace.append(parent)
                # Matching flow-finish ("f") event; it uses parent_times, so
                # it must stay inside the parent_info guard.
                task_trace = {
                    "cat": "submit_task",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "f",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (info["worker_id"] + str(micros(min(parent_times)))),
                    "bp": "e",
                    "cname": "olive",
                }
                full_trace.append(task_trace)
        # One complete event spanning the whole task.
        task = {
            "cat": "task",
            "pid": "Node " + worker["node_ip_address"],
            "tid": info["worker_id"],
            "id": task_id,
            "ts": micros_rel(info["get_arguments_start"]),
            "ph": "X",
            "name": info["function_name"],
            "args": total_info,
            "dur": micros(info["store_outputs_end"] - info["get_arguments_start"]),
            "cname": "thread_state_runnable",
        }
        full_trace.append(task)
        if task_dep:
            if parent_info:
                parent_worker = workers[parent_info["worker_id"]]
                parent_times = self._get_times(parent_info)
                parent = {
                    "cat": "submit_task",
                    "pid": "Node " + parent_worker["node_ip_address"],
                    "tid": parent_info["worker_id"],
                    "ts": micros_rel(
                        task_profiles[task_t_info["TaskSpec"]["ParentTaskID"]][
                            "get_arguments_start"
                        ]
                    ),
                    "ph": "s",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (parent_info["worker_id"] + str(micros(min(parent_times)))),
                }
                full_trace.append(parent)
                task_trace = {
                    "cat": "submit_task",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "ts": micros_rel(info["get_arguments_start"]),
                    "ph": "f",
                    "name": "SubmitTask",
                    "args": {},
                    "id": (info["worker_id"] + str(micros(min(parent_times)))),
                    "bp": "e",
                }
                full_trace.append(task_trace)
        if obj_dep:
            args = task_t_info["TaskSpec"]["Args"]
            for arg in args:
                # Only ObjectID arguments represent data dependencies.
                # BUGFIX: the check was previously inverted (it skipped
                # ObjectIDs and processed literal arguments), which passed
                # raw values such as ints to _object_table and raised
                # "TypeError: argument should be bytes, buffer or ASCII
                # string, not 'int'" in hex_to_binary.
                if not isinstance(arg, ray.local_scheduler.ObjectID):
                    continue
                object_info = self._object_table(arg)
                # Don't visualize objects that were created by calls to put.
                if object_info["IsPut"]:
                    continue
                if arg not in seen_obj:
                    seen_obj[arg] = 0
                seen_obj[arg] += 1
                # Reuse object_info instead of a redundant second
                # _object_table lookup.
                owner_task = object_info["TaskID"]
                owner_worker = workers[task_profiles[owner_task]["worker_id"]]
                # Adding/subtracting 2 to the time associated with the
                # beginning/ending of the flow event is necessary to
                # make the flow events show up reliably. When these times
                # are exact, this is presumably an edge case, and catapult
                # doesn't recognize that there is a duration event at that
                # exact point in time that the flow event should be bound
                # to. This issue is solved by adding the 2 ms to the
                # start/end time of the flow event, which guarantees
                # overlap with the duration event that it's associated
                # with, and the flow event therefore always gets drawn.
                owner = {
                    "cat": "obj_dependency",
                    "pid": "Node " + owner_worker["node_ip_address"],
                    "tid": task_profiles[owner_task]["worker_id"],
                    "ts": micros_rel(task_profiles[owner_task]["store_outputs_end"])
                    - 2,
                    "ph": "s",
                    "name": "ObjectDependency",
                    "args": {},
                    "bp": "e",
                    "cname": "cq_build_attempt_failed",
                    "id": "obj" + str(arg) + str(seen_obj[arg]),
                }
                full_trace.append(owner)
                dependent = {
                    "cat": "obj_dependency",
                    "pid": "Node " + worker["node_ip_address"],
                    "tid": info["worker_id"],
                    "ts": micros_rel(info["get_arguments_start"]) + 2,
                    "ph": "f",
                    "name": "ObjectDependency",
                    "args": {},
                    "cname": "cq_build_attempt_failed",
                    "bp": "e",
                    "id": "obj" + str(arg) + str(seen_obj[arg]),
                }
                full_trace.append(dependent)
    print("Creating JSON {}/{}".format(len(full_trace), len(task_info)))
    with open(path, "w") as outfile:
        json.dump(full_trace, outfile)
|
https://github.com/ray-project/ray/issues/835
|
1 tasks to trace
Dumping task profiling data to /var/folders/15/54jf68993rd7753c5fms424r0000gn/T/tmpnsl3ltkw.json
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/Workspace/ray/python/ray/experimental/ui.py in handle_submit(sender)
402 breakdowns=breakdown,
403 obj_dep=obj_dep.value,
--> 404 task_dep=task_dep.value)
405
406 print("Opening html file in browser...")
~/Workspace/ray/python/ray/experimental/state.py in dump_catapult_trace(self, path, task_info, breakdowns, task_dep, obj_dep)
738 if isinstance(arg, ray.local_scheduler.ObjectID):
739 continue
--> 740 object_info = self._object_table(arg)
741 if object_info["IsPut"]:
742 continue
~/Workspace/ray/python/ray/experimental/state.py in _object_table(self, object_id)
176 # Allow the argument to be either an ObjectID or a hex string.
177 if not isinstance(object_id, ray.local_scheduler.ObjectID):
--> 178 object_id = ray.local_scheduler.ObjectID(hex_to_binary(object_id))
179
180 # Return information about a single object ID.
~/Workspace/ray/python/ray/utils.py in hex_to_binary(hex_identifier)
59
60 def hex_to_binary(hex_identifier):
---> 61 return binascii.unhexlify(hex_identifier)
62
63
TypeError: argument should be bytes, buffer or ASCII string, not 'int'
|
TypeError
|
def start_objstore(
    node_ip_address,
    redis_address,
    object_manager_port=None,
    store_stdout_file=None,
    store_stderr_file=None,
    manager_stdout_file=None,
    manager_stderr_file=None,
    objstore_memory=None,
    cleanup=True,
):
    """This method starts an object store process.
    Args:
        node_ip_address (str): The IP address of the node running the object
            store.
        redis_address (str): The address of the Redis instance to connect to.
        object_manager_port (int): The port to use for the object manager. If
            this is not provided, one will be generated randomly.
        store_stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        store_stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        manager_stdout_file: A file handle opened for writing to redirect
            stdout to. If no redirection should happen, then this should be
            None.
        manager_stderr_file: A file handle opened for writing to redirect
            stderr to. If no redirection should happen, then this should be
            None.
        objstore_memory: The amount of memory (in bytes) to start the object
            store with.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by serices.cleanup() when the
            Python process that imported services exits.
    Return:
        A tuple of the Plasma store socket name, the Plasma manager socket
        name, and the plasma manager port.
    """
    if objstore_memory is None:
        # Compute a fraction of the system memory for the Plasma store to use.
        system_memory = psutil.virtual_memory().total
        if sys.platform == "linux" or sys.platform == "linux2":
            # On linux we use /dev/shm, its size is half the size of the
            # physical memory. To not overflow it, we set the plasma memory
            # limit to 0.4 times the size of the physical memory.
            objstore_memory = int(system_memory * 0.4)
            # Compare the requested memory size to the memory available in
            # /dev/shm.
            shm_fd = os.open("/dev/shm", os.O_RDONLY)
            try:
                shm_fs_stats = os.fstatvfs(shm_fd)
                # The value shm_fs_stats.f_bsize is the block size and the
                # value shm_fs_stats.f_bavail is the number of available
                # blocks.
                shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
                if objstore_memory > shm_avail:
                    print(
                        "Warning: Reducing object store memory because "
                        "/dev/shm has only {} bytes available. You may be "
                        "able to free up space by deleting files in "
                        "/dev/shm. If you are inside a Docker container, "
                        "you may need to pass an argument with the flag "
                        "'--shm-size' to 'docker run'.".format(shm_avail)
                    )
                    objstore_memory = int(shm_avail * 0.8)
            finally:
                # Always release the descriptor, even if fstatvfs fails.
                os.close(shm_fd)
        else:
            # Non-Linux platforms have no /dev/shm limit to respect.
            objstore_memory = int(system_memory * 0.8)
    # Start the Plasma store.
    plasma_store_name, p1 = ray.plasma.start_plasma_store(
        plasma_store_memory=objstore_memory,
        use_profiler=RUN_PLASMA_STORE_PROFILER,
        stdout_file=store_stdout_file,
        stderr_file=store_stderr_file,
    )
    # Start the plasma manager.
    if object_manager_port is not None:
        # A specific port was requested; do not retry with other ports.
        (plasma_manager_name, p2, plasma_manager_port) = (
            ray.plasma.start_plasma_manager(
                plasma_store_name,
                redis_address,
                plasma_manager_port=object_manager_port,
                node_ip_address=node_ip_address,
                num_retries=1,
                run_profiler=RUN_PLASMA_MANAGER_PROFILER,
                stdout_file=manager_stdout_file,
                stderr_file=manager_stderr_file,
            )
        )
        assert plasma_manager_port == object_manager_port
    else:
        (plasma_manager_name, p2, plasma_manager_port) = (
            ray.plasma.start_plasma_manager(
                plasma_store_name,
                redis_address,
                node_ip_address=node_ip_address,
                run_profiler=RUN_PLASMA_MANAGER_PROFILER,
                stdout_file=manager_stdout_file,
                stderr_file=manager_stderr_file,
            )
        )
    if cleanup:
        # Register both child processes so services.cleanup() can terminate
        # them when the importing Python process exits (local mode only).
        all_processes[PROCESS_TYPE_PLASMA_STORE].append(p1)
        all_processes[PROCESS_TYPE_PLASMA_MANAGER].append(p2)
    record_log_files_in_redis(
        redis_address,
        node_ip_address,
        [
            store_stdout_file,
            store_stderr_file,
            manager_stdout_file,
            manager_stderr_file,
        ],
    )
    return ObjectStoreAddress(
        plasma_store_name, plasma_manager_name, plasma_manager_port
    )
|
def start_objstore(
    node_ip_address,
    redis_address,
    object_manager_port=None,
    store_stdout_file=None,
    store_stderr_file=None,
    manager_stdout_file=None,
    manager_stderr_file=None,
    cleanup=True,
    objstore_memory=None,
):
    """This method starts an object store process.
    Args:
        node_ip_address (str): The IP address of the node running the object
            store.
        redis_address (str): The address of the Redis instance to connect to.
        object_manager_port (int): The port to use for the object manager. If
            this is not provided, one will be generated randomly.
        store_stdout_file: A file handle opened for writing to redirect stdout
            to. If no redirection should happen, then this should be None.
        store_stderr_file: A file handle opened for writing to redirect stderr
            to. If no redirection should happen, then this should be None.
        manager_stdout_file: A file handle opened for writing to redirect
            stdout to. If no redirection should happen, then this should be
            None.
        manager_stderr_file: A file handle opened for writing to redirect
            stderr to. If no redirection should happen, then this should be
            None.
        cleanup (bool): True if using Ray in local mode. If cleanup is true,
            then this process will be killed by serices.cleanup() when the
            Python process that imported services exits.
        objstore_memory: The amount of memory (in bytes) to start the object
            store with.
    Return:
        A tuple of the Plasma store socket name, the Plasma manager socket
        name, and the plasma manager port.
    """
    if objstore_memory is None:
        # Compute a fraction of the system memory for the Plasma store to use.
        system_memory = psutil.virtual_memory().total
        if sys.platform == "linux" or sys.platform == "linux2":
            # On linux we use /dev/shm, its size is half the size of the
            # physical memory. To not overflow it, we set the plasma memory
            # limit to 0.4 times the size of the physical memory.
            objstore_memory = int(system_memory * 0.4)
            # Compare the requested memory size to the memory available in
            # /dev/shm.
            shm_fd = os.open("/dev/shm", os.O_RDONLY)
            try:
                shm_fs_stats = os.fstatvfs(shm_fd)
                # The value shm_fs_stats.f_bsize is the block size and the
                # value shm_fs_stats.f_bavail is the number of available
                # blocks.
                shm_avail = shm_fs_stats.f_bsize * shm_fs_stats.f_bavail
                if objstore_memory > shm_avail:
                    print(
                        "Warning: Reducing object store memory because "
                        "/dev/shm has only {} bytes available. You may be "
                        "able to free up space by deleting files in "
                        "/dev/shm. If you are inside a Docker container, "
                        "you may need to pass an argument with the flag "
                        "'--shm-size' to 'docker run'.".format(shm_avail)
                    )
                    objstore_memory = int(shm_avail * 0.8)
            finally:
                # Always release the descriptor, even if fstatvfs fails.
                os.close(shm_fd)
        else:
            # Non-Linux platforms have no /dev/shm limit to respect.
            objstore_memory = int(system_memory * 0.8)
    # Start the Plasma store.
    plasma_store_name, p1 = ray.plasma.start_plasma_store(
        plasma_store_memory=objstore_memory,
        use_profiler=RUN_PLASMA_STORE_PROFILER,
        stdout_file=store_stdout_file,
        stderr_file=store_stderr_file,
    )
    # Start the plasma manager.
    if object_manager_port is not None:
        # A specific port was requested; do not retry with other ports.
        (plasma_manager_name, p2, plasma_manager_port) = (
            ray.plasma.start_plasma_manager(
                plasma_store_name,
                redis_address,
                plasma_manager_port=object_manager_port,
                node_ip_address=node_ip_address,
                num_retries=1,
                run_profiler=RUN_PLASMA_MANAGER_PROFILER,
                stdout_file=manager_stdout_file,
                stderr_file=manager_stderr_file,
            )
        )
        assert plasma_manager_port == object_manager_port
    else:
        (plasma_manager_name, p2, plasma_manager_port) = (
            ray.plasma.start_plasma_manager(
                plasma_store_name,
                redis_address,
                node_ip_address=node_ip_address,
                run_profiler=RUN_PLASMA_MANAGER_PROFILER,
                stdout_file=manager_stdout_file,
                stderr_file=manager_stderr_file,
            )
        )
    if cleanup:
        # Register both child processes so services.cleanup() can terminate
        # them when the importing Python process exits (local mode only).
        all_processes[PROCESS_TYPE_PLASMA_STORE].append(p1)
        all_processes[PROCESS_TYPE_PLASMA_MANAGER].append(p2)
    record_log_files_in_redis(
        redis_address,
        node_ip_address,
        [
            store_stdout_file,
            store_stderr_file,
            manager_stdout_file,
            manager_stderr_file,
        ],
    )
    return ObjectStoreAddress(
        plasma_store_name, plasma_manager_name, plasma_manager_port
    )
|
https://github.com/ray-project/ray/issues/787
|
======================================================================
FAIL: testPutErrors (__main__.ReconstructionTestsMultinode)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/stress_tests.py", line 505, in testPutErrors
errors = self.wait_for_errors(error_check)
File "test/stress_tests.py", line 362, in wait_for_errors
self.assertTrue(error_check(errors))
AssertionError: False is not true
|
AssertionError
|
def start_ray_processes(
    address_info=None,
    node_ip_address="127.0.0.1",
    redis_port=None,
    num_workers=None,
    num_local_schedulers=1,
    object_store_memory=None,
    num_redis_shards=1,
    worker_path=None,
    cleanup=True,
    redirect_output=False,
    include_global_scheduler=False,
    include_log_monitor=False,
    include_webui=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
):
    """Helper method to start Ray processes.
    Args:
        address_info (dict): A dictionary with address information for
            processes that have already been started. If provided, address_info
            will be modified to include processes that are newly started.
        node_ip_address (str): The IP address of this node.
        redis_port (int): The port that the primary Redis shard should listen
            to. If None, then a random port will be chosen. If the key
            "redis_address" is in address_info, then this argument will be
            ignored.
        num_workers (int): The number of workers to start.
        num_local_schedulers (int): The total number of local schedulers
            required. This is also the total number of object stores required.
            This method will start new instances of local schedulers and object
            stores until there are num_local_schedulers existing instances of
            each, including ones already registered with the given
            address_info.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        worker_path (str): The path of the source code that will be run by the
            worker.
        cleanup (bool): If cleanup is true, then the processes started here
            will be killed by services.cleanup() when the Python process that
            called this method exits.
        redirect_output (bool): True if stdout and stderr should be redirected
            to a file.
        include_global_scheduler (bool): If include_global_scheduler is True,
            then start a global scheduler process.
        include_log_monitor (bool): If True, then start a log monitor to
            monitor the log files for all processes on this node and push their
            contents to Redis.
        include_webui (bool): If True, then attempt to start the web UI. Note
            that this is only possible with Python 3.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python.
        num_cpus: A list of length num_local_schedulers containing the number
            of CPUs each local scheduler should be configured with.
        num_gpus: A list of length num_local_schedulers containing the number
            of GPUs each local scheduler should be configured with.
    Returns:
        A dictionary of the address information for the processes that were
        started.
    """
    # Normalize scalar num_cpus/num_gpus into per-scheduler lists.
    if not isinstance(num_cpus, list):
        num_cpus = num_local_schedulers * [num_cpus]
    if not isinstance(num_gpus, list):
        num_gpus = num_local_schedulers * [num_gpus]
    assert len(num_cpus) == num_local_schedulers
    assert len(num_gpus) == num_local_schedulers
    # Decide how many workers each local scheduler is responsible for; when
    # num_workers is not given, default to that scheduler's CPU count (or
    # the machine's CPU count when num_cpus is unspecified).
    if num_workers is not None:
        workers_per_local_scheduler = num_local_schedulers * [num_workers]
    else:
        workers_per_local_scheduler = []
        for cpus in num_cpus:
            workers_per_local_scheduler.append(
                cpus if cpus is not None else psutil.cpu_count()
            )
    if address_info is None:
        address_info = {}
    address_info["node_ip_address"] = node_ip_address
    if worker_path is None:
        worker_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "workers/default_worker.py"
        )
    # Start Redis if there isn't already an instance running. TODO(rkn): We are
    # suppressing the output of Redis because on Linux it prints a bunch of
    # warning messages when it starts up. Instead of suppressing the output, we
    # should address the warnings.
    redis_address = address_info.get("redis_address")
    redis_shards = address_info.get("redis_shards", [])
    if redis_address is None:
        redis_address, redis_shards = start_redis(
            node_ip_address,
            port=redis_port,
            num_redis_shards=num_redis_shards,
            redirect_output=redirect_output,
            cleanup=cleanup,
        )
        address_info["redis_address"] = redis_address
        time.sleep(0.1)
        # Start monitoring the processes.
        monitor_stdout_file, monitor_stderr_file = new_log_files(
            "monitor", redirect_output
        )
        start_monitor(
            redis_address,
            node_ip_address,
            stdout_file=monitor_stdout_file,
            stderr_file=monitor_stderr_file,
        )
    if redis_shards == []:
        # Get redis shards from primary redis instance.
        # NOTE: this rebinds the redis_port parameter to the primary shard's
        # port parsed out of redis_address.
        redis_ip_address, redis_port = redis_address.split(":")
        redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port)
        redis_shards = redis_client.lrange("RedisShards", start=0, end=-1)
        redis_shards = [shard.decode("ascii") for shard in redis_shards]
    address_info["redis_shards"] = redis_shards
    # Start the log monitor, if necessary.
    if include_log_monitor:
        log_monitor_stdout_file, log_monitor_stderr_file = new_log_files(
            "log_monitor", redirect_output=True
        )
        start_log_monitor(
            redis_address,
            node_ip_address,
            stdout_file=log_monitor_stdout_file,
            stderr_file=log_monitor_stderr_file,
            cleanup=cleanup,
        )
    # Start the global scheduler, if necessary.
    if include_global_scheduler:
        global_scheduler_stdout_file, global_scheduler_stderr_file = new_log_files(
            "global_scheduler", redirect_output
        )
        start_global_scheduler(
            redis_address,
            node_ip_address,
            stdout_file=global_scheduler_stdout_file,
            stderr_file=global_scheduler_stderr_file,
            cleanup=cleanup,
        )
    # Initialize with existing services.
    if "object_store_addresses" not in address_info:
        address_info["object_store_addresses"] = []
    object_store_addresses = address_info["object_store_addresses"]
    if "local_scheduler_socket_names" not in address_info:
        address_info["local_scheduler_socket_names"] = []
    local_scheduler_socket_names = address_info["local_scheduler_socket_names"]
    # Get the ports to use for the object managers if any are provided.
    object_manager_ports = (
        address_info["object_manager_ports"]
        if "object_manager_ports" in address_info
        else None
    )
    if not isinstance(object_manager_ports, list):
        object_manager_ports = num_local_schedulers * [object_manager_ports]
    assert len(object_manager_ports) == num_local_schedulers
    # Start any object stores that do not yet exist.
    for i in range(num_local_schedulers - len(object_store_addresses)):
        # Start Plasma.
        plasma_store_stdout_file, plasma_store_stderr_file = new_log_files(
            "plasma_store_{}".format(i), redirect_output
        )
        plasma_manager_stdout_file, plasma_manager_stderr_file = new_log_files(
            "plasma_manager_{}".format(i), redirect_output
        )
        object_store_address = start_objstore(
            node_ip_address,
            redis_address,
            object_manager_port=object_manager_ports[i],
            store_stdout_file=plasma_store_stdout_file,
            store_stderr_file=plasma_store_stderr_file,
            manager_stdout_file=plasma_manager_stdout_file,
            manager_stderr_file=plasma_manager_stderr_file,
            objstore_memory=object_store_memory,
            cleanup=cleanup,
        )
        object_store_addresses.append(object_store_address)
        time.sleep(0.1)
    # Start any local schedulers that do not yet exist.
    for i in range(len(local_scheduler_socket_names), num_local_schedulers):
        # Connect the local scheduler to the object store at the same index.
        object_store_address = object_store_addresses[i]
        plasma_address = "{}:{}".format(
            node_ip_address, object_store_address.manager_port
        )
        # Determine how many workers this local scheduler should start.
        if start_workers_from_local_scheduler:
            num_local_scheduler_workers = workers_per_local_scheduler[i]
            # Zeroed here so the worker-starting loop below does not start
            # the same workers a second time from Python.
            workers_per_local_scheduler[i] = 0
        else:
            # If we're starting the workers from Python, the local scheduler
            # should not start any workers.
            num_local_scheduler_workers = 0
        # Start the local scheduler.
        local_scheduler_stdout_file, local_scheduler_stderr_file = new_log_files(
            "local_scheduler_{}".format(i), redirect_output
        )
        local_scheduler_name = start_local_scheduler(
            redis_address,
            node_ip_address,
            object_store_address.name,
            object_store_address.manager_name,
            worker_path,
            plasma_address=plasma_address,
            stdout_file=local_scheduler_stdout_file,
            stderr_file=local_scheduler_stderr_file,
            cleanup=cleanup,
            num_cpus=num_cpus[i],
            num_gpus=num_gpus[i],
            num_workers=num_local_scheduler_workers,
        )
        local_scheduler_socket_names.append(local_scheduler_name)
        time.sleep(0.1)
    # Make sure that we have exactly num_local_schedulers instances of object
    # stores and local schedulers.
    assert len(object_store_addresses) == num_local_schedulers
    assert len(local_scheduler_socket_names) == num_local_schedulers
    # Start any workers that the local scheduler has not already started.
    for i, num_local_scheduler_workers in enumerate(workers_per_local_scheduler):
        object_store_address = object_store_addresses[i]
        local_scheduler_name = local_scheduler_socket_names[i]
        for j in range(num_local_scheduler_workers):
            worker_stdout_file, worker_stderr_file = new_log_files(
                "worker_{}_{}".format(i, j), redirect_output
            )
            start_worker(
                node_ip_address,
                object_store_address.name,
                object_store_address.manager_name,
                local_scheduler_name,
                redis_address,
                worker_path,
                stdout_file=worker_stdout_file,
                stderr_file=worker_stderr_file,
                cleanup=cleanup,
            )
            workers_per_local_scheduler[i] -= 1
    # Make sure that we've started all the workers.
    assert sum(workers_per_local_scheduler) == 0
    # Try to start the web UI.
    if include_webui:
        ui_stdout_file, ui_stderr_file = new_log_files("webui", redirect_output=True)
        start_ui(
            redis_address,
            stdout_file=ui_stdout_file,
            stderr_file=ui_stderr_file,
            cleanup=cleanup,
        )
    # Return the addresses of the relevant processes.
    return address_info
|
def start_ray_processes(
    address_info=None,
    node_ip_address="127.0.0.1",
    redis_port=None,
    num_workers=None,
    num_local_schedulers=1,
    num_redis_shards=1,
    worker_path=None,
    cleanup=True,
    redirect_output=False,
    include_global_scheduler=False,
    include_log_monitor=False,
    include_webui=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
):
    """Helper method to start Ray processes.

    Args:
        address_info (dict): A dictionary with address information for
            processes that have already been started. If provided, address_info
            will be modified to include processes that are newly started.
        node_ip_address (str): The IP address of this node.
        redis_port (int): The port that the primary Redis shard should listen
            to. If None, then a random port will be chosen. If the key
            "redis_address" is in address_info, then this argument will be
            ignored.
        num_workers (int): The number of workers to start.
        num_local_schedulers (int): The total number of local schedulers
            required. This is also the total number of object stores required.
            This method will start new instances of local schedulers and object
            stores until there are num_local_schedulers existing instances of
            each, including ones already registered with the given
            address_info.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.
        worker_path (str): The path of the source code that will be run by the
            worker.
        cleanup (bool): If cleanup is true, then the processes started here
            will be killed by services.cleanup() when the Python process that
            called this method exits.
        redirect_output (bool): True if stdout and stderr should be redirected
            to a file.
        include_global_scheduler (bool): If include_global_scheduler is True,
            then start a global scheduler process.
        include_log_monitor (bool): If True, then start a log monitor to
            monitor the log files for all processes on this node and push their
            contents to Redis.
        include_webui (bool): If True, then attempt to start the web UI. Note
            that this is only possible with Python 3.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python.
        num_cpus: A list of length num_local_schedulers containing the number
            of CPUs each local scheduler should be configured with.
        num_gpus: A list of length num_local_schedulers containing the number
            of GPUs each local scheduler should be configured with.

    Returns:
        A dictionary of the address information for the processes that were
        started.
    """
    # Normalize the per-local-scheduler resource specifications: a scalar
    # (or None) is replicated so there is exactly one entry per scheduler.
    if not isinstance(num_cpus, list):
        num_cpus = num_local_schedulers * [num_cpus]
    if not isinstance(num_gpus, list):
        num_gpus = num_local_schedulers * [num_gpus]
    assert len(num_cpus) == num_local_schedulers
    assert len(num_gpus) == num_local_schedulers
    # Decide how many workers each local scheduler is responsible for. If
    # num_workers was not given, fall back to that scheduler's configured CPU
    # count, or the machine's CPU count when none was configured.
    if num_workers is not None:
        workers_per_local_scheduler = num_local_schedulers * [num_workers]
    else:
        workers_per_local_scheduler = []
        for cpus in num_cpus:
            workers_per_local_scheduler.append(
                cpus if cpus is not None else psutil.cpu_count()
            )
    if address_info is None:
        address_info = {}
    address_info["node_ip_address"] = node_ip_address
    # The default worker script ships alongside this module.
    if worker_path is None:
        worker_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "workers/default_worker.py"
        )
    # Start Redis if there isn't already an instance running. TODO(rkn): We are
    # suppressing the output of Redis because on Linux it prints a bunch of
    # warning messages when it starts up. Instead of suppressing the output, we
    # should address the warnings.
    redis_address = address_info.get("redis_address")
    redis_shards = address_info.get("redis_shards", [])
    if redis_address is None:
        redis_address, redis_shards = start_redis(
            node_ip_address,
            port=redis_port,
            num_redis_shards=num_redis_shards,
            redirect_output=redirect_output,
            cleanup=cleanup,
        )
        address_info["redis_address"] = redis_address
        # Brief pause to let Redis come up before other processes connect.
        time.sleep(0.1)
        # Start monitoring the processes.
        monitor_stdout_file, monitor_stderr_file = new_log_files(
            "monitor", redirect_output
        )
        start_monitor(
            redis_address,
            node_ip_address,
            stdout_file=monitor_stdout_file,
            stderr_file=monitor_stderr_file,
        )
    if redis_shards == []:
        # Get redis shards from primary redis instance.
        redis_ip_address, redis_port = redis_address.split(":")
        # NOTE(review): str.split yields a *string* port here, shadowing the
        # integer redis_port parameter; redis-py appears to tolerate a string
        # port, but confirm an int conversion isn't required.
        redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port)
        redis_shards = redis_client.lrange("RedisShards", start=0, end=-1)
        redis_shards = [shard.decode("ascii") for shard in redis_shards]
        address_info["redis_shards"] = redis_shards
    # Start the log monitor, if necessary.
    if include_log_monitor:
        log_monitor_stdout_file, log_monitor_stderr_file = new_log_files(
            "log_monitor", redirect_output=True
        )
        start_log_monitor(
            redis_address,
            node_ip_address,
            stdout_file=log_monitor_stdout_file,
            stderr_file=log_monitor_stderr_file,
            cleanup=cleanup,
        )
    # Start the global scheduler, if necessary.
    if include_global_scheduler:
        global_scheduler_stdout_file, global_scheduler_stderr_file = new_log_files(
            "global_scheduler", redirect_output
        )
        start_global_scheduler(
            redis_address,
            node_ip_address,
            stdout_file=global_scheduler_stdout_file,
            stderr_file=global_scheduler_stderr_file,
            cleanup=cleanup,
        )
    # Initialize with existing services.
    if "object_store_addresses" not in address_info:
        address_info["object_store_addresses"] = []
    object_store_addresses = address_info["object_store_addresses"]
    if "local_scheduler_socket_names" not in address_info:
        address_info["local_scheduler_socket_names"] = []
    local_scheduler_socket_names = address_info["local_scheduler_socket_names"]
    # Get the ports to use for the object managers if any are provided.
    object_manager_ports = (
        address_info["object_manager_ports"]
        if "object_manager_ports" in address_info
        else None
    )
    # Like the resource lists above, replicate a scalar/None port per scheduler.
    if not isinstance(object_manager_ports, list):
        object_manager_ports = num_local_schedulers * [object_manager_ports]
    assert len(object_manager_ports) == num_local_schedulers
    # Start any object stores that do not yet exist.
    for i in range(num_local_schedulers - len(object_store_addresses)):
        # Start Plasma.
        plasma_store_stdout_file, plasma_store_stderr_file = new_log_files(
            "plasma_store_{}".format(i), redirect_output
        )
        plasma_manager_stdout_file, plasma_manager_stderr_file = new_log_files(
            "plasma_manager_{}".format(i), redirect_output
        )
        object_store_address = start_objstore(
            node_ip_address,
            redis_address,
            object_manager_port=object_manager_ports[i],
            store_stdout_file=plasma_store_stdout_file,
            store_stderr_file=plasma_store_stderr_file,
            manager_stdout_file=plasma_manager_stdout_file,
            manager_stderr_file=plasma_manager_stderr_file,
            cleanup=cleanup,
        )
        object_store_addresses.append(object_store_address)
        time.sleep(0.1)
    # Start any local schedulers that do not yet exist.
    for i in range(len(local_scheduler_socket_names), num_local_schedulers):
        # Connect the local scheduler to the object store at the same index.
        object_store_address = object_store_addresses[i]
        plasma_address = "{}:{}".format(
            node_ip_address, object_store_address.manager_port
        )
        # Determine how many workers this local scheduler should start.
        if start_workers_from_local_scheduler:
            # Hand the whole worker quota to the local scheduler and zero the
            # slot so the Python-side worker loop below starts none for it.
            num_local_scheduler_workers = workers_per_local_scheduler[i]
            workers_per_local_scheduler[i] = 0
        else:
            # If we're starting the workers from Python, the local scheduler
            # should not start any workers.
            num_local_scheduler_workers = 0
        # Start the local scheduler.
        local_scheduler_stdout_file, local_scheduler_stderr_file = new_log_files(
            "local_scheduler_{}".format(i), redirect_output
        )
        local_scheduler_name = start_local_scheduler(
            redis_address,
            node_ip_address,
            object_store_address.name,
            object_store_address.manager_name,
            worker_path,
            plasma_address=plasma_address,
            stdout_file=local_scheduler_stdout_file,
            stderr_file=local_scheduler_stderr_file,
            cleanup=cleanup,
            num_cpus=num_cpus[i],
            num_gpus=num_gpus[i],
            num_workers=num_local_scheduler_workers,
        )
        local_scheduler_socket_names.append(local_scheduler_name)
        time.sleep(0.1)
    # Make sure that we have exactly num_local_schedulers instances of object
    # stores and local schedulers.
    assert len(object_store_addresses) == num_local_schedulers
    assert len(local_scheduler_socket_names) == num_local_schedulers
    # Start any workers that the local scheduler has not already started.
    for i, num_local_scheduler_workers in enumerate(workers_per_local_scheduler):
        object_store_address = object_store_addresses[i]
        local_scheduler_name = local_scheduler_socket_names[i]
        for j in range(num_local_scheduler_workers):
            worker_stdout_file, worker_stderr_file = new_log_files(
                "worker_{}_{}".format(i, j), redirect_output
            )
            start_worker(
                node_ip_address,
                object_store_address.name,
                object_store_address.manager_name,
                local_scheduler_name,
                redis_address,
                worker_path,
                stdout_file=worker_stdout_file,
                stderr_file=worker_stderr_file,
                cleanup=cleanup,
            )
            # Count down so the assertion below can verify full coverage.
            workers_per_local_scheduler[i] -= 1
    # Make sure that we've started all the workers.
    assert sum(workers_per_local_scheduler) == 0
    # Try to start the web UI.
    if include_webui:
        ui_stdout_file, ui_stderr_file = new_log_files("webui", redirect_output=True)
        start_ui(
            redis_address,
            stdout_file=ui_stdout_file,
            stderr_file=ui_stderr_file,
            cleanup=cleanup,
        )
    # Return the addresses of the relevant processes.
    return address_info
|
https://github.com/ray-project/ray/issues/787
|
======================================================================
FAIL: testPutErrors (__main__.ReconstructionTestsMultinode)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/stress_tests.py", line 505, in testPutErrors
errors = self.wait_for_errors(error_check)
File "test/stress_tests.py", line 362, in wait_for_errors
self.assertTrue(error_check(errors))
AssertionError: False is not true
|
AssertionError
|
def start_ray_head(
    address_info=None,
    node_ip_address="127.0.0.1",
    redis_port=None,
    num_workers=0,
    num_local_schedulers=1,
    object_store_memory=None,
    worker_path=None,
    cleanup=True,
    redirect_output=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
    num_redis_shards=None,
):
    """Start Ray in local mode.

    This is a convenience wrapper around start_ray_processes that enables
    the services which only run on the head node: the global scheduler,
    the log monitor, and the web UI.

    Args:
        address_info (dict): Address information for processes that have
            already been started. If provided, it is modified in place to
            include processes that are newly started.
        node_ip_address (str): The IP address of this node.
        redis_port (int): The port for the primary Redis shard, or None to
            choose a random port. Ignored if "redis_address" is already in
            address_info.
        num_workers (int): The number of workers to start.
        num_local_schedulers (int): The total number of local schedulers
            (and object stores) required, counting any already registered
            in address_info.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        worker_path (str): The path of the source code that will be run by
            each worker.
        cleanup (bool): If True, the processes started here are killed by
            services.cleanup() when this Python process exits.
        redirect_output (bool): True if stdout and stderr should be
            redirected to a file.
        start_workers_from_local_scheduler (bool): If True, the initial
            workers are started by the local scheduler; otherwise they are
            started from Python.
        num_cpus (int): number of cpus to configure the local scheduler with.
        num_gpus (int): number of gpus to configure the local scheduler with.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard (defaults to 1 when None).

    Returns:
        A dictionary of the address information for the processes that were
        started.
    """
    # Only None triggers the default; an explicit value is passed through.
    if num_redis_shards is None:
        num_redis_shards = 1
    # Forward everything verbatim; the head-only service flags are the only
    # settings this wrapper adds on top of the caller's arguments.
    head_settings = {
        "address_info": address_info,
        "node_ip_address": node_ip_address,
        "redis_port": redis_port,
        "num_workers": num_workers,
        "num_local_schedulers": num_local_schedulers,
        "object_store_memory": object_store_memory,
        "worker_path": worker_path,
        "cleanup": cleanup,
        "redirect_output": redirect_output,
        "include_global_scheduler": True,
        "include_log_monitor": True,
        "include_webui": True,
        "start_workers_from_local_scheduler": start_workers_from_local_scheduler,
        "num_cpus": num_cpus,
        "num_gpus": num_gpus,
        "num_redis_shards": num_redis_shards,
    }
    return start_ray_processes(**head_settings)
|
def start_ray_head(
    address_info=None,
    node_ip_address="127.0.0.1",
    redis_port=None,
    num_workers=0,
    num_local_schedulers=1,
    worker_path=None,
    cleanup=True,
    redirect_output=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
    num_redis_shards=None,
):
    """Start Ray in local mode.

    A thin wrapper around start_ray_processes that switches on the services
    which belong only on the head node (global scheduler, log monitor, and
    web UI) and forwards everything else unchanged.

    Args:
        address_info (dict): Address information for processes that have
            already been started. If provided, it is modified in place to
            include processes that are newly started.
        node_ip_address (str): The IP address of this node.
        redis_port (int): The port for the primary Redis shard, or None to
            choose a random port. Ignored if "redis_address" is already in
            address_info.
        num_workers (int): The number of workers to start.
        num_local_schedulers (int): The total number of local schedulers
            (and object stores) required, counting any already registered
            in address_info.
        worker_path (str): The path of the source code that will be run by
            each worker.
        cleanup (bool): If True, the processes started here are killed by
            services.cleanup() when this Python process exits.
        redirect_output (bool): True if stdout and stderr should be
            redirected to a file.
        start_workers_from_local_scheduler (bool): If True, the initial
            workers are started by the local scheduler; otherwise they are
            started from Python.
        num_cpus (int): number of cpus to configure the local scheduler with.
        num_gpus (int): number of gpus to configure the local scheduler with.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard (defaults to 1 when None).

    Returns:
        A dictionary of the address information for the processes that were
        started.
    """
    # None (and only None) means "use the default of one additional shard".
    shards = num_redis_shards if num_redis_shards is not None else 1
    return start_ray_processes(
        address_info=address_info,
        node_ip_address=node_ip_address,
        redis_port=redis_port,
        num_workers=num_workers,
        num_local_schedulers=num_local_schedulers,
        worker_path=worker_path,
        cleanup=cleanup,
        redirect_output=redirect_output,
        include_global_scheduler=True,
        include_log_monitor=True,
        include_webui=True,
        start_workers_from_local_scheduler=start_workers_from_local_scheduler,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        num_redis_shards=shards,
    )
|
https://github.com/ray-project/ray/issues/787
|
======================================================================
FAIL: testPutErrors (__main__.ReconstructionTestsMultinode)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/stress_tests.py", line 505, in testPutErrors
errors = self.wait_for_errors(error_check)
File "test/stress_tests.py", line 362, in wait_for_errors
self.assertTrue(error_check(errors))
AssertionError: False is not true
|
AssertionError
|
def _init(
    address_info=None,
    start_ray_local=False,
    object_id_seed=None,
    num_workers=None,
    num_local_schedulers=None,
    object_store_memory=None,
    driver_mode=SCRIPT_MODE,
    redirect_output=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
    num_redis_shards=None,
):
    """Helper method to connect to an existing Ray cluster or start a new one.

    This method handles two cases. Either a Ray cluster already exists and we
    just attach this driver to it, or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    Args:
        address_info (dict): A dictionary with address information for
            processes in a partially-started Ray cluster. If
            start_ray_local=True, any processes not in this dictionary will be
            started. If provided, an updated address_info dictionary will be
            returned to include processes that are newly started.
        start_ray_local (bool): If True then this will start any processes not
            already in address_info, including Redis, a global scheduler, local
            scheduler(s), object store(s), and worker(s). It will also kill
            these processes when Python exits. If False, this will attach to an
            existing Ray cluster.
        object_id_seed (int): Used to seed the deterministic generation of
            object IDs. The same value can be used across multiple runs of the
            same job in order to generate the object IDs in a consistent
            manner. However, the same ID should not be used for different jobs.
        num_workers (int): The number of workers to start. This is only
            provided if start_ray_local is True.
        num_local_schedulers (int): The number of local schedulers to start.
            This is only provided if start_ray_local is True.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
        driver_mode (bool): The mode in which to start the driver. This should
            be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.
        redirect_output (bool): True if stdout and stderr for all the processes
            should be redirected to files and false otherwise.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python. The latter case is for debugging purposes only.
        num_cpus: A list containing the number of CPUs the local schedulers
            should be configured with.
        num_gpus: A list containing the number of GPUs the local schedulers
            should be configured with.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    check_main_thread()
    # Reject unknown driver modes up front, before any processes are started.
    if driver_mode not in [SCRIPT_MODE, PYTHON_MODE, SILENT_MODE]:
        raise Exception(
            "Driver_mode must be in [ray.SCRIPT_MODE, "
            "ray.PYTHON_MODE, ray.SILENT_MODE]."
        )
    # Get addresses of existing services.
    if address_info is None:
        address_info = {}
    else:
        assert isinstance(address_info, dict)
    node_ip_address = address_info.get("node_ip_address")
    redis_address = address_info.get("redis_address")
    # Start any services that do not yet exist.
    if driver_mode == PYTHON_MODE:
        # If starting Ray in PYTHON_MODE, don't start any other processes.
        pass
    elif start_ray_local:
        # In this case, we launch a scheduler, a new object store, and some
        # workers, and we connect to them. We do not launch any processes that
        # are already registered in address_info.
        # Use the address 127.0.0.1 in local mode.
        node_ip_address = "127.0.0.1" if node_ip_address is None else node_ip_address
        # Use 1 local scheduler if num_local_schedulers is not provided. If
        # existing local schedulers are provided, use that count as
        # num_local_schedulers.
        local_schedulers = address_info.get("local_scheduler_socket_names", [])
        if num_local_schedulers is None:
            if len(local_schedulers) > 0:
                num_local_schedulers = len(local_schedulers)
            else:
                num_local_schedulers = 1
        # Use 1 additional redis shard if num_redis_shards is not provided.
        num_redis_shards = 1 if num_redis_shards is None else num_redis_shards
        # Start the scheduler, object store, and some workers. These will be
        # killed by the call to cleanup(), which happens when the Python script
        # exits.
        address_info = services.start_ray_head(
            address_info=address_info,
            node_ip_address=node_ip_address,
            num_workers=num_workers,
            num_local_schedulers=num_local_schedulers,
            object_store_memory=object_store_memory,
            redirect_output=redirect_output,
            start_workers_from_local_scheduler=(start_workers_from_local_scheduler),
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            num_redis_shards=num_redis_shards,
        )
    else:
        # Attaching to an existing cluster: cluster-shape arguments are only
        # meaningful when starting a cluster, so reject them explicitly.
        if redis_address is None:
            raise Exception(
                "When connecting to an existing cluster, "
                "redis_address must be provided."
            )
        if num_workers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_workers must not be provided."
            )
        if num_local_schedulers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_local_schedulers must not be provided."
            )
        if num_cpus is not None or num_gpus is not None:
            raise Exception(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided."
            )
        if num_redis_shards is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_redis_shards must not be provided."
            )
        if object_store_memory is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "object_store_memory must not be provided."
            )
        # Get the node IP address if one is not provided.
        if node_ip_address is None:
            node_ip_address = services.get_node_ip_address(redis_address)
        # Get the address info of the processes to connect to from Redis.
        address_info = get_address_info_from_redis(redis_address, node_ip_address)
    # Connect this driver to Redis, the object store, and the local scheduler.
    # Choose the first object store and local scheduler if there are multiple.
    # The corresponding call to disconnect will happen in the call to cleanup()
    # when the Python script exits.
    if driver_mode == PYTHON_MODE:
        driver_address_info = {}
    else:
        driver_address_info = {
            "node_ip_address": node_ip_address,
            "redis_address": address_info["redis_address"],
            "store_socket_name": (address_info["object_store_addresses"][0].name),
            "manager_socket_name": (
                address_info["object_store_addresses"][0].manager_name
            ),
            "local_scheduler_socket_name": (
                address_info["local_scheduler_socket_names"][0]
            ),
        }
    connect(
        driver_address_info,
        object_id_seed=object_id_seed,
        mode=driver_mode,
        worker=global_worker,
        actor_id=NIL_ACTOR_ID,
    )
    return address_info
|
def _init(
    address_info=None,
    start_ray_local=False,
    object_id_seed=None,
    num_workers=None,
    num_local_schedulers=None,
    driver_mode=SCRIPT_MODE,
    redirect_output=False,
    start_workers_from_local_scheduler=True,
    num_cpus=None,
    num_gpus=None,
    num_redis_shards=None,
):
    """Helper method to connect to an existing Ray cluster or start a new one.

    This method handles two cases. Either a Ray cluster already exists and we
    just attach this driver to it, or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    Args:
        address_info (dict): A dictionary with address information for
            processes in a partially-started Ray cluster. If
            start_ray_local=True, any processes not in this dictionary will be
            started. If provided, an updated address_info dictionary will be
            returned to include processes that are newly started.
        start_ray_local (bool): If True then this will start any processes not
            already in address_info, including Redis, a global scheduler, local
            scheduler(s), object store(s), and worker(s). It will also kill
            these processes when Python exits. If False, this will attach to an
            existing Ray cluster.
        object_id_seed (int): Used to seed the deterministic generation of
            object IDs. The same value can be used across multiple runs of the
            same job in order to generate the object IDs in a consistent
            manner. However, the same ID should not be used for different jobs.
        num_workers (int): The number of workers to start. This is only
            provided if start_ray_local is True.
        num_local_schedulers (int): The number of local schedulers to start.
            This is only provided if start_ray_local is True.
        driver_mode (bool): The mode in which to start the driver. This should
            be one of ray.SCRIPT_MODE, ray.PYTHON_MODE, and ray.SILENT_MODE.
        redirect_output (bool): True if stdout and stderr for all the processes
            should be redirected to files and false otherwise.
        start_workers_from_local_scheduler (bool): If this flag is True, then
            start the initial workers from the local scheduler. Else, start
            them from Python. The latter case is for debugging purposes only.
        num_cpus: A list containing the number of CPUs the local schedulers
            should be configured with.
        num_gpus: A list containing the number of GPUs the local schedulers
            should be configured with.
        num_redis_shards: The number of Redis shards to start in addition to
            the primary Redis shard.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    check_main_thread()
    # Reject unknown driver modes up front, before any processes are started.
    if driver_mode not in [SCRIPT_MODE, PYTHON_MODE, SILENT_MODE]:
        raise Exception(
            "Driver_mode must be in [ray.SCRIPT_MODE, "
            "ray.PYTHON_MODE, ray.SILENT_MODE]."
        )
    # Get addresses of existing services.
    if address_info is None:
        address_info = {}
    else:
        assert isinstance(address_info, dict)
    node_ip_address = address_info.get("node_ip_address")
    redis_address = address_info.get("redis_address")
    # Start any services that do not yet exist.
    if driver_mode == PYTHON_MODE:
        # If starting Ray in PYTHON_MODE, don't start any other processes.
        pass
    elif start_ray_local:
        # In this case, we launch a scheduler, a new object store, and some
        # workers, and we connect to them. We do not launch any processes that
        # are already registered in address_info.
        # Use the address 127.0.0.1 in local mode.
        node_ip_address = "127.0.0.1" if node_ip_address is None else node_ip_address
        # Use 1 local scheduler if num_local_schedulers is not provided. If
        # existing local schedulers are provided, use that count as
        # num_local_schedulers.
        local_schedulers = address_info.get("local_scheduler_socket_names", [])
        if num_local_schedulers is None:
            if len(local_schedulers) > 0:
                num_local_schedulers = len(local_schedulers)
            else:
                num_local_schedulers = 1
        # Use 1 additional redis shard if num_redis_shards is not provided.
        num_redis_shards = 1 if num_redis_shards is None else num_redis_shards
        # Start the scheduler, object store, and some workers. These will be
        # killed by the call to cleanup(), which happens when the Python script
        # exits.
        address_info = services.start_ray_head(
            address_info=address_info,
            node_ip_address=node_ip_address,
            num_workers=num_workers,
            num_local_schedulers=num_local_schedulers,
            redirect_output=redirect_output,
            start_workers_from_local_scheduler=(start_workers_from_local_scheduler),
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            num_redis_shards=num_redis_shards,
        )
    else:
        # Attaching to an existing cluster: cluster-shape arguments are only
        # meaningful when starting a cluster, so reject them explicitly.
        if redis_address is None:
            raise Exception(
                "When connecting to an existing cluster, "
                "redis_address must be provided."
            )
        if num_workers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_workers must not be provided."
            )
        if num_local_schedulers is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_local_schedulers must not be provided."
            )
        if num_cpus is not None or num_gpus is not None:
            raise Exception(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided."
            )
        if num_redis_shards is not None:
            raise Exception(
                "When connecting to an existing cluster, "
                "num_redis_shards must not be provided."
            )
        # Get the node IP address if one is not provided.
        if node_ip_address is None:
            node_ip_address = services.get_node_ip_address(redis_address)
        # Get the address info of the processes to connect to from Redis.
        address_info = get_address_info_from_redis(redis_address, node_ip_address)
    # Connect this driver to Redis, the object store, and the local scheduler.
    # Choose the first object store and local scheduler if there are multiple.
    # The corresponding call to disconnect will happen in the call to cleanup()
    # when the Python script exits.
    if driver_mode == PYTHON_MODE:
        driver_address_info = {}
    else:
        driver_address_info = {
            "node_ip_address": node_ip_address,
            "redis_address": address_info["redis_address"],
            "store_socket_name": (address_info["object_store_addresses"][0].name),
            "manager_socket_name": (
                address_info["object_store_addresses"][0].manager_name
            ),
            "local_scheduler_socket_name": (
                address_info["local_scheduler_socket_names"][0]
            ),
        }
    connect(
        driver_address_info,
        object_id_seed=object_id_seed,
        mode=driver_mode,
        worker=global_worker,
        actor_id=NIL_ACTOR_ID,
    )
    return address_info
|
https://github.com/ray-project/ray/issues/787
|
======================================================================
FAIL: testPutErrors (__main__.ReconstructionTestsMultinode)
----------------------------------------------------------------------
Traceback (most recent call last):
File "test/stress_tests.py", line 505, in testPutErrors
errors = self.wait_for_errors(error_check)
File "test/stress_tests.py", line 362, in wait_for_errors
self.assertTrue(error_check(errors))
AssertionError: False is not true
|
AssertionError
|
def __init__(self):
    """Create an uninitialized GlobalState object.

    The Redis connections start out unset; they are populated later when
    this object is connected to a running cluster.
    """
    # Primary-shard connection and the per-shard connection list share the
    # same "not yet connected" sentinel.
    self.redis_client = self.redis_clients = None
|
def __init__(self):
    """Create an uninitialized GlobalState object.

    The Redis connection starts out unset; it is populated later when this
    object is connected to a running cluster.
    """
    # None marks "not yet connected"; _check_connected guards against use
    # before initialization.
    self.redis_client = None
|
https://github.com/ray-project/ray/issues/785
|
+ python /home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py --docker-image=60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d --num-nodes=5 --num-redis-shards=10 --test-script=/ray/test/jenkins_tests/multi_node_tests/test_0.py
Starting head node with command:['docker', 'run', '-d', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--head', '--block', '--redis-port=6379', '--num-redis-shards=10', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting driver with command /ray/test/jenkins_tests/multi_node_tests/test_0.py.
STDOUT:
Driver 0 started at 1501449715.55.
STDERR:
Traceback (most recent call last):
File "/ray/test/jenkins_tests/multi_node_tests/test_0.py", line 22, in <module>
ray.init(redis_address=redis_address)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1011, in init
num_gpus=num_gpus, num_redis_shards=num_redis_shards)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 962, in _init
mode=driver_mode, worker=global_worker, actor_id=NIL_ACTOR_ID)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1338, in connect
global_state._initialize_global_state(redis_ip_address, int(redis_port))
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/experimental/state.py", line 96, in _initialize_global_state
len(ip_address_ports)))
Exception: Expected 10 Redis shard addresses, found 7
stop_node {'container_id': u'fea4409391126e14fca2459ce705051d644ec15163d5143708fc7820a2b7ba2a', 'is_head': True}
stop_node {'container_id': u'd9942971920d496cd39b65b4b9f95525a4479d455036ec2460fdbe8fe4b51c6e', 'is_head': False}
Traceback (most recent call last):
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 323, in <module>
d.stop_ray()
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 224, in stop_ray
self._stop_node(container_id)
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 207, in _stop_node
.format(container_id))
Exception: Failed to stop container 27327225b793cc721628b2fa4cf66b3c0a49eb5f0732273b66fa7ee505689655.
Build step 'Execute shell' marked build as failure
Test FAILed.
Refer to this link for build results (access rights to CI server needed):
https://amplab.cs.berkeley.edu/jenkins//job/Ray-PRB/1453/
Test FAILed.
Finished: FAILURE
|
Exception
|
def _check_connected(self):
    """Raise if this object is used before ray.init() has set it up.

    Raises:
        Exception: Raised when either the primary Redis client or the
            per-shard Redis clients have not been initialized yet.
    """
    # Both the primary client and the shard clients must be present before
    # any of the global-state accessors can be used.
    for client in (self.redis_client, self.redis_clients):
        if client is None:
            raise Exception(
                "The ray.global_state API cannot be used before ray.init has been called."
            )
|
def _check_connected(self):
    """Check that the object has been initialized before it is used.

    Raises:
        Exception: An exception is raised if ray.init() has not been called
            yet.
    """
    if self.redis_client is None:
        raise Exception(
            "The ray.global_state API cannot be used before ray.init has been called."
        )
    # Also verify the per-shard clients: the primary client alone is not
    # sufficient for the accessors that fan out across Redis shards, and
    # leaving this unchecked surfaces as an AttributeError/None error later.
    if self.redis_clients is None:
        raise Exception(
            "The ray.global_state API cannot be used before ray.init has been called."
        )
|
https://github.com/ray-project/ray/issues/785
|
+ python /home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py --docker-image=60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d --num-nodes=5 --num-redis-shards=10 --test-script=/ray/test/jenkins_tests/multi_node_tests/test_0.py
Starting head node with command:['docker', 'run', '-d', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--head', '--block', '--redis-port=6379', '--num-redis-shards=10', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting driver with command /ray/test/jenkins_tests/multi_node_tests/test_0.py.
STDOUT:
Driver 0 started at 1501449715.55.
STDERR:
Traceback (most recent call last):
File "/ray/test/jenkins_tests/multi_node_tests/test_0.py", line 22, in <module>
ray.init(redis_address=redis_address)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1011, in init
num_gpus=num_gpus, num_redis_shards=num_redis_shards)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 962, in _init
mode=driver_mode, worker=global_worker, actor_id=NIL_ACTOR_ID)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1338, in connect
global_state._initialize_global_state(redis_ip_address, int(redis_port))
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/experimental/state.py", line 96, in _initialize_global_state
len(ip_address_ports)))
Exception: Expected 10 Redis shard addresses, found 7
stop_node {'container_id': u'fea4409391126e14fca2459ce705051d644ec15163d5143708fc7820a2b7ba2a', 'is_head': True}
stop_node {'container_id': u'd9942971920d496cd39b65b4b9f95525a4479d455036ec2460fdbe8fe4b51c6e', 'is_head': False}
Traceback (most recent call last):
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 323, in <module>
d.stop_ray()
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 224, in stop_ray
self._stop_node(container_id)
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 207, in _stop_node
.format(container_id))
Exception: Failed to stop container 27327225b793cc721628b2fa4cf66b3c0a49eb5f0732273b66fa7ee505689655.
Build step 'Execute shell' marked build as failure
Test FAILed.
Refer to this link for build results (access rights to CI server needed):
https://amplab.cs.berkeley.edu/jenkins//job/Ray-PRB/1453/
Test FAILed.
Finished: FAILURE
|
Exception
|
def _initialize_global_state(self, redis_ip_address, redis_port, timeout=20):
    """Initialize the GlobalState object by connecting to Redis.

    It's possible that certain keys in Redis may not have been fully
    populated yet. In this case, we will retry this method until they have
    been populated or we exceed a timeout.

    Args:
        redis_ip_address: The IP address of the node that the Redis server
            lives on.
        redis_port: The port that the Redis server is listening on.
        timeout: The maximum amount of time (in seconds) that we should
            wait for the keys in Redis to be populated.

    Raises:
        Exception: Raised if fewer than one shard is registered, or if the
            shard addresses do not appear within the timeout.
    """
    self.redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port)
    start_time = time.time()
    num_redis_shards = None
    ip_address_ports = []
    # Track success explicitly: re-checking the elapsed time after the loop
    # (the previous approach) could spuriously report a timeout when the
    # last attempt succeeded right at the deadline.
    initialized = False
    while time.time() - start_time < timeout:
        # Attempt to get the number of Redis shards.
        num_redis_shards = self.redis_client.get("NumRedisShards")
        if num_redis_shards is None:
            print("Waiting longer for NumRedisShards to be populated.")
            time.sleep(1)
            continue
        num_redis_shards = int(num_redis_shards)
        if num_redis_shards < 1:
            raise Exception(
                "Expected at least one Redis shard, found {}.".format(num_redis_shards)
            )
        # Attempt to get all of the Redis shards.
        ip_address_ports = self.redis_client.lrange("RedisShards", start=0, end=-1)
        if len(ip_address_ports) != num_redis_shards:
            print("Waiting longer for RedisShards to be populated.")
            time.sleep(1)
            continue
        # If we got here then we successfully got all of the information.
        initialized = True
        break
    if not initialized:
        raise Exception(
            "Timed out while attempting to initialize the "
            "global state. num_redis_shards = {}, "
            "ip_address_ports = {}".format(num_redis_shards, ip_address_ports)
        )
    # Get the rest of the information.
    self.redis_clients = []
    for ip_address_port in ip_address_ports:
        # Entries are raw bytes of the form b"host:port".
        shard_address, shard_port = ip_address_port.split(b":")
        self.redis_clients.append(
            redis.StrictRedis(host=shard_address, port=shard_port)
        )
|
def _initialize_global_state(self, redis_ip_address, redis_port, timeout=20):
    """Initialize the GlobalState object by connecting to Redis.

    The Redis keys describing the shards may not have been fully populated
    yet when this runs (the head node registers shards asynchronously), so
    retry until they appear or the timeout expires. Previously this raised
    immediately ("Expected N Redis shard addresses, found M") and failed
    spuriously during cluster startup.

    Args:
        redis_ip_address: The IP address of the node that the Redis server
            lives on.
        redis_port: The port that the Redis server is listening on.
        timeout: The maximum amount of time (in seconds) to wait for the
            shard information to be populated. The default keeps the old
            two-argument call signature working.

    Raises:
        Exception: Raised if fewer than one shard is registered, or if the
            shard addresses do not appear within the timeout.
    """
    self.redis_client = redis.StrictRedis(host=redis_ip_address, port=redis_port)
    start_time = time.time()
    num_redis_shards = None
    ip_address_ports = []
    initialized = False
    while time.time() - start_time < timeout:
        num_redis_shards = self.redis_client.get("NumRedisShards")
        if num_redis_shards is None:
            # The head node has not published the shard count yet.
            print("Waiting longer for NumRedisShards to be populated.")
            time.sleep(1)
            continue
        num_redis_shards = int(num_redis_shards)
        if num_redis_shards < 1:
            raise Exception(
                "Expected at least one Redis shard, found {}.".format(num_redis_shards)
            )
        ip_address_ports = self.redis_client.lrange("RedisShards", start=0, end=-1)
        if len(ip_address_ports) != num_redis_shards:
            # Some shards have registered but not all of them; keep waiting.
            print("Waiting longer for RedisShards to be populated.")
            time.sleep(1)
            continue
        initialized = True
        break
    if not initialized:
        raise Exception(
            "Timed out while attempting to initialize the "
            "global state. num_redis_shards = {}, "
            "ip_address_ports = {}".format(num_redis_shards, ip_address_ports)
        )
    self.redis_clients = []
    for ip_address_port in ip_address_ports:
        # Entries are raw bytes of the form b"host:port".
        shard_address, shard_port = ip_address_port.split(b":")
        self.redis_clients.append(
            redis.StrictRedis(host=shard_address, port=shard_port)
        )
|
https://github.com/ray-project/ray/issues/785
|
+ python /home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py --docker-image=60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d --num-nodes=5 --num-redis-shards=10 --test-script=/ray/test/jenkins_tests/multi_node_tests/test_0.py
Starting head node with command:['docker', 'run', '-d', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--head', '--block', '--redis-port=6379', '--num-redis-shards=10', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting worker node with command:['docker', 'run', '-d', '--shm-size=1G', '--shm-size=1G', '60125e3f54e6f8f0623b956c8cb04ddc6402819ed22ef5908c5b9ba8db38b46d', 'ray', 'start', '--block', '--redis-address=172.17.0.12:6379', '--num-cpus=10', '--num-gpus=0']
Starting driver with command /ray/test/jenkins_tests/multi_node_tests/test_0.py.
STDOUT:
Driver 0 started at 1501449715.55.
STDERR:
Traceback (most recent call last):
File "/ray/test/jenkins_tests/multi_node_tests/test_0.py", line 22, in <module>
ray.init(redis_address=redis_address)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1011, in init
num_gpus=num_gpus, num_redis_shards=num_redis_shards)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 962, in _init
mode=driver_mode, worker=global_worker, actor_id=NIL_ACTOR_ID)
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/worker.py", line 1338, in connect
global_state._initialize_global_state(redis_ip_address, int(redis_port))
File "/opt/conda/lib/python2.7/site-packages/ray-0.1.2-py2.7-linux-x86_64.egg/ray/experimental/state.py", line 96, in _initialize_global_state
len(ip_address_ports)))
Exception: Expected 10 Redis shard addresses, found 7
stop_node {'container_id': u'fea4409391126e14fca2459ce705051d644ec15163d5143708fc7820a2b7ba2a', 'is_head': True}
stop_node {'container_id': u'd9942971920d496cd39b65b4b9f95525a4479d455036ec2460fdbe8fe4b51c6e', 'is_head': False}
Traceback (most recent call last):
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 323, in <module>
d.stop_ray()
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 224, in stop_ray
self._stop_node(container_id)
File "/home/jenkins/workspace/Ray-PRB/test/jenkins_tests/multi_node_docker_test.py", line 207, in _stop_node
.format(container_id))
Exception: Failed to stop container 27327225b793cc721628b2fa4cf66b3c0a49eb5f0732273b66fa7ee505689655.
Build step 'Execute shell' marked build as failure
Test FAILed.
Refer to this link for build results (access rights to CI server needed):
https://amplab.cs.berkeley.edu/jenkins//job/Ray-PRB/1453/
Test FAILed.
Finished: FAILURE
|
Exception
|
def new_log_files(name, redirect_output):
    """Create (stdout, stderr) log file handles for a new process.

    Args:
        name (str): descriptive string for this log file.
        redirect_output (bool): True if files should be generated for logging
            stdout and stderr and false if stdout and stderr should not be
            redirected.

    Returns:
        A pair of file handles opened for appending (stdout first, then
        stderr) when redirect_output is true, otherwise the pair
        (None, None).
    """
    if not redirect_output:
        return None, None
    # Directory that holds the per-process log files.
    log_directory = "/tmp/raylogs"
    try_to_create_directory(log_directory)
    # Some of the RL algorithms rely on this directory existing as well.
    try_to_create_directory("/tmp/ray")
    # A random suffix keeps files from different processes distinct.
    suffix = random.randint(0, 1000000000)
    stdout_name = "{}/{}-{:010d}.out".format(log_directory, name, suffix)
    stderr_name = "{}/{}-{:010d}.err".format(log_directory, name, suffix)
    return open(stdout_name, "a"), open(stderr_name, "a")
|
def new_log_files(name, redirect_output):
    """Generate partially randomized filenames for log files.

    Args:
        name (str): descriptive string for this log file.
        redirect_output (bool): True if files should be generated for logging
            stdout and stderr and false if stdout and stderr should not be
            redirected.

    Returns:
        If redirect_output is true, this will return a tuple of two filehandles.
        The first is for redirecting stdout and the second is for redirecting
        stderr. If redirect_output is false, this will return a tuple of two None
        objects.
    """
    if not redirect_output:
        return None, None
    import errno  # local import: os.errno was removed in Python 3.7
    logs_dir = "/tmp/raylogs"
    # Create the log directory, and also the directory that some of the RL
    # algorithms use for temporary files. Not creating "/tmp/ray" here
    # previously caused a FileNotFoundError in tempfile.mkdtemp.
    for directory in (logs_dir, "/tmp/ray"):
        if not os.path.exists(directory):
            try:
                os.makedirs(directory)
            except OSError as e:
                # Another process may have created it concurrently; only
                # re-raise genuinely unexpected errors.
                if e.errno != errno.EEXIST:
                    raise e
                print(
                    "Attempted to create '{}', but the directory already exists.".format(
                        directory
                    )
                )
        # Change the log directory permissions so others can use it. This is
        # important when multiple people are using the same machine.
        os.chmod(directory, 0o0777)
    log_id = random.randint(0, 1000000000)
    log_stdout = "{}/{}-{:010d}.out".format(logs_dir, name, log_id)
    log_stderr = "{}/{}-{:010d}.err".format(logs_dir, name, log_id)
    log_stdout_file = open(log_stdout, "a")
    log_stderr_file = open(log_stderr, "a")
    return log_stdout_file, log_stderr_file
|
https://github.com/ray-project/ray/issues/714
|
Traceback (most recent call last):
File "python/ray/rllib/a3c/example.py", line 30, in <module>
a3c = A3C(args.environment, config)
File "/Users/rkn/Workspace/ray/python/ray/rllib/a3c/a3c.py", line 87, in __init__
Algorithm.__init__(self, env_name, config)
File "/Users/rkn/Workspace/ray/python/ray/rllib/common.py", line 41, in __init__
dir="/tmp/ray")
File "/Users/rkn/anaconda3/lib/python3.6/tempfile.py", line 368, in mkdtemp
_os.mkdir(file, 0o700)
FileNotFoundError: [Errno 2] No such file or directory: '/tmp/ray/PongDeterministic-v3_A3C_2017-07-07_17-08-28f49y6ar0'
|
FileNotFoundError
|
def remote(*args, **kwargs):
    """This decorator is used to create remote functions.

    It supports both the bare form ``@ray.remote`` and the parameterized
    form ``@ray.remote(num_return_vals=2)``; the branching at the bottom of
    this function distinguishes the two cases.

    Args:
        num_return_vals (int): The number of object IDs that a call to this
            function should return.
        num_cpus (int): The number of CPUs needed to execute this function. This
            should only be passed in when defining the remote function on the driver.
        num_gpus (int): The number of GPUs needed to execute this function. This
            should only be passed in when defining the remote function on the driver.
    """
    worker = global_worker
    def make_remote_decorator(num_return_vals, num_cpus, num_gpus, func_id=None):
        # Builds the actual decorator for one resource configuration. When
        # func_id is provided (worker mode), it is reused instead of being
        # recomputed via compute_function_id.
        def remote_decorator(func):
            func_name = "{}.{}".format(func.__module__, func.__name__)
            if func_id is None:
                function_id = compute_function_id(func_name, func)
            else:
                function_id = func_id
            def func_call(*args, **kwargs):
                """This gets run immediately when a worker calls a remote function."""
                check_connected()
                check_main_thread()
                args = list(args)
                # Fill in the remaining arguments.
                # NOTE: keyword_defaults is closed over and only assigned
                # later in remote_decorator; closures bind late, so it is
                # available by the time func_call actually runs.
                args.extend(
                    [
                        kwargs[keyword] if keyword in kwargs else default
                        for keyword, default in keyword_defaults[len(args) :]
                    ]
                )
                if any([arg is funcsigs._empty for arg in args]):
                    raise Exception(
                        "Not enough arguments were provided to {}.".format(func_name)
                    )
                if _mode() == PYTHON_MODE:
                    # In PYTHON_MODE, remote calls simply execute the function. We copy
                    # the arguments to prevent the function call from mutating them and
                    # to match the usual behavior of immutable remote objects.
                    try:
                        _env()._running_remote_function_locally = True
                        result = func(*copy.deepcopy(args))
                    finally:
                        _env()._reinitialize()
                        _env()._running_remote_function_locally = False
                    return result
                objectids = _submit_task(function_id, func_name, args)
                # Zero returned object IDs implicitly yields None.
                if len(objectids) == 1:
                    return objectids[0]
                elif len(objectids) > 1:
                    return objectids
            def func_executor(arguments):
                """This gets run when the remote function is executed."""
                result = func(*arguments)
                return result
            def func_invoker(*args, **kwargs):
                """This is used to invoke the function."""
                raise Exception(
                    "Remote functions cannot be called directly. Instead "
                    "of running '{}()', try '{}.remote()'.".format(func_name, func_name)
                )
            func_invoker.remote = func_call
            func_invoker.executor = func_executor
            func_invoker.is_remote = True
            func_name = "{}.{}".format(func.__module__, func.__name__)
            func_invoker.func_name = func_name
            # Python 2 and 3 store docstrings under different attributes.
            if sys.version_info >= (3, 0):
                func_invoker.__doc__ = func.__doc__
            else:
                func_invoker.func_doc = func.func_doc
            sig_params = [
                (k, v) for k, v in funcsigs.signature(func).parameters.items()
            ]
            keyword_defaults = [(k, v.default) for k, v in sig_params]
            has_vararg_param = any([v.kind == v.VAR_POSITIONAL for k, v in sig_params])
            func_invoker.has_vararg_param = has_vararg_param
            has_kwargs_param = any([v.kind == v.VAR_KEYWORD for k, v in sig_params])
            check_signature_supported(
                has_kwargs_param, has_vararg_param, keyword_defaults, func_name
            )
            # Everything ready - export the function
            if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
                export_remote_function(
                    function_id, func_name, func, num_return_vals, num_cpus, num_gpus
                )
            elif worker.mode is None:
                # Not connected yet: remember the function so it can be
                # exported once ray.init() runs.
                worker.cached_remote_functions.append(
                    (function_id, func_name, func, num_return_vals, num_cpus, num_gpus)
                )
            return func_invoker
        return remote_decorator
    # Resource defaults when the keyword arguments are omitted.
    num_return_vals = (
        kwargs["num_return_vals"] if "num_return_vals" in kwargs.keys() else 1
    )
    num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs.keys() else 1
    num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs.keys() else 0
    if _mode() == WORKER_MODE:
        if "function_id" in kwargs:
            function_id = kwargs["function_id"]
            return make_remote_decorator(
                num_return_vals, num_cpus, num_gpus, function_id
            )
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_remote_decorator(num_return_vals, num_cpus, num_gpus)(args[0])
    else:
        # This is the case where the decorator is something like
        # @ray.remote(num_return_vals=2).
        error_string = (
            "The @ray.remote decorator must be applied either with no "
            "arguments and no parentheses, for example '@ray.remote', "
            "or it must be applied using some of the arguments "
            "'num_return_vals', 'num_cpus', or 'num_gpus', like "
            "'@ray.remote(num_return_vals=2)'."
        )
        assert len(args) == 0 and (
            "num_return_vals" in kwargs or "num_cpus" in kwargs or "num_gpus" in kwargs
        ), error_string
        assert "function_id" not in kwargs
        return make_remote_decorator(num_return_vals, num_cpus, num_gpus)
|
def remote(*args, **kwargs):
    """This decorator is used to create remote functions.

    It supports both the bare form ``@ray.remote`` and the parameterized
    form ``@ray.remote(num_return_vals=2)``.

    Args:
        num_return_vals (int): The number of object IDs that a call to this
            function should return.
        num_cpus (int): The number of CPUs needed to execute this function. This
            should only be passed in when defining the remote function on the driver.
        num_gpus (int): The number of GPUs needed to execute this function. This
            should only be passed in when defining the remote function on the driver.
    """
    worker = global_worker
    def make_remote_decorator(num_return_vals, num_cpus, num_gpus, func_id=None):
        def remote_decorator(func):
            func_name = "{}.{}".format(func.__module__, func.__name__)
            if func_id is None:
                # Compute the function ID as a hash of the function name as well as the
                # source code. We could in principle hash in the values in the closure
                # of the function, but that is likely to introduce non-determinism in
                # the computation of the function ID.
                function_id_hash = hashlib.sha1()
                function_id_hash.update(func_name.encode("ascii"))
                try:
                    function_id_hash.update(inspect.getsource(func).encode("ascii"))
                except (IOError, OSError, TypeError):
                    # The source is unavailable, e.g. for functions defined in
                    # an interactive interpreter, where inspect.getsource
                    # raises OSError("could not get source code"). Fall back
                    # to hashing the compiled bytecode so the decorator still
                    # works there.
                    function_id_hash.update(func.__code__.co_code)
                function_id = function_id_hash.digest()
                assert len(function_id) == 20
                function_id = FunctionID(function_id)
            else:
                function_id = func_id
            def func_call(*args, **kwargs):
                """This gets run immediately when a worker calls a remote function."""
                check_connected()
                check_main_thread()
                args = list(args)
                # Fill in the remaining arguments. keyword_defaults is closed
                # over and assigned later in remote_decorator; closures bind
                # late, so it exists by the time func_call runs.
                args.extend(
                    [
                        kwargs[keyword] if keyword in kwargs else default
                        for keyword, default in keyword_defaults[len(args) :]
                    ]
                )
                if any([arg is funcsigs._empty for arg in args]):
                    raise Exception(
                        "Not enough arguments were provided to {}.".format(func_name)
                    )
                if _mode() == PYTHON_MODE:
                    # In PYTHON_MODE, remote calls simply execute the function. We copy
                    # the arguments to prevent the function call from mutating them and
                    # to match the usual behavior of immutable remote objects.
                    try:
                        _env()._running_remote_function_locally = True
                        result = func(*copy.deepcopy(args))
                    finally:
                        _env()._reinitialize()
                        _env()._running_remote_function_locally = False
                    return result
                objectids = _submit_task(function_id, func_name, args)
                if len(objectids) == 1:
                    return objectids[0]
                elif len(objectids) > 1:
                    return objectids
            def func_executor(arguments):
                """This gets run when the remote function is executed."""
                result = func(*arguments)
                return result
            def func_invoker(*args, **kwargs):
                """This is used to invoke the function."""
                raise Exception(
                    "Remote functions cannot be called directly. Instead "
                    "of running '{}()', try '{}.remote()'.".format(func_name, func_name)
                )
            func_invoker.remote = func_call
            func_invoker.executor = func_executor
            func_invoker.is_remote = True
            func_name = "{}.{}".format(func.__module__, func.__name__)
            func_invoker.func_name = func_name
            # Python 2 and 3 store docstrings under different attributes.
            if sys.version_info >= (3, 0):
                func_invoker.__doc__ = func.__doc__
            else:
                func_invoker.func_doc = func.func_doc
            sig_params = [
                (k, v) for k, v in funcsigs.signature(func).parameters.items()
            ]
            keyword_defaults = [(k, v.default) for k, v in sig_params]
            has_vararg_param = any([v.kind == v.VAR_POSITIONAL for k, v in sig_params])
            func_invoker.has_vararg_param = has_vararg_param
            has_kwargs_param = any([v.kind == v.VAR_KEYWORD for k, v in sig_params])
            check_signature_supported(
                has_kwargs_param, has_vararg_param, keyword_defaults, func_name
            )
            # Everything ready - export the function
            if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
                export_remote_function(
                    function_id, func_name, func, num_return_vals, num_cpus, num_gpus
                )
            elif worker.mode is None:
                # Not connected yet: remember the function so it can be
                # exported once ray.init() runs.
                worker.cached_remote_functions.append(
                    (function_id, func_name, func, num_return_vals, num_cpus, num_gpus)
                )
            return func_invoker
        return remote_decorator
    # Resource defaults when the keyword arguments are omitted.
    num_return_vals = (
        kwargs["num_return_vals"] if "num_return_vals" in kwargs.keys() else 1
    )
    num_cpus = kwargs["num_cpus"] if "num_cpus" in kwargs.keys() else 1
    num_gpus = kwargs["num_gpus"] if "num_gpus" in kwargs.keys() else 0
    if _mode() == WORKER_MODE:
        if "function_id" in kwargs:
            function_id = kwargs["function_id"]
            return make_remote_decorator(
                num_return_vals, num_cpus, num_gpus, function_id
            )
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_remote_decorator(num_return_vals, num_cpus, num_gpus)(args[0])
    else:
        # This is the case where the decorator is something like
        # @ray.remote(num_return_vals=2).
        error_string = (
            "The @ray.remote decorator must be applied either with no "
            "arguments and no parentheses, for example '@ray.remote', "
            "or it must be applied using some of the arguments "
            "'num_return_vals', 'num_cpus', or 'num_gpus', like "
            "'@ray.remote(num_return_vals=2)'."
        )
        assert len(args) == 0 and (
            "num_return_vals" in kwargs or "num_cpus" in kwargs or "num_gpus" in kwargs
        ), error_string
        assert "function_id" not in kwargs
        return make_remote_decorator(num_return_vals, num_cpus, num_gpus)
|
https://github.com/ray-project/ray/issues/349
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/rkn/Workspace/ray/python/ray/worker.py", line 1886, in remote
return make_remote_decorator(num_return_vals, num_cpus, num_gpus)(args[0])
File "/Users/rkn/Workspace/ray/python/ray/worker.py", line 1798, in remote_decorator
function_id_hash.update(inspect.getsource(func).encode("ascii"))
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 938, in getsource
lines, lnum = getsourcelines(object)
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 925, in getsourcelines
lines, lnum = findsource(object)
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 756, in findsource
raise OSError('could not get source code')
OSError: could not get source code
|
OSError
|
def make_remote_decorator(num_return_vals, num_cpus, num_gpus, func_id=None):
    """Build a decorator that registers a function as a Ray remote function.

    Args:
        num_return_vals (int): Number of object IDs a call should return.
        num_cpus (int): Number of CPUs needed to execute the function.
        num_gpus (int): Number of GPUs needed to execute the function.
        func_id: Precomputed function ID; when None, compute_function_id
            derives one from the function.

    NOTE(review): references a `worker` object from an enclosing scope that
    is not visible here — confirm against the defining module.
    """
    def remote_decorator(func):
        func_name = "{}.{}".format(func.__module__, func.__name__)
        if func_id is None:
            function_id = compute_function_id(func_name, func)
        else:
            function_id = func_id
        def func_call(*args, **kwargs):
            """This gets run immediately when a worker calls a remote function."""
            check_connected()
            check_main_thread()
            args = list(args)
            # Fill in the remaining arguments. keyword_defaults is closed
            # over and only assigned later in remote_decorator; closures
            # bind late, so it exists by the time func_call runs.
            args.extend(
                [
                    kwargs[keyword] if keyword in kwargs else default
                    for keyword, default in keyword_defaults[len(args) :]
                ]
            )
            if any([arg is funcsigs._empty for arg in args]):
                raise Exception(
                    "Not enough arguments were provided to {}.".format(func_name)
                )
            if _mode() == PYTHON_MODE:
                # In PYTHON_MODE, remote calls simply execute the function. We copy
                # the arguments to prevent the function call from mutating them and
                # to match the usual behavior of immutable remote objects.
                try:
                    _env()._running_remote_function_locally = True
                    result = func(*copy.deepcopy(args))
                finally:
                    _env()._reinitialize()
                    _env()._running_remote_function_locally = False
                return result
            objectids = _submit_task(function_id, func_name, args)
            # Zero returned object IDs implicitly yields None.
            if len(objectids) == 1:
                return objectids[0]
            elif len(objectids) > 1:
                return objectids
        def func_executor(arguments):
            """This gets run when the remote function is executed."""
            result = func(*arguments)
            return result
        def func_invoker(*args, **kwargs):
            """This is used to invoke the function."""
            raise Exception(
                "Remote functions cannot be called directly. Instead "
                "of running '{}()', try '{}.remote()'.".format(func_name, func_name)
            )
        func_invoker.remote = func_call
        func_invoker.executor = func_executor
        func_invoker.is_remote = True
        func_name = "{}.{}".format(func.__module__, func.__name__)
        func_invoker.func_name = func_name
        # Python 2 and 3 store docstrings under different attributes.
        if sys.version_info >= (3, 0):
            func_invoker.__doc__ = func.__doc__
        else:
            func_invoker.func_doc = func.func_doc
        sig_params = [(k, v) for k, v in funcsigs.signature(func).parameters.items()]
        keyword_defaults = [(k, v.default) for k, v in sig_params]
        has_vararg_param = any([v.kind == v.VAR_POSITIONAL for k, v in sig_params])
        func_invoker.has_vararg_param = has_vararg_param
        has_kwargs_param = any([v.kind == v.VAR_KEYWORD for k, v in sig_params])
        check_signature_supported(
            has_kwargs_param, has_vararg_param, keyword_defaults, func_name
        )
        # Everything ready - export the function
        if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
            export_remote_function(
                function_id, func_name, func, num_return_vals, num_cpus, num_gpus
            )
        elif worker.mode is None:
            # Not connected yet: remember the function so it can be exported
            # once ray.init() runs.
            worker.cached_remote_functions.append(
                (function_id, func_name, func, num_return_vals, num_cpus, num_gpus)
            )
        return func_invoker
    return remote_decorator
|
def make_remote_decorator(num_return_vals, num_cpus, num_gpus, func_id=None):
    """Build a decorator that registers a function as a Ray remote function.

    Args:
        num_return_vals (int): Number of object IDs a call should return.
        num_cpus (int): Number of CPUs needed to execute the function.
        num_gpus (int): Number of GPUs needed to execute the function.
        func_id: Precomputed function ID; when None, an ID is derived from
            the function name and its source (or bytecode).

    NOTE(review): references a `worker` object from an enclosing scope that
    is not visible here — confirm against the defining module.
    """
    def remote_decorator(func):
        func_name = "{}.{}".format(func.__module__, func.__name__)
        if func_id is None:
            # Compute the function ID as a hash of the function name as well as the
            # source code. We could in principle hash in the values in the closure
            # of the function, but that is likely to introduce non-determinism in
            # the computation of the function ID.
            function_id_hash = hashlib.sha1()
            function_id_hash.update(func_name.encode("ascii"))
            try:
                function_id_hash.update(inspect.getsource(func).encode("ascii"))
            except (IOError, OSError, TypeError):
                # The source is unavailable, e.g. for functions defined in an
                # interactive interpreter, where inspect.getsource raises
                # OSError("could not get source code"). Fall back to hashing
                # the compiled bytecode so the decorator still works there.
                function_id_hash.update(func.__code__.co_code)
            function_id = function_id_hash.digest()
            assert len(function_id) == 20
            function_id = FunctionID(function_id)
        else:
            function_id = func_id
        def func_call(*args, **kwargs):
            """This gets run immediately when a worker calls a remote function."""
            check_connected()
            check_main_thread()
            args = list(args)
            # Fill in the remaining arguments. keyword_defaults is closed
            # over and assigned later in remote_decorator; closures bind
            # late, so it exists by the time func_call runs.
            args.extend(
                [
                    kwargs[keyword] if keyword in kwargs else default
                    for keyword, default in keyword_defaults[len(args) :]
                ]
            )
            if any([arg is funcsigs._empty for arg in args]):
                raise Exception(
                    "Not enough arguments were provided to {}.".format(func_name)
                )
            if _mode() == PYTHON_MODE:
                # In PYTHON_MODE, remote calls simply execute the function. We copy
                # the arguments to prevent the function call from mutating them and
                # to match the usual behavior of immutable remote objects.
                try:
                    _env()._running_remote_function_locally = True
                    result = func(*copy.deepcopy(args))
                finally:
                    _env()._reinitialize()
                    _env()._running_remote_function_locally = False
                return result
            objectids = _submit_task(function_id, func_name, args)
            if len(objectids) == 1:
                return objectids[0]
            elif len(objectids) > 1:
                return objectids
        def func_executor(arguments):
            """This gets run when the remote function is executed."""
            result = func(*arguments)
            return result
        def func_invoker(*args, **kwargs):
            """This is used to invoke the function."""
            raise Exception(
                "Remote functions cannot be called directly. Instead "
                "of running '{}()', try '{}.remote()'.".format(func_name, func_name)
            )
        func_invoker.remote = func_call
        func_invoker.executor = func_executor
        func_invoker.is_remote = True
        func_name = "{}.{}".format(func.__module__, func.__name__)
        func_invoker.func_name = func_name
        # Python 2 and 3 store docstrings under different attributes.
        if sys.version_info >= (3, 0):
            func_invoker.__doc__ = func.__doc__
        else:
            func_invoker.func_doc = func.func_doc
        sig_params = [(k, v) for k, v in funcsigs.signature(func).parameters.items()]
        keyword_defaults = [(k, v.default) for k, v in sig_params]
        has_vararg_param = any([v.kind == v.VAR_POSITIONAL for k, v in sig_params])
        func_invoker.has_vararg_param = has_vararg_param
        has_kwargs_param = any([v.kind == v.VAR_KEYWORD for k, v in sig_params])
        check_signature_supported(
            has_kwargs_param, has_vararg_param, keyword_defaults, func_name
        )
        # Everything ready - export the function
        if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
            export_remote_function(
                function_id, func_name, func, num_return_vals, num_cpus, num_gpus
            )
        elif worker.mode is None:
            # Not connected yet: remember the function so it can be exported
            # once ray.init() runs.
            worker.cached_remote_functions.append(
                (function_id, func_name, func, num_return_vals, num_cpus, num_gpus)
            )
        return func_invoker
    return remote_decorator
|
https://github.com/ray-project/ray/issues/349
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/rkn/Workspace/ray/python/ray/worker.py", line 1886, in remote
return make_remote_decorator(num_return_vals, num_cpus, num_gpus)(args[0])
File "/Users/rkn/Workspace/ray/python/ray/worker.py", line 1798, in remote_decorator
function_id_hash.update(inspect.getsource(func).encode("ascii"))
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 938, in getsource
lines, lnum = getsourcelines(object)
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 925, in getsourcelines
lines, lnum = findsource(object)
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 756, in findsource
raise OSError('could not get source code')
OSError: could not get source code
|
OSError
|
def remote_decorator(func):
    """Register *func* as a Ray remote function and return its invoker.

    The returned `func_invoker` object refuses direct calls and exposes the
    remote-call entry point as the `remote` attribute.

    NOTE(review): this is a closure body extracted from its enclosing
    function — `func_id`, `worker`, `num_return_vals`, `num_cpus`, and
    `num_gpus` are free variables supplied by that scope; confirm against
    the defining module.
    """
    func_name = "{}.{}".format(func.__module__, func.__name__)
    if func_id is None:
        function_id = compute_function_id(func_name, func)
    else:
        function_id = func_id
    def func_call(*args, **kwargs):
        """This gets run immediately when a worker calls a remote function."""
        check_connected()
        check_main_thread()
        args = list(args)
        # Fill in the remaining arguments. keyword_defaults is closed over
        # and only assigned later in this function; closures bind late, so
        # it exists by the time func_call runs.
        args.extend(
            [
                kwargs[keyword] if keyword in kwargs else default
                for keyword, default in keyword_defaults[len(args) :]
            ]
        )
        if any([arg is funcsigs._empty for arg in args]):
            raise Exception(
                "Not enough arguments were provided to {}.".format(func_name)
            )
        if _mode() == PYTHON_MODE:
            # In PYTHON_MODE, remote calls simply execute the function. We copy
            # the arguments to prevent the function call from mutating them and
            # to match the usual behavior of immutable remote objects.
            try:
                _env()._running_remote_function_locally = True
                result = func(*copy.deepcopy(args))
            finally:
                _env()._reinitialize()
                _env()._running_remote_function_locally = False
            return result
        objectids = _submit_task(function_id, func_name, args)
        # Zero returned object IDs implicitly yields None.
        if len(objectids) == 1:
            return objectids[0]
        elif len(objectids) > 1:
            return objectids
    def func_executor(arguments):
        """This gets run when the remote function is executed."""
        result = func(*arguments)
        return result
    def func_invoker(*args, **kwargs):
        """This is used to invoke the function."""
        raise Exception(
            "Remote functions cannot be called directly. Instead "
            "of running '{}()', try '{}.remote()'.".format(func_name, func_name)
        )
    func_invoker.remote = func_call
    func_invoker.executor = func_executor
    func_invoker.is_remote = True
    func_name = "{}.{}".format(func.__module__, func.__name__)
    func_invoker.func_name = func_name
    # Python 2 and 3 store docstrings under different attributes.
    if sys.version_info >= (3, 0):
        func_invoker.__doc__ = func.__doc__
    else:
        func_invoker.func_doc = func.func_doc
    sig_params = [(k, v) for k, v in funcsigs.signature(func).parameters.items()]
    keyword_defaults = [(k, v.default) for k, v in sig_params]
    has_vararg_param = any([v.kind == v.VAR_POSITIONAL for k, v in sig_params])
    func_invoker.has_vararg_param = has_vararg_param
    has_kwargs_param = any([v.kind == v.VAR_KEYWORD for k, v in sig_params])
    check_signature_supported(
        has_kwargs_param, has_vararg_param, keyword_defaults, func_name
    )
    # Everything ready - export the function
    if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
        export_remote_function(
            function_id, func_name, func, num_return_vals, num_cpus, num_gpus
        )
    elif worker.mode is None:
        # Not connected yet: remember the function so it can be exported
        # once ray.init() runs.
        worker.cached_remote_functions.append(
            (function_id, func_name, func, num_return_vals, num_cpus, num_gpus)
        )
    return func_invoker
|
def remote_decorator(func):
    """Wrap ``func`` as a Ray remote function and return its invoker.

    Computes a deterministic function ID (SHA-1 of the qualified name plus,
    when available, the source code), builds the ``.remote(...)`` call path
    and the worker-side executor, and exports or caches the function
    depending on the worker mode.  ``func_id``, ``num_return_vals``,
    ``num_cpus`` and ``num_gpus`` come from the enclosing factory's scope.
    """
    func_name = "{}.{}".format(func.__module__, func.__name__)
    if func_id is None:
        # Compute the function ID as a hash of the function name as well as the
        # source code. We could in principle hash in the values in the closure
        # of the function, but that is likely to introduce non-determinism in
        # the computation of the function ID.
        function_id_hash = hashlib.sha1()
        function_id_hash.update(func_name.encode("ascii"))
        try:
            # inspect.getsource raises OSError (IOError on Python 2) when the
            # source is unavailable, e.g. for functions defined in the
            # interactive interpreter.  Fall back to hashing only the name
            # instead of crashing the decorator.
            function_id_hash.update(inspect.getsource(func).encode("ascii"))
        except (IOError, OSError, TypeError):
            pass
        function_id = function_id_hash.digest()
        assert len(function_id) == 20
        function_id = FunctionID(function_id)
    else:
        function_id = func_id
    def func_call(*args, **kwargs):
        """This gets run immediately when a worker calls a remote function."""
        check_connected()
        check_main_thread()
        args = list(args)
        # Fill in the remaining arguments.
        args.extend(
            [
                kwargs[keyword] if keyword in kwargs else default
                for keyword, default in keyword_defaults[len(args) :]
            ]
        )
        # funcsigs._empty marks parameters that had no default and were not
        # supplied by the caller.
        if any([arg is funcsigs._empty for arg in args]):
            raise Exception(
                "Not enough arguments were provided to {}.".format(func_name)
            )
        if _mode() == PYTHON_MODE:
            # In PYTHON_MODE, remote calls simply execute the function. We copy
            # the arguments to prevent the function call from mutating them and
            # to match the usual behavior of immutable remote objects.
            try:
                _env()._running_remote_function_locally = True
                result = func(*copy.deepcopy(args))
            finally:
                _env()._reinitialize()
                _env()._running_remote_function_locally = False
            return result
        # Normal path: submit the task and return object ID future(s).
        objectids = _submit_task(function_id, func_name, args)
        if len(objectids) == 1:
            return objectids[0]
        elif len(objectids) > 1:
            return objectids
    def func_executor(arguments):
        """This gets run when the remote function is executed."""
        result = func(*arguments)
        return result
    def func_invoker(*args, **kwargs):
        """This is used to invoke the function."""
        raise Exception(
            "Remote functions cannot be called directly. Instead "
            "of running '{}()', try '{}.remote()'.".format(func_name, func_name)
        )
    func_invoker.remote = func_call
    func_invoker.executor = func_executor
    func_invoker.is_remote = True
    func_name = "{}.{}".format(func.__module__, func.__name__)
    func_invoker.func_name = func_name
    if sys.version_info >= (3, 0):
        func_invoker.__doc__ = func.__doc__
    else:
        # Python 2 spelling of the docstring attribute.
        func_invoker.func_doc = func.func_doc
    # Record the signature so func_call can map keyword arguments and defaults.
    sig_params = [(k, v) for k, v in funcsigs.signature(func).parameters.items()]
    keyword_defaults = [(k, v.default) for k, v in sig_params]
    has_vararg_param = any([v.kind == v.VAR_POSITIONAL for k, v in sig_params])
    func_invoker.has_vararg_param = has_vararg_param
    has_kwargs_param = any([v.kind == v.VAR_KEYWORD for k, v in sig_params])
    check_signature_supported(
        has_kwargs_param, has_vararg_param, keyword_defaults, func_name
    )
    # Everything ready - export the function
    if worker.mode in [SCRIPT_MODE, SILENT_MODE]:
        export_remote_function(
            function_id, func_name, func, num_return_vals, num_cpus, num_gpus
        )
    elif worker.mode is None:
        # Not yet connected: remember the function so it can be exported later.
        worker.cached_remote_functions.append(
            (function_id, func_name, func, num_return_vals, num_cpus, num_gpus)
        )
    return func_invoker
|
https://github.com/ray-project/ray/issues/349
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/rkn/Workspace/ray/python/ray/worker.py", line 1886, in remote
return make_remote_decorator(num_return_vals, num_cpus, num_gpus)(args[0])
File "/Users/rkn/Workspace/ray/python/ray/worker.py", line 1798, in remote_decorator
function_id_hash.update(inspect.getsource(func).encode("ascii"))
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 938, in getsource
lines, lnum = getsourcelines(object)
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 925, in getsourcelines
lines, lnum = findsource(object)
File "/Users/rkn/anaconda3/lib/python3.6/inspect.py", line 756, in findsource
raise OSError('could not get source code')
OSError: could not get source code
|
OSError
|
def is_adhoc_metric(metric: Metric) -> bool:
    """Return True if *metric* is an ad-hoc metric definition (a dict)
    rather than the name of a saved metric."""
    if isinstance(metric, dict):
        return True
    return False
|
def is_adhoc_metric(metric: Metric) -> bool:
    """Return True if *metric* is an ad-hoc metric (a dict), False if it is
    the name of a saved metric.

    This used to also validate the dict's contents (expressionType, column,
    aggregate, label, ...) and returned False for incomplete ad-hoc metrics.
    Callers then mistook the dict for a metric *name* and e.g. tried to add
    it to a set, crashing with ``TypeError: unhashable type: 'dict'``.
    Classification and validation are separate concerns: any dict is
    therefore treated as ad-hoc here, and content validation happens
    elsewhere.
    """
    return isinstance(metric, dict)
|
https://github.com/apache/superset/issues/10956
|
Sorry, something went wrong
500 - Internal Server Error
Stacktrace
Traceback (most recent call last):
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/flask/app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/flask/app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/flask/app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/flask_appbuilder/security/decorators.py", line 109, in wraps
return f(self, *args, **kwargs)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/core.py", line 1616, in dashboard
for datasource, slices in datasources.items()
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/core.py", line 1616, in <dictcomp>
for datasource, slices in datasources.items()
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/connectors/base/models.py", line 287, in data_for_slices
metric_names.add(utils.get_metric_name(metric))
TypeError: unhashable type: 'dict'
|
TypeError
|
def run_query(  # druid
    self,
    metrics: List[Metric],
    granularity: str,
    from_dttm: datetime,
    to_dttm: datetime,
    columns: Optional[List[str]] = None,
    groupby: Optional[List[str]] = None,
    filter: Optional[List[Dict[str, Any]]] = None,
    is_timeseries: Optional[bool] = True,
    timeseries_limit: Optional[int] = None,
    timeseries_limit_metric: Optional[Metric] = None,
    row_limit: Optional[int] = None,
    row_offset: Optional[int] = None,
    inner_from_dttm: Optional[datetime] = None,
    inner_to_dttm: Optional[datetime] = None,
    orderby: Optional[Any] = None,
    extras: Optional[Dict[str, Any]] = None,
    phase: int = 2,
    client: Optional["PyDruid"] = None,
    order_desc: bool = True,
) -> str:
    """Run a query against Druid and return the issued query string(s).

    Despite the historical wording ("returns a dataframe"), the return value
    is the JSON text of the Druid queries executed (for display/debugging);
    result rows are read back by the caller via ``client.export_pandas()``.
    Dispatches to a scan, timeseries, (two-phase) topn, or (two-phase)
    groupby query depending on the requested columns/grouping/limits.
    When ``phase == 1`` only the first phase of a two-phase query runs.
    """
    # TODO refactor into using a TBD Query object
    client = client or self.cluster.get_pydruid_client()
    row_limit = row_limit or conf.get("ROW_LIMIT")
    if row_offset:
        raise SupersetException("Offset not implemented for Druid connector")
    if not is_timeseries:
        granularity = "all"
    if granularity == "all":
        # A single "all" bucket never needs the two-phase treatment.
        phase = 1
    inner_from_dttm = inner_from_dttm or from_dttm
    inner_to_dttm = inner_to_dttm or to_dttm
    timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None
    query_str = ""
    metrics_dict = {m.metric_name: m for m in self.metrics}
    columns_dict = {c.column_name: c for c in self.columns}
    # Pre-0.11 Druid clusters need metric objects sanitized for compatibility.
    if self.cluster and LooseVersion(self.cluster.get_druid_version()) < LooseVersion(
        "0.11.0"
    ):
        for metric in metrics:
            self.sanitize_metric_object(metric)
        if timeseries_limit_metric:
            self.sanitize_metric_object(timeseries_limit_metric)
    aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(
        metrics, metrics_dict
    )
    # the dimensions list with dimensionSpecs expanded
    columns_ = columns if IS_SIP_38 else groupby
    dimensions = self.get_dimensions(columns_, columns_dict) if columns_ else []
    extras = extras or {}
    # Base query dict shared by every dispatch branch below; branches delete
    # or override keys as needed for their query type.
    qry = dict(
        datasource=self.datasource_name,
        dimensions=dimensions,
        aggregations=aggregations,
        granularity=DruidDatasource.granularity(
            granularity, timezone=timezone, origin=extras.get("druid_time_origin")
        ),
        post_aggregations=post_aggs,
        intervals=self.intervals_from_dttms(from_dttm, to_dttm),
    )
    if is_timeseries:
        qry["context"] = dict(skipEmptyBuckets=True)
    # NOTE: filters stays None when no filter is requested; it is reused as
    # the base for phase-2 filter merging further down.
    filters = (
        DruidDatasource.get_filters(filter, self.num_cols, columns_dict)
        if filter
        else None
    )
    if filters:
        qry["filter"] = filters
    if "having_druid" in extras:
        having_filters = self.get_having_filters(extras["having_druid"])
        if having_filters:
            qry["having"] = having_filters
    else:
        having_filters = None
    order_direction = "descending" if order_desc else "ascending"
    # Branch 1: plain column scan (raw rows, no aggregation).
    if (IS_SIP_38 and not metrics and columns and "__time" not in columns) or (
        not IS_SIP_38 and columns
    ):
        columns.append("__time")
        del qry["post_aggregations"]
        del qry["aggregations"]
        del qry["dimensions"]
        qry["columns"] = columns
        qry["metrics"] = []
        qry["granularity"] = "all"
        qry["limit"] = row_limit
        client.scan(**qry)
    # Branch 2: timeseries query (no grouping, no having filters).
    elif (IS_SIP_38 and columns) or (
        not IS_SIP_38 and not groupby and not having_filters
    ):
        logger.info("Running timeseries query for no groupby values")
        del qry["dimensions"]
        client.timeseries(**qry)
    # Branch 3: single-dimension descending query -> two-phase topn.
    elif (
        not having_filters
        and order_desc
        and (
            (IS_SIP_38 and columns and len(columns) == 1)
            or (not IS_SIP_38 and groupby and len(groupby) == 1)
        )
    ):
        dim = list(qry["dimensions"])[0]
        logger.info("Running two-phase topn query for dimension [{}]".format(dim))
        pre_qry = deepcopy(qry)
        order_by: Optional[str] = None
        if timeseries_limit_metric:
            order_by = utils.get_metric_name(timeseries_limit_metric)
            aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
                [timeseries_limit_metric], metrics_dict
            )
            if phase == 1:
                pre_qry["aggregations"].update(aggs_dict)
                pre_qry["post_aggregations"].update(post_aggs_dict)
            else:
                pre_qry["aggregations"] = aggs_dict
                pre_qry["post_aggregations"] = post_aggs_dict
        else:
            # No explicit sort metric: order by the first aggregation.
            agg_keys = qry["aggregations"].keys()
            order_by = list(agg_keys)[0] if agg_keys else None
        # Limit on the number of timeseries, doing a two-phases query
        pre_qry["granularity"] = "all"
        pre_qry["threshold"] = min(row_limit, timeseries_limit or row_limit)
        pre_qry["metric"] = order_by
        pre_qry["dimension"] = self._dimensions_to_values(qry["dimensions"])[0]
        del pre_qry["dimensions"]
        client.topn(**pre_qry)
        logger.info("Phase 1 Complete")
        if phase == 2:
            query_str += "// Two phase query\n// Phase 1\n"
        query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)
        query_str += "\n"
        if phase == 1:
            return query_str
        query_str += "// Phase 2 (built based on phase one's results)\n"
        df = client.export_pandas()
        if df is None:
            df = pd.DataFrame()
        # Restrict phase 2 to the dimension values found in phase 1,
        # merged with the user-supplied filters (which may be None).
        qry["filter"] = self._add_filter_from_pre_query_data(
            df, [pre_qry["dimension"]], filters
        )
        qry["threshold"] = timeseries_limit or 1000
        if row_limit and granularity == "all":
            qry["threshold"] = row_limit
        qry["dimension"] = dim
        del qry["dimensions"]
        qry["metric"] = list(qry["aggregations"].keys())[0]
        client.topn(**qry)
        logger.info("Phase 2 Complete")
    # Branch 4: multi-dimension grouping or having filters -> groupby.
    elif having_filters or ((IS_SIP_38 and columns) or (not IS_SIP_38 and groupby)):
        # If grouping on multiple fields or using a having filter
        # we have to force a groupby query
        logger.info("Running groupby query for dimensions [{}]".format(dimensions))
        if timeseries_limit and is_timeseries:
            logger.info("Running two-phase query for timeseries")
            pre_qry = deepcopy(qry)
            pre_qry_dims = self._dimensions_to_values(qry["dimensions"])
            # Can't use set on an array with dicts
            # Use set with non-dict items only
            non_dict_dims = list(
                set([x for x in pre_qry_dims if not isinstance(x, dict)])
            )
            dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)]
            pre_qry["dimensions"] = non_dict_dims + dict_dims  # type: ignore
            order_by = None
            if metrics:
                order_by = utils.get_metric_name(metrics[0])
            else:
                order_by = pre_qry_dims[0]  # type: ignore
            if timeseries_limit_metric:
                order_by = utils.get_metric_name(timeseries_limit_metric)
                aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
                    [timeseries_limit_metric], metrics_dict
                )
                if phase == 1:
                    pre_qry["aggregations"].update(aggs_dict)
                    pre_qry["post_aggregations"].update(post_aggs_dict)
                else:
                    pre_qry["aggregations"] = aggs_dict
                    pre_qry["post_aggregations"] = post_aggs_dict
            # Limit on the number of timeseries, doing a two-phases query
            pre_qry["granularity"] = "all"
            pre_qry["limit_spec"] = {
                "type": "default",
                "limit": min(timeseries_limit, row_limit),
                "intervals": self.intervals_from_dttms(inner_from_dttm, inner_to_dttm),
                "columns": [{"dimension": order_by, "direction": order_direction}],
            }
            client.groupby(**pre_qry)
            logger.info("Phase 1 Complete")
            query_str += "// Two phase query\n// Phase 1\n"
            query_str += json.dumps(
                client.query_builder.last_query.query_dict, indent=2
            )
            query_str += "\n"
            if phase == 1:
                return query_str
            query_str += "// Phase 2 (built based on phase one's results)\n"
            df = client.export_pandas()
            if df is None:
                df = pd.DataFrame()
            # Restrict phase 2 to the dimension values found in phase 1,
            # merged with the user-supplied filters (which may be None).
            qry["filter"] = self._add_filter_from_pre_query_data(
                df, pre_qry["dimensions"], filters
            )
            qry["limit_spec"] = None
        if row_limit:
            dimension_values = self._dimensions_to_values(dimensions)
            qry["limit_spec"] = {
                "type": "default",
                "limit": row_limit,
                "columns": [
                    {
                        "dimension": (
                            utils.get_metric_name(metrics[0])
                            if metrics
                            else dimension_values[0]
                        ),
                        "direction": order_direction,
                    }
                ],
            }
        client.groupby(**qry)
        logger.info("Query Complete")
    query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)
    return query_str
|
def run_query(  # druid
    self,
    metrics: List[Metric],
    granularity: str,
    from_dttm: datetime,
    to_dttm: datetime,
    columns: Optional[List[str]] = None,
    groupby: Optional[List[str]] = None,
    filter: Optional[List[Dict[str, Any]]] = None,
    is_timeseries: Optional[bool] = True,
    timeseries_limit: Optional[int] = None,
    timeseries_limit_metric: Optional[Metric] = None,
    row_limit: Optional[int] = None,
    row_offset: Optional[int] = None,
    inner_from_dttm: Optional[datetime] = None,
    inner_to_dttm: Optional[datetime] = None,
    orderby: Optional[Any] = None,
    extras: Optional[Dict[str, Any]] = None,
    phase: int = 2,
    client: Optional["PyDruid"] = None,
    order_desc: bool = True,
) -> str:
    """Run a query against Druid and return the issued query string(s).

    The return value is the JSON text of the Druid queries executed (for
    display/debugging); result rows are read back by the caller via
    ``client.export_pandas()``.  Dispatches to a scan, timeseries,
    (two-phase) topn, or (two-phase) groupby query depending on the
    requested columns/grouping/limits.

    Fix: the groupby phase-2 branch previously read ``qry["filter"]``,
    which is only set when the user supplied filters, raising
    ``KeyError: 'filter'`` for unfiltered two-phase timeseries queries.
    It now passes the ``filters`` local (possibly None) instead.
    """
    # TODO refactor into using a TBD Query object
    client = client or self.cluster.get_pydruid_client()
    row_limit = row_limit or conf.get("ROW_LIMIT")
    if row_offset:
        raise SupersetException("Offset not implemented for Druid connector")
    if not is_timeseries:
        granularity = "all"
    if granularity == "all":
        # A single "all" bucket never needs the two-phase treatment.
        phase = 1
    inner_from_dttm = inner_from_dttm or from_dttm
    inner_to_dttm = inner_to_dttm or to_dttm
    timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None
    query_str = ""
    metrics_dict = {m.metric_name: m for m in self.metrics}
    columns_dict = {c.column_name: c for c in self.columns}
    # Pre-0.11 Druid clusters need metric objects sanitized for compatibility.
    if self.cluster and LooseVersion(self.cluster.get_druid_version()) < LooseVersion(
        "0.11.0"
    ):
        for metric in metrics:
            self.sanitize_metric_object(metric)
        if timeseries_limit_metric:
            self.sanitize_metric_object(timeseries_limit_metric)
    aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs(
        metrics, metrics_dict
    )
    # the dimensions list with dimensionSpecs expanded
    columns_ = columns if IS_SIP_38 else groupby
    dimensions = self.get_dimensions(columns_, columns_dict) if columns_ else []
    extras = extras or {}
    # Base query dict shared by every dispatch branch below.
    qry = dict(
        datasource=self.datasource_name,
        dimensions=dimensions,
        aggregations=aggregations,
        granularity=DruidDatasource.granularity(
            granularity, timezone=timezone, origin=extras.get("druid_time_origin")
        ),
        post_aggregations=post_aggs,
        intervals=self.intervals_from_dttms(from_dttm, to_dttm),
    )
    if is_timeseries:
        qry["context"] = dict(skipEmptyBuckets=True)
    # filters stays None when no filter is requested; it is reused as the
    # base for phase-2 filter merging further down.
    filters = (
        DruidDatasource.get_filters(filter, self.num_cols, columns_dict)
        if filter
        else None
    )
    if filters:
        qry["filter"] = filters
    if "having_druid" in extras:
        having_filters = self.get_having_filters(extras["having_druid"])
        if having_filters:
            qry["having"] = having_filters
    else:
        having_filters = None
    order_direction = "descending" if order_desc else "ascending"
    # Branch 1: plain column scan (raw rows, no aggregation).
    if (IS_SIP_38 and not metrics and columns and "__time" not in columns) or (
        not IS_SIP_38 and columns
    ):
        columns.append("__time")
        del qry["post_aggregations"]
        del qry["aggregations"]
        del qry["dimensions"]
        qry["columns"] = columns
        qry["metrics"] = []
        qry["granularity"] = "all"
        qry["limit"] = row_limit
        client.scan(**qry)
    # Branch 2: timeseries query (no grouping, no having filters).
    elif (IS_SIP_38 and columns) or (
        not IS_SIP_38 and not groupby and not having_filters
    ):
        logger.info("Running timeseries query for no groupby values")
        del qry["dimensions"]
        client.timeseries(**qry)
    # Branch 3: single-dimension descending query -> two-phase topn.
    elif (
        not having_filters
        and order_desc
        and (
            (IS_SIP_38 and columns and len(columns) == 1)
            or (not IS_SIP_38 and groupby and len(groupby) == 1)
        )
    ):
        dim = list(qry["dimensions"])[0]
        logger.info("Running two-phase topn query for dimension [{}]".format(dim))
        pre_qry = deepcopy(qry)
        order_by: Optional[str] = None
        if timeseries_limit_metric:
            order_by = utils.get_metric_name(timeseries_limit_metric)
            aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
                [timeseries_limit_metric], metrics_dict
            )
            if phase == 1:
                pre_qry["aggregations"].update(aggs_dict)
                pre_qry["post_aggregations"].update(post_aggs_dict)
            else:
                pre_qry["aggregations"] = aggs_dict
                pre_qry["post_aggregations"] = post_aggs_dict
        else:
            # No explicit sort metric: order by the first aggregation.
            agg_keys = qry["aggregations"].keys()
            order_by = list(agg_keys)[0] if agg_keys else None
        # Limit on the number of timeseries, doing a two-phases query
        pre_qry["granularity"] = "all"
        pre_qry["threshold"] = min(row_limit, timeseries_limit or row_limit)
        pre_qry["metric"] = order_by
        pre_qry["dimension"] = self._dimensions_to_values(qry["dimensions"])[0]
        del pre_qry["dimensions"]
        client.topn(**pre_qry)
        logger.info("Phase 1 Complete")
        if phase == 2:
            query_str += "// Two phase query\n// Phase 1\n"
        query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)
        query_str += "\n"
        if phase == 1:
            return query_str
        query_str += "// Phase 2 (built based on phase one's results)\n"
        df = client.export_pandas()
        if df is None:
            df = pd.DataFrame()
        # Restrict phase 2 to the dimension values found in phase 1,
        # merged with the user-supplied filters (which may be None).
        qry["filter"] = self._add_filter_from_pre_query_data(
            df, [pre_qry["dimension"]], filters
        )
        qry["threshold"] = timeseries_limit or 1000
        if row_limit and granularity == "all":
            qry["threshold"] = row_limit
        qry["dimension"] = dim
        del qry["dimensions"]
        qry["metric"] = list(qry["aggregations"].keys())[0]
        client.topn(**qry)
        logger.info("Phase 2 Complete")
    # Branch 4: multi-dimension grouping or having filters -> groupby.
    elif having_filters or ((IS_SIP_38 and columns) or (not IS_SIP_38 and groupby)):
        # If grouping on multiple fields or using a having filter
        # we have to force a groupby query
        logger.info("Running groupby query for dimensions [{}]".format(dimensions))
        if timeseries_limit and is_timeseries:
            logger.info("Running two-phase query for timeseries")
            pre_qry = deepcopy(qry)
            pre_qry_dims = self._dimensions_to_values(qry["dimensions"])
            # Can't use set on an array with dicts
            # Use set with non-dict items only
            non_dict_dims = list(
                set([x for x in pre_qry_dims if not isinstance(x, dict)])
            )
            dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)]
            pre_qry["dimensions"] = non_dict_dims + dict_dims  # type: ignore
            order_by = None
            if metrics:
                order_by = utils.get_metric_name(metrics[0])
            else:
                order_by = pre_qry_dims[0]  # type: ignore
            if timeseries_limit_metric:
                order_by = utils.get_metric_name(timeseries_limit_metric)
                aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs(
                    [timeseries_limit_metric], metrics_dict
                )
                if phase == 1:
                    pre_qry["aggregations"].update(aggs_dict)
                    pre_qry["post_aggregations"].update(post_aggs_dict)
                else:
                    pre_qry["aggregations"] = aggs_dict
                    pre_qry["post_aggregations"] = post_aggs_dict
            # Limit on the number of timeseries, doing a two-phases query
            pre_qry["granularity"] = "all"
            pre_qry["limit_spec"] = {
                "type": "default",
                "limit": min(timeseries_limit, row_limit),
                "intervals": self.intervals_from_dttms(inner_from_dttm, inner_to_dttm),
                "columns": [{"dimension": order_by, "direction": order_direction}],
            }
            client.groupby(**pre_qry)
            logger.info("Phase 1 Complete")
            query_str += "// Two phase query\n// Phase 1\n"
            query_str += json.dumps(
                client.query_builder.last_query.query_dict, indent=2
            )
            query_str += "\n"
            if phase == 1:
                return query_str
            query_str += "// Phase 2 (built based on phase one's results)\n"
            df = client.export_pandas()
            if df is None:
                df = pd.DataFrame()
            # FIX: pass the `filters` local (possibly None) rather than
            # qry["filter"], which is absent when no filters were supplied
            # and raised KeyError: 'filter'.
            qry["filter"] = self._add_filter_from_pre_query_data(
                df, pre_qry["dimensions"], filters
            )
            qry["limit_spec"] = None
        if row_limit:
            dimension_values = self._dimensions_to_values(dimensions)
            qry["limit_spec"] = {
                "type": "default",
                "limit": row_limit,
                "columns": [
                    {
                        "dimension": (
                            utils.get_metric_name(metrics[0])
                            if metrics
                            else dimension_values[0]
                        ),
                        "direction": order_direction,
                    }
                ],
            }
        client.groupby(**qry)
        logger.info("Query Complete")
    query_str += json.dumps(client.query_builder.last_query.query_dict, indent=2)
    return query_str
https://github.com/apache/superset/issues/10928
|
Sep 17 07:57:14 superset[5963]: Traceback (most recent call last):
Sep 17 07:57:14 superset[5963]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 472, in get_df_payload
Sep 17 07:57:14 superset[5963]: df = self.get_df(query_obj)
Sep 17 07:57:14 superset[5963]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 250, in get_df
Sep 17 07:57:14 superset[5963]: self.results = self.datasource.query(query_obj)
Sep 17 07:57:14 superset[5963]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/connectors/druid/models.py", line 1440, in que
Sep 17 07:57:14 superset[5963]: query_str = self.get_query_str(client=client, query_obj=query_obj, phase=2)
Sep 17 07:57:14 superset[5963]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/connectors/druid/models.py", line 1017, in get
Sep 17 07:57:14 superset[5963]: return self.run_query(client=client, phase=phase, **query_obj)
Sep 17 07:57:14 superset[5963]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/connectors/druid/models.py", line 1399, in run
Sep 17 07:57:14 superset[5963]: df, pre_qry["dimensions"], qry["filter"]
Sep 17 07:57:14 superset[5963]: KeyError: 'filter'
|
KeyError
|
def cache_key(self, query_obj: QueryObject, **kwargs: Any) -> Optional[str]:
    """Compute the cache key for *query_obj*, or None when it is falsy.

    The key incorporates the datasource's extra cache keys, its last-change
    timestamp, and — only when row-level security is enabled — the ids of
    the applicable RLS filters, so cached results are user-scoped.
    """
    extra_cache_keys = self.datasource.get_extra_cache_keys(query_obj.to_dict())
    if not query_obj:
        return None
    if config["ENABLE_ROW_LEVEL_SECURITY"]:
        rls = security_manager.get_rls_ids(self.datasource)
    else:
        rls = []
    return query_obj.cache_key(
        datasource=self.datasource.uid,
        extra_cache_keys=extra_cache_keys,
        rls=rls,
        changed_on=self.datasource.changed_on,
        **kwargs,
    )
|
def cache_key(self, query_obj: QueryObject, **kwargs: Any) -> Optional[str]:
    """Compute the cache key for *query_obj*, or None when it is falsy.

    The key incorporates the datasource's extra cache keys, its last-change
    timestamp, and the ids of applicable row-level-security filters so that
    cached results are user-scoped.
    """
    extra_cache_keys = self.datasource.get_extra_cache_keys(query_obj.to_dict())
    cache_key = (
        query_obj.cache_key(
            datasource=self.datasource.uid,
            extra_cache_keys=extra_cache_keys,
            # Only consult RLS filters when the feature is enabled: querying
            # them unconditionally breaks for datasources without RLS
            # support (sqlalchemy CompileError: "Cannot compile Column
            # object until its 'name' is assigned") and wastes a DB query.
            rls=security_manager.get_rls_ids(self.datasource)
            if config["ENABLE_ROW_LEVEL_SECURITY"]
            else [],
            changed_on=self.datasource.changed_on,
            **kwargs,
        )
        if query_obj
        else None
    )
    return cache_key
https://github.com/apache/superset/issues/9545
|
| INFO:werkzeug:172.18.0.1 - - [15/Apr/2020 14:37:29] "GET /superset/annotation_json/3?form_data=%7B%22time_range%22%3A%22No+filter%22%7D HTTP/1.1" 500 -
superset_1 | 172.18.0.1 - - [15/Apr/2020 14:37:48] "GET /annotationlayermodelview/api/read HTTP/1.1" 200 -
superset_1 | INFO:werkzeug:172.18.0.1 - - [15/Apr/2020 14:37:48] "GET /annotationlayermodelview/api/read HTTP/1.1" 200 -
superset_1 | 127.0.0.1 - - [15/Apr/2020 14:37:50] "GET /health HTTP/1.1" 200 -
superset_1 | INFO:werkzeug:127.0.0.1 - - [15/Apr/2020 14:37:50] "GET /health HTTP/1.1" 200 -
superset_1 | DEBUG:superset.stats_logger:[stats_logger] (incr) annotation_json
superset_1 | DEBUG:parsedatetime:eval - with context - False, False
superset_1 | ERROR:superset.views.base:Cannot compile Column object until its 'name' is assigned.
superset_1 | Traceback (most recent call last):
superset_1 | File "/app/superset/views/base.py", line 120, in wraps
superset_1 | return f(self, *args, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/flask_appbuilder/security/decorators.py", line 151, in wraps
superset_1 | return f(self, *args, **kwargs)
superset_1 | File "/app/superset/views/core.py", line 674, in annotation_json
superset_1 | payload = viz_obj.get_payload()
superset_1 | File "/app/superset/viz.py", line 404, in get_payload
superset_1 | payload = self.get_df_payload(query_obj)
superset_1 | File "/app/superset/viz.py", line 417, in get_df_payload
superset_1 | cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
superset_1 | File "/app/superset/viz.py", line 396, in cache_key
superset_1 | cache_dict["rls"] = security_manager.get_rls_ids(self.datasource)
superset_1 | File "/app/superset/security/manager.py", line 943, in get_rls_ids
superset_1 | ids = [f.id for f in self.get_rls_filters(table)]
superset_1 | File "/app/superset/security/manager.py", line 933, in get_rls_filters
superset_1 | return query.all()
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3246, in all
superset_1 | return list(self)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
superset_1 | return self._execute_and_instances(context)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3430, in _execute_and_instances
superset_1 | result = conn.execute(querycontext.statement, self._params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 984, in execute
superset_1 | return meth(self, multiparams, params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
superset_1 | return connection._execute_clauseelement(self, multiparams, params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1094, in _execute_clauseelement
superset_1 | else None,
superset_1 | File "<string>", line 1, in <lambda>
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 468, in compile
superset_1 | return self._compiler(dialect, bind=bind, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 474, in _compiler
superset_1 | return dialect.statement_compiler(dialect, self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 590, in __init__
superset_1 | Compiled.__init__(self, dialect, statement, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
superset_1 | self.string = self.process(self.statement, **compile_kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
superset_1 | return obj._compiler_dispatch(self, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 2162, in visit_select
superset_1 | text, select, inner_columns, froms, byfrom, kwargs
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 2261, in _compose_select_body
superset_1 | t = select._whereclause._compiler_dispatch(self, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1003, in visit_clauselist
superset_1 | c._compiler_dispatch(self, **kw) for c in clauselist.clauses
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1001, in <genexpr>
superset_1 | s
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1003, in <genexpr>
superset_1 | c._compiler_dispatch(self, **kw) for c in clauselist.clauses
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1324, in visit_binary
superset_1 | return self._generate_generic_binary(binary, opstring, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1372, in _generate_generic_binary
superset_1 | self, eager_grouping=eager_grouping, **kw
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 869, in visit_column
superset_1 | name = self._fallback_column_name(column)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 861, in _fallback_column_name
superset_1 | "Cannot compile Column object until " "its 'name' is assigned."
superset_1 | sqlalchemy.exc.CompileError: Cannot compile Column object until its 'name' is assigned.
|
sqlalchemy.exc.CompileError
|
def get_sqla_query( # sqla
    self,
    metrics,
    granularity,
    from_dttm,
    to_dttm,
    columns=None,
    groupby=None,
    filter=None,
    is_timeseries=True,
    timeseries_limit=15,
    timeseries_limit_metric=None,
    row_limit=None,
    inner_from_dttm=None,
    inner_to_dttm=None,
    orderby=None,
    extras=None,
    order_desc=True,
) -> SqlaQuery:
    """Querying any sqla table from this common interface

    Assembles a SQLAlchemy SELECT from chart query parameters and returns a
    SqlaQuery tuple carrying the query itself, the column labels expected in
    the resulting dataframe, any prequeries that were executed (for the
    timeseries "top groups" limit), and extra cache keys collected while
    rendering Jinja templates.
    """
    # Values exposed to Jinja templating inside filters / free-form SQL.
    template_kwargs = {
        "from_dttm": from_dttm,
        "groupby": groupby,
        "metrics": metrics,
        "row_limit": row_limit,
        "to_dttm": to_dttm,
        "filter": filter,
        "columns": {col.column_name: col for col in self.columns},
    }
    is_sip_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")
    template_kwargs.update(self.template_params_dict)
    # Templates may append to this list to influence the cache key.
    extra_cache_keys: List[Any] = []
    template_kwargs["extra_cache_keys"] = extra_cache_keys
    template_processor = self.get_template_processor(**template_kwargs)
    db_engine_spec = self.database.db_engine_spec
    prequeries: List[str] = []
    orderby = orderby or []
    # For backward compatibility
    if granularity not in self.dttm_cols:
        granularity = self.main_dttm_col
    # Database spec supports join-free timeslot grouping
    time_groupby_inline = db_engine_spec.time_groupby_inline
    cols: Dict[str, Column] = {col.column_name: col for col in self.columns}
    metrics_dict: Dict[str, SqlMetric] = {m.metric_name: m for m in self.metrics}
    if not granularity and is_timeseries:
        raise Exception(
            _(
                "Datetime column not provided as part table configuration "
                "and is required by this type of chart"
            )
        )
    if not metrics and not columns and (is_sip_38 or (not is_sip_38 and not groupby)):
        raise Exception(_("Empty query?"))
    # Resolve each requested metric to a SQLA expression (ad-hoc or saved).
    metrics_exprs: List[ColumnElement] = []
    for m in metrics:
        if utils.is_adhoc_metric(m):
            metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
        elif m in metrics_dict:
            metrics_exprs.append(metrics_dict[m].get_sqla_col())
        else:
            raise Exception(_("Metric '%(metric)s' does not exist", metric=m))
    if metrics_exprs:
        main_metric_expr = metrics_exprs[0]
    else:
        # No metrics requested: fall back to a COUNT(*) labeled "ccount".
        main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
        main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
    select_exprs: List[Column] = []
    groupby_exprs_sans_timestamp: OrderedDict = OrderedDict()
    if (is_sip_38 and metrics and columns) or (not is_sip_38 and groupby):
        # dedup columns while preserving order
        groupby = list(dict.fromkeys(columns if is_sip_38 else groupby))
        select_exprs = []
        for s in groupby:
            if s in cols:
                outer = cols[s].get_sqla_col()
            else:
                # Free-form expression: wrap in parens to keep precedence.
                outer = literal_column(f"({s})")
                outer = self.make_sqla_column_compatible(outer, s)
            groupby_exprs_sans_timestamp[outer.name] = outer
            select_exprs.append(outer)
    elif columns:
        # Non-aggregated query: select raw columns, drop metric expressions.
        for s in columns:
            select_exprs.append(
                cols[s].get_sqla_col()
                if s in cols
                else self.make_sqla_column_compatible(literal_column(s))
            )
        metrics_exprs = []
    time_range_endpoints = extras.get("time_range_endpoints")
    groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
    if granularity:
        dttm_col = cols[granularity]
        time_grain = extras.get("time_grain_sqla")
        time_filters = []
        if is_timeseries:
            timestamp = dttm_col.get_timestamp_expression(time_grain)
            select_exprs += [timestamp]
            groupby_exprs_with_timestamp[timestamp.name] = timestamp
        # Use main dttm column to support index with secondary dttm columns.
        if (
            db_engine_spec.time_secondary_columns
            and self.main_dttm_col in self.dttm_cols
            and self.main_dttm_col != dttm_col.column_name
        ):
            time_filters.append(
                cols[self.main_dttm_col].get_time_filter(
                    from_dttm, to_dttm, time_range_endpoints
                )
            )
        time_filters.append(
            dttm_col.get_time_filter(from_dttm, to_dttm, time_range_endpoints)
        )
    select_exprs += metrics_exprs
    # Labels the caller expects to see back in the dataframe, pre-mutation.
    labels_expected = [c._df_label_expected for c in select_exprs]
    select_exprs = db_engine_spec.make_select_compatible(
        groupby_exprs_with_timestamp.values(), select_exprs
    )
    qry = sa.select(select_exprs)
    tbl = self.get_from_clause(template_processor)
    if (is_sip_38 and metrics) or (not is_sip_38 and not columns):
        qry = qry.group_by(*groupby_exprs_with_timestamp.values())
    # Translate each chart filter into a WHERE predicate.
    where_clause_and = []
    having_clause_and: List = []
    for flt in filter:
        if not all([flt.get(s) for s in ["col", "op"]]):
            continue
        col = flt["col"]
        op = flt["op"].upper()
        col_obj = cols.get(col)
        if col_obj:
            is_list_target = op in (
                utils.FilterOperator.IN.value,
                utils.FilterOperator.NOT_IN.value,
            )
            eq = self.filter_values_handler(
                values=flt.get("val"),
                target_column_is_numeric=col_obj.is_numeric,
                is_list_target=is_list_target,
            )
            if op in (
                utils.FilterOperator.IN.value,
                utils.FilterOperator.NOT_IN.value,
            ):
                cond = col_obj.get_sqla_col().in_(eq)
                if isinstance(eq, str) and NULL_STRING in eq:
                    # NOTE(review): `is None` here yields a Python bool, not a
                    # SQLA IS NULL clause — presumably intended as `== None`;
                    # confirm upstream before changing.
                    cond = or_(cond, col_obj.get_sqla_col() is None)
                if op == utils.FilterOperator.NOT_IN.value:
                    cond = ~cond
                where_clause_and.append(cond)
            else:
                if col_obj.is_numeric:
                    eq = utils.cast_to_num(flt["val"])
                if op == utils.FilterOperator.EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() == eq)
                elif op == utils.FilterOperator.NOT_EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() != eq)
                elif op == utils.FilterOperator.GREATER_THAN.value:
                    where_clause_and.append(col_obj.get_sqla_col() > eq)
                elif op == utils.FilterOperator.LESS_THAN.value:
                    where_clause_and.append(col_obj.get_sqla_col() < eq)
                elif op == utils.FilterOperator.GREATER_THAN_OR_EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() >= eq)
                elif op == utils.FilterOperator.LESS_THAN_OR_EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() <= eq)
                elif op == utils.FilterOperator.LIKE.value:
                    where_clause_and.append(col_obj.get_sqla_col().like(eq))
                elif op == utils.FilterOperator.IS_NULL.value:
                    # `== None` is deliberate: SQLAlchemy compiles it to IS NULL.
                    where_clause_and.append(col_obj.get_sqla_col() == None)
                elif op == utils.FilterOperator.IS_NOT_NULL.value:
                    where_clause_and.append(col_obj.get_sqla_col() != None)
                else:
                    raise Exception(_("Invalid filter operation type: %(op)s", op=op))
    # Only consult row-level-security filters when the feature is enabled;
    # applying them unconditionally breaks deployments without RLS config.
    if config["ENABLE_ROW_LEVEL_SECURITY"]:
        where_clause_and += self._get_sqla_row_level_filters(template_processor)
    if extras:
        where = extras.get("where")
        if where:
            where = template_processor.process_template(where)
            where_clause_and += [sa.text("({})".format(where))]
        having = extras.get("having")
        if having:
            having = template_processor.process_template(having)
            having_clause_and += [sa.text("({})".format(having))]
    if granularity:
        qry = qry.where(and_(*(time_filters + where_clause_and)))
    else:
        qry = qry.where(and_(*where_clause_and))
    qry = qry.having(and_(*having_clause_and))
    # Default ordering: main metric, descending unless order_desc is False.
    if not orderby and ((is_sip_38 and metrics) or (not is_sip_38 and not columns)):
        orderby = [(main_metric_expr, not order_desc)]
    # To ensure correct handling of the ORDER BY labeling we need to reference the
    # metric instance if defined in the SELECT clause.
    metrics_exprs_by_label = {m._label: m for m in metrics_exprs}
    for col, ascending in orderby:
        direction = asc if ascending else desc
        if utils.is_adhoc_metric(col):
            col = self.adhoc_metric_to_sqla(col, cols)
        elif col in cols:
            col = cols[col].get_sqla_col()
        if isinstance(col, Label) and col._label in metrics_exprs_by_label:
            col = metrics_exprs_by_label[col._label]
        qry = qry.order_by(direction(col))
    if row_limit:
        qry = qry.limit(row_limit)
    # Timeseries-limit handling: restrict the series to the top N groups.
    if (
        is_timeseries
        and timeseries_limit
        and not time_groupby_inline
        and ((is_sip_38 and columns) or (not is_sip_38 and groupby))
    ):
        if self.database.db_engine_spec.allows_joins:
            # some sql dialects require for order by expressions
            # to also be in the select clause -- others, e.g. vertica,
            # require a unique inner alias
            inner_main_metric_expr = self.make_sqla_column_compatible(
                main_metric_expr, "mme_inner__"
            )
            inner_groupby_exprs = []
            inner_select_exprs = []
            for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
            inner_select_exprs += [inner_main_metric_expr]
            subq = select(inner_select_exprs).select_from(tbl)
            inner_time_filter = dttm_col.get_time_filter(
                inner_from_dttm or from_dttm,
                inner_to_dttm or to_dttm,
                time_range_endpoints,
            )
            subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
            subq = subq.group_by(*inner_groupby_exprs)
            ob = inner_main_metric_expr
            if timeseries_limit_metric:
                ob = self._get_timeseries_orderby(
                    timeseries_limit_metric, metrics_dict, cols
                )
            direction = desc if order_desc else asc
            subq = subq.order_by(direction(ob))
            subq = subq.limit(timeseries_limit)
            on_clause = []
            for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                # in this case the column name, not the alias, needs to be
                # conditionally mutated, as it refers to the column alias in
                # the inner query
                col_name = db_engine_spec.make_label_compatible(gby_name + "__")
                on_clause.append(gby_obj == column(col_name))
            tbl = tbl.join(subq.alias(), and_(*on_clause))
        else:
            # Dialect without JOIN support: run a prequery to find the top
            # groups, then constrain the main query to them.
            if timeseries_limit_metric:
                orderby = [
                    (
                        self._get_timeseries_orderby(
                            timeseries_limit_metric, metrics_dict, cols
                        ),
                        False,
                    )
                ]
            # run prequery to get top groups
            prequery_obj = {
                "is_timeseries": False,
                "row_limit": timeseries_limit,
                "metrics": metrics,
                "granularity": granularity,
                "from_dttm": inner_from_dttm or from_dttm,
                "to_dttm": inner_to_dttm or to_dttm,
                "filter": filter,
                "orderby": orderby,
                "extras": extras,
                "columns": columns,
                "order_desc": True,
            }
            if not is_sip_38:
                prequery_obj["groupby"] = groupby
            result = self.query(prequery_obj)
            prequeries.append(result.query)
            dimensions = [
                c
                for c in result.df.columns
                if c not in metrics and c in groupby_exprs_sans_timestamp
            ]
            top_groups = self._get_top_groups(
                result.df, dimensions, groupby_exprs_sans_timestamp
            )
            qry = qry.where(top_groups)
    return SqlaQuery(
        extra_cache_keys=extra_cache_keys,
        labels_expected=labels_expected,
        sqla_query=qry.select_from(tbl),
        prequeries=prequeries,
    )
|
def get_sqla_query( # sqla
    self,
    metrics,
    granularity,
    from_dttm,
    to_dttm,
    columns=None,
    groupby=None,
    filter=None,
    is_timeseries=True,
    timeseries_limit=15,
    timeseries_limit_metric=None,
    row_limit=None,
    inner_from_dttm=None,
    inner_to_dttm=None,
    orderby=None,
    extras=None,
    order_desc=True,
) -> SqlaQuery:
    """Querying any sqla table from this common interface

    Assembles a SQLAlchemy SELECT from chart query parameters and returns a
    SqlaQuery tuple carrying the query itself, the column labels expected in
    the resulting dataframe, any prequeries that were executed (for the
    timeseries "top groups" limit), and extra cache keys collected while
    rendering Jinja templates.
    """
    # Values exposed to Jinja templating inside filters / free-form SQL.
    template_kwargs = {
        "from_dttm": from_dttm,
        "groupby": groupby,
        "metrics": metrics,
        "row_limit": row_limit,
        "to_dttm": to_dttm,
        "filter": filter,
        "columns": {col.column_name: col for col in self.columns},
    }
    is_sip_38 = is_feature_enabled("SIP_38_VIZ_REARCHITECTURE")
    template_kwargs.update(self.template_params_dict)
    # Templates may append to this list to influence the cache key.
    extra_cache_keys: List[Any] = []
    template_kwargs["extra_cache_keys"] = extra_cache_keys
    template_processor = self.get_template_processor(**template_kwargs)
    db_engine_spec = self.database.db_engine_spec
    prequeries: List[str] = []
    orderby = orderby or []
    # For backward compatibility
    if granularity not in self.dttm_cols:
        granularity = self.main_dttm_col
    # Database spec supports join-free timeslot grouping
    time_groupby_inline = db_engine_spec.time_groupby_inline
    cols: Dict[str, Column] = {col.column_name: col for col in self.columns}
    metrics_dict: Dict[str, SqlMetric] = {m.metric_name: m for m in self.metrics}
    if not granularity and is_timeseries:
        raise Exception(
            _(
                "Datetime column not provided as part table configuration "
                "and is required by this type of chart"
            )
        )
    if not metrics and not columns and (is_sip_38 or (not is_sip_38 and not groupby)):
        raise Exception(_("Empty query?"))
    # Resolve each requested metric to a SQLA expression (ad-hoc or saved).
    metrics_exprs: List[ColumnElement] = []
    for m in metrics:
        if utils.is_adhoc_metric(m):
            metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols))
        elif m in metrics_dict:
            metrics_exprs.append(metrics_dict[m].get_sqla_col())
        else:
            raise Exception(_("Metric '%(metric)s' does not exist", metric=m))
    if metrics_exprs:
        main_metric_expr = metrics_exprs[0]
    else:
        # No metrics requested: fall back to a COUNT(*) labeled "ccount".
        main_metric_expr, label = literal_column("COUNT(*)"), "ccount"
        main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label)
    select_exprs: List[Column] = []
    groupby_exprs_sans_timestamp: OrderedDict = OrderedDict()
    if (is_sip_38 and metrics and columns) or (not is_sip_38 and groupby):
        # dedup columns while preserving order
        groupby = list(dict.fromkeys(columns if is_sip_38 else groupby))
        select_exprs = []
        for s in groupby:
            if s in cols:
                outer = cols[s].get_sqla_col()
            else:
                # Free-form expression: wrap in parens to keep precedence.
                outer = literal_column(f"({s})")
                outer = self.make_sqla_column_compatible(outer, s)
            groupby_exprs_sans_timestamp[outer.name] = outer
            select_exprs.append(outer)
    elif columns:
        # Non-aggregated query: select raw columns, drop metric expressions.
        for s in columns:
            select_exprs.append(
                cols[s].get_sqla_col()
                if s in cols
                else self.make_sqla_column_compatible(literal_column(s))
            )
        metrics_exprs = []
    time_range_endpoints = extras.get("time_range_endpoints")
    groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items())
    if granularity:
        dttm_col = cols[granularity]
        time_grain = extras.get("time_grain_sqla")
        time_filters = []
        if is_timeseries:
            timestamp = dttm_col.get_timestamp_expression(time_grain)
            select_exprs += [timestamp]
            groupby_exprs_with_timestamp[timestamp.name] = timestamp
        # Use main dttm column to support index with secondary dttm columns.
        if (
            db_engine_spec.time_secondary_columns
            and self.main_dttm_col in self.dttm_cols
            and self.main_dttm_col != dttm_col.column_name
        ):
            time_filters.append(
                cols[self.main_dttm_col].get_time_filter(
                    from_dttm, to_dttm, time_range_endpoints
                )
            )
        time_filters.append(
            dttm_col.get_time_filter(from_dttm, to_dttm, time_range_endpoints)
        )
    select_exprs += metrics_exprs
    # Labels the caller expects to see back in the dataframe, pre-mutation.
    labels_expected = [c._df_label_expected for c in select_exprs]
    select_exprs = db_engine_spec.make_select_compatible(
        groupby_exprs_with_timestamp.values(), select_exprs
    )
    qry = sa.select(select_exprs)
    tbl = self.get_from_clause(template_processor)
    if (is_sip_38 and metrics) or (not is_sip_38 and not columns):
        qry = qry.group_by(*groupby_exprs_with_timestamp.values())
    # Translate each chart filter into a WHERE predicate.
    where_clause_and = []
    having_clause_and: List = []
    for flt in filter:
        if not all([flt.get(s) for s in ["col", "op"]]):
            continue
        col = flt["col"]
        op = flt["op"].upper()
        col_obj = cols.get(col)
        if col_obj:
            is_list_target = op in (
                utils.FilterOperator.IN.value,
                utils.FilterOperator.NOT_IN.value,
            )
            eq = self.filter_values_handler(
                values=flt.get("val"),
                target_column_is_numeric=col_obj.is_numeric,
                is_list_target=is_list_target,
            )
            if op in (
                utils.FilterOperator.IN.value,
                utils.FilterOperator.NOT_IN.value,
            ):
                cond = col_obj.get_sqla_col().in_(eq)
                if isinstance(eq, str) and NULL_STRING in eq:
                    cond = or_(cond, col_obj.get_sqla_col() is None)
                if op == utils.FilterOperator.NOT_IN.value:
                    cond = ~cond
                where_clause_and.append(cond)
            else:
                if col_obj.is_numeric:
                    eq = utils.cast_to_num(flt["val"])
                if op == utils.FilterOperator.EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() == eq)
                elif op == utils.FilterOperator.NOT_EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() != eq)
                elif op == utils.FilterOperator.GREATER_THAN.value:
                    where_clause_and.append(col_obj.get_sqla_col() > eq)
                elif op == utils.FilterOperator.LESS_THAN.value:
                    where_clause_and.append(col_obj.get_sqla_col() < eq)
                elif op == utils.FilterOperator.GREATER_THAN_OR_EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() >= eq)
                elif op == utils.FilterOperator.LESS_THAN_OR_EQUALS.value:
                    where_clause_and.append(col_obj.get_sqla_col() <= eq)
                elif op == utils.FilterOperator.LIKE.value:
                    where_clause_and.append(col_obj.get_sqla_col().like(eq))
                elif op == utils.FilterOperator.IS_NULL.value:
                    # `== None` is deliberate: SQLAlchemy compiles it to IS NULL.
                    where_clause_and.append(col_obj.get_sqla_col() == None)
                elif op == utils.FilterOperator.IS_NOT_NULL.value:
                    where_clause_and.append(col_obj.get_sqla_col() != None)
                else:
                    raise Exception(_("Invalid filter operation type: %(op)s", op=op))
    # FIX: only apply row-level-security filters when the feature is enabled.
    # Calling _get_sqla_row_level_filters unconditionally raises
    # "CompileError: Cannot compile Column object until its 'name' is
    # assigned" on deployments without RLS configuration (see traceback).
    if config["ENABLE_ROW_LEVEL_SECURITY"]:
        where_clause_and += self._get_sqla_row_level_filters(template_processor)
    if extras:
        where = extras.get("where")
        if where:
            where = template_processor.process_template(where)
            where_clause_and += [sa.text("({})".format(where))]
        having = extras.get("having")
        if having:
            having = template_processor.process_template(having)
            having_clause_and += [sa.text("({})".format(having))]
    if granularity:
        qry = qry.where(and_(*(time_filters + where_clause_and)))
    else:
        qry = qry.where(and_(*where_clause_and))
    qry = qry.having(and_(*having_clause_and))
    # Default ordering: main metric, descending unless order_desc is False.
    if not orderby and ((is_sip_38 and metrics) or (not is_sip_38 and not columns)):
        orderby = [(main_metric_expr, not order_desc)]
    # To ensure correct handling of the ORDER BY labeling we need to reference the
    # metric instance if defined in the SELECT clause.
    metrics_exprs_by_label = {m._label: m for m in metrics_exprs}
    for col, ascending in orderby:
        direction = asc if ascending else desc
        if utils.is_adhoc_metric(col):
            col = self.adhoc_metric_to_sqla(col, cols)
        elif col in cols:
            col = cols[col].get_sqla_col()
        if isinstance(col, Label) and col._label in metrics_exprs_by_label:
            col = metrics_exprs_by_label[col._label]
        qry = qry.order_by(direction(col))
    if row_limit:
        qry = qry.limit(row_limit)
    # Timeseries-limit handling: restrict the series to the top N groups.
    if (
        is_timeseries
        and timeseries_limit
        and not time_groupby_inline
        and ((is_sip_38 and columns) or (not is_sip_38 and groupby))
    ):
        if self.database.db_engine_spec.allows_joins:
            # some sql dialects require for order by expressions
            # to also be in the select clause -- others, e.g. vertica,
            # require a unique inner alias
            inner_main_metric_expr = self.make_sqla_column_compatible(
                main_metric_expr, "mme_inner__"
            )
            inner_groupby_exprs = []
            inner_select_exprs = []
            for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                inner = self.make_sqla_column_compatible(gby_obj, gby_name + "__")
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
            inner_select_exprs += [inner_main_metric_expr]
            subq = select(inner_select_exprs).select_from(tbl)
            inner_time_filter = dttm_col.get_time_filter(
                inner_from_dttm or from_dttm,
                inner_to_dttm or to_dttm,
                time_range_endpoints,
            )
            subq = subq.where(and_(*(where_clause_and + [inner_time_filter])))
            subq = subq.group_by(*inner_groupby_exprs)
            ob = inner_main_metric_expr
            if timeseries_limit_metric:
                ob = self._get_timeseries_orderby(
                    timeseries_limit_metric, metrics_dict, cols
                )
            direction = desc if order_desc else asc
            subq = subq.order_by(direction(ob))
            subq = subq.limit(timeseries_limit)
            on_clause = []
            for gby_name, gby_obj in groupby_exprs_sans_timestamp.items():
                # in this case the column name, not the alias, needs to be
                # conditionally mutated, as it refers to the column alias in
                # the inner query
                col_name = db_engine_spec.make_label_compatible(gby_name + "__")
                on_clause.append(gby_obj == column(col_name))
            tbl = tbl.join(subq.alias(), and_(*on_clause))
        else:
            # Dialect without JOIN support: run a prequery to find the top
            # groups, then constrain the main query to them.
            if timeseries_limit_metric:
                orderby = [
                    (
                        self._get_timeseries_orderby(
                            timeseries_limit_metric, metrics_dict, cols
                        ),
                        False,
                    )
                ]
            # run prequery to get top groups
            prequery_obj = {
                "is_timeseries": False,
                "row_limit": timeseries_limit,
                "metrics": metrics,
                "granularity": granularity,
                "from_dttm": inner_from_dttm or from_dttm,
                "to_dttm": inner_to_dttm or to_dttm,
                "filter": filter,
                "orderby": orderby,
                "extras": extras,
                "columns": columns,
                "order_desc": True,
            }
            if not is_sip_38:
                prequery_obj["groupby"] = groupby
            result = self.query(prequery_obj)
            prequeries.append(result.query)
            dimensions = [
                c
                for c in result.df.columns
                if c not in metrics and c in groupby_exprs_sans_timestamp
            ]
            top_groups = self._get_top_groups(
                result.df, dimensions, groupby_exprs_sans_timestamp
            )
            qry = qry.where(top_groups)
    return SqlaQuery(
        extra_cache_keys=extra_cache_keys,
        labels_expected=labels_expected,
        sqla_query=qry.select_from(tbl),
        prequeries=prequeries,
    )
|
https://github.com/apache/superset/issues/9545
|
| INFO:werkzeug:172.18.0.1 - - [15/Apr/2020 14:37:29] "GET /superset/annotation_json/3?form_data=%7B%22time_range%22%3A%22No+filter%22%7D HTTP/1.1" 500 -
superset_1 | 172.18.0.1 - - [15/Apr/2020 14:37:48] "GET /annotationlayermodelview/api/read HTTP/1.1" 200 -
superset_1 | INFO:werkzeug:172.18.0.1 - - [15/Apr/2020 14:37:48] "GET /annotationlayermodelview/api/read HTTP/1.1" 200 -
superset_1 | 127.0.0.1 - - [15/Apr/2020 14:37:50] "GET /health HTTP/1.1" 200 -
superset_1 | INFO:werkzeug:127.0.0.1 - - [15/Apr/2020 14:37:50] "GET /health HTTP/1.1" 200 -
superset_1 | DEBUG:superset.stats_logger:[stats_logger] (incr) annotation_json
superset_1 | DEBUG:parsedatetime:eval - with context - False, False
superset_1 | ERROR:superset.views.base:Cannot compile Column object until its 'name' is assigned.
superset_1 | Traceback (most recent call last):
superset_1 | File "/app/superset/views/base.py", line 120, in wraps
superset_1 | return f(self, *args, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/flask_appbuilder/security/decorators.py", line 151, in wraps
superset_1 | return f(self, *args, **kwargs)
superset_1 | File "/app/superset/views/core.py", line 674, in annotation_json
superset_1 | payload = viz_obj.get_payload()
superset_1 | File "/app/superset/viz.py", line 404, in get_payload
superset_1 | payload = self.get_df_payload(query_obj)
superset_1 | File "/app/superset/viz.py", line 417, in get_df_payload
superset_1 | cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
superset_1 | File "/app/superset/viz.py", line 396, in cache_key
superset_1 | cache_dict["rls"] = security_manager.get_rls_ids(self.datasource)
superset_1 | File "/app/superset/security/manager.py", line 943, in get_rls_ids
superset_1 | ids = [f.id for f in self.get_rls_filters(table)]
superset_1 | File "/app/superset/security/manager.py", line 933, in get_rls_filters
superset_1 | return query.all()
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3246, in all
superset_1 | return list(self)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
superset_1 | return self._execute_and_instances(context)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3430, in _execute_and_instances
superset_1 | result = conn.execute(querycontext.statement, self._params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 984, in execute
superset_1 | return meth(self, multiparams, params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
superset_1 | return connection._execute_clauseelement(self, multiparams, params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1094, in _execute_clauseelement
superset_1 | else None,
superset_1 | File "<string>", line 1, in <lambda>
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 468, in compile
superset_1 | return self._compiler(dialect, bind=bind, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 474, in _compiler
superset_1 | return dialect.statement_compiler(dialect, self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 590, in __init__
superset_1 | Compiled.__init__(self, dialect, statement, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
superset_1 | self.string = self.process(self.statement, **compile_kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
superset_1 | return obj._compiler_dispatch(self, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 2162, in visit_select
superset_1 | text, select, inner_columns, froms, byfrom, kwargs
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 2261, in _compose_select_body
superset_1 | t = select._whereclause._compiler_dispatch(self, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1003, in visit_clauselist
superset_1 | c._compiler_dispatch(self, **kw) for c in clauselist.clauses
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1001, in <genexpr>
superset_1 | s
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1003, in <genexpr>
superset_1 | c._compiler_dispatch(self, **kw) for c in clauselist.clauses
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1324, in visit_binary
superset_1 | return self._generate_generic_binary(binary, opstring, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1372, in _generate_generic_binary
superset_1 | self, eager_grouping=eager_grouping, **kw
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 869, in visit_column
superset_1 | name = self._fallback_column_name(column)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 861, in _fallback_column_name
superset_1 | "Cannot compile Column object until " "its 'name' is assigned."
superset_1 | sqlalchemy.exc.CompileError: Cannot compile Column object until its 'name' is assigned.
|
sqlalchemy.exc.CompileError
|
def cache_key(self, query_obj, **extra):
    """Compute an md5 cache key for a query object.

    The key is derived from the key/values of `query_obj` merged with any
    additional key/values supplied via `extra`.

    Hard datetime bounds are dropped and replaced by the user-provided
    time range, which may be relative (e.g. "5 days ago" or "now"), so
    that logically-identical queries share a key. `extra` is currently
    used by time-shift queries, which differ only in the stripped
    `from_dttm`/`to_dttm` values.
    """
    key_parts = copy.copy(query_obj)
    key_parts.update(extra)
    # Strip the resolved datetime bounds; the raw time range stands in.
    for bound in ("from_dttm", "to_dttm"):
        del key_parts[bound]
    key_parts["time_range"] = self.form_data.get("time_range")
    key_parts["datasource"] = self.datasource.uid
    key_parts["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
    # RLS filters affect results, so they must affect the key — but only
    # when the feature is switched on.
    if config["ENABLE_ROW_LEVEL_SECURITY"]:
        key_parts["rls"] = security_manager.get_rls_ids(self.datasource)
    key_parts["changed_on"] = self.datasource.changed_on
    serialized = self.json_dumps(key_parts, sort_keys=True)
    return hashlib.md5(serialized.encode("utf-8")).hexdigest()
|
def cache_key(self, query_obj, **extra):
    """Compute an md5 cache key for a query object.

    The cache key is made out of the key/values in `query_obj`, plus any
    other key/values in `extra`.
    We remove datetime bounds that are hard values, and replace them with
    the use-provided inputs to bounds, which may be time-relative (as in
    "5 days ago" or "now").
    The `extra` arguments are currently used by time shift queries, since
    different time shifts wil differ only in the `from_dttm` and `to_dttm`
    values which are stripped.
    """
    cache_dict = copy.copy(query_obj)
    cache_dict.update(extra)
    for k in ["from_dttm", "to_dttm"]:
        del cache_dict[k]
    cache_dict["time_range"] = self.form_data.get("time_range")
    cache_dict["datasource"] = self.datasource.uid
    cache_dict["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
    # FIX: fetch row-level-security ids only when the feature is enabled.
    # Doing so unconditionally queries RLS filters that were never
    # configured and raises "CompileError: Cannot compile Column object
    # until its 'name' is assigned" (see traceback above).
    if config["ENABLE_ROW_LEVEL_SECURITY"]:
        cache_dict["rls"] = security_manager.get_rls_ids(self.datasource)
    cache_dict["changed_on"] = self.datasource.changed_on
    json_data = self.json_dumps(cache_dict, sort_keys=True)
    return hashlib.md5(json_data.encode("utf-8")).hexdigest()
|
https://github.com/apache/superset/issues/9545
|
| INFO:werkzeug:172.18.0.1 - - [15/Apr/2020 14:37:29] "GET /superset/annotation_json/3?form_data=%7B%22time_range%22%3A%22No+filter%22%7D HTTP/1.1" 500 -
superset_1 | 172.18.0.1 - - [15/Apr/2020 14:37:48] "GET /annotationlayermodelview/api/read HTTP/1.1" 200 -
superset_1 | INFO:werkzeug:172.18.0.1 - - [15/Apr/2020 14:37:48] "GET /annotationlayermodelview/api/read HTTP/1.1" 200 -
superset_1 | 127.0.0.1 - - [15/Apr/2020 14:37:50] "GET /health HTTP/1.1" 200 -
superset_1 | INFO:werkzeug:127.0.0.1 - - [15/Apr/2020 14:37:50] "GET /health HTTP/1.1" 200 -
superset_1 | DEBUG:superset.stats_logger:[stats_logger] (incr) annotation_json
superset_1 | DEBUG:parsedatetime:eval - with context - False, False
superset_1 | ERROR:superset.views.base:Cannot compile Column object until its 'name' is assigned.
superset_1 | Traceback (most recent call last):
superset_1 | File "/app/superset/views/base.py", line 120, in wraps
superset_1 | return f(self, *args, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/flask_appbuilder/security/decorators.py", line 151, in wraps
superset_1 | return f(self, *args, **kwargs)
superset_1 | File "/app/superset/views/core.py", line 674, in annotation_json
superset_1 | payload = viz_obj.get_payload()
superset_1 | File "/app/superset/viz.py", line 404, in get_payload
superset_1 | payload = self.get_df_payload(query_obj)
superset_1 | File "/app/superset/viz.py", line 417, in get_df_payload
superset_1 | cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
superset_1 | File "/app/superset/viz.py", line 396, in cache_key
superset_1 | cache_dict["rls"] = security_manager.get_rls_ids(self.datasource)
superset_1 | File "/app/superset/security/manager.py", line 943, in get_rls_ids
superset_1 | ids = [f.id for f in self.get_rls_filters(table)]
superset_1 | File "/app/superset/security/manager.py", line 933, in get_rls_filters
superset_1 | return query.all()
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3246, in all
superset_1 | return list(self)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3405, in __iter__
superset_1 | return self._execute_and_instances(context)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/orm/query.py", line 3430, in _execute_and_instances
superset_1 | result = conn.execute(querycontext.statement, self._params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 984, in execute
superset_1 | return meth(self, multiparams, params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 293, in _execute_on_connection
superset_1 | return connection._execute_clauseelement(self, multiparams, params)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/engine/base.py", line 1094, in _execute_clauseelement
superset_1 | else None,
superset_1 | File "<string>", line 1, in <lambda>
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 468, in compile
superset_1 | return self._compiler(dialect, bind=bind, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/elements.py", line 474, in _compiler
superset_1 | return dialect.statement_compiler(dialect, self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 590, in __init__
superset_1 | Compiled.__init__(self, dialect, statement, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 319, in __init__
superset_1 | self.string = self.process(self.statement, **compile_kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 350, in process
superset_1 | return obj._compiler_dispatch(self, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 2162, in visit_select
superset_1 | text, select, inner_columns, froms, byfrom, kwargs
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 2261, in _compose_select_body
superset_1 | t = select._whereclause._compiler_dispatch(self, **kwargs)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1003, in visit_clauselist
superset_1 | c._compiler_dispatch(self, **kw) for c in clauselist.clauses
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1001, in <genexpr>
superset_1 | s
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1003, in <genexpr>
superset_1 | c._compiler_dispatch(self, **kw) for c in clauselist.clauses
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1324, in visit_binary
superset_1 | return self._generate_generic_binary(binary, opstring, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 1372, in _generate_generic_binary
superset_1 | self, eager_grouping=eager_grouping, **kw
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/visitors.py", line 95, in _compiler_dispatch
superset_1 | return meth(self, **kw)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 869, in visit_column
superset_1 | name = self._fallback_column_name(column)
superset_1 | File "/usr/local/lib/python3.6/site-packages/sqlalchemy/sql/compiler.py", line 861, in _fallback_column_name
superset_1 | "Cannot compile Column object until " "its 'name' is assigned."
superset_1 | sqlalchemy.exc.CompileError: Cannot compile Column object until its 'name' is assigned.
|
sqlalchemy.exc.CompileError
|
def _get_slice_data(schedule):
    """Fetch a slice's CSV export and build the email content for it.

    Depending on the schedule's delivery type the CSV is either rendered
    inline as an HTML table or attached to the email as a file.
    """
    slc = schedule.slice
    # Endpoint that returns the slice's data as CSV.
    csv_url = _get_url_path(
        "Superset.explore_json", csv="true", form_data=json.dumps({"slice_id": slc.id})
    )
    # URL to include in the email
    explore_url = _get_url_path("Superset.slice", slice_id=slc.id)

    # Authenticate the request by forwarding the session cookie.
    cookies = {}
    for cookie in _get_auth_cookies():
        cookies["session"] = cookie

    opener = urllib.request.build_opener()
    opener.addheaders.append(("Cookie", f"session={cookies['session']}"))
    response = opener.open(csv_url)
    if response.getcode() != 200:
        raise URLError(response.getcode())

    # TODO: Move to the csv module
    content = response.read()
    rows = [line.split(b",") for line in content.splitlines()]

    if schedule.delivery_type == EmailDeliveryType.inline:
        data = None
        # First CSV row holds the column headers; render the rest as HTML.
        columns = rows.pop(0)
        with app.app_context():
            body = render_template(
                "superset/reports/slice_data.html",
                columns=columns,
                rows=rows,
                name=slc.slice_name,
                link=explore_url,
            )
    elif schedule.delivery_type == EmailDeliveryType.attachment:
        data = {__("%(name)s.csv", name=slc.slice_name): content}
        body = __(
            '<b><a href="%(url)s">Explore in Superset</a></b><p></p>',
            name=slc.slice_name,
            url=explore_url,
        )

    return EmailContent(body, data, None)
|
def _get_slice_data(schedule):
    """Fetch a slice's CSV export and build the email content for it.

    Depending on the schedule's delivery type the CSV is either rendered
    inline as an HTML table or attached to the email as a file.
    """
    slc = schedule.slice
    slice_url = _get_url_path(
        "Superset.explore_json", csv="true", form_data=json.dumps({"slice_id": slc.id})
    )
    # URL to include in the email
    url = _get_url_path("Superset.slice", slice_id=slc.id)
    # Forward the session cookie so the request is authenticated.
    cookies = {}
    for cookie in _get_auth_cookies():
        cookies["session"] = cookie
    opener = urllib.request.build_opener()
    opener.addheaders.append(("Cookie", f"session={cookies['session']}"))
    response = opener.open(slice_url)
    if response.getcode() != 200:
        raise URLError(response.getcode())
    # BUG FIX: urllib returns an http.client.HTTPResponse, which exposes
    # the payload via read() — it has no `.content` attribute (that is the
    # `requests` API).  Read the body once and reuse it below.
    # TODO: Move to the csv module
    content = response.read()
    rows = [r.split(b",") for r in content.splitlines()]
    if schedule.delivery_type == EmailDeliveryType.inline:
        data = None
        # Parse the csv file and generate HTML
        columns = rows.pop(0)
        with app.app_context():
            body = render_template(
                "superset/reports/slice_data.html",
                columns=columns,
                rows=rows,
                name=slc.slice_name,
                link=url,
            )
    elif schedule.delivery_type == EmailDeliveryType.attachment:
        data = {__("%(name)s.csv", name=slc.slice_name): content}
        body = __(
            '<b><a href="%(url)s">Explore in Superset</a></b><p></p>',
            name=slc.slice_name,
            url=url,
        )
    return EmailContent(body, data, None)
|
https://github.com/apache/superset/issues/8784
|
Traceback (most recent call last):
File "/usr/local/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/home/superset/superset/tasks/schedules.py", line 367, in schedule_email_report
deliver_slice(schedule)
File "/home/superset/superset/tasks/schedules.py", line 334, in deliver_slice
email = _get_slice_data(schedule)
File "/home/superset/superset/tasks/schedules.py", line 267, in _get_slice_data
rows = [r.split(b",") for r in response.content.splitlines()]
AttributeError: 'HTTPResponse' object has no attribute 'content'
|
AttributeError
|
def get_filters(cls, raw_filters, num_cols, columns_dict) -> "Filter":
    """Given Superset filter data structure, returns pydruid Filter(s).

    :param raw_filters: list of dicts with "col", "op" and "val" entries,
        one per filter clause
    :param num_cols: collection of column names treated as numeric
    :param columns_dict: mapping of column name -> column definition, used
        to resolve Druid dimension specs / extraction functions
    :returns: all valid clauses AND-ed together into a single pydruid
        Filter, or None when no valid clause was found
    """
    filters = None
    for flt in raw_filters:
        col = flt.get("col")
        op = flt.get("op")
        eq = flt.get("val")
        # Skip malformed clauses; a missing value is only acceptable for
        # the null checks, which don't need one.
        if not col or not op or (eq is None and op not in ("IS NULL", "IS NOT NULL")):
            continue
        # Check if this dimension uses an extraction function
        # If so, create the appropriate pydruid extraction object
        column_def = columns_dict.get(col)
        dim_spec = column_def.dimension_spec if column_def else None
        extraction_fn = None
        if dim_spec and "extractionFn" in dim_spec:
            (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
        cond = None
        is_numeric_col = col in num_cols
        is_list_target = op in ("in", "not in")
        # Normalize the raw value (e.g. coerce list vs scalar) before use.
        eq = cls.filter_values_handler(
            eq,
            is_list_target=is_list_target,
            target_column_is_numeric=is_numeric_col,
        )
        # For these two ops, could have used Dimension,
        # but it doesn't support extraction functions
        if op == "==":
            cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn)
        elif op == "!=":
            cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn)
        elif op in ("in", "not in"):
            fields = []
            # ignore the filter if it has no value
            if not len(eq):
                continue
            # if it uses an extraction fn, use the "in" operator
            # as Dimension isn't supported
            elif extraction_fn is not None:
                cond = Filter(
                    dimension=col,
                    values=eq,
                    type="in",
                    extraction_function=extraction_fn,
                )
            elif len(eq) == 1:
                cond = Dimension(col) == eq[0]
            else:
                # OR together one equality filter per candidate value.
                for s in eq:
                    fields.append(Dimension(col) == s)
                cond = Filter(type="or", fields=fields)
            if op == "not in":
                cond = ~cond
        elif op == "regex":
            cond = Filter(
                extraction_function=extraction_fn,
                type="regex",
                pattern=eq,
                dimension=col,
            )
        # For the ops below, could have used pydruid's Bound,
        # but it doesn't support extraction functions
        elif op == ">=":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == "<=":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == ">":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                lowerStrict=True,
                upperStrict=False,
                dimension=col,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == "<":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                upperStrict=True,
                lowerStrict=False,
                dimension=col,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == "IS NULL":
            # Druid represents null as the empty string, so match on "".
            cond = Filter(dimension=col, value="")
        elif op == "IS NOT NULL":
            cond = ~Filter(dimension=col, value="")
        # Fold the clause into the running conjunction.
        if filters:
            filters = Filter(type="and", fields=[cond, filters])
        else:
            filters = cond
    return filters
|
def get_filters(cls, raw_filters, num_cols, columns_dict) -> "Filter":
    """Given Superset filter data structure, returns pydruid Filter(s).

    :param raw_filters: list of dicts with "col", "op" and "val" entries,
        one per filter clause
    :param num_cols: collection of column names treated as numeric
    :param columns_dict: mapping of column name -> column definition, used
        to resolve Druid dimension specs / extraction functions
    :returns: all valid clauses AND-ed together into a single pydruid
        Filter, or None when no valid clause was found
    """
    filters = None
    for flt in raw_filters:
        col = flt.get("col")
        op = flt.get("op")
        eq = flt.get("val")
        if not col or not op or (eq is None and op not in ("IS NULL", "IS NOT NULL")):
            continue
        # Check if this dimension uses an extraction function
        # If so, create the appropriate pydruid extraction object
        column_def = columns_dict.get(col)
        dim_spec = column_def.dimension_spec if column_def else None
        extraction_fn = None
        if dim_spec and "extractionFn" in dim_spec:
            (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)
        cond = None
        is_numeric_col = col in num_cols
        is_list_target = op in ("in", "not in")
        eq = cls.filter_values_handler(
            eq,
            is_list_target=is_list_target,
            target_column_is_numeric=is_numeric_col,
        )
        # For these two ops, could have used Dimension,
        # but it doesn't support extraction functions
        if op == "==":
            cond = Filter(dimension=col, value=eq, extraction_function=extraction_fn)
        elif op == "!=":
            cond = ~Filter(dimension=col, value=eq, extraction_function=extraction_fn)
        elif op in ("in", "not in"):
            fields = []
            # ignore the filter if it has no value
            if not len(eq):
                continue
            # if it uses an extraction fn, use the "in" operator
            # as Dimension isn't supported
            elif extraction_fn is not None:
                cond = Filter(
                    dimension=col,
                    values=eq,
                    type="in",
                    extraction_function=extraction_fn,
                )
            elif len(eq) == 1:
                cond = Dimension(col) == eq[0]
            else:
                for s in eq:
                    fields.append(Dimension(col) == s)
                cond = Filter(type="or", fields=fields)
            if op == "not in":
                cond = ~cond
        elif op == "regex":
            cond = Filter(
                extraction_function=extraction_fn,
                type="regex",
                pattern=eq,
                dimension=col,
            )
        # For the ops below, could have used pydruid's Bound,
        # but it doesn't support extraction functions
        elif op == ">=":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == "<=":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == ">":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                lowerStrict=True,
                upperStrict=False,
                dimension=col,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == "<":
            cond = Filter(
                type="bound",
                extraction_function=extraction_fn,
                upperStrict=True,
                lowerStrict=False,
                dimension=col,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == "IS NULL":
            # BUG FIX: `Dimension(col) is None` is a Python identity test
            # that evaluates to a plain bool; pydruid later fails with
            # "'bool' object has no attribute 'filter'".  Druid represents
            # null as the empty string, so filter on "" instead.
            cond = Filter(dimension=col, value="")
        elif op == "IS NOT NULL":
            cond = ~Filter(dimension=col, value="")
        if filters:
            filters = Filter(type="and", fields=[cond, filters])
        else:
            filters = cond
    return filters
|
https://github.com/apache/superset/issues/8676
|
Traceback (most recent call last):
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 418, in get_df_payload
df = self.get_df(query_obj)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 203, in get_df
self.results = self.datasource.query(query_obj)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/connectors/druid/models.py", line 1377, in query
query_str = self.get_query_str(client=client, query_obj=query_obj, phase=2)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/connectors/druid/models.py", line 984, in get_query_str
return self.run_query(client=client, phase=phase, **query_obj)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/connectors/druid/models.py", line 1355, in run_query
client.groupby(**qry)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pydruid/client.py", line 307, in groupby
query = self.query_builder.groupby(kwargs)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pydruid/query.py", line 367, in groupby
return self.build_query(query_type, args)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pydruid/query.py", line 295, in build_query
query_dict[key] = Filter.build_filter(val)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pydruid/utils/filters.py", line 119, in build_filter
filter = filter_obj.filter['filter']
AttributeError: 'bool' object has no attribute 'filter'
|
AttributeError
|
def get_url(chart):
    """Return external URL for warming up a given chart/table cache."""
    with app.test_request_context():
        # Build an absolute base URL (scheme://host:port) from the config.
        template = (
            "{SUPERSET_WEBSERVER_PROTOCOL}://"
            "{SUPERSET_WEBSERVER_ADDRESS}:"
            "{SUPERSET_WEBSERVER_PORT}"
        )
        return f"{template.format(**app.config)}{chart.url}"
|
def get_url(chart):
    """Return external URL for warming up a given chart/table cache.

    The URL must be absolute (scheme included): urllib rejects a bare
    "host:port" target with "URLError: unknown url type".
    """
    with app.test_request_context():
        baseurl = (
            "{SUPERSET_WEBSERVER_PROTOCOL}://"
            "{SUPERSET_WEBSERVER_ADDRESS}:"
            "{SUPERSET_WEBSERVER_PORT}".format(**app.config)
        )
        return f"{baseurl}{chart.url}"
|
https://github.com/apache/superset/issues/8461
|
[2019-10-28 15:00:00,015: INFO/ForkPoolWorker-6] cache-warmup[4345da24-b272-4af8-a8bc-2c1ee924191c]: Loading strategy
[2019-10-28 15:00:00,015: INFO/ForkPoolWorker-6] cache-warmup[4345da24-b272-4af8-a8bc-2c1ee924191c]: Loading TopNDashboardsStrategy
[2019-10-28 15:00:00,017: INFO/ForkPoolWorker-6] cache-warmup[4345da24-b272-4af8-a8bc-2c1ee924191c]: Success!
[2019-10-28 15:00:00,031: INFO/ForkPoolWorker-6] cache-warmup[4345da24-b272-4af8-a8bc-2c1ee924191c]: Fetching 0.0.0.0:8088/superset/explore/?form_data=%7B%22slice_id%22%3A%201%7D
[2019-10-28 15:00:00,031: ERROR/ForkPoolWorker-6] cache-warmup[4345da24-b272-4af8-a8bc-2c1ee924191c]: Error warming up cache!
Traceback (most recent call last):
File "/home/superset/superset/tasks/cache.py", line 285, in cache_warmup
request.urlopen(url)
File "/usr/local/lib/python3.6/urllib/request.py", line 223, in urlopen
return opener.open(url, data, timeout)
File "/usr/local/lib/python3.6/urllib/request.py", line 526, in open
response = self._open(req, data)
File "/usr/local/lib/python3.6/urllib/request.py", line 549, in _open
'unknown_open', req)
File "/usr/local/lib/python3.6/urllib/request.py", line 504, in _call_chain
result = func(*args)
File "/usr/local/lib/python3.6/urllib/request.py", line 1388, in unknown_open
raise URLError('unknown url type: %s' % type)
urllib.error.URLError: <urlopen error unknown url type: 0.0.0.0>
|
urllib.error.URLError
|
def configure_celery(self) -> None:
    """Configure the shared Celery app and wrap every task in a Flask
    application context so config/extensions are reachable from workers."""
    celery_app.config_from_object(self.config["CELERY_CONFIG"])
    celery_app.set_default()
    bound_flask_app = self.flask_app
    base_task = celery_app.Task

    class AppContextTask(base_task):  # type: ignore
        # pylint: disable=too-few-public-methods
        abstract = True

        # Intercept each task invocation and run it under an app context.
        def __call__(self, *args, **kwargs):
            with bound_flask_app.app_context():
                return base_task.__call__(self, *args, **kwargs)

    celery_app.Task = AppContextTask
|
def configure_celery(self) -> None:
    """Configure the shared Celery app and wrap tasks in an app context.

    BUG FIX: without pushing a Flask application context around each task
    call, worker code that touches `app.config` (e.g. SQL Lab's
    session_scope) dies with "RuntimeError: Working outside of
    application context".
    """
    celery_app.config_from_object(self.config["CELERY_CONFIG"])
    celery_app.set_default()
    flask_app = self.flask_app

    # Here, we want to ensure that every call into a Celery task has an
    # app context set up properly.
    task_base = celery_app.Task

    class AppContextTask(task_base):  # type: ignore
        # pylint: disable=too-few-public-methods
        abstract = True

        # Grab each call into the task and set up an app context
        def __call__(self, *args, **kwargs):
            with flask_app.app_context():
                return task_base.__call__(self, *args, **kwargs)

    celery_app.Task = AppContextTask
|
https://github.com/apache/superset/issues/8651
|
Task sql_lab.get_sql_results[051acb0d-30a5-4dbe-b30e-5b16bc9d8545] raised unexpected: RuntimeError('Working outside of application context.\n\nThis typically means that you attempted to use functionality that needed\nto interface with the current application object in some way. To solve\nthis, set up an application context with app.app_context(). See the\ndocumentation for more information.',)
Traceback (most recent call last):
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 154, in get_sql_results
with session_scope(not ctask.request.called_directly) as session:
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/contextlib2.py", line 79, in __enter__
return next(self.gen)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 117, in session_scope
app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 307, in _get_current_object
return self.__local()
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/flask/globals.py", line 52, in _find_app
raise RuntimeError(_app_ctx_err_msg)
RuntimeError: Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.
|
RuntimeError
|
def get_query_backoff_handler(details):
    """Backoff hook: log and count each failed attempt to fetch a query row."""
    query_id = details["kwargs"]["query_id"]
    attempt = details["tries"] - 1
    logger.error(f"Query with id `{query_id}` could not be retrieved")
    stats_logger.incr(f"error_attempting_orm_query_{attempt}")
    logger.error(f"Query {query_id}: Sleeping for a sec before retrying...")
|
def get_query_backoff_handler(details):
    """Backoff hook: log and count each failed attempt to fetch a query row.

    CONSISTENCY FIX: use the module-level `logger` rather than the root
    `logging` module, so messages carry module attribution and follow the
    module's logging configuration like the sibling functions here.
    """
    query_id = details["kwargs"]["query_id"]
    logger.error(f"Query with id `{query_id}` could not be retrieved")
    stats_logger.incr("error_attempting_orm_query_{}".format(details["tries"] - 1))
    logger.error(f"Query {query_id}: Sleeping for a sec before retrying...")
|
https://github.com/apache/superset/issues/8651
|
Task sql_lab.get_sql_results[051acb0d-30a5-4dbe-b30e-5b16bc9d8545] raised unexpected: RuntimeError('Working outside of application context.\n\nThis typically means that you attempted to use functionality that needed\nto interface with the current application object in some way. To solve\nthis, set up an application context with app.app_context(). See the\ndocumentation for more information.',)
Traceback (most recent call last):
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 154, in get_sql_results
with session_scope(not ctask.request.called_directly) as session:
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/contextlib2.py", line 79, in __enter__
return next(self.gen)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 117, in session_scope
app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 307, in _get_current_object
return self.__local()
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/flask/globals.py", line 52, in _find_app
raise RuntimeError(_app_ctx_err_msg)
RuntimeError: Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.
|
RuntimeError
|
def session_scope(nullpool):
    """Provide a transactional scope around a series of operations.

    Generator intended for use as a context manager (decorator not shown
    here — presumably ``contextlib.contextmanager``; verify at definition
    site).  Commits on clean exit, rolls back and re-raises on error, and
    always closes the session.

    :param nullpool: when truthy, build a dedicated engine/session using
        SQLAlchemy's NullPool so no pooled connections are shared (used
        for out-of-process work); otherwise reuse the app's `db.session`.
    """
    if nullpool:
        engine = sqlalchemy.create_engine(
            app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
        )
        session_class = sessionmaker()
        session_class.configure(bind=engine)
        session = session_class()
    else:
        session = db.session()
        session.commit()  # HACK
    try:
        yield session
        session.commit()
    except Exception as e:
        # Roll back and surface the error to the caller; log with stack.
        session.rollback()
        logger.exception(e)
        raise
    finally:
        session.close()
|
def session_scope(nullpool):
    """Provide a transactional scope around a series of operations.

    Commits on clean exit, rolls back and re-raises on error, always
    closes the session.

    :param nullpool: when truthy, build a dedicated NullPool-backed
        engine/session (no shared pooled connections); otherwise reuse
        the app's `db.session`.

    CONSISTENCY FIX: log through the module-level `logger` instead of the
    root `logging` module, matching the sibling functions here.
    """
    if nullpool:
        engine = sqlalchemy.create_engine(
            app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
        )
        session_class = sessionmaker()
        session_class.configure(bind=engine)
        session = session_class()
    else:
        session = db.session()
        session.commit()  # HACK
    try:
        yield session
        session.commit()
    except Exception as e:
        session.rollback()
        logger.exception(e)
        raise
    finally:
        session.close()
|
https://github.com/apache/superset/issues/8651
|
Task sql_lab.get_sql_results[051acb0d-30a5-4dbe-b30e-5b16bc9d8545] raised unexpected: RuntimeError('Working outside of application context.\n\nThis typically means that you attempted to use functionality that needed\nto interface with the current application object in some way. To solve\nthis, set up an application context with app.app_context(). See the\ndocumentation for more information.',)
Traceback (most recent call last):
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 154, in get_sql_results
with session_scope(not ctask.request.called_directly) as session:
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/contextlib2.py", line 79, in __enter__
return next(self.gen)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 117, in session_scope
app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 307, in _get_current_object
return self.__local()
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/flask/globals.py", line 52, in _find_app
raise RuntimeError(_app_ctx_err_msg)
RuntimeError: Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.
|
RuntimeError
|
def get_sql_results(
    ctask,
    query_id,
    rendered_query,
    return_results=True,
    store_results=False,
    user_name=None,
    start_time=None,
    expand_data=False,
):
    """Executes the sql query returns the results.

    :param ctask: the bound Celery task instance (or the function itself
        when invoked directly, outside a worker)
    :param query_id: primary key of the Query row being executed
    :param rendered_query: the SQL text after template rendering
    :param return_results: whether to return the payload to the caller
    :param store_results: whether to persist results to the results backend
    :param user_name: impersonation username, if any
    :param start_time: query start timestamp
    :param expand_data: whether to expand the result data structures
    """
    # Use a dedicated NullPool session when running inside a worker
    # (i.e. not called directly) so DB connections aren't shared.
    with session_scope(not ctask.request.called_directly) as session:
        try:
            return execute_sql_statements(
                ctask,
                query_id,
                rendered_query,
                return_results,
                store_results,
                user_name,
                session=session,
                start_time=start_time,
                expand_data=expand_data,
            )
        except Exception as e:
            # Last-resort handler: record the failure on the Query row
            # instead of letting the task fail opaquely.
            logger.exception(f"Query {query_id}: {e}")
            stats_logger.incr("error_sqllab_unhandled")
            query = get_query(query_id, session)
            return handle_query_error(str(e), query, session)
|
def get_sql_results(
    ctask,
    query_id,
    rendered_query,
    return_results=True,
    store_results=False,
    user_name=None,
    start_time=None,
    expand_data=False,
):
    """Executes the sql query returns the results.

    :param ctask: the bound Celery task instance (or the function itself
        when invoked directly, outside a worker)
    :param query_id: primary key of the Query row being executed
    :param rendered_query: the SQL text after template rendering

    CONSISTENCY FIX: log through the module-level `logger` instead of the
    root `logging` module, matching the sibling functions here.
    """
    with session_scope(not ctask.request.called_directly) as session:
        try:
            return execute_sql_statements(
                ctask,
                query_id,
                rendered_query,
                return_results,
                store_results,
                user_name,
                session=session,
                start_time=start_time,
                expand_data=expand_data,
            )
        except Exception as e:
            # Record the failure on the Query row instead of failing opaquely.
            logger.exception(f"Query {query_id}: {e}")
            stats_logger.incr("error_sqllab_unhandled")
            query = get_query(query_id, session)
            return handle_query_error(str(e), query, session)
|
https://github.com/apache/superset/issues/8651
|
Task sql_lab.get_sql_results[051acb0d-30a5-4dbe-b30e-5b16bc9d8545] raised unexpected: RuntimeError('Working outside of application context.\n\nThis typically means that you attempted to use functionality that needed\nto interface with the current application object in some way. To solve\nthis, set up an application context with app.app_context(). See the\ndocumentation for more information.',)
Traceback (most recent call last):
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 154, in get_sql_results
with session_scope(not ctask.request.called_directly) as session:
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/contextlib2.py", line 79, in __enter__
return next(self.gen)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 117, in session_scope
app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 307, in _get_current_object
return self.__local()
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/flask/globals.py", line 52, in _find_app
raise RuntimeError(_app_ctx_err_msg)
RuntimeError: Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.
|
RuntimeError
|
def execute_sql_statement(sql_statement, query, user_name, session, cursor):
    """Executes a single SQL statement.

    Applies security checks (read-only vs DML), the CTA rewrite, the row
    limit, and the configured SQL mutator, then runs the statement on the
    given cursor and fetches its data.

    :param sql_statement: one SQL statement to execute
    :param query: the Query ORM object being executed (mutated in place:
        `executed_sql`, `tmp_table_name`, `limit`, `select_as_cta_used`)
    :param user_name: impersonation username, if any
    :param session: active SQLAlchemy session used to persist query state
    :param cursor: open DB-API cursor to run the statement on
    :raises SqlLabSecurityException: for non-read-only SQL on a DB
        without DML allowed
    :raises SqlLabTimeoutException: when the celery soft time limit hits
    :raises SqlLabException: for any other execution error
    :returns: a SupersetDataFrame with the fetched data
    """
    query_id = query.id
    database = query.database
    db_engine_spec = database.db_engine_spec
    parsed_query = ParsedQuery(sql_statement)
    sql = parsed_query.stripped()
    SQL_MAX_ROWS = app.config["SQL_MAX_ROW"]
    # Reject writes unless the database explicitly allows DML.
    if not parsed_query.is_readonly() and not database.allow_dml:
        raise SqlLabSecurityException(
            _("Only `SELECT` statements are allowed against this database")
        )
    if query.select_as_cta:
        if not parsed_query.is_select():
            raise SqlLabException(
                _("Only `SELECT` statements can be used with the CREATE TABLE feature.")
            )
        if not query.tmp_table_name:
            # Derive a unique temp table name from user id and start time.
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = "tmp_{}_table_{}".format(
                query.user_id, start_dttm.strftime("%Y_%m_%d_%H_%M_%S")
            )
        sql = parsed_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    if parsed_query.is_select():
        # Cap the query at the configured maximum row count.
        if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
            query.limit = SQL_MAX_ROWS
        if query.limit:
            sql = database.apply_limit_to_sql(sql, query.limit)
    # Hook to allow environment-specific mutation (usually comments) to the SQL
    SQL_QUERY_MUTATOR = config["SQL_QUERY_MUTATOR"]
    if SQL_QUERY_MUTATOR:
        sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
    try:
        if log_query:
            log_query(
                query.database.sqlalchemy_uri,
                query.executed_sql,
                query.schema,
                user_name,
                __name__,
                security_manager,
            )
        # Persist the final SQL before running it, for audit/debugging.
        query.executed_sql = sql
        session.commit()
        with stats_timing("sqllab.query.time_executing_query", stats_logger):
            logger.info(f"Query {query_id}: Running query: \n{sql}")
            db_engine_spec.execute(cursor, sql, async_=True)
            logger.info(f"Query {query_id}: Handling cursor")
            db_engine_spec.handle_cursor(cursor, query, session)
        with stats_timing("sqllab.query.time_fetching_results", stats_logger):
            logger.debug(
                "Query {}: Fetching data for query object: {}".format(
                    query_id, query.to_dict()
                )
            )
            data = db_engine_spec.fetch_data(cursor, query.limit)
    except SoftTimeLimitExceeded as e:
        logger.exception(f"Query {query_id}: {e}")
        raise SqlLabTimeoutException(
            "SQL Lab timeout. This environment's policy is to kill queries "
            "after {} seconds.".format(SQLLAB_TIMEOUT)
        )
    except Exception as e:
        logger.exception(f"Query {query_id}: {e}")
        raise SqlLabException(db_engine_spec.extract_error_message(e))
    logger.debug(f"Query {query_id}: Fetching cursor description")
    cursor_description = cursor.description
    return SupersetDataFrame(data, cursor_description, db_engine_spec)
|
def execute_sql_statement(sql_statement, query, user_name, session, cursor):
    """Executes a single SQL statement.

    Applies security checks (read-only vs DML), the CTA rewrite, the row
    limit, and the configured SQL mutator, then runs the statement on the
    given cursor and fetches its data.

    CONSISTENCY FIX: log through the module-level `logger` instead of the
    root `logging` module, matching the sibling functions here.

    :param sql_statement: one SQL statement to execute
    :param query: the Query ORM object being executed (mutated in place)
    :param user_name: impersonation username, if any
    :param session: active SQLAlchemy session used to persist query state
    :param cursor: open DB-API cursor to run the statement on
    :returns: a SupersetDataFrame with the fetched data
    """
    query_id = query.id
    database = query.database
    db_engine_spec = database.db_engine_spec
    parsed_query = ParsedQuery(sql_statement)
    sql = parsed_query.stripped()
    SQL_MAX_ROWS = app.config["SQL_MAX_ROW"]
    if not parsed_query.is_readonly() and not database.allow_dml:
        raise SqlLabSecurityException(
            _("Only `SELECT` statements are allowed against this database")
        )
    if query.select_as_cta:
        if not parsed_query.is_select():
            raise SqlLabException(
                _("Only `SELECT` statements can be used with the CREATE TABLE feature.")
            )
        if not query.tmp_table_name:
            start_dttm = datetime.fromtimestamp(query.start_time)
            query.tmp_table_name = "tmp_{}_table_{}".format(
                query.user_id, start_dttm.strftime("%Y_%m_%d_%H_%M_%S")
            )
        sql = parsed_query.as_create_table(query.tmp_table_name)
        query.select_as_cta_used = True
    if parsed_query.is_select():
        if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
            query.limit = SQL_MAX_ROWS
        if query.limit:
            sql = database.apply_limit_to_sql(sql, query.limit)
    # Hook to allow environment-specific mutation (usually comments) to the SQL
    SQL_QUERY_MUTATOR = config["SQL_QUERY_MUTATOR"]
    if SQL_QUERY_MUTATOR:
        sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
    try:
        if log_query:
            log_query(
                query.database.sqlalchemy_uri,
                query.executed_sql,
                query.schema,
                user_name,
                __name__,
                security_manager,
            )
        query.executed_sql = sql
        session.commit()
        with stats_timing("sqllab.query.time_executing_query", stats_logger):
            logger.info(f"Query {query_id}: Running query: \n{sql}")
            db_engine_spec.execute(cursor, sql, async_=True)
            logger.info(f"Query {query_id}: Handling cursor")
            db_engine_spec.handle_cursor(cursor, query, session)
        with stats_timing("sqllab.query.time_fetching_results", stats_logger):
            logger.debug(
                "Query {}: Fetching data for query object: {}".format(
                    query_id, query.to_dict()
                )
            )
            data = db_engine_spec.fetch_data(cursor, query.limit)
    except SoftTimeLimitExceeded as e:
        logger.exception(f"Query {query_id}: {e}")
        raise SqlLabTimeoutException(
            "SQL Lab timeout. This environment's policy is to kill queries "
            "after {} seconds.".format(SQLLAB_TIMEOUT)
        )
    except Exception as e:
        logger.exception(f"Query {query_id}: {e}")
        raise SqlLabException(db_engine_spec.extract_error_message(e))
    logger.debug(f"Query {query_id}: Fetching cursor description")
    cursor_description = cursor.description
    return SupersetDataFrame(data, cursor_description, db_engine_spec)
|
https://github.com/apache/superset/issues/8651
|
Task sql_lab.get_sql_results[051acb0d-30a5-4dbe-b30e-5b16bc9d8545] raised unexpected: RuntimeError('Working outside of application context.\n\nThis typically means that you attempted to use functionality that needed\nto interface with the current application object in some way. To solve\nthis, set up an application context with app.app_context(). See the\ndocumentation for more information.',)
Traceback (most recent call last):
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 154, in get_sql_results
with session_scope(not ctask.request.called_directly) as session:
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/contextlib2.py", line 79, in __enter__
return next(self.gen)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 117, in session_scope
app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 307, in _get_current_object
return self.__local()
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/flask/globals.py", line 52, in _find_app
raise RuntimeError(_app_ctx_err_msg)
RuntimeError: Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.
|
RuntimeError
|
def _serialize_payload(
    payload: dict, use_msgpack: Optional[bool] = False
) -> Union[bytes, str]:
    """Serialize a results payload as msgpack bytes or a JSON string."""
    logger.debug(f"Serializing to msgpack: {use_msgpack}")
    if use_msgpack:
        return msgpack.dumps(payload, default=json_iso_dttm_ser, use_bin_type=True)
    return json.dumps(payload, default=json_iso_dttm_ser, ignore_nan=True)
|
def _serialize_payload(
    payload: dict, use_msgpack: Optional[bool] = False
) -> Union[bytes, str]:
    """Serialize *payload* either to msgpack bytes or to a JSON string.

    :param payload: the result payload to serialize
    :param use_msgpack: when truthy, emit binary msgpack; otherwise JSON text
    :return: ``bytes`` (msgpack) or ``str`` (JSON)
    """
    logging.debug(f"Serializing to msgpack: {use_msgpack}")
    # JSON is the default wire format; msgpack is opt-in for compactness.
    if not use_msgpack:
        return json.dumps(payload, default=json_iso_dttm_ser, ignore_nan=True)
    return msgpack.dumps(payload, default=json_iso_dttm_ser, use_bin_type=True)
|
https://github.com/apache/superset/issues/8651
|
Task sql_lab.get_sql_results[051acb0d-30a5-4dbe-b30e-5b16bc9d8545] raised unexpected: RuntimeError('Working outside of application context.\n\nThis typically means that you attempted to use functionality that needed\nto interface with the current application object in some way. To solve\nthis, set up an application context with app.app_context(). See the\ndocumentation for more information.',)
Traceback (most recent call last):
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 154, in get_sql_results
with session_scope(not ctask.request.called_directly) as session:
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/contextlib2.py", line 79, in __enter__
return next(self.gen)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 117, in session_scope
app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 307, in _get_current_object
return self.__local()
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/flask/globals.py", line 52, in _find_app
raise RuntimeError(_app_ctx_err_msg)
RuntimeError: Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.
|
RuntimeError
|
def execute_sql_statements(
    ctask,
    query_id,
    rendered_query,
    return_results=True,
    store_results=False,
    user_name=None,
    session=None,
    start_time=None,
    expand_data=False,
):
    """Executes the sql query returns the results.

    Runs every statement of ``rendered_query`` sequentially over one shared
    connection/cursor, keeping the ``Query`` row identified by ``query_id``
    up to date, and optionally persists the serialized payload in the
    results backend.

    :param ctask: Celery task wrapper (not used directly in this body)
    :param query_id: id of the Query model row to execute and update
    :param rendered_query: fully templated SQL text, possibly multi-statement
    :param return_results: if True, return the payload dict to the caller
    :param store_results: if True, write the compressed payload to the
        results backend under a fresh UUID key
    :param user_name: effective user name passed to the SQLA engine factory
    :param session: SQLAlchemy session used to read/update the query row
    :param start_time: enqueue timestamp used for pending-time metrics
    :param expand_data: whether to expand complex columns in the payload
    :raises SqlLabException: if async execution is allowed but no results
        backend is configured
    """
    if store_results and start_time:
        # only asynchronous queries
        stats_logger.timing("sqllab.query.time_pending", now_as_float() - start_time)
    query = get_query(query_id, session)
    payload = dict(query_id=query_id)
    database = query.database
    db_engine_spec = database.db_engine_spec
    db_engine_spec.patch()
    if database.allow_run_async and not results_backend:
        raise SqlLabException("Results backend isn't configured.")
    # Breaking down into multiple statements
    parsed_query = ParsedQuery(rendered_query)
    statements = parsed_query.get_statements()
    logger.info(f"Query {query_id}: Executing {len(statements)} statement(s)")
    logger.info(f"Query {query_id}: Set query to 'running'")
    query.status = QueryStatus.RUNNING
    query.start_running_time = now_as_float()
    session.commit()
    engine = database.get_sqla_engine(
        schema=query.schema,
        nullpool=True,
        user_name=user_name,
        source=sources.get("sql_lab", None),
    )
    # Sharing a single connection and cursor across the
    # execution of all statements (if many)
    with closing(engine.raw_connection()) as conn:
        with closing(conn.cursor()) as cursor:
            statement_count = len(statements)
            for i, statement in enumerate(statements):
                # Check if stopped
                query = get_query(query_id, session)
                if query.status == QueryStatus.STOPPED:
                    # User cancelled mid-run: abandon the remaining statements.
                    return
                # Run statement
                msg = f"Running statement {i + 1} out of {statement_count}"
                logger.info(f"Query {query_id}: {msg}")
                query.set_extra_json_key("progress", msg)
                session.commit()
                try:
                    cdf = execute_sql_statement(
                        statement, query, user_name, session, cursor
                    )
                except Exception as e:
                    # First failing statement aborts the batch; the error
                    # payload is returned instead of raising.
                    msg = str(e)
                    if statement_count > 1:
                        msg = f"[Statement {i + 1} out of {statement_count}] " + msg
                    payload = handle_query_error(msg, query, session, payload)
                    return payload
    # Success, updating the query entry in database
    query.rows = cdf.size
    query.progress = 100
    query.set_extra_json_key("progress", None)
    if query.select_as_cta:
        # CTA queries: record the SELECT that reads back the created table.
        query.select_sql = database.select_star(
            query.tmp_table_name,
            limit=query.limit,
            schema=database.force_ctas_schema,
            show_cols=False,
            latest_partition=False,
        )
    query.end_time = now_as_float()
    data, selected_columns, all_columns, expanded_columns = _serialize_and_expand_data(
        cdf, db_engine_spec, store_results and results_backend_use_msgpack, expand_data
    )
    payload.update(
        {
            "status": QueryStatus.SUCCESS,
            "data": data,
            "columns": all_columns,
            "selected_columns": selected_columns,
            "expanded_columns": expanded_columns,
            "query": query.to_dict(),
        }
    )
    payload["query"]["state"] = QueryStatus.SUCCESS
    if store_results and results_backend:
        key = str(uuid.uuid4())
        logger.info(f"Query {query_id}: Storing results in results backend, key: {key}")
        with stats_timing("sqllab.query.results_backend_write", stats_logger):
            with stats_timing(
                "sqllab.query.results_backend_write_serialization", stats_logger
            ):
                serialized_payload = _serialize_payload(
                    payload, results_backend_use_msgpack
                )
            cache_timeout = database.cache_timeout
            if cache_timeout is None:
                cache_timeout = config["CACHE_DEFAULT_TIMEOUT"]
            # Payloads are compressed before caching to cut backend storage.
            compressed = zlib_compress(serialized_payload)
            logger.debug(
                f"*** serialized payload size: {getsizeof(serialized_payload)}"
            )
            logger.debug(f"*** compressed payload size: {getsizeof(compressed)}")
            results_backend.set(key, compressed, cache_timeout)
        query.results_key = key
    query.status = QueryStatus.SUCCESS
    session.commit()
    if return_results:
        return payload
|
def execute_sql_statements(
    ctask,
    query_id,
    rendered_query,
    return_results=True,
    store_results=False,
    user_name=None,
    session=None,
    start_time=None,
    expand_data=False,
):
    """Executes the sql query returns the results.

    Runs every statement of ``rendered_query`` sequentially over one shared
    connection/cursor, keeping the ``Query`` row identified by ``query_id``
    up to date, and optionally persists the serialized payload in the
    results backend.

    :param ctask: Celery task wrapper (not used directly in this body)
    :param query_id: id of the Query model row to execute and update
    :param rendered_query: fully templated SQL text, possibly multi-statement
    :param return_results: if True, return the payload dict to the caller
    :param store_results: if True, write the compressed payload to the
        results backend under a fresh UUID key
    :param user_name: effective user name passed to the SQLA engine factory
    :param session: SQLAlchemy session used to read/update the query row
    :param start_time: enqueue timestamp used for pending-time metrics
    :param expand_data: whether to expand complex columns in the payload
    :raises SqlLabException: if async execution is allowed but no results
        backend is configured
    """
    if store_results and start_time:
        # only asynchronous queries
        stats_logger.timing("sqllab.query.time_pending", now_as_float() - start_time)
    query = get_query(query_id, session)
    payload = dict(query_id=query_id)
    database = query.database
    db_engine_spec = database.db_engine_spec
    db_engine_spec.patch()
    if database.allow_run_async and not results_backend:
        raise SqlLabException("Results backend isn't configured.")
    # Breaking down into multiple statements
    parsed_query = ParsedQuery(rendered_query)
    statements = parsed_query.get_statements()
    logging.info(f"Query {query_id}: Executing {len(statements)} statement(s)")
    logging.info(f"Query {query_id}: Set query to 'running'")
    query.status = QueryStatus.RUNNING
    query.start_running_time = now_as_float()
    session.commit()
    engine = database.get_sqla_engine(
        schema=query.schema,
        nullpool=True,
        user_name=user_name,
        source=sources.get("sql_lab", None),
    )
    # Sharing a single connection and cursor across the
    # execution of all statements (if many)
    with closing(engine.raw_connection()) as conn:
        with closing(conn.cursor()) as cursor:
            statement_count = len(statements)
            for i, statement in enumerate(statements):
                # Check if stopped
                query = get_query(query_id, session)
                if query.status == QueryStatus.STOPPED:
                    # User cancelled mid-run: abandon the remaining statements.
                    return
                # Run statement
                msg = f"Running statement {i + 1} out of {statement_count}"
                logging.info(f"Query {query_id}: {msg}")
                query.set_extra_json_key("progress", msg)
                session.commit()
                try:
                    cdf = execute_sql_statement(
                        statement, query, user_name, session, cursor
                    )
                except Exception as e:
                    # First failing statement aborts the batch; the error
                    # payload is returned instead of raising.
                    msg = str(e)
                    if statement_count > 1:
                        msg = f"[Statement {i + 1} out of {statement_count}] " + msg
                    payload = handle_query_error(msg, query, session, payload)
                    return payload
    # Success, updating the query entry in database
    query.rows = cdf.size
    query.progress = 100
    query.set_extra_json_key("progress", None)
    if query.select_as_cta:
        # CTA queries: record the SELECT that reads back the created table.
        query.select_sql = database.select_star(
            query.tmp_table_name,
            limit=query.limit,
            schema=database.force_ctas_schema,
            show_cols=False,
            latest_partition=False,
        )
    query.end_time = now_as_float()
    data, selected_columns, all_columns, expanded_columns = _serialize_and_expand_data(
        cdf, db_engine_spec, store_results and results_backend_use_msgpack, expand_data
    )
    payload.update(
        {
            "status": QueryStatus.SUCCESS,
            "data": data,
            "columns": all_columns,
            "selected_columns": selected_columns,
            "expanded_columns": expanded_columns,
            "query": query.to_dict(),
        }
    )
    payload["query"]["state"] = QueryStatus.SUCCESS
    if store_results and results_backend:
        key = str(uuid.uuid4())
        logging.info(
            f"Query {query_id}: Storing results in results backend, key: {key}"
        )
        with stats_timing("sqllab.query.results_backend_write", stats_logger):
            with stats_timing(
                "sqllab.query.results_backend_write_serialization", stats_logger
            ):
                serialized_payload = _serialize_payload(
                    payload, results_backend_use_msgpack
                )
            cache_timeout = database.cache_timeout
            if cache_timeout is None:
                cache_timeout = config["CACHE_DEFAULT_TIMEOUT"]
            # Payloads are compressed before caching to cut backend storage.
            compressed = zlib_compress(serialized_payload)
            logging.debug(
                f"*** serialized payload size: {getsizeof(serialized_payload)}"
            )
            logging.debug(f"*** compressed payload size: {getsizeof(compressed)}")
            results_backend.set(key, compressed, cache_timeout)
        query.results_key = key
    query.status = QueryStatus.SUCCESS
    session.commit()
    if return_results:
        return payload
|
https://github.com/apache/superset/issues/8651
|
Task sql_lab.get_sql_results[051acb0d-30a5-4dbe-b30e-5b16bc9d8545] raised unexpected: RuntimeError('Working outside of application context.\n\nThis typically means that you attempted to use functionality that needed\nto interface with the current application object in some way. To solve\nthis, set up an application context with app.app_context(). See the\ndocumentation for more information.',)
Traceback (most recent call last):
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 385, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/celery/app/trace.py", line 648, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 154, in get_sql_results
with session_scope(not ctask.request.called_directly) as session:
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/contextlib2.py", line 79, in __enter__
return next(self.gen)
File "/Users/rob/work/incubator-superset/superset/sql_lab.py", line 117, in session_scope
app.config["SQLALCHEMY_DATABASE_URI"], poolclass=NullPool
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 348, in __getattr__
return getattr(self._get_current_object(), name)
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/werkzeug/local.py", line 307, in _get_current_object
return self.__local()
File "/Users/rob/work/incubator-superset/venv/lib/python3.6/site-packages/flask/globals.py", line 52, in _find_app
raise RuntimeError(_app_ctx_err_msg)
RuntimeError: Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.
|
RuntimeError
|
def get_data(self, df):
    """Build the record list consumed by the country/world-map viz.

    Each returned record carries the ISO ``cca3`` country code, lat/lng,
    the country name, the primary metric as ``m1`` and (when configured)
    the secondary metric as ``m2``.
    """
    # Local import; presumably deferred to avoid an import cycle at module
    # load time — confirm against the package import graph.
    from superset.data import countries
    fd = self.form_data
    cols = [fd.get("entity")]
    metric = utils.get_metric_name(fd.get("metric"))
    secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
    columns = ["country", "m1", "m2"]
    if metric == secondary_metric:
        # Same metric picked for both slots: reuse one column for m1 and m2.
        ndf = df[cols]
        ndf["m1"] = df[metric]
        ndf["m2"] = ndf["m1"]
    else:
        if secondary_metric:
            cols += [metric, secondary_metric]
        else:
            # Only a primary metric: the output has no m2 column.
            cols += [metric]
            columns = ["country", "m1"]
        ndf = df[cols]
    df = ndf
    df.columns = columns
    d = df.to_dict(orient="records")
    for row in d:
        country = None
        if isinstance(row["country"], str):
            # Resolve the raw value (name/code, per the configured field
            # type) to the canonical country record.
            country = countries.get(fd.get("country_fieldtype"), row["country"])
        if country:
            row["country"] = country["cca3"]
            row["latitude"] = country["lat"]
            row["longitude"] = country["lng"]
            row["name"] = country["name"]
        else:
            # Unrecognized country: flag with a sentinel code.
            row["country"] = "XXX"
    return d
|
def get_data(self, df):
    """Build the record list consumed by the country/world-map viz.

    Fix: when the same metric fills both the primary and secondary slots,
    ``df[metric]`` can be a plain Series (no duplicate column labels), and
    chaining ``.iloc[:, 0]`` onto a Series raises
    ``IndexingError: Too many indexers``.  Assign the selection directly;
    this works for both the Series and duplicate-column cases.
    """
    # Local import; presumably deferred to avoid an import cycle — confirm.
    from superset.data import countries
    fd = self.form_data
    cols = [fd.get("entity")]
    metric = utils.get_metric_name(fd.get("metric"))
    secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
    columns = ["country", "m1", "m2"]
    if metric == secondary_metric:
        # Same metric picked for both slots: reuse one column for m1 and m2.
        ndf = df[cols]
        ndf["m1"] = df[metric]
        ndf["m2"] = ndf["m1"]
    else:
        if secondary_metric:
            cols += [metric, secondary_metric]
        else:
            # Only a primary metric: the output has no m2 column.
            cols += [metric]
            columns = ["country", "m1"]
        ndf = df[cols]
    df = ndf
    df.columns = columns
    d = df.to_dict(orient="records")
    for row in d:
        country = None
        if isinstance(row["country"], str):
            # Resolve the raw value (name/code, per the configured field
            # type) to the canonical country record.
            country = countries.get(fd.get("country_fieldtype"), row["country"])
        if country:
            row["country"] = country["cca3"]
            row["latitude"] = country["lat"]
            row["longitude"] = country["lng"]
            row["name"] = country["name"]
        else:
            # Unrecognized country: flag with a sentinel code.
            row["country"] = "XXX"
    return d
|
https://github.com/apache/superset/issues/7006
|
Mar 11 11:08:03 analytics-tool1004 superset[27831]: 2019-03-11 11:08:03,985:ERROR:root:Too many indexers
Mar 11 11:08:03 analytics-tool1004 superset[27831]: Traceback (most recent call last):
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/base.py", line 96, in wraps
Mar 11 11:08:03 analytics-tool1004 superset[27831]: return f(self, *args, **kwargs)
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/core.py", line 1211, in explore_json
Mar 11 11:08:03 analytics-tool1004 superset[27831]: samples=samples,
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/core.py", line 1142, in generate_json
Mar 11 11:08:03 analytics-tool1004 superset[27831]: payload = viz_obj.get_payload()
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 374, in get_payload
Mar 11 11:08:03 analytics-tool1004 superset[27831]: payload['data'] = self.get_data(df)
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 1745, in get_data
Mar 11 11:08:03 analytics-tool1004 superset[27831]: ndf['m1'] = df[metric].iloc[:,0]
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/indexing.py", line 1471, in __getitem__
Mar 11 11:08:03 analytics-tool1004 superset[27831]: return self._getitem_tuple(key)
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/indexing.py", line 2012, in _getitem_tuple
Mar 11 11:08:03 analytics-tool1004 superset[27831]: self._has_valid_tuple(tup)
Mar 11 11:08:03 analytics-tool1004 superset[27831]: File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/indexing.py", line 219, in _has_valid_tuple
Mar 11 11:08:03 analytics-tool1004 superset[27831]: raise IndexingError('Too many indexers')
Mar 11 11:08:03 analytics-tool1004 superset[27831]: pandas.core.indexing.IndexingError: Too many indexers
|
IndexingError
|
def query_obj(self):
    """Build the query object for the bubble chart.

    Deduplicates the x/y/size metrics (picking the same metric twice must
    not issue it twice) while keeping a deterministic z, x, y order —
    ``list(set(...))`` would dedupe but yield an arbitrary, run-dependent
    ordering.
    """
    form_data = self.form_data
    d = super().query_obj()
    d["groupby"] = [
        form_data.get("entity"),
    ]
    if form_data.get("series"):
        d["groupby"].append(form_data.get("series"))
    self.x_metric = form_data.get("x")
    self.y_metric = form_data.get("y")
    self.z_metric = form_data.get("size")
    self.entity = form_data.get("entity")
    self.series = form_data.get("series") or self.entity
    d["row_limit"] = form_data.get("limit")
    # dict.fromkeys dedupes while preserving insertion order (z, x, y).
    d["metrics"] = list(
        dict.fromkeys(
            [
                self.z_metric,
                self.x_metric,
                self.y_metric,
            ]
        )
    )
    if not all(d["metrics"] + [self.entity]):
        raise Exception(_("Pick a metric for x, y and size"))
    return d
|
def query_obj(self):
    """Build the query object for the bubble chart (x, y, size metrics)."""
    form_data = self.form_data
    d = super().query_obj()
    # Group by the entity, plus the optional series dimension.
    groupby = [form_data.get("entity")]
    series = form_data.get("series")
    if series:
        groupby.append(series)
    d["groupby"] = groupby
    # Stash the three axis metrics and dimensions on the instance for
    # later use by the data-shaping step.
    self.x_metric = form_data.get("x")
    self.y_metric = form_data.get("y")
    self.z_metric = form_data.get("size")
    self.entity = form_data.get("entity")
    self.series = series or self.entity
    d["row_limit"] = form_data.get("limit")
    d["metrics"] = [self.z_metric, self.x_metric, self.y_metric]
    # All three metrics and the entity are mandatory.
    if not all(d["metrics"] + [self.entity]):
        raise Exception(_("Pick a metric for x, y and size"))
    return d
|
https://github.com/apache/superset/issues/7079
|
Traceback (most recent call last):
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3078, in get_loc
return self._engine.get_loc(key)
File "pandas/_libs/index.pyx", line 140, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 157, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 183, in pandas._libs.index.IndexEngine._get_loc_duplicates
File "pandas/_libs/index.pyx", line 200, in pandas._libs.index.IndexEngine._maybe_get_bool_indexer
KeyError: 'x'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/internals.py", line 4243, in set
loc = self.items.get_loc(item)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/indexes/base.py", line 3080, in get_loc
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas/_libs/index.pyx", line 140, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 157, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 183, in pandas._libs.index.IndexEngine._get_loc_duplicates
File "pandas/_libs/index.pyx", line 200, in pandas._libs.index.IndexEngine._maybe_get_bool_indexer
KeyError: 'x'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/base.py", line 114, in wraps
return f(self, *args, **kwargs)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/core.py", line 1229, in explore_json
samples=samples,
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/views/core.py", line 1160, in generate_json
payload = viz_obj.get_payload()
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 375, in get_payload
payload['data'] = self.get_data(df)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/superset/viz.py", line 967, in get_data
df['x'] = df[[utils.get_metric_name(self.x_metric)]]
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/frame.py", line 3119, in __setitem__
self._set_item(key, value)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/frame.py", line 3195, in _set_item
NDFrame._set_item(self, key, value)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/generic.py", line 2600, in _set_item
self._data.set(key, value)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/internals.py", line 4246, in set
self.insert(len(self.items), item, value)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/internals.py", line 4347, in insert
placement=slice(loc, loc + 1))
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/internals.py", line 3205, in make_block
return klass(values, ndim=ndim, placement=placement)
File "/srv/deployment/analytics/superset/venv/lib/python3.7/site-packages/pandas/core/internals.py", line 125, in __init__
'{mgr}'.format(val=len(self.values), mgr=len(self.mgr_locs)))
ValueError: Wrong number of items passed 2, placement implies 1
|
KeyError
|
def get_data(self, df):
    """Assemble the calendar-heatmap payload from the query dataframe."""
    form_data = self.form_data
    data = {}
    records = df.to_dict("records")
    for metric in self.metric_labels:
        values = {}
        for obj in records:
            v = obj[DTTM_ALIAS]
            # pandas Timestamp objects do not support division by int;
            # unwrap to the underlying integer via .value first.
            if hasattr(v, "value"):
                v = v.value
            # Key by epoch seconds; the 10**9 divisor assumes v is in
            # nanoseconds — confirm against the upstream dttm handling.
            values[str(v / 10**9)] = obj.get(metric)
        data[metric] = values
    start, end = utils.get_since_until(
        form_data.get("time_range"), form_data.get("since"), form_data.get("until")
    )
    if not start or not end:
        raise Exception("Please provide both time bounds (Since and Until)")
    domain = form_data.get("domain_granularity")
    diff_delta = rdelta.relativedelta(end, start)
    diff_secs = (end - start).total_seconds()
    # Number of domain buckets spanned by [start, end], inclusive.
    if domain == "year":
        range_ = diff_delta.years + 1
    elif domain == "month":
        range_ = diff_delta.years * 12 + diff_delta.months + 1
    elif domain == "week":
        range_ = diff_delta.years * 53 + diff_delta.weeks + 1
    elif domain == "day":
        range_ = diff_secs // (24 * 60 * 60) + 1
    else:
        range_ = diff_secs // (60 * 60) + 1
    return {
        "data": data,
        "start": start,
        "domain": domain,
        "subdomain": form_data.get("subdomain_granularity"),
        "range": range_,
    }
|
def get_data(self, df):
    """Assemble the calendar-heatmap payload from the query dataframe.

    Fix: the time column may contain pandas ``Timestamp`` objects, which
    do not support division by ``int`` (``TypeError: unsupported operand
    type(s) for /: 'Timestamp' and 'int'``).  Unwrap such values to their
    integer epoch via ``.value`` before dividing down to seconds.
    """
    form_data = self.form_data
    data = {}
    records = df.to_dict("records")
    for metric in self.metric_labels:
        values = {}
        for obj in records:
            v = obj[DTTM_ALIAS]
            # Timestamp -> underlying integer; plain numbers pass through.
            if hasattr(v, "value"):
                v = v.value
            # Key by epoch seconds (the 10**9 divisor assumes nanoseconds).
            values[str(v / 10**9)] = obj.get(metric)
        data[metric] = values
    start, end = utils.get_since_until(
        form_data.get("time_range"), form_data.get("since"), form_data.get("until")
    )
    if not start or not end:
        raise Exception("Please provide both time bounds (Since and Until)")
    domain = form_data.get("domain_granularity")
    diff_delta = rdelta.relativedelta(end, start)
    diff_secs = (end - start).total_seconds()
    # Number of domain buckets spanned by [start, end], inclusive.
    if domain == "year":
        range_ = diff_delta.years + 1
    elif domain == "month":
        range_ = diff_delta.years * 12 + diff_delta.months + 1
    elif domain == "week":
        range_ = diff_delta.years * 53 + diff_delta.weeks + 1
    elif domain == "day":
        range_ = diff_secs // (24 * 60 * 60) + 1
    else:
        range_ = diff_secs // (60 * 60) + 1
    return {
        "data": data,
        "start": start,
        "domain": domain,
        "subdomain": form_data.get("subdomain_granularity"),
        "range": range_,
    }
|
https://github.com/apache/superset/issues/6278
|
2018-11-06 05:56:26,419:ERROR:root:unsupported operand type(s) for /: 'Timestamp' and 'int'
Traceback (most recent call last):
File "/Users/alganas/LEXER/master/incubator-superset/superset/views/core.py", line 1162, in generate_json
payload = viz_obj.get_payload()
File "/Users/alganas/LEXER/master/incubator-superset/superset/viz.py", line 372, in get_payload
payload['data'] = self.get_data(df)
File "/Users/alganas/LEXER/master/incubator-superset/superset/viz.py", line 788, in get_data
for obj in records
File "/Users/alganas/LEXER/master/incubator-superset/superset/viz.py", line 788, in <dictcomp>
for obj in records
TypeError: unsupported operand type(s) for /: 'Timestamp' and 'int'
|
TypeError
|
def get_or_create_main_db(caravel):
    """Fetch the 'main' Database record, creating and refreshing it as needed."""
    session = caravel.db.session
    database_cls = caravel.models.Database
    config = caravel.app.config
    logging.info("Creating database reference")
    dbobj = session.query(database_cls).filter_by(database_name="main").first()
    if not dbobj:
        dbobj = database_cls(database_name="main")
    logging.info(config.get("SQLALCHEMY_DATABASE_URI"))
    # Route the URI through the model's setter rather than raw assignment.
    dbobj.set_sqlalchemy_uri(config.get("SQLALCHEMY_DATABASE_URI"))
    dbobj.expose_in_sqllab = True
    session.add(dbobj)
    session.commit()
    return dbobj
|
def get_or_create_main_db(caravel):
    """Fetch the 'main' Database record, creating and refreshing it as needed.

    Fix: use the model's ``set_sqlalchemy_uri()`` instead of assigning
    ``sqlalchemy_uri`` directly — the setter applies the model's URI
    handling (presumably splitting out the password), which a raw
    attribute assignment bypasses, leading to connections attempted
    without a password ("Access denied ... (using password: NO)").
    """
    db = caravel.db
    config = caravel.app.config
    DB = caravel.models.Database
    logging.info("Creating database reference")
    dbobj = db.session.query(DB).filter_by(database_name="main").first()
    if not dbobj:
        dbobj = DB(database_name="main")
    logging.info(config.get("SQLALCHEMY_DATABASE_URI"))
    dbobj.set_sqlalchemy_uri(config.get("SQLALCHEMY_DATABASE_URI"))
    dbobj.expose_in_sqllab = True
    db.session.add(dbobj)
    db.session.commit()
    return dbobj
|
https://github.com/apache/superset/issues/1070
|
2016-09-07 01:52:51,509:ERROR:root:(pymysql.err.OperationalError) (1045, u"Access denied for user 'mysqladmin'@'172.18.0.3' (using password: NO)")
Traceback (most recent call last):
File "/home/caravel/caravel/caravel/views.py", line 66, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/Flask_AppBuilder-1.8.1-py2.7.egg/flask_appbuilder/security/decorators.py", line 52, in wraps
return f(self, *args, **kwargs)
File "/home/caravel/caravel/caravel/views.py", line 1118, in tables
'tables': database.all_table_names(schema),
File "/home/caravel/caravel/caravel/models.py", line 445, in all_table_names
return sorted(self.inspector.get_table_names(schema))
File "/home/caravel/caravel/caravel/models.py", line 442, in inspector
return sqla.inspect(engine)
File "build/bdist.linux-x86_64/egg/sqlalchemy/inspection.py", line 63, in inspect
ret = reg(subject)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/reflection.py", line 139, in _insp
return Inspector.from_engine(bind)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/reflection.py", line 135, in from_engine
return Inspector(bind)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/reflection.py", line 109, in __init__
bind.connect().close()
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/base.py", line 2018, in connect
return self._connection_cls(self, **kwargs)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/base.py", line 72, in __init__
if connection is not None else engine.raw_connection()
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/base.py", line 2104, in raw_connection
self.pool.unique_connection, _connection)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/base.py", line 2078, in _wrap_pool_connect
e, dialect, self)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/base.py", line 1405, in _handle_dbapi_exception_noconnection
exc_info
File "build/bdist.linux-x86_64/egg/sqlalchemy/util/compat.py", line 202, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/base.py", line 2074, in _wrap_pool_connect
return fn()
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 318, in unique_connection
return _ConnectionFairy._checkout(self)
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 713, in _checkout
fairy = _ConnectionRecord.checkout(pool)
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 480, in checkout
rec = pool._do_get()
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 1060, in _do_get
self._dec_overflow()
File "build/bdist.linux-x86_64/egg/sqlalchemy/util/langhelpers.py", line 60, in __exit__
compat.reraise(exc_type, exc_value, exc_tb)
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 1057, in _do_get
return self._create_connection()
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 323, in _create_connection
return _ConnectionRecord(self)
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 449, in __init__
self.connection = self.__connect()
File "build/bdist.linux-x86_64/egg/sqlalchemy/pool.py", line 607, in __connect
connection = self.__pool._invoke_creator(self)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/strategies.py", line 97, in connect
return dialect.connect(*cargs, **cparams)
File "build/bdist.linux-x86_64/egg/sqlalchemy/engine/default.py", line 385, in connect
return self.dbapi.connect(*cargs, **cparams)
File "/usr/local/lib/python2.7/site-packages/pymysql/__init__.py", line 90, in Connect
return Connection(*args, **kwargs)
File "/usr/local/lib/python2.7/site-packages/pymysql/connections.py", line 690, in __init__
self.connect()
File "/usr/local/lib/python2.7/site-packages/pymysql/connections.py", line 908, in connect
self._request_authentication()
File "/usr/local/lib/python2.7/site-packages/pymysql/connections.py", line 1116, in _request_authentication
auth_packet = self._read_packet()
File "/usr/local/lib/python2.7/site-packages/pymysql/connections.py", line 983, in _read_packet
packet.check_error()
File "/usr/local/lib/python2.7/site-packages/pymysql/connections.py", line 395, in check_error
err.raise_mysql_exception(self._data)
File "/usr/local/lib/python2.7/site-packages/pymysql/err.py", line 107, in raise_mysql_exception
raise errorclass(errno, errval)
OperationalError: (pymysql.err.OperationalError) (1045, u"Access denied for user 'mysqladmin'@'172.18.0.3' (using password: NO)")
|
OperationalError
|
def get_csv(self):
    """Render the viz dataframe as UTF-8 CSV text."""
    frame = self.get_df()
    # Emit the index column only when it carries real labels, i.e. it is
    # not the default positional RangeIndex.
    has_meaningful_index = not isinstance(frame.index, pd.RangeIndex)
    return frame.to_csv(index=has_meaningful_index, encoding="utf-8")
|
def get_csv(self):
    """Render the viz dataframe as CSV text.

    Fix: encode explicitly as UTF-8 so non-ASCII cell values do not raise
    ``UnicodeEncodeError`` under a default ASCII codec.
    """
    df = self.get_df()
    # Emit the index column only when it is not the default RangeIndex.
    include_index = not isinstance(df.index, pd.RangeIndex)
    return df.to_csv(index=include_index, encoding="utf-8")
|
https://github.com/apache/superset/issues/395
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1381, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python2.7/dist-packages/flask_appbuilder/security/decorators.py", line 26, in wraps
return f(self, *args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/caravel/models.py", line 1164, in wrapper
return f(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/caravel/views.py", line 515, in explore
payload = obj.get_csv()
File "/usr/local/lib/python2.7/dist-packages/caravel/viz.py", line 282, in get_csv
return df.to_csv(index=include_index)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/frame.py", line 1343, in to_csv
formatter.save()
File "/usr/local/lib/python2.7/dist-packages/pandas/core/format.py", line 1550, in save
self._save()
File "/usr/local/lib/python2.7/dist-packages/pandas/core/format.py", line 1651, in _save
self._save_chunk(start_i, end_i)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/format.py", line 1677, in _save_chunk
lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
File "pandas/lib.pyx", line 1078, in pandas.lib.write_csv_rows (pandas/lib.c:19848)
UnicodeEncodeError: 'ascii' codec can't encode character u'\xfc' in position 1: ordinal not in range(128)
|
UnicodeEncodeError
|
def query(  # druid
    self,
    groupby,
    metrics,
    granularity,
    from_dttm,
    to_dttm,
    filter=None,  # noqa
    is_timeseries=True,
    timeseries_limit=None,
    row_limit=None,
    inner_from_dttm=None,
    inner_to_dttm=None,
    extras=None,  # noqa
    select=None,
):  # noqa
    """Runs a query against Druid and returns a dataframe.

    This query interface is common to SqlAlchemy and Druid

    :param groupby: dimension names to group by
    :param metrics: metric names to aggregate
    :param granularity: "all" or a human-readable timedelta string
    :param from_dttm: naive datetime, start of the queried interval
    :param to_dttm: naive datetime, end of the queried interval
    :param filter: iterable of (column, op, value) triples; op is one of
        "==", "!=", "in", "not in"
    :param is_timeseries: whether the result keeps its timestamp column
    :param timeseries_limit: cap on the number of series; when set, a
        two-phase query is issued
    :param row_limit: cap on the number of rows returned
    :param inner_from_dttm: phase-one interval start (defaults to from_dttm)
    :param inner_to_dttm: phase-one interval end (defaults to to_dttm)
    :param extras: dict of extra options, e.g. "druid_time_origin"
    :param select: unused here
    :raises Exception: if Druid returns no data
    """
    # TODO refactor into using a TBD Query object
    qry_start_dttm = datetime.now()
    # Normalize None defaults up front: ``filter`` is iterated and
    # ``extras`` is .get()'d below, so leaving them as None would raise
    # TypeError / AttributeError respectively.
    filter = filter or []
    extras = extras or {}
    inner_from_dttm = inner_from_dttm or from_dttm
    inner_to_dttm = inner_to_dttm or to_dttm

    # add tzinfo to native datetime with config
    from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
    to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))

    query_str = ""
    aggregations = {
        m.metric_name: m.json_obj for m in self.metrics if m.metric_name in metrics
    }
    granularity = granularity or "all"
    if granularity != "all":
        granularity = utils.parse_human_timedelta(granularity).total_seconds() * 1000
    if not isinstance(granularity, string_types):
        granularity = {"type": "duration", "duration": granularity}
        # An origin only makes sense for duration granularities; setting
        # it on the string form ("all") would fail anyway.
        origin = extras.get("druid_time_origin")
        if origin:
            dttm = utils.parse_human_datetime(origin)
            granularity["origin"] = dttm.isoformat()
    qry = dict(
        datasource=self.datasource_name,
        dimensions=groupby,
        aggregations=aggregations,
        granularity=granularity,
        intervals=from_dttm.isoformat() + "/" + to_dttm.isoformat(),
    )

    # Translate (col, op, value) triples into pydruid Filter objects,
    # AND-ing successive conditions together.
    filters = None
    for col, op, eq in filter:
        cond = None
        if op == "==":
            cond = Dimension(col) == eq
        elif op == "!=":
            cond = ~(Dimension(col) == eq)
        elif op in ("in", "not in"):
            fields = []
            splitted = eq.split(",")
            if len(splitted) > 1:
                for s in splitted:
                    s = s.strip()
                    fields.append(Filter.build_filter(Dimension(col) == s))
                cond = Filter(type="or", fields=fields)
            else:
                cond = Dimension(col) == eq
            if op == "not in":
                cond = ~cond
        if filters:
            filters = Filter(
                type="and",
                fields=[Filter.build_filter(cond), Filter.build_filter(filters)],
            )
        else:
            filters = cond
    if filters:
        qry["filter"] = filters

    client = self.cluster.get_pydruid_client()
    orig_filters = filters
    if timeseries_limit and is_timeseries:
        # Limit on the number of timeseries, doing a two-phases query
        pre_qry = deepcopy(qry)
        pre_qry["granularity"] = "all"
        pre_qry["limit_spec"] = {
            "type": "default",
            "limit": timeseries_limit,
            "intervals": (
                inner_from_dttm.isoformat() + "/" + inner_to_dttm.isoformat()
            ),
            "columns": [
                {
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }
            ],
        }
        client.groupby(**pre_qry)
        query_str += "// Two phase query\n// Phase 1\n"
        # Serialize the query the client actually sent (plain JSON), never
        # the raw ``qry`` dict which may hold pydruid Filter objects.
        query_str += json.dumps(client.query_dict, indent=2) + "\n"
        query_str += "//\nPhase 2 (built based on phase one's results)\n"
        df = client.export_pandas()
        if df is not None and not df.empty:
            # Restrict phase two to the top series found in phase one by
            # OR-ing one filter per phase-one result row.
            dims = qry["dimensions"]
            filters = []
            for _, row in df.iterrows():
                fields = []
                for dim in dims:
                    f = Filter.build_filter(Dimension(dim) == row[dim])
                    fields.append(f)
                if len(fields) > 1:
                    filt = Filter(type="and", fields=fields)
                    filters.append(Filter.build_filter(filt))
                elif fields:
                    filters.append(fields[0])
            if filters:
                ff = Filter(type="or", fields=filters)
                if not orig_filters:
                    qry["filter"] = ff
                else:
                    qry["filter"] = Filter(
                        type="and",
                        fields=[
                            Filter.build_filter(ff),
                            Filter.build_filter(orig_filters),
                        ],
                    )
            qry["limit_spec"] = None
    if row_limit:
        qry["limit_spec"] = {
            "type": "default",
            "limit": row_limit,
            "columns": [
                {
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }
            ],
        }
    client.groupby(**qry)
    query_str += json.dumps(client.query_dict, indent=2)
    df = client.export_pandas()
    if df is None or df.size == 0:
        raise Exception("No data was returned.")

    if not is_timeseries and granularity == "all" and "timestamp" in df.columns:
        del df["timestamp"]

    # Reordering columns: timestamp first, then groupby, metrics, rest.
    cols = []
    if "timestamp" in df.columns:
        cols += ["timestamp"]
    cols += [col for col in groupby if col in df.columns]
    cols += [col for col in metrics if col in df.columns]
    cols += [col for col in df.columns if col not in cols]
    df = df[cols]
    return QueryResult(df=df, query=query_str, duration=datetime.now() - qry_start_dttm)
|
def query(  # druid
    self,
    groupby,
    metrics,
    granularity,
    from_dttm,
    to_dttm,
    filter=None,  # noqa
    is_timeseries=True,
    timeseries_limit=None,
    row_limit=None,
    inner_from_dttm=None,
    inner_to_dttm=None,
    extras=None,  # noqa
    select=None,
):  # noqa
    """Runs a query against Druid and returns a dataframe.

    This query interface is common to SqlAlchemy and Druid

    :param groupby: dimension names to group by
    :param metrics: metric names to aggregate
    :param granularity: "all" or a human-readable timedelta string
    :param from_dttm: naive datetime, start of the queried interval
    :param to_dttm: naive datetime, end of the queried interval
    :param filter: iterable of (column, op, value) triples; op is one of
        "==", "!=", "in", "not in"
    :param is_timeseries: whether the result keeps its timestamp column
    :param timeseries_limit: cap on the number of series; when set, a
        two-phase query is issued
    :param row_limit: cap on the number of rows returned
    :param inner_from_dttm: phase-one interval start (defaults to from_dttm)
    :param inner_to_dttm: phase-one interval end (defaults to to_dttm)
    :param extras: dict of extra options, e.g. "druid_time_origin"
    :param select: unused here
    :raises Exception: if Druid returns no data
    """
    # TODO refactor into using a TBD Query object
    qry_start_dttm = datetime.now()
    # Normalize None defaults up front: ``filter`` is iterated and
    # ``extras`` is .get()'d below, so leaving them as None would raise
    # TypeError / AttributeError respectively.
    filter = filter or []
    extras = extras or {}
    inner_from_dttm = inner_from_dttm or from_dttm
    inner_to_dttm = inner_to_dttm or to_dttm

    # add tzinfo to native datetime with config
    from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
    to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))

    query_str = ""
    aggregations = {
        m.metric_name: m.json_obj for m in self.metrics if m.metric_name in metrics
    }
    granularity = granularity or "all"
    if granularity != "all":
        granularity = utils.parse_human_timedelta(granularity).total_seconds() * 1000
    if not isinstance(granularity, string_types):
        granularity = {"type": "duration", "duration": granularity}
        # An origin only makes sense for duration granularities; setting
        # it on the string form ("all") would fail anyway.
        origin = extras.get("druid_time_origin")
        if origin:
            dttm = utils.parse_human_datetime(origin)
            granularity["origin"] = dttm.isoformat()
    qry = dict(
        datasource=self.datasource_name,
        dimensions=groupby,
        aggregations=aggregations,
        granularity=granularity,
        intervals=from_dttm.isoformat() + "/" + to_dttm.isoformat(),
    )

    # Translate (col, op, value) triples into pydruid Filter objects,
    # AND-ing successive conditions together.
    filters = None
    for col, op, eq in filter:
        cond = None
        if op == "==":
            cond = Dimension(col) == eq
        elif op == "!=":
            cond = ~(Dimension(col) == eq)
        elif op in ("in", "not in"):
            fields = []
            splitted = eq.split(",")
            if len(splitted) > 1:
                for s in splitted:
                    s = s.strip()
                    fields.append(Filter.build_filter(Dimension(col) == s))
                cond = Filter(type="or", fields=fields)
            else:
                cond = Dimension(col) == eq
            if op == "not in":
                cond = ~cond
        if filters:
            filters = Filter(
                type="and",
                fields=[Filter.build_filter(cond), Filter.build_filter(filters)],
            )
        else:
            filters = cond
    if filters:
        qry["filter"] = filters

    client = self.cluster.get_pydruid_client()
    orig_filters = filters
    if timeseries_limit and is_timeseries:
        # Limit on the number of timeseries, doing a two-phases query
        pre_qry = deepcopy(qry)
        pre_qry["granularity"] = "all"
        pre_qry["limit_spec"] = {
            "type": "default",
            "limit": timeseries_limit,
            "intervals": (
                inner_from_dttm.isoformat() + "/" + inner_to_dttm.isoformat()
            ),
            "columns": [
                {
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }
            ],
        }
        client.groupby(**pre_qry)
        query_str += "// Two phase query\n// Phase 1\n"
        query_str += json.dumps(client.query_dict, indent=2) + "\n"
        query_str += "//\nPhase 2 (built based on phase one's results)\n"
        df = client.export_pandas()
        if df is not None and not df.empty:
            # Restrict phase two to the top series found in phase one by
            # OR-ing one filter per phase-one result row.
            dims = qry["dimensions"]
            filters = []
            for _, row in df.iterrows():
                fields = []
                for dim in dims:
                    f = Filter.build_filter(Dimension(dim) == row[dim])
                    fields.append(f)
                if len(fields) > 1:
                    filt = Filter(type="and", fields=fields)
                    filters.append(Filter.build_filter(filt))
                elif fields:
                    filters.append(fields[0])
            if filters:
                ff = Filter(type="or", fields=filters)
                if not orig_filters:
                    qry["filter"] = ff
                else:
                    qry["filter"] = Filter(
                        type="and",
                        fields=[
                            Filter.build_filter(ff),
                            Filter.build_filter(orig_filters),
                        ],
                    )
            qry["limit_spec"] = None
    if row_limit:
        qry["limit_spec"] = {
            "type": "default",
            "limit": row_limit,
            "columns": [
                {
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }
            ],
        }
    client.groupby(**qry)
    # BUGFIX: serialize ``client.query_dict`` (plain JSON emitted by the
    # client), not the raw ``qry`` dict — ``qry["filter"]`` can hold a
    # pydruid Filter instance, which json.dumps cannot serialize
    # (TypeError: ... Filter instance ... is not JSON serializable).
    query_str += json.dumps(client.query_dict, indent=2)
    df = client.export_pandas()
    if df is None or df.size == 0:
        raise Exception("No data was returned.")

    if not is_timeseries and granularity == "all" and "timestamp" in df.columns:
        del df["timestamp"]

    # Reordering columns: timestamp first, then groupby, metrics, rest.
    cols = []
    if "timestamp" in df.columns:
        cols += ["timestamp"]
    cols += [col for col in groupby if col in df.columns]
    cols += [col for col in metrics if col in df.columns]
    cols += [col for col in df.columns if col not in cols]
    df = df[cols]
    return QueryResult(df=df, query=query_str, duration=datetime.now() - qry_start_dttm)
|
https://github.com/apache/superset/issues/388
|
because qry['filter'] is a Filter instance
Stack:
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1836, in __call__
return self.wsgi_app(environ, start_response)
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1820, in wsgi_app
response = self.make_response(self.handle_exception(e))
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1403, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1817, in wsgi_app
response = self.full_dispatch_request()
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1477, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1381, in handle_user_exception
Open an interactive python shell in this framereraise(exc_type, exc_value, tb)
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1475, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/lib/python2.7/site-packages/Flask-0.10.1-py2.7.egg/flask/app.py", line 1461, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/lib/python2.7/site-packages/Flask_AppBuilder-1.6.0-py2.7.egg/flask_appbuilder/security/decorators.py", line 26, in wraps
return f(self, *args, **kwargs)
File "/usr/lib/python2.7/site-packages/caravel-0.8.8-py2.7.egg/caravel/models.py", line 1176, in wrapper
return f(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/caravel-0.8.8-py2.7.egg/caravel/views.py", line 514, in explore
raise e
Traceback (most recent call last):
File "/projects/druid/panoramix/caravel/views.py", line 510, in explore
payload = obj.get_json()
File "/projects/druid/panoramix/caravel/viz.py", line 251, in get_json
'data': self.get_data(),
File "/projects/druid/panoramix/caravel/viz.py", line 745, in get_data
df = self.get_df()
File "/projects/druid/panoramix/caravel/viz.py", line 140, in get_df
self.results = self.datasource.query(**query_obj)
File "/projects/druid/panoramix/caravel/models.py", line 1116, in query
query_str += json.dumps(qry, indent=2)
File "/Users/clu/.pyenv/versions/2.7.5/lib/python2.7/json/__init__.py", line 250, in dumps
sort_keys=sort_keys, **kw).encode(obj)
File "/Users/clu/.pyenv/versions/2.7.5/lib/python2.7/json/encoder.py", line 209, in encode
chunks = list(chunks)
File "/Users/clu/.pyenv/versions/2.7.5/lib/python2.7/json/encoder.py", line 434, in _iterencode
for chunk in _iterencode_dict(o, _current_indent_level):
File "/Users/clu/.pyenv/versions/2.7.5/lib/python2.7/json/encoder.py", line 408, in _iterencode_dict
for chunk in chunks:
File "/Users/clu/.pyenv/versions/2.7.5/lib/python2.7/json/encoder.py", line 442, in _iterencode
o = _default(o)
File "/Users/clu/.pyenv/versions/2.7.5/lib/python2.7/json/encoder.py", line 184, in default
raise TypeError(repr(o) + " is not JSON serializable")
TypeError: <pydruid.utils.filters.Filter instance at 0x10ae1d128> is not JSON serializable
|
TypeError
|
def __init__(self, ip_address):
    """Instantiate a SoCo controller for the speaker at *ip_address*.

    Creation is deliberately cheap and quick: no network calls are
    made here.

    :param ip_address: the speaker's IPv4 address as a string
    :raises ValueError: if *ip_address* is not a valid IPv4 string
    """
    super(SoCo, self).__init__()
    # Validate as IPv4 only; Sonos does not (yet) support IPv6.
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    # Populated lazily with details about the current speaker
    self.speaker_info = {}
    # The UPnP services we talk to on the device
    # pylint: disable=invalid-name
    for attr_name, service_cls in (
        ("avTransport", AVTransport),
        ("contentDirectory", ContentDirectory),
        ("deviceProperties", DeviceProperties),
        ("renderingControl", RenderingControl),
        ("zoneGroupTopology", ZoneGroupTopology),
        ("alarmClock", AlarmClock),
        ("systemProperties", SystemProperties),
        ("musicServices", MusicServices),
    ):
        setattr(self, attr_name, service_cls(self))
    self.music_library = MusicLibrary(self)
    # Private attributes: lazily-populated caches of zone state
    self._all_zones = set()
    self._groups = set()
    self._is_bridge = None
    self._is_coordinator = False
    self._is_soundbar = None
    self._player_name = None
    self._uid = None
    self._household_id = None
    self._visible_zones = set()
    self._zgs_cache = None
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
|
def __init__(self, ip_address):
    """Instantiate a SoCo controller for the speaker at *ip_address*.

    :param ip_address: the speaker's IPv4 address as a string
    :raises ValueError: if *ip_address* is not a valid IPv4 string
    """
    # Note: Creation of a SoCo instance should be as cheap and quick as
    # possible. Do not make any network calls here
    super(SoCo, self).__init__()
    # Check if ip_address is a valid IPv4 representation.
    # Sonos does not (yet) support IPv6
    try:
        socket.inet_aton(ip_address)
    except socket.error:
        raise ValueError("Not a valid IP address string")
    #: The speaker's ip address
    self.ip_address = ip_address
    self.speaker_info = {}  # Stores information about the current speaker
    # The services which we use
    # pylint: disable=invalid-name
    self.avTransport = AVTransport(self)
    self.contentDirectory = ContentDirectory(self)
    self.deviceProperties = DeviceProperties(self)
    self.renderingControl = RenderingControl(self)
    self.zoneGroupTopology = ZoneGroupTopology(self)
    self.alarmClock = AlarmClock(self)
    self.systemProperties = SystemProperties(self)
    self.musicServices = MusicServices(self)
    self.music_library = MusicLibrary(self)
    # Some private attributes (lazily-populated caches)
    self._all_zones = set()
    self._groups = set()
    self._is_bridge = None
    self._is_coordinator = False
    # Cache for the soundbar check; this attribute was previously
    # missing here although soundbar-only features rely on it.
    self._is_soundbar = None
    self._player_name = None
    self._uid = None
    self._household_id = None
    self._visible_zones = set()
    self._zgs_cache = None
    _LOG.debug("Created SoCo instance for ip: %s", ip_address)
|
https://github.com/SoCo/SoCo/issues/633
|
my_zone
SoCo("192.168.2.253")
my_zone.get_speaker_info()
{u'display_version': '9.1', u'player_icon': '/img/icon-S11.png', u'uid': 'RINCON_XYZ', u'software_version': '45.1-56150', u'mac_address': u'MA-CA-DD-RE-S-S', u'hardware_version': '1.14.1.11-1', u'model_number': 'S11', u'serial_number': 'MA-CA-DD-RE-S-S:1', u'zone_name': 'Family Room', u'model_name': 'Sonos Playbase'}
my_zone.night_mode = True
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/soco/core.py", line 813, in night_mode
raise NotSupportedException(message)
soco.exceptions.NotSupportedException: This device does not support night mode
my_zone.dialog_mode = True
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/soco/core.py", line 851, in dialog_mode
raise NotSupportedException(message)
soco.exceptions.NotSupportedException: This device does not support dialog mode
|
soco.exceptions.NotSupportedException
|
def night_mode(self, night_mode):
    """Switch on/off the speaker's night mode.

    :param night_mode: Enable or disable night mode
    :type night_mode: bool
    :raises NotSupportedException: If the device does not support
        night mode.
    """
    # Night mode is a soundbar-only EQ setting.
    if not self.is_soundbar:
        raise NotSupportedException("This device does not support night mode")
    eq_args = [
        ("InstanceID", 0),
        ("EQType", "NightMode"),
        ("DesiredValue", int(night_mode)),
    ]
    self.renderingControl.SetEQ(eq_args)
|
def night_mode(self, night_mode):
    """Switch on/off the speaker's night mode.

    :param night_mode: Enable or disable night mode
    :type night_mode: bool
    :raises NotSupportedException: If the device does not support
        night mode.
    """
    if not self.speaker_info:
        self.get_speaker_info()
    # Night mode is a soundbar-only EQ setting. Match the model name
    # case-insensitively (a Playbase reports 'Sonos Playbase', which the
    # old case-sensitive `"PLAYBAR" in ...` test rejected) and accept
    # all soundbar models, not just the PLAYBAR.
    model_name = self.speaker_info["model_name"].upper()
    if not any(model in model_name for model in ("PLAYBAR", "PLAYBASE", "BEAM")):
        message = "This device does not support night mode"
        raise NotSupportedException(message)
    self.renderingControl.SetEQ(
        [("InstanceID", 0), ("EQType", "NightMode"), ("DesiredValue", int(night_mode))]
    )
|
https://github.com/SoCo/SoCo/issues/633
|
my_zone
SoCo("192.168.2.253")
my_zone.get_speaker_info()
{u'display_version': '9.1', u'player_icon': '/img/icon-S11.png', u'uid': 'RINCON_XYZ', u'software_version': '45.1-56150', u'mac_address': u'MA-CA-DD-RE-S-S', u'hardware_version': '1.14.1.11-1', u'model_number': 'S11', u'serial_number': 'MA-CA-DD-RE-S-S:1', u'zone_name': 'Family Room', u'model_name': 'Sonos Playbase'}
my_zone.night_mode = True
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/soco/core.py", line 813, in night_mode
raise NotSupportedException(message)
soco.exceptions.NotSupportedException: This device does not support night mode
my_zone.dialog_mode = True
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/soco/core.py", line 851, in dialog_mode
raise NotSupportedException(message)
soco.exceptions.NotSupportedException: This device does not support dialog mode
|
soco.exceptions.NotSupportedException
|
def dialog_mode(self, dialog_mode):
    """Switch on/off the speaker's dialog mode.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: If the device does not support
        dialog mode.
    """
    # Dialog mode is a soundbar-only EQ setting.
    if not self.is_soundbar:
        raise NotSupportedException("This device does not support dialog mode")
    eq_args = [
        ("InstanceID", 0),
        ("EQType", "DialogLevel"),
        ("DesiredValue", int(dialog_mode)),
    ]
    self.renderingControl.SetEQ(eq_args)
|
def dialog_mode(self, dialog_mode):
    """Switch on/off the speaker's dialog mode.

    :param dialog_mode: Enable or disable dialog mode
    :type dialog_mode: bool
    :raises NotSupportedException: If the device does not support
        dialog mode.
    """
    if not self.speaker_info:
        self.get_speaker_info()
    # Dialog mode is a soundbar-only EQ setting. Match the model name
    # case-insensitively (a Playbase reports 'Sonos Playbase', which the
    # old case-sensitive `"PLAYBAR" in ...` test rejected) and accept
    # all soundbar models, not just the PLAYBAR.
    model_name = self.speaker_info["model_name"].upper()
    if not any(model in model_name for model in ("PLAYBAR", "PLAYBASE", "BEAM")):
        message = "This device does not support dialog mode"
        raise NotSupportedException(message)
    self.renderingControl.SetEQ(
        [
            ("InstanceID", 0),
            ("EQType", "DialogLevel"),
            ("DesiredValue", int(dialog_mode)),
        ]
    )
|
https://github.com/SoCo/SoCo/issues/633
|
my_zone
SoCo("192.168.2.253")
my_zone.get_speaker_info()
{u'display_version': '9.1', u'player_icon': '/img/icon-S11.png', u'uid': 'RINCON_XYZ', u'software_version': '45.1-56150', u'mac_address': u'MA-CA-DD-RE-S-S', u'hardware_version': '1.14.1.11-1', u'model_number': 'S11', u'serial_number': 'MA-CA-DD-RE-S-S:1', u'zone_name': 'Family Room', u'model_name': 'Sonos Playbase'}
my_zone.night_mode = True
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/soco/core.py", line 813, in night_mode
raise NotSupportedException(message)
soco.exceptions.NotSupportedException: This device does not support night mode
my_zone.dialog_mode = True
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/soco/core.py", line 851, in dialog_mode
raise NotSupportedException(message)
soco.exceptions.NotSupportedException: This device does not support dialog mode
|
soco.exceptions.NotSupportedException
|
def _strategy_simple(self, status_list, tasks, *args, kind=None, **kwargs):
    """Scale executors using the general strategy in 'simple' mode.

    Thin wrapper that delegates to ``_general_strategy``.

    :param status_list: per-executor status wrappers to strategize over
    :param tasks: task ids (not used by the general strategy)
    :param kind: forwarded verbatim (historically unused)
    """
    # Forward the caller's ``kind`` rather than clobbering it with a
    # hard-coded None, keeping the wrapper transparent.
    self._general_strategy(
        status_list, tasks, *args, strategy_type="simple", kind=kind, **kwargs
    )
|
def _strategy_simple(self, status_list, tasks, *args, kind=None, **kwargs):
    """Peek at the DFK and the executors specified.

    We assume here that tasks are not held in a runnable
    state, and that all tasks from an app would be sent to
    a single specific executor, i.e tasks cannot be specified
    to go to one of more executors.

    Args:
        - status_list: per-executor status wrappers to strategize over
        - tasks (task_ids): Not used here.

    KWargs:
        - kind (Not used)
    """
    for exec_status in status_list:
        executor = exec_status.executor
        label = executor.label
        if not executor.scaling_enabled:
            continue

        # Tasks that are either pending completion
        active_tasks = executor.outstanding

        status = exec_status.status
        # Dict[object, JobStatus]: job_id -> status

        self.unset_logging()

        # FIXME we need to handle case where provider does not define these
        # FIXME probably more of this logic should be moved to the provider
        min_blocks = executor.provider.min_blocks
        max_blocks = executor.provider.max_blocks
        if isinstance(executor, HighThroughputExecutor):
            tasks_per_node = executor.workers_per_node
        elif isinstance(executor, ExtremeScaleExecutor):
            tasks_per_node = executor.ranks_per_node
        else:
            # Fall back to one task per node for any other executor type;
            # previously tasks_per_node was left unbound here, causing a
            # NameError on the first use below.
            tasks_per_node = 1

        nodes_per_block = executor.provider.nodes_per_block
        parallelism = executor.provider.parallelism

        running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
        pending = sum([1 for x in status.values() if x.state == JobState.PENDING])
        active_blocks = running + pending
        active_slots = active_blocks * tasks_per_node * nodes_per_block

        if hasattr(executor, "connected_workers"):
            logger.debug(
                "Executor {} has {} active tasks, {}/{} running/pending blocks, and {} connected workers".format(
                    label, active_tasks, running, pending, executor.connected_workers
                )
            )
        else:
            logger.debug(
                "Executor {} has {} active tasks and {}/{} running/pending blocks".format(
                    label, active_tasks, running, pending
                )
            )

        # reset kill timer if executor has active tasks
        if active_tasks > 0 and self.executors[executor.label]["idle_since"]:
            self.executors[executor.label]["idle_since"] = None

        # Case 1
        # No tasks.
        if active_tasks == 0:
            # Case 1a
            # Fewer blocks that min_blocks
            if active_blocks <= min_blocks:
                # Ignore
                pass

            # Case 1b
            # More blocks than min_blocks. Scale down
            else:
                # We want to make sure that max_idletime is reached
                # before killing off resources
                if not self.executors[executor.label]["idle_since"]:
                    logger.debug(
                        "Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)".format(
                            label, self.max_idletime
                        )
                    )
                    self.executors[executor.label]["idle_since"] = time.time()

                idle_since = self.executors[executor.label]["idle_since"]
                if (time.time() - idle_since) > self.max_idletime:
                    # We have resources idle for the max duration,
                    # we have to scale_in now.
                    logger.debug(
                        "Idle time has reached {}s for executor {}; removing resources".format(
                            self.max_idletime, label
                        )
                    )
                    exec_status.scale_in(active_blocks - min_blocks)
                else:
                    pass
                    # logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since))

        # Case 2
        # More tasks than the available slots.
        elif (float(active_slots) / active_tasks) < parallelism:
            # Case 2a
            # We have the max blocks possible
            if active_blocks >= max_blocks:
                # Ignore since we already have the max nodes
                pass

            # Case 2b
            else:
                excess = math.ceil((active_tasks * parallelism) - active_slots)
                excess_blocks = math.ceil(
                    float(excess) / (tasks_per_node * nodes_per_block)
                )
                excess_blocks = min(excess_blocks, max_blocks - active_blocks)
                logger.debug("Requesting {} more blocks".format(excess_blocks))
                exec_status.scale_out(excess_blocks)

        elif active_slots == 0 and active_tasks > 0:
            # Case 4
            # Check if slots are being lost quickly ?
            logger.debug("Requesting single slot")
            if active_blocks < max_blocks:
                exec_status.scale_out(1)

        # Case 3
        # tasks ~ slots
        else:
            # logger.debug("Strategy: Case 3")
            pass
|
https://github.com/Parsl/parsl/issues/1862
|
2020-09-10 00:15:14 parsl.dataflow.dflow:509 [INFO] Task 0 launched on executor medium
2020-09-10 00:15:14 workflow:68 [INFO] Waiting for validation task to complete...
2020-09-10 00:15:17 parsl.providers.torque.torque:128 [DEBUG] coerced job_id 1838438.wlm01 -> 1838438.wlm01
2020-09-10 00:15:17 parsl.dataflow.strategy:332 [DEBUG] Executor medium has 0 active tasks, 0/1 running/pending blocks, and 0 connected workers
2020-09-10 00:15:17 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/task_status_poller.py", line 66, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/strategy.py", line 302, in _strategy_htex_auto_scale
for label, executor in self.dfk.executors.items():
RuntimeError: dictionary changed size during iteration
2020-09-10 00:15:22 parsl.providers.torque.torque:128 [DEBUG] coerced job_id 1838438.wlm01 -> 1838438.wlm01
|
RuntimeError
|
def _strategy_htex_auto_scale(self, status_list, tasks, *args, kind=None, **kwargs):
    """Scale executors using the general strategy in 'htex' mode.

    Thin wrapper that delegates to ``_general_strategy``, which adds
    HTEX-specific idle-block scale-in on top of the simple strategy.

    :param status_list: per-executor status wrappers to strategize over
    :param tasks: task ids (not used by the general strategy)
    :param kind: forwarded verbatim (historically unused)
    """
    # Forward the caller's ``kind`` rather than clobbering it with a
    # hard-coded None, keeping the wrapper transparent.
    self._general_strategy(
        status_list, tasks, *args, strategy_type="htex", kind=kind, **kwargs
    )
|
def _strategy_htex_auto_scale(self, tasks, *args, kind=None, **kwargs):
    """HTEX specific auto scaling strategy

    This strategy works only for HTEX. This strategy will scale up by
    requesting additional compute resources via the provider when the
    workload requirements exceed the provisioned capacity. The scale out
    behavior is exactly like the 'simple' strategy.

    If there are idle blocks during execution, this strategy will terminate
    those idle blocks specifically. When # of tasks >> # of blocks, HTEX places
    tasks evenly across blocks, which makes it rather difficult to ensure that
    some blocks will reach 0% utilization. Consequently, this strategy can be
    expected to scale down effectively only when # of workers, or tasks executing
    per block is close to 1.

    Args:
        - tasks (task_ids): Not used here.

    KWargs:
        - kind (Not used)
    """
    # Iterate over a snapshot of the executors dict: this callback runs on
    # the flow-control thread while the DFK may register new executors,
    # and iterating the live dict raises
    # "RuntimeError: dictionary changed size during iteration".
    for label, executor in list(self.dfk.executors.items()):
        if not executor.scaling_enabled:
            continue

        # Tasks that are either pending completion
        active_tasks = executor.outstanding

        status = executor.status()
        self.unset_logging()

        # FIXME we need to handle case where provider does not define these
        # FIXME probably more of this logic should be moved to the provider
        min_blocks = executor.provider.min_blocks
        max_blocks = executor.provider.max_blocks
        if isinstance(executor, HighThroughputExecutor):
            tasks_per_node = executor.workers_per_node
        elif isinstance(executor, ExtremeScaleExecutor):
            tasks_per_node = executor.ranks_per_node
        else:
            # Fall back to one task per node for any other executor type;
            # previously tasks_per_node was left unbound here, causing a
            # NameError on the first use below.
            tasks_per_node = 1

        nodes_per_block = executor.provider.nodes_per_block
        parallelism = executor.provider.parallelism

        running = sum([1 for x in status.values() if x.state == JobState.RUNNING])
        pending = sum([1 for x in status.values() if x.state == JobState.PENDING])
        active_blocks = running + pending
        active_slots = active_blocks * tasks_per_node * nodes_per_block

        if hasattr(executor, "connected_workers"):
            logger.debug(
                "Executor {} has {} active tasks, {}/{} running/pending blocks, and {} connected workers".format(
                    label, active_tasks, running, pending, executor.connected_workers
                )
            )
        else:
            logger.debug(
                "Executor {} has {} active tasks and {}/{} running/pending blocks".format(
                    label, active_tasks, running, pending
                )
            )

        # reset kill timer if executor has active tasks
        if active_tasks > 0 and self.executors[executor.label]["idle_since"]:
            self.executors[executor.label]["idle_since"] = None

        # Case 1
        # No tasks.
        if active_tasks == 0:
            # Case 1a
            # Fewer blocks that min_blocks
            if active_blocks <= min_blocks:
                # Ignore
                pass

            # Case 1b
            # More blocks than min_blocks. Scale down
            else:
                # We want to make sure that max_idletime is reached
                # before killing off resources
                if not self.executors[executor.label]["idle_since"]:
                    logger.debug(
                        "Executor {} has 0 active tasks; starting kill timer (if idle time exceeds {}s, resources will be removed)".format(
                            label, self.max_idletime
                        )
                    )
                    self.executors[executor.label]["idle_since"] = time.time()

                idle_since = self.executors[executor.label]["idle_since"]
                if (time.time() - idle_since) > self.max_idletime:
                    # We have resources idle for the max duration,
                    # we have to scale_in now.
                    logger.debug(
                        "Idle time has reached {}s for executor {}; removing resources".format(
                            self.max_idletime, label
                        )
                    )
                    executor.scale_in(active_blocks - min_blocks)
                else:
                    pass
                    # logger.debug("Strategy: Case.1b. Waiting for timer : {0}".format(idle_since))

        # Case 2
        # More tasks than the available slots.
        elif (float(active_slots) / active_tasks) < parallelism:
            # Case 2a
            # We have the max blocks possible
            if active_blocks >= max_blocks:
                # Ignore since we already have the max nodes
                pass

            # Case 2b
            else:
                excess = math.ceil((active_tasks * parallelism) - active_slots)
                excess_blocks = math.ceil(
                    float(excess) / (tasks_per_node * nodes_per_block)
                )
                excess_blocks = min(excess_blocks, max_blocks - active_blocks)
                logger.debug("Requesting {} more blocks".format(excess_blocks))
                executor.scale_out(excess_blocks)

        elif active_slots == 0 and active_tasks > 0:
            # Case 4
            logger.debug("Requesting single slot")
            if active_blocks < max_blocks:
                executor.scale_out(1)

        # Case 4
        # More slots than tasks
        elif active_slots > 0 and active_slots > active_tasks:
            # HTEX-only: ask for a non-forced scale-in of one idle block
            logger.debug("More slots than tasks")
            if isinstance(executor, HighThroughputExecutor):
                if active_blocks > min_blocks:
                    executor.scale_in(1, force=False, max_idletime=self.max_idletime)

        # Case 3
        # tasks ~ slots
        else:
            # logger.debug("Strategy: Case 3")
            pass
|
https://github.com/Parsl/parsl/issues/1862
|
2020-09-10 00:15:14 parsl.dataflow.dflow:509 [INFO] Task 0 launched on executor medium
2020-09-10 00:15:14 workflow:68 [INFO] Waiting for validation task to complete...
2020-09-10 00:15:17 parsl.providers.torque.torque:128 [DEBUG] coerced job_id 1838438.wlm01 -> 1838438.wlm01
2020-09-10 00:15:17 parsl.dataflow.strategy:332 [DEBUG] Executor medium has 0 active tasks, 0/1 running/pending blocks, and 0 connected workers
2020-09-10 00:15:17 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/task_status_poller.py", line 66, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/strategy.py", line 302, in _strategy_htex_auto_scale
for label, executor in self.dfk.executors.items():
RuntimeError: dictionary changed size during iteration
2020-09-10 00:15:22 parsl.providers.torque.torque:128 [DEBUG] coerced job_id 1838438.wlm01 -> 1838438.wlm01
|
RuntimeError
|
def scale_in(self, n, force=True, max_idletime=None):
    """Scale in *n* blocks via the wrapped executor and record the result.

    :param n: number of blocks to scale in
    :param force: when True (and no *max_idletime*), remove blocks
        unconditionally
    :param max_idletime: when given, only remove blocks idle at least
        this many seconds; forwarded to the executor
    :return: the ids of the removed blocks, or None
    """
    if force and not max_idletime:
        block_ids = self._executor.scale_in(n)
    else:
        block_ids = self._executor.scale_in(n, force=force, max_idletime=max_idletime)
    if block_ids is not None:
        # Mark the removed blocks cancelled and report them to monitoring.
        cancelled = {}
        for block_id in block_ids:
            cancelled[block_id] = JobStatus(JobState.CANCELLED)
            del self._status[block_id]
        self.send_monitoring_info(cancelled, block_id_type="internal")
    return block_ids
|
def scale_in(self, n, force=True, max_idletime=None):
    """Scale in *n* blocks via the wrapped executor and record the result.

    Backward compatible: callers passing only *n* get the previous
    unconditional behaviour.

    :param n: number of blocks to scale in
    :param force: when True (and no *max_idletime*), remove blocks
        unconditionally (previous behaviour)
    :param max_idletime: when given, only remove blocks idle at least
        this many seconds; forwarded to the executor, enabling the
        selective scale-in the htex auto-scale strategy needs
    :return: the ids of the removed blocks, or None
    """
    if force and not max_idletime:
        ids = self._executor.scale_in(n)
    else:
        ids = self._executor.scale_in(n, force=force, max_idletime=max_idletime)
    if ids is not None:
        # Mark the removed blocks cancelled and report them to monitoring.
        new_status = {}
        for id in ids:
            new_status[id] = JobStatus(JobState.CANCELLED)
            del self._status[id]
        self.send_monitoring_info(new_status, block_id_type="internal")
    return ids
|
https://github.com/Parsl/parsl/issues/1862
|
2020-09-10 00:15:14 parsl.dataflow.dflow:509 [INFO] Task 0 launched on executor medium
2020-09-10 00:15:14 workflow:68 [INFO] Waiting for validation task to complete...
2020-09-10 00:15:17 parsl.providers.torque.torque:128 [DEBUG] coerced job_id 1838438.wlm01 -> 1838438.wlm01
2020-09-10 00:15:17 parsl.dataflow.strategy:332 [DEBUG] Executor medium has 0 active tasks, 0/1 running/pending blocks, and 0 connected workers
2020-09-10 00:15:17 parsl.dataflow.flow_control:114 [ERROR] Flow control callback threw an exception - logging and proceeding anyway
Traceback (most recent call last):
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/flow_control.py", line 112, in make_callback
self.callback(tasks=self._event_buffer, kind=kind)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/task_status_poller.py", line 66, in poll
self._strategy.strategize(self._poll_items, tasks)
File "/home/akila/Documents/NUS_CSI/Parsl/parsl.git/parsl/dataflow/strategy.py", line 302, in _strategy_htex_auto_scale
for label, executor in self.dfk.executors.items():
RuntimeError: dictionary changed size during iteration
2020-09-10 00:15:22 parsl.providers.torque.torque:128 [DEBUG] coerced job_id 1838438.wlm01 -> 1838438.wlm01
|
RuntimeError
|
def start(self, priority_queue, node_queue, resource_queue) -> None:
self._kill_event = threading.Event()
self._priority_queue_pull_thread = threading.Thread(
target=self._migrate_logs_to_internal,
args=(
priority_queue,
"priority",
self._kill_event,
),
name="Monitoring-migrate-priority",
daemon=True,
)
self._priority_queue_pull_thread.start()
self._node_queue_pull_thread = threading.Thread(
target=self._migrate_logs_to_internal,
args=(
node_queue,
"node",
self._kill_event,
),
name="Monitoring-migrate-node",
daemon=True,
)
self._node_queue_pull_thread.start()
self._resource_queue_pull_thread = threading.Thread(
target=self._migrate_logs_to_internal,
args=(
resource_queue,
"resource",
self._kill_event,
),
name="Monitoring-migrate-resource",
daemon=True,
)
self._resource_queue_pull_thread.start()
"""
maintain a set to track the tasks that are already INSERTed into database
to prevent race condition that the first resource message (indicate 'running' state)
arrives before the first task message. In such a case, the resource table
primary key would be violated.
If that happens, the message will be added to deferred_resource_messages and processed later.
"""
inserted_tasks = set() # type: Set[object]
"""
like inserted_tasks but for task,try tuples
"""
inserted_tries = set() # type: Set[Any]
# for any task ID, we can defer exactly one message, which is the
# assumed-to-be-unique first message (with first message flag set).
# The code prior to this patch will discard previous message in
# the case of multiple messages to defer.
deferred_resource_messages = {} # type: Dict[str, Any]
while (
not self._kill_event.is_set()
or self.pending_priority_queue.qsize() != 0
or self.pending_resource_queue.qsize() != 0
or priority_queue.qsize() != 0
or resource_queue.qsize() != 0
):
"""
WORKFLOW_INFO and TASK_INFO messages (i.e. priority messages)
"""
logger.debug(
"""Checking STOP conditions: {}, {}, {}, {}, {}""".format(
self._kill_event.is_set(),
self.pending_priority_queue.qsize() != 0,
self.pending_resource_queue.qsize() != 0,
priority_queue.qsize() != 0,
resource_queue.qsize() != 0,
)
)
# This is the list of resource messages which can be reprocessed as if they
# had just arrived because the corresponding first task message has been
# processed (corresponding by task id)
reprocessable_first_resource_messages = []
# Get a batch of priority messages
priority_messages = self._get_messages_in_batch(
self.pending_priority_queue,
interval=self.batching_interval,
threshold=self.batching_threshold,
)
if priority_messages:
logger.debug(
"Got {} messages from priority queue".format(len(priority_messages))
)
(
task_info_update_messages,
task_info_insert_messages,
task_info_all_messages,
) = [], [], []
try_update_messages, try_insert_messages, try_all_messages = [], [], []
for msg_type, msg in priority_messages:
if msg_type.value == MessageType.WORKFLOW_INFO.value:
if "python_version" in msg: # workflow start message
logger.debug("Inserting workflow start info to WORKFLOW table")
self._insert(table=WORKFLOW, messages=[msg])
self.workflow_start_message = msg
else: # workflow end message
logger.debug("Updating workflow end info to WORKFLOW table")
self._update(
table=WORKFLOW,
columns=[
"run_id",
"tasks_failed_count",
"tasks_completed_count",
"time_completed",
],
messages=[msg],
)
self.workflow_end = True
elif msg_type.value == MessageType.TASK_INFO.value:
task_try_id = str(msg["task_id"]) + "." + str(msg["try_id"])
task_info_all_messages.append(msg)
if msg["task_id"] in inserted_tasks:
task_info_update_messages.append(msg)
else:
inserted_tasks.add(msg["task_id"])
task_info_insert_messages.append(msg)
try_all_messages.append(msg)
if task_try_id in inserted_tries:
try_update_messages.append(msg)
else:
inserted_tries.add(task_try_id)
try_insert_messages.append(msg)
# check if there is a left_message for this task
if task_try_id in deferred_resource_messages:
reprocessable_first_resource_messages.append(
deferred_resource_messages.pop(task_try_id)
)
else:
raise RuntimeError(
"Unexpected message type {} received on priority queue".format(
msg_type
)
)
logger.debug("Updating and inserting TASK_INFO to all tables")
logger.debug(
"Updating {} TASK_INFO into workflow table".format(
len(task_info_update_messages)
)
)
self._update(
table=WORKFLOW,
columns=["run_id", "tasks_failed_count", "tasks_completed_count"],
messages=task_info_all_messages,
)
if task_info_insert_messages:
self._insert(table=TASK, messages=task_info_insert_messages)
logger.debug(
"There are {} inserted task records".format(len(inserted_tasks))
)
if task_info_update_messages:
logger.debug(
"Updating {} TASK_INFO into task table".format(
len(task_info_update_messages)
)
)
self._update(
table=TASK,
columns=[
"task_time_invoked",
"task_time_returned",
"run_id",
"task_id",
"task_fail_count",
"task_hashsum",
],
messages=task_info_update_messages,
)
logger.debug(
"Inserting {} task_info_all_messages into status table".format(
len(task_info_all_messages)
)
)
self._insert(table=STATUS, messages=task_info_all_messages)
if try_insert_messages:
logger.debug(
"Inserting {} TASK_INFO to try table".format(
len(try_insert_messages)
)
)
self._insert(table=TRY, messages=try_insert_messages)
logger.debug(
"There are {} inserted task records".format(len(inserted_tasks))
)
if try_update_messages:
logger.debug(
"Updating {} TASK_INFO into try table".format(
len(try_update_messages)
)
)
self._update(
table=TRY,
columns=[
"run_id",
"task_id",
"try_id",
"task_fail_history",
"task_try_time_launched",
"task_try_time_returned",
],
messages=try_update_messages,
)
"""
NODE_INFO messages
"""
node_info_messages = self._get_messages_in_batch(
self.pending_node_queue,
interval=self.batching_interval,
threshold=self.batching_threshold,
)
if node_info_messages:
logger.debug(
"Got {} messages from node queue".format(len(node_info_messages))
)
self._insert(table=NODE, messages=node_info_messages)
"""
Resource info messages
"""
resource_messages = self._get_messages_in_batch(
self.pending_resource_queue,
interval=self.batching_interval,
threshold=self.batching_threshold,
)
if resource_messages:
logger.debug(
"Got {} messages from resource queue, {} reprocessable".format(
len(resource_messages), len(reprocessable_first_resource_messages)
)
)
insert_resource_messages = []
for msg in resource_messages:
task_try_id = str(msg["task_id"]) + "." + str(msg["try_id"])
if msg["first_msg"]:
# Update the running time to try table if first message
msg["task_status_name"] = States.running.name
msg["task_try_time_running"] = msg["timestamp"]
if (
task_try_id in inserted_tries
): # TODO: needs to become task_id and try_id, and check against inserted_tries
reprocessable_first_resource_messages.append(msg)
else:
if task_try_id in deferred_resource_messages:
logger.error(
"Task {} already has a deferred resource message. Discarding previous message.".format(
msg["task_id"]
)
)
deferred_resource_messages[task_try_id] = msg
else:
# Insert to resource table if not first message
insert_resource_messages.append(msg)
if insert_resource_messages:
self._insert(table=RESOURCE, messages=insert_resource_messages)
if reprocessable_first_resource_messages:
self._insert(table=STATUS, messages=reprocessable_first_resource_messages)
self._update(
table=TRY,
columns=[
"task_try_time_running",
"run_id",
"task_id",
"try_id",
"hostname",
],
messages=reprocessable_first_resource_messages,
)
|
def start(self, priority_queue, node_queue, resource_queue) -> None:
self._kill_event = threading.Event()
self._priority_queue_pull_thread = threading.Thread(
target=self._migrate_logs_to_internal,
args=(
priority_queue,
"priority",
self._kill_event,
),
name="Monitoring-migrate-priority",
daemon=True,
)
self._priority_queue_pull_thread.start()
self._node_queue_pull_thread = threading.Thread(
target=self._migrate_logs_to_internal,
args=(
node_queue,
"node",
self._kill_event,
),
name="Monitoring-migrate-node",
daemon=True,
)
self._node_queue_pull_thread.start()
self._resource_queue_pull_thread = threading.Thread(
target=self._migrate_logs_to_internal,
args=(
resource_queue,
"resource",
self._kill_event,
),
name="Monitoring-migrate-resource",
daemon=True,
)
self._resource_queue_pull_thread.start()
"""
maintain a set to track the tasks that are already INSERTed into database
to prevent race condition that the first resource message (indicate 'running' state)
arrives before the first task message. In such a case, the resource table
primary key would be violated.
If that happens, the message will be added to deferred_resource_messages and processed later.
"""
inserted_tasks = set() # type: Set[object]
"""
like inserted_tasks but for task,try tuples
"""
inserted_tries = set() # type: Set[Any]
# for any task ID, we can defer exactly one message, which is the
# assumed-to-be-unique first message (with first message flag set).
# The code prior to this patch will discard previous message in
# the case of multiple messages to defer.
deferred_resource_messages = {} # type: Dict[str, Any]
while (
not self._kill_event.is_set()
or self.pending_priority_queue.qsize() != 0
or self.pending_resource_queue.qsize() != 0
or priority_queue.qsize() != 0
or resource_queue.qsize() != 0
):
"""
WORKFLOW_INFO and TASK_INFO messages (i.e. priority messages)
"""
logger.debug(
"""Checking STOP conditions: {}, {}, {}, {}, {}""".format(
self._kill_event.is_set(),
self.pending_priority_queue.qsize() != 0,
self.pending_resource_queue.qsize() != 0,
priority_queue.qsize() != 0,
resource_queue.qsize() != 0,
)
)
# This is the list of resource messages which can be reprocessed as if they
# had just arrived because the corresponding first task message has been
# processed (corresponding by task id)
reprocessable_first_resource_messages = []
# Get a batch of priority messages
priority_messages = self._get_messages_in_batch(
self.pending_priority_queue,
interval=self.batching_interval,
threshold=self.batching_threshold,
)
if priority_messages:
logger.debug(
"Got {} messages from priority queue".format(len(priority_messages))
)
(
task_info_update_messages,
task_info_insert_messages,
task_info_all_messages,
) = [], [], []
try_update_messages, try_insert_messages, try_all_messages = [], [], []
for msg_type, msg in priority_messages:
if msg_type.value == MessageType.WORKFLOW_INFO.value:
if "python_version" in msg: # workflow start message
logger.debug("Inserting workflow start info to WORKFLOW table")
self._insert(table=WORKFLOW, messages=[msg])
self.workflow_start_message = msg
else: # workflow end message
logger.debug("Updating workflow end info to WORKFLOW table")
self._update(
table=WORKFLOW,
columns=[
"run_id",
"tasks_failed_count",
"tasks_completed_count",
"time_completed",
],
messages=[msg],
)
self.workflow_end = True
elif msg_type.value == MessageType.TASK_INFO.value:
task_try_id = str(msg["task_id"]) + "." + str(msg["try_id"])
task_info_all_messages.append(msg)
if msg["task_id"] in inserted_tasks:
task_info_update_messages.append(msg)
else:
inserted_tasks.add(msg["task_id"])
task_info_insert_messages.append(msg)
try_all_messages.append(msg)
if task_try_id in inserted_tries:
try_update_messages.append(msg)
else:
inserted_tries.add(task_try_id)
try_insert_messages.append(msg)
# check if there is a left_message for this task
if task_try_id in deferred_resource_messages:
reprocessable_first_resource_messages.append(
deferred_resource_messages.pop(task_try_id)
)
else:
raise RuntimeError(
"Unexpected message type {} received on priority queue".format(
msg_type
)
)
logger.debug("Updating and inserting TASK_INFO to all tables")
logger.debug(
"Updating {} TASK_INFO into workflow table".format(
len(task_info_update_messages)
)
)
self._update(
table=WORKFLOW,
columns=["run_id", "tasks_failed_count", "tasks_completed_count"],
messages=task_info_all_messages,
)
if task_info_insert_messages:
self._insert(table=TASK, messages=task_info_insert_messages)
logger.debug(
"There are {} inserted task records".format(len(inserted_tasks))
)
if task_info_update_messages:
logger.debug(
"Updating {} TASK_INFO into task table".format(
len(task_info_update_messages)
)
)
self._update(
table=TASK,
columns=[
"task_time_invoked",
"task_time_returned",
"run_id",
"task_id",
"task_fail_count",
"task_hashsum",
],
messages=task_info_update_messages,
)
logger.debug(
"Inserting {} task_info_all_messages into status table".format(
len(task_info_all_messages)
)
)
self._insert(table=STATUS, messages=task_info_all_messages)
if try_insert_messages:
logger.debug(
"Inserting {} TASK_INFO to try table".format(
len(try_insert_messages)
)
)
self._insert(table=TRY, messages=try_insert_messages)
logger.debug(
"There are {} inserted task records".format(len(inserted_tasks))
)
if try_update_messages:
logger.debug(
"Updating {} TASK_INFO into try table".format(
len(try_update_messages)
)
)
self._update(
table=TRY,
columns=[
"run_id",
"task_id",
"try_id",
"task_fail_history",
"task_try_time_launched",
"task_try_time_returned",
],
messages=try_update_messages,
)
"""
NODE_INFO messages
"""
node_info_messages = self._get_messages_in_batch(
self.pending_node_queue,
interval=self.batching_interval,
threshold=self.batching_threshold,
)
if node_info_messages:
logger.debug(
"Got {} messages from node queue".format(len(node_info_messages))
)
self._insert(table=NODE, messages=node_info_messages)
"""
Resource info messages
"""
resource_messages = self._get_messages_in_batch(
self.pending_resource_queue,
interval=self.batching_interval,
threshold=self.batching_threshold,
)
if resource_messages:
logger.debug(
"Got {} messages from resource queue, {} reprocessable".format(
len(resource_messages), len(reprocessable_first_resource_messages)
)
)
self._insert(table=RESOURCE, messages=resource_messages)
for msg in resource_messages:
task_try_id = str(msg["task_id"]) + "." + str(msg["try_id"])
if msg["first_msg"]:
msg["task_status_name"] = States.running.name
msg["task_try_time_running"] = msg["timestamp"]
if (
task_try_id in inserted_tries
): # TODO: needs to become task_id and try_id, and check against inserted_tries
reprocessable_first_resource_messages.append(msg)
else:
if task_try_id in deferred_resource_messages:
logger.error(
"Task {} already has a deferred resource message. Discarding previous message.".format(
msg["task_id"]
)
)
deferred_resource_messages[task_try_id] = msg
if reprocessable_first_resource_messages:
self._insert(table=STATUS, messages=reprocessable_first_resource_messages)
self._update(
table=TRY,
columns=[
"task_try_time_running",
"run_id",
"task_id",
"try_id",
"hostname",
],
messages=reprocessable_first_resource_messages,
)
|
https://github.com/Parsl/parsl/issues/1837
|
2020-08-08 17:10:06,436 - root - ERROR - Exception reading IO counters for child memory_percent. Recorded IO usage may be incomplete
Traceback (most recent call last):
File "/lus/theta-fs0/projects/CSC249ADCD08/colmena/env/lib/python3.7/site-packages/parsl/monitoring/monitoring.py", line 551, in monitor
d['psutil_process_disk_write'] += child.io_counters().write_bytes
AttributeError: 'Process' object has no attribute 'io_counters'
|
AttributeError
|
def monitor_wrapper(
f, try_id, task_id, monitoring_hub_url, run_id, logging_level, sleep_dur
):
"""Internal
Wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.
"""
def wrapped(*args, **kwargs):
# Send first message to monitoring router
try:
monitor(
os.getpid(),
task_id,
monitoring_hub_url,
run_id,
logging_level,
sleep_dur,
first_message=True,
)
except Exception:
pass
# create the monitor process and start
p = Process(
target=monitor,
args=(
os.getpid(),
try_id,
task_id,
monitoring_hub_url,
run_id,
logging_level,
sleep_dur,
),
name="Monitor-Wrapper-{}".format(task_id),
)
p.start()
try:
return f(*args, **kwargs)
finally:
# There's a chance of zombification if the workers are killed by some signals
p.terminate()
p.join()
return wrapped
|
def monitor_wrapper(
f, try_id, task_id, monitoring_hub_url, run_id, logging_level, sleep_dur
):
"""Internal
Wrap the Parsl app with a function that will call the monitor function and point it at the correct pid when the task begins.
"""
def wrapped(*args, **kwargs):
command_q = Queue(maxsize=10)
p = Process(
target=monitor,
args=(
os.getpid(),
try_id,
task_id,
monitoring_hub_url,
run_id,
command_q,
logging_level,
sleep_dur,
),
name="Monitor-Wrapper-{}".format(task_id),
)
p.start()
try:
try:
return f(*args, **kwargs)
finally:
command_q.put("Finished")
p.join()
finally:
# There's a chance of zombification if the workers are killed by some signals
p.terminate()
p.join()
return wrapped
|
https://github.com/Parsl/parsl/issues/1837
|
2020-08-08 17:10:06,436 - root - ERROR - Exception reading IO counters for child memory_percent. Recorded IO usage may be incomplete
Traceback (most recent call last):
File "/lus/theta-fs0/projects/CSC249ADCD08/colmena/env/lib/python3.7/site-packages/parsl/monitoring/monitoring.py", line 551, in monitor
d['psutil_process_disk_write'] += child.io_counters().write_bytes
AttributeError: 'Process' object has no attribute 'io_counters'
|
AttributeError
|
def wrapped(*args, **kwargs):
# Send first message to monitoring router
try:
monitor(
os.getpid(),
task_id,
monitoring_hub_url,
run_id,
logging_level,
sleep_dur,
first_message=True,
)
except Exception:
pass
# create the monitor process and start
p = Process(
target=monitor,
args=(
os.getpid(),
try_id,
task_id,
monitoring_hub_url,
run_id,
logging_level,
sleep_dur,
),
name="Monitor-Wrapper-{}".format(task_id),
)
p.start()
try:
return f(*args, **kwargs)
finally:
# There's a chance of zombification if the workers are killed by some signals
p.terminate()
p.join()
|
def wrapped(*args, **kwargs):
command_q = Queue(maxsize=10)
p = Process(
target=monitor,
args=(
os.getpid(),
try_id,
task_id,
monitoring_hub_url,
run_id,
command_q,
logging_level,
sleep_dur,
),
name="Monitor-Wrapper-{}".format(task_id),
)
p.start()
try:
try:
return f(*args, **kwargs)
finally:
command_q.put("Finished")
p.join()
finally:
# There's a chance of zombification if the workers are killed by some signals
p.terminate()
p.join()
|
https://github.com/Parsl/parsl/issues/1837
|
2020-08-08 17:10:06,436 - root - ERROR - Exception reading IO counters for child memory_percent. Recorded IO usage may be incomplete
Traceback (most recent call last):
File "/lus/theta-fs0/projects/CSC249ADCD08/colmena/env/lib/python3.7/site-packages/parsl/monitoring/monitoring.py", line 551, in monitor
d['psutil_process_disk_write'] += child.io_counters().write_bytes
AttributeError: 'Process' object has no attribute 'io_counters'
|
AttributeError
|
def monitor(
pid,
try_id,
task_id,
monitoring_hub_url,
run_id,
logging_level=logging.INFO,
sleep_dur=10,
first_message=False,
):
"""Internal
Monitors the Parsl task's resources by pointing psutil to the task's pid and watching it and its children.
"""
import platform
import time
radio = UDPRadio(monitoring_hub_url, source_id=task_id)
if first_message:
msg = {
"run_id": run_id,
"task_id": task_id,
"hostname": platform.node(),
"first_msg": first_message,
"timestamp": datetime.datetime.now(),
}
radio.send(msg)
return
import psutil
import logging
format_string = (
"%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
)
logging.basicConfig(
filename="{logbase}/monitor.{task_id}.{pid}.log".format(
logbase="/tmp", task_id=task_id, pid=pid
),
level=logging_level,
format=format_string,
)
logging.debug("start of monitor")
# these values are simple to log. Other information is available in special formats such as memory below.
simple = [
"cpu_num",
"cpu_percent",
"create_time",
"cwd",
"exe",
"memory_percent",
"nice",
"name",
"num_threads",
"pid",
"ppid",
"status",
"username",
]
# values that can be summed up to see total resources used by task process and its children
summable_values = ["cpu_percent", "memory_percent", "num_threads"]
pm = psutil.Process(pid)
pm.cpu_percent()
children_user_time = {}
children_system_time = {}
total_children_user_time = 0.0
total_children_system_time = 0.0
while True:
logging.debug("start of monitoring loop")
try:
d = {
"psutil_process_" + str(k): v
for k, v in pm.as_dict().items()
if k in simple
}
d["run_id"] = run_id
d["task_id"] = task_id
d["try_id"] = try_id
d["resource_monitoring_interval"] = sleep_dur
d["hostname"] = platform.node()
d["first_msg"] = first_message
d["timestamp"] = datetime.datetime.now()
logging.debug("getting children")
children = pm.children(recursive=True)
logging.debug("got children")
d["psutil_cpu_count"] = psutil.cpu_count()
d["psutil_process_memory_virtual"] = pm.memory_info().vms
d["psutil_process_memory_resident"] = pm.memory_info().rss
d["psutil_process_time_user"] = pm.cpu_times().user
d["psutil_process_time_system"] = pm.cpu_times().system
d["psutil_process_children_count"] = len(children)
try:
d["psutil_process_disk_write"] = pm.io_counters().write_bytes
d["psutil_process_disk_read"] = pm.io_counters().read_bytes
except Exception:
# occasionally pid temp files that hold this information are unvailable to be read so set to zero
logging.exception(
"Exception reading IO counters for main process. Recorded IO usage may be incomplete",
exc_info=True,
)
d["psutil_process_disk_write"] = 0
d["psutil_process_disk_read"] = 0
for child in children:
for k, v in child.as_dict(attrs=summable_values).items():
d["psutil_process_" + str(k)] += v
child_user_time = child.cpu_times().user
child_system_time = child.cpu_times().system
total_children_user_time += child_user_time - children_user_time.get(
child.pid, 0
)
total_children_system_time += (
child_system_time - children_system_time.get(child.pid, 0)
)
children_user_time[child.pid] = child_user_time
children_system_time[child.pid] = child_system_time
d["psutil_process_memory_virtual"] += child.memory_info().vms
d["psutil_process_memory_resident"] += child.memory_info().rss
try:
d["psutil_process_disk_write"] += child.io_counters().write_bytes
d["psutil_process_disk_read"] += child.io_counters().read_bytes
except Exception:
# occassionally pid temp files that hold this information are unvailable to be read so add zero
logging.exception(
"Exception reading IO counters for child {k}. Recorded IO usage may be incomplete".format(
k=k
),
exc_info=True,
)
d["psutil_process_disk_write"] += 0
d["psutil_process_disk_read"] += 0
d["psutil_process_time_user"] += total_children_user_time
d["psutil_process_time_system"] += total_children_system_time
logging.debug("sending message")
radio.send(d)
except Exception:
logging.exception(
"Exception getting the resource usage. Not sending usage to Hub",
exc_info=True,
)
logging.debug("sleeping")
time.sleep(sleep_dur)
logger.info("Monitor exiting")
|
def monitor(
pid,
try_id,
task_id,
monitoring_hub_url,
run_id,
command_q,
logging_level=logging.INFO,
sleep_dur=10,
):
"""Internal
Monitors the Parsl task's resources by pointing psutil to the task's pid and watching it and its children.
"""
import psutil
import platform
import logging
import time
import queue
format_string = (
"%(asctime)s.%(msecs)03d %(name)s:%(lineno)d [%(levelname)s] %(message)s"
)
logging.basicConfig(
filename="{logbase}/monitor.{task_id}.{pid}.log".format(
logbase="/tmp", task_id=task_id, pid=pid
),
level=logging_level,
format=format_string,
)
logging.debug("start of monitor")
radio = UDPRadio(monitoring_hub_url, source_id=task_id)
# these values are simple to log. Other information is available in special formats such as memory below.
simple = [
"cpu_num",
"cpu_percent",
"create_time",
"cwd",
"exe",
"memory_percent",
"nice",
"name",
"num_threads",
"pid",
"ppid",
"status",
"username",
]
# values that can be summed up to see total resources used by task process and its children
summable_values = ["cpu_percent", "memory_percent", "num_threads"]
pm = psutil.Process(pid)
pm.cpu_percent()
first_msg = True
children_user_time = {}
children_system_time = {}
total_children_user_time = 0.0
total_children_system_time = 0.0
while True:
logging.debug("start of monitoring loop")
try:
d = {
"psutil_process_" + str(k): v
for k, v in pm.as_dict().items()
if k in simple
}
d["run_id"] = run_id
d["task_id"] = task_id
d["try_id"] = try_id
d["resource_monitoring_interval"] = sleep_dur
d["hostname"] = platform.node()
d["first_msg"] = first_msg
d["timestamp"] = datetime.datetime.now()
logging.debug("getting children")
children = pm.children(recursive=True)
logging.debug("got children")
d["psutil_cpu_count"] = psutil.cpu_count()
d["psutil_process_memory_virtual"] = pm.memory_info().vms
d["psutil_process_memory_resident"] = pm.memory_info().rss
d["psutil_process_time_user"] = pm.cpu_times().user
d["psutil_process_time_system"] = pm.cpu_times().system
d["psutil_process_children_count"] = len(children)
try:
d["psutil_process_disk_write"] = pm.io_counters().write_bytes
d["psutil_process_disk_read"] = pm.io_counters().read_bytes
except Exception:
# occasionally pid temp files that hold this information are unvailable to be read so set to zero
logging.exception(
"Exception reading IO counters for main process. Recorded IO usage may be incomplete",
exc_info=True,
)
d["psutil_process_disk_write"] = 0
d["psutil_process_disk_read"] = 0
for child in children:
for k, v in child.as_dict(attrs=summable_values).items():
d["psutil_process_" + str(k)] += v
child_user_time = child.cpu_times().user
child_system_time = child.cpu_times().system
total_children_user_time += child_user_time - children_user_time.get(
child.pid, 0
)
total_children_system_time += (
child_system_time - children_system_time.get(child.pid, 0)
)
children_user_time[child.pid] = child_user_time
children_system_time[child.pid] = child_system_time
d["psutil_process_memory_virtual"] += child.memory_info().vms
d["psutil_process_memory_resident"] += child.memory_info().rss
try:
d["psutil_process_disk_write"] += child.io_counters().write_bytes
d["psutil_process_disk_read"] += child.io_counters().read_bytes
except Exception:
# occassionally pid temp files that hold this information are unvailable to be read so add zero
logging.exception(
"Exception reading IO counters for child {k}. Recorded IO usage may be incomplete".format(
k=k
),
exc_info=True,
)
d["psutil_process_disk_write"] += 0
d["psutil_process_disk_read"] += 0
d["psutil_process_time_user"] += total_children_user_time
d["psutil_process_time_system"] += total_children_system_time
logging.debug("sending message")
radio.send(d)
first_msg = False
except Exception:
logging.exception(
"Exception getting the resource usage. Not sending usage to Hub",
exc_info=True,
)
try:
msg = command_q.get(block=False)
if msg == "Finished":
logging.info(
"Received task finished message. Ending the monitoring loop now."
)
break
except queue.Empty:
logging.debug("Have not received any message.")
logging.debug("sleeping")
time.sleep(sleep_dur)
|
https://github.com/Parsl/parsl/issues/1837
|
2020-08-08 17:10:06,436 - root - ERROR - Exception reading IO counters for child memory_percent. Recorded IO usage may be incomplete
Traceback (most recent call last):
File "/lus/theta-fs0/projects/CSC249ADCD08/colmena/env/lib/python3.7/site-packages/parsl/monitoring/monitoring.py", line 551, in monitor
d['psutil_process_disk_write'] += child.io_counters().write_bytes
AttributeError: 'Process' object has no attribute 'io_counters'
|
AttributeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.