after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def build(preprocessor_step_config):
"""Builds preprocessing step based on the configuration.
Args:
preprocessor_step_config: PreprocessingStep configuration proto.
Returns:
function, argmap: A callable function and an argument map to call function
with.
Raises:
ValueError: On invalid configuration.
"""
step_type = preprocessor_step_config.WhichOneof("preprocessing_step")
if step_type in PREPROCESSING_FUNCTION_MAP:
preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
step_config = _get_step_config_from_proto(preprocessor_step_config, step_type)
function_args = _get_dict_from_proto(step_config)
return (preprocessing_function, function_args)
if step_type == "random_horizontal_flip":
config = preprocessor_step_config.random_horizontal_flip
return (
preprocessor.random_horizontal_flip,
{
"keypoint_flip_permutation": tuple(config.keypoint_flip_permutation),
},
)
if step_type == "random_vertical_flip":
config = preprocessor_step_config.random_vertical_flip
return (
preprocessor.random_vertical_flip,
{
"keypoint_flip_permutation": tuple(config.keypoint_flip_permutation),
},
)
if step_type == "random_rotation90":
return (preprocessor.random_rotation90, {})
if step_type == "random_crop_image":
config = preprocessor_step_config.random_crop_image
return (
preprocessor.random_crop_image,
{
"min_object_covered": config.min_object_covered,
"aspect_ratio_range": (
config.min_aspect_ratio,
config.max_aspect_ratio,
),
"area_range": (config.min_area, config.max_area),
"overlap_thresh": config.overlap_thresh,
"random_coef": config.random_coef,
},
)
if step_type == "random_pad_image":
config = preprocessor_step_config.random_pad_image
min_image_size = None
if config.HasField("min_image_height") != config.HasField("min_image_width"):
raise ValueError(
"min_image_height and min_image_width should be either "
"both set or both unset."
)
if config.HasField("min_image_height"):
min_image_size = (config.min_image_height, config.min_image_width)
max_image_size = None
if config.HasField("max_image_height") != config.HasField("max_image_width"):
raise ValueError(
"max_image_height and max_image_width should be either "
"both set or both unset."
)
if config.HasField("max_image_height"):
max_image_size = (config.max_image_height, config.max_image_width)
pad_color = config.pad_color
if pad_color and len(pad_color) != 3:
raise ValueError("pad_color should have 3 elements (RGB) if set!")
if not pad_color:
pad_color = None
return (
preprocessor.random_pad_image,
{
"min_image_size": min_image_size,
"max_image_size": max_image_size,
"pad_color": pad_color,
},
)
if step_type == "random_crop_pad_image":
config = preprocessor_step_config.random_crop_pad_image
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
raise ValueError("min_padded_size_ratio should have 2 elements if set!")
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
raise ValueError("max_padded_size_ratio should have 2 elements if set!")
pad_color = config.pad_color
if pad_color and len(pad_color) != 3:
raise ValueError("pad_color should have 3 elements if set!")
kwargs = {
"min_object_covered": config.min_object_covered,
"aspect_ratio_range": (config.min_aspect_ratio, config.max_aspect_ratio),
"area_range": (config.min_area, config.max_area),
"overlap_thresh": config.overlap_thresh,
"random_coef": config.random_coef,
}
if min_padded_size_ratio:
kwargs["min_padded_size_ratio"] = tuple(min_padded_size_ratio)
if max_padded_size_ratio:
kwargs["max_padded_size_ratio"] = tuple(max_padded_size_ratio)
if pad_color:
kwargs["pad_color"] = tuple(pad_color)
return (preprocessor.random_crop_pad_image, kwargs)
if step_type == "random_resize_method":
config = preprocessor_step_config.random_resize_method
return (
preprocessor.random_resize_method,
{
"target_size": [config.target_height, config.target_width],
},
)
if step_type == "resize_image":
config = preprocessor_step_config.resize_image
method = RESIZE_METHOD_MAP[config.method]
return (
preprocessor.resize_image,
{
"new_height": config.new_height,
"new_width": config.new_width,
"method": method,
},
)
if step_type == "ssd_random_crop":
config = preprocessor_step_config.ssd_random_crop
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [
(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations
]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (
preprocessor.ssd_random_crop,
{
"min_object_covered": min_object_covered,
"aspect_ratio_range": aspect_ratio_range,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
},
)
return (preprocessor.ssd_random_crop, {})
if step_type == "ssd_random_crop_pad":
config = preprocessor_step_config.ssd_random_crop_pad
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [
(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations
]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [
(op.min_padded_size_ratio[0], op.min_padded_size_ratio[1])
for op in config.operations
]
max_padded_size_ratio = [
(op.max_padded_size_ratio[0], op.max_padded_size_ratio[1])
for op in config.operations
]
pad_color = [
(op.pad_color_r, op.pad_color_g, op.pad_color_b)
for op in config.operations
]
return (
preprocessor.ssd_random_crop_pad,
{
"min_object_covered": min_object_covered,
"aspect_ratio_range": aspect_ratio_range,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
"min_padded_size_ratio": min_padded_size_ratio,
"max_padded_size_ratio": max_padded_size_ratio,
"pad_color": pad_color,
},
)
return (preprocessor.ssd_random_crop_pad, {})
if step_type == "ssd_random_crop_fixed_aspect_ratio":
config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (
preprocessor.ssd_random_crop_fixed_aspect_ratio,
{
"min_object_covered": min_object_covered,
"aspect_ratio": config.aspect_ratio,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
},
)
return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})
if step_type == "ssd_random_crop_pad_fixed_aspect_ratio":
config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [
(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations
]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [
(op.min_padded_size_ratio[0], op.min_padded_size_ratio[1])
for op in config.operations
]
max_padded_size_ratio = [
(op.max_padded_size_ratio[0], op.max_padded_size_ratio[1])
for op in config.operations
]
return (
preprocessor.ssd_random_crop_pad_fixed_aspect_ratio,
{
"min_object_covered": min_object_covered,
"aspect_ratio": config.aspect_ratio,
"aspect_ratio_range": aspect_ratio_range,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
"min_padded_size_ratio": min_padded_size_ratio,
"max_padded_size_ratio": max_padded_size_ratio,
},
)
return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, {})
raise ValueError("Unknown preprocessing step.")
|
def build(preprocessor_step_config):
"""Builds preprocessing step based on the configuration.
Args:
preprocessor_step_config: PreprocessingStep configuration proto.
Returns:
function, argmap: A callable function and an argument map to call function
with.
Raises:
ValueError: On invalid configuration.
"""
step_type = preprocessor_step_config.WhichOneof("preprocessing_step")
if step_type in PREPROCESSING_FUNCTION_MAP:
preprocessing_function = PREPROCESSING_FUNCTION_MAP[step_type]
step_config = _get_step_config_from_proto(preprocessor_step_config, step_type)
function_args = _get_dict_from_proto(step_config)
return (preprocessing_function, function_args)
if step_type == "random_horizontal_flip":
config = preprocessor_step_config.random_horizontal_flip
return (
preprocessor.random_horizontal_flip,
{
"keypoint_flip_permutation": tuple(config.keypoint_flip_permutation),
},
)
if step_type == "random_vertical_flip":
config = preprocessor_step_config.random_vertical_flip
return (
preprocessor.random_vertical_flip,
{
"keypoint_flip_permutation": tuple(config.keypoint_flip_permutation),
},
)
if step_type == "random_rotation90":
return (preprocessor.random_rotation90, {})
if step_type == "random_crop_image":
config = preprocessor_step_config.random_crop_image
return (
preprocessor.random_crop_image,
{
"min_object_covered": config.min_object_covered,
"aspect_ratio_range": (
config.min_aspect_ratio,
config.max_aspect_ratio,
),
"area_range": (config.min_area, config.max_area),
"overlap_thresh": config.overlap_thresh,
"random_coef": config.random_coef,
},
)
if step_type == "random_pad_image":
config = preprocessor_step_config.random_pad_image
min_image_size = None
if config.HasField("min_image_height") != config.HasField("min_image_width"):
raise ValueError(
"min_image_height and min_image_width should be either "
"both set or both unset."
)
if config.HasField("min_image_height"):
min_image_size = (config.min_image_height, config.min_image_width)
max_image_size = None
if config.HasField("max_image_height") != config.HasField("max_image_width"):
raise ValueError(
"max_image_height and max_image_width should be either "
"both set or both unset."
)
if config.HasField("max_image_height"):
max_image_size = (config.max_image_height, config.max_image_width)
pad_color = config.pad_color
if pad_color and len(pad_color) != 3:
raise ValueError("pad_color should have 3 elements (RGB) if set!")
if not pad_color:
pad_color = None
return (
preprocessor.random_pad_image,
{
"min_image_size": min_image_size,
"max_image_size": max_image_size,
"pad_color": pad_color,
},
)
if step_type == "random_crop_pad_image":
config = preprocessor_step_config.random_crop_pad_image
min_padded_size_ratio = config.min_padded_size_ratio
if min_padded_size_ratio and len(min_padded_size_ratio) != 2:
raise ValueError("min_padded_size_ratio should have 3 elements if set!")
max_padded_size_ratio = config.max_padded_size_ratio
if max_padded_size_ratio and len(max_padded_size_ratio) != 2:
raise ValueError("max_padded_size_ratio should have 3 elements if set!")
pad_color = config.pad_color
if pad_color and len(pad_color) != 3:
raise ValueError("pad_color should have 3 elements if set!")
return (
preprocessor.random_crop_pad_image,
{
"min_object_covered": config.min_object_covered,
"aspect_ratio_range": (
config.min_aspect_ratio,
config.max_aspect_ratio,
),
"area_range": (config.min_area, config.max_area),
"overlap_thresh": config.overlap_thresh,
"random_coef": config.random_coef,
"min_padded_size_ratio": (
min_padded_size_ratio if min_padded_size_ratio else None
),
"max_padded_size_ratio": (
max_padded_size_ratio if max_padded_size_ratio else None
),
"pad_color": (pad_color if pad_color else None),
},
)
if step_type == "random_resize_method":
config = preprocessor_step_config.random_resize_method
return (
preprocessor.random_resize_method,
{
"target_size": [config.target_height, config.target_width],
},
)
if step_type == "resize_image":
config = preprocessor_step_config.resize_image
method = RESIZE_METHOD_MAP[config.method]
return (
preprocessor.resize_image,
{
"new_height": config.new_height,
"new_width": config.new_width,
"method": method,
},
)
if step_type == "ssd_random_crop":
config = preprocessor_step_config.ssd_random_crop
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [
(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations
]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (
preprocessor.ssd_random_crop,
{
"min_object_covered": min_object_covered,
"aspect_ratio_range": aspect_ratio_range,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
},
)
return (preprocessor.ssd_random_crop, {})
if step_type == "ssd_random_crop_pad":
config = preprocessor_step_config.ssd_random_crop_pad
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [
(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations
]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [
(op.min_padded_size_ratio[0], op.min_padded_size_ratio[1])
for op in config.operations
]
max_padded_size_ratio = [
(op.max_padded_size_ratio[0], op.max_padded_size_ratio[1])
for op in config.operations
]
pad_color = [
(op.pad_color_r, op.pad_color_g, op.pad_color_b)
for op in config.operations
]
return (
preprocessor.ssd_random_crop_pad,
{
"min_object_covered": min_object_covered,
"aspect_ratio_range": aspect_ratio_range,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
"min_padded_size_ratio": min_padded_size_ratio,
"max_padded_size_ratio": max_padded_size_ratio,
"pad_color": pad_color,
},
)
return (preprocessor.ssd_random_crop_pad, {})
if step_type == "ssd_random_crop_fixed_aspect_ratio":
config = preprocessor_step_config.ssd_random_crop_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
return (
preprocessor.ssd_random_crop_fixed_aspect_ratio,
{
"min_object_covered": min_object_covered,
"aspect_ratio": config.aspect_ratio,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
},
)
return (preprocessor.ssd_random_crop_fixed_aspect_ratio, {})
if step_type == "ssd_random_crop_pad_fixed_aspect_ratio":
config = preprocessor_step_config.ssd_random_crop_pad_fixed_aspect_ratio
if config.operations:
min_object_covered = [op.min_object_covered for op in config.operations]
aspect_ratio_range = [
(op.min_aspect_ratio, op.max_aspect_ratio) for op in config.operations
]
area_range = [(op.min_area, op.max_area) for op in config.operations]
overlap_thresh = [op.overlap_thresh for op in config.operations]
random_coef = [op.random_coef for op in config.operations]
min_padded_size_ratio = [
(op.min_padded_size_ratio[0], op.min_padded_size_ratio[1])
for op in config.operations
]
max_padded_size_ratio = [
(op.max_padded_size_ratio[0], op.max_padded_size_ratio[1])
for op in config.operations
]
return (
preprocessor.ssd_random_crop_pad_fixed_aspect_ratio,
{
"min_object_covered": min_object_covered,
"aspect_ratio": config.aspect_ratio,
"aspect_ratio_range": aspect_ratio_range,
"area_range": area_range,
"overlap_thresh": overlap_thresh,
"random_coef": random_coef,
"min_padded_size_ratio": min_padded_size_ratio,
"max_padded_size_ratio": max_padded_size_ratio,
},
)
return (preprocessor.ssd_random_crop_pad_fixed_aspect_ratio, {})
raise ValueError("Unknown preprocessing step.")
|
https://github.com/tensorflow/models/issues/2753
|
Traceback (most recent call last):
File "../gpu-env/lib/python3.5/site-packages/object_detection/train.py", line 163, in <module>
tf.app.run()
File "/home/dan/gpu-env/lib/python3.5/site-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "../gpu-env/lib/python3.5/site-packages/object_detection/train.py", line 159, in main
worker_job_name, is_chief, FLAGS.train_dir)
File "/home/dan/gpu-env/lib/python3.5/site-packages/object_detection/trainer.py", line 217, in train
train_config.prefetch_queue_capacity, data_augmentation_options)
File "/home/dan/gpu-env/lib/python3.5/site-packages/object_detection/trainer.py", line 77, in create_input_queue
include_keypoints=include_keypoints))
File "/home/dan/gpu-env/lib/python3.5/site-packages/object_detection/core/preprocessor.py", line 2547, in preprocess
results = func(*args, **params)
File "/home/dan/gpu-env/lib/python3.5/site-packages/object_detection/core/preprocessor.py", line 1272, in random_crop_pad_image
min_padded_size_ratio)
File "/home/dan/gpu-env/lib/python3.5/site-packages/tensorflow/python/ops/math_ops.py", line 885, in binary_op_wrapper
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
File "/home/dan/gpu-env/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 836, in convert_to_tensor
as_ref=False)
File "/home/dan/gpu-env/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 926, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/home/dan/gpu-env/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py", line 229, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "/home/dan/gpu-env/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py", line 208, in constant
value, dtype=dtype, shape=shape, verify_shape=verify_shape))
File "/home/dan/gpu-env/lib/python3.5/site-packages/tensorflow/python/framework/tensor_util.py", line 371, in make_tensor_proto
raise ValueError("None values not supported.")
ValueError: None values not supported.
|
ValueError
|
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device("/cpu:0"):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
"global_step", [], initializer=tf.constant_initializer(0), trainable=False
)
# Calculate the learning rate schedule.
num_batches_per_epoch = (
cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(
cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True,
)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
with tf.variable_scope(tf.get_variable_scope()):
for i in xrange(FLAGS.num_gpus):
with tf.device("/gpu:%d" % i):
with tf.name_scope("%s_%d" % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar("learning_rate", lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + "/gradients", grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step
)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement,
)
)
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), "Model diverged with loss = NaN"
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = (
"%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)"
)
print(
format_str
% (
datetime.now(),
step,
loss_value,
examples_per_sec,
sec_per_batch,
)
)
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, "model.ckpt")
saver.save(sess, checkpoint_path, global_step=step)
|
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device("/cpu:0"):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
"global_step", [], initializer=tf.constant_initializer(0), trainable=False
)
# Calculate the learning rate schedule.
num_batches_per_epoch = (
cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(
cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True,
)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device("/gpu:%d" % i):
with tf.name_scope("%s_%d" % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar("learning_rate", lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + "/gradients", grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step
)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(
config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement,
)
)
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), "Model diverged with loss = NaN"
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = (
"%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)"
)
print(
format_str
% (
datetime.now(),
step,
loss_value,
examples_per_sec,
sec_per_batch,
)
)
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, "model.ckpt")
saver.save(sess, checkpoint_path, global_step=step)
|
https://github.com/tensorflow/models/issues/901
|
# python cifar10_multi_gpu_train.py --num_gpus=4
I tensorflow/stream_executor/dso_loader.cc:125] successfully opened CUDA library libcublas.so.8.0 locally
I tensorflow/stream_executor/dso_loader.cc:125] successfully opened CUDA library libcudnn.so.5 locally
I tensorflow/stream_executor/dso_loader.cc:125] successfully opened CUDA library libcufft.so.8.0 locally
I tensorflow/stream_executor/dso_loader.cc:125] successfully opened CUDA library libcuda.so.1 locally
I tensorflow/stream_executor/dso_loader.cc:125] successfully opened CUDA library libcurand.so.8.0 locally
Filling queue with 20000 CIFAR images before starting to train. This will take a few minutes.
WARNING:tensorflow:From /home/***/Downloads/models/tutorials/image/cifar10/cifar10_input.py:135: image_summary (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2016-11-30.
Instructions for updating:
Please switch to tf.summary.image. Note that tf.summary.image uses the node name instead of the tag. This means that TensorFlow will automatically de-duplicate summary names based on the scope they are created in. Also, the max_images argument was renamed to max_outputs.
Traceback (most recent call last):
File "cifar10_multi_gpu_train.py", line 273, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 44, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "cifar10_multi_gpu_train.py", line 269, in main
train()
File "cifar10_multi_gpu_train.py", line 171, in train
loss = tower_loss(scope)
File "cifar10_multi_gpu_train.py", line 78, in tower_loss
logits = cifar10.inference(images)
File "/home/***/Downloads/models/tutorials/image/cifar10/cifar10.py", line 207, in inference
wd=0.0)
File "/home/***/Downloads/models/tutorials/image/cifar10/cifar10.py", line 137, in _variable_with_weight_decay
weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
AttributeError: 'module' object has no attribute 'mul'
|
AttributeError
|
def __call__(self, direction, factor, values):
angles_deg = np.asarray(values) / factor
damping_ratios = np.cos((180 - angles_deg) * np.pi / 180)
ret = ["%.2f" % val for val in damping_ratios]
return ret
|
def __call__(self, direction, factor, values):
angles_deg = values / factor
damping_ratios = np.cos((180 - angles_deg) * np.pi / 180)
ret = ["%.2f" % val for val in damping_ratios]
return ret
|
https://github.com/python-control/python-control/issues/457
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-7-4e6a52bd77da> in <module>
1 import control
2 g = control.tf(1, [1,1])
----> 3 control.pzmap(g, grid=True)
~/src/python-control/control/pzmap.py in pzmap(sys, plot, grid, title, **kwargs)
104 ax, fig = zgrid()
105 else:
--> 106 ax, fig = sgrid()
107 else:
108 ax, fig = nogrid()
~/src/python-control/control/grid.py in sgrid()
80 ax.axis[:].invert_ticklabel_direction()
81
---> 82 ax.axis["wnxneg"] = axis = ax.new_floating_axis(0, 180)
83 axis.set_ticklabel_direction("-")
84 axis.label.set_visible(False)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axislines.py in new_floating_axis(self, nth_coord, value, axis_direction)
571 def new_floating_axis(self, nth_coord, value, axis_direction="bottom"):
572 gh = self.get_grid_helper()
--> 573 axis = gh.new_floating_axis(nth_coord, value,
574 axis_direction=axis_direction,
575 axes=self)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in new_floating_axis(self, nth_coord, value, axes, axis_direction)
348 self, nth_coord, value, axis_direction)
349
--> 350 axisline = AxisArtist(axes, _helper)
351
352 # _helper = FloatingAxisArtistHelper(self, nth_coord,
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axis_artist.py in __init__(self, axes, helper, offset, axis_direction, **kwargs)
744 self._axis_direction = axis_direction
745
--> 746 self._init_line()
747 self._init_ticks(**kwargs)
748 self._init_offsetText(axis_direction)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axis_artist.py in _init_line(self)
885 if axisline_style is None:
886 self.line = PathPatch(
--> 887 self._axis_artist_helper.get_line(self.axes),
888 color=rcParams['axes.edgecolor'],
889 fill=False,
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in get_line(self, axes)
268
269 def get_line(self, axes):
--> 270 self.update_lim(axes)
271 x, y = self.grid_info["line_xy"]
272
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in update_lim(self, axes)
99
100 def update_lim(self, axes):
--> 101 self.grid_helper.update_lim(axes)
102
103 x1, x2 = axes.get_xlim()
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axislines.py in update_lim(self, axes)
325
326 if self._force_update or self._old_limits != (x1, x2, y1, y2):
--> 327 self._update(x1, x2, y1, y2)
328 self._force_update = False
329 self._old_limits = (x1, x2, y1, y2)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in _update(self, x1, x2, y1, y2)
317 if self.valid() and self._old_values == (x1, x2, y1, y2):
318 return
--> 319 self._update_grid(x1, y1, x2, y2)
320 self._old_values = (x1, x2, y1, y2)
321 self._force_update = False
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in _update_grid(self, x1, y1, x2, y2)
365
366 def _update_grid(self, x1, y1, x2, y2):
--> 367 self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
368
369 def get_gridlines(self, which="major", axis="both"):
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_finder.py in get_grid_info(self, x1, y1, x2, y2)
102 """
103
--> 104 extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)
105
106 # min & max rage of lat (or lon) for each grid line will be drawn.
~/src/python-control/control/grid.py in __call__(self, transform_xy, x1, y1, x2, y2)
42
43 lon_min, lon_max, lat_min, lat_max = \
---> 44 self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)
45
46 return lon_min, lon_max, lat_min, lat_max
AttributeError: 'ModifiedExtremeFinderCycle' object has no attribute '_adjust_extremes'
|
AttributeError
|
def __call__(self, transform_xy, x1, y1, x2, y2):
x, y = np.meshgrid(np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny))
lon, lat = transform_xy(np.ravel(x), np.ravel(y))
with np.errstate(invalid="ignore"):
if self.lon_cycle is not None:
lon0 = np.nanmin(lon)
# Changed from 180 to 360 to be able to span only
# 90-270 (left hand side)
lon -= 360.0 * ((lon - lon0) > 360.0)
if self.lat_cycle is not None: # pragma: no cover
lat0 = np.nanmin(lat)
lat -= 360.0 * ((lat - lat0) > 180.0)
lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)
lon_min, lon_max, lat_min, lat_max = self._add_pad(
lon_min, lon_max, lat_min, lat_max
)
# check cycle
if self.lon_cycle:
lon_max = min(lon_max, lon_min + self.lon_cycle)
if self.lat_cycle: # pragma: no cover
lat_max = min(lat_max, lat_min + self.lat_cycle)
if self.lon_minmax is not None:
min0 = self.lon_minmax[0]
lon_min = max(min0, lon_min)
max0 = self.lon_minmax[1]
lon_max = min(max0, lon_max)
if self.lat_minmax is not None:
min0 = self.lat_minmax[0]
lat_min = max(min0, lat_min)
max0 = self.lat_minmax[1]
lat_max = min(max0, lat_max)
return lon_min, lon_max, lat_min, lat_max
|
def __call__(self, transform_xy, x1, y1, x2, y2):
x_, y_ = np.linspace(x1, x2, self.nx), np.linspace(y1, y2, self.ny)
x, y = np.meshgrid(x_, y_)
lon, lat = transform_xy(np.ravel(x), np.ravel(y))
with np.errstate(invalid="ignore"):
if self.lon_cycle is not None:
lon0 = np.nanmin(lon)
# Changed from 180 to 360 to be able to span only
# 90-270 (left hand side)
lon -= 360.0 * ((lon - lon0) > 360.0)
if self.lat_cycle is not None:
lat0 = np.nanmin(lat)
# Changed from 180 to 360 to be able to span only
# 90-270 (left hand side)
lat -= 360.0 * ((lat - lat0) > 360.0)
lon_min, lon_max = np.nanmin(lon), np.nanmax(lon)
lat_min, lat_max = np.nanmin(lat), np.nanmax(lat)
lon_min, lon_max, lat_min, lat_max = self._adjust_extremes(
lon_min, lon_max, lat_min, lat_max
)
return lon_min, lon_max, lat_min, lat_max
|
https://github.com/python-control/python-control/issues/457
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-7-4e6a52bd77da> in <module>
1 import control
2 g = control.tf(1, [1,1])
----> 3 control.pzmap(g, grid=True)
~/src/python-control/control/pzmap.py in pzmap(sys, plot, grid, title, **kwargs)
104 ax, fig = zgrid()
105 else:
--> 106 ax, fig = sgrid()
107 else:
108 ax, fig = nogrid()
~/src/python-control/control/grid.py in sgrid()
80 ax.axis[:].invert_ticklabel_direction()
81
---> 82 ax.axis["wnxneg"] = axis = ax.new_floating_axis(0, 180)
83 axis.set_ticklabel_direction("-")
84 axis.label.set_visible(False)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axislines.py in new_floating_axis(self, nth_coord, value, axis_direction)
571 def new_floating_axis(self, nth_coord, value, axis_direction="bottom"):
572 gh = self.get_grid_helper()
--> 573 axis = gh.new_floating_axis(nth_coord, value,
574 axis_direction=axis_direction,
575 axes=self)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in new_floating_axis(self, nth_coord, value, axes, axis_direction)
348 self, nth_coord, value, axis_direction)
349
--> 350 axisline = AxisArtist(axes, _helper)
351
352 # _helper = FloatingAxisArtistHelper(self, nth_coord,
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axis_artist.py in __init__(self, axes, helper, offset, axis_direction, **kwargs)
744 self._axis_direction = axis_direction
745
--> 746 self._init_line()
747 self._init_ticks(**kwargs)
748 self._init_offsetText(axis_direction)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axis_artist.py in _init_line(self)
885 if axisline_style is None:
886 self.line = PathPatch(
--> 887 self._axis_artist_helper.get_line(self.axes),
888 color=rcParams['axes.edgecolor'],
889 fill=False,
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in get_line(self, axes)
268
269 def get_line(self, axes):
--> 270 self.update_lim(axes)
271 x, y = self.grid_info["line_xy"]
272
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in update_lim(self, axes)
99
100 def update_lim(self, axes):
--> 101 self.grid_helper.update_lim(axes)
102
103 x1, x2 = axes.get_xlim()
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axislines.py in update_lim(self, axes)
325
326 if self._force_update or self._old_limits != (x1, x2, y1, y2):
--> 327 self._update(x1, x2, y1, y2)
328 self._force_update = False
329 self._old_limits = (x1, x2, y1, y2)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in _update(self, x1, x2, y1, y2)
317 if self.valid() and self._old_values == (x1, x2, y1, y2):
318 return
--> 319 self._update_grid(x1, y1, x2, y2)
320 self._old_values = (x1, x2, y1, y2)
321 self._force_update = False
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in _update_grid(self, x1, y1, x2, y2)
365
366 def _update_grid(self, x1, y1, x2, y2):
--> 367 self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
368
369 def get_gridlines(self, which="major", axis="both"):
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_finder.py in get_grid_info(self, x1, y1, x2, y2)
102 """
103
--> 104 extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)
105
106 # min & max rage of lat (or lon) for each grid line will be drawn.
~/src/python-control/control/grid.py in __call__(self, transform_xy, x1, y1, x2, y2)
42
43 lon_min, lon_max, lat_min, lat_max = \
---> 44 self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)
45
46 return lon_min, lon_max, lat_min, lat_max
AttributeError: 'ModifiedExtremeFinderCycle' object has no attribute '_adjust_extremes'
|
AttributeError
|
def pzmap(sys, plot=None, grid=None, title="Pole Zero Map", **kwargs):
"""
Plot a pole/zero map for a linear system.
Parameters
----------
sys: LTI (StateSpace or TransferFunction)
Linear system for which poles and zeros are computed.
plot: bool, optional
If ``True`` a graph is generated with Matplotlib,
otherwise the poles and zeros are only computed and returned.
grid: boolean (default = False)
If True plot omega-damping grid.
Returns
-------
pole: array
The systems poles
zeros: array
The system's zeros.
"""
# Check to see if legacy 'Plot' keyword was used
if "Plot" in kwargs:
import warnings
warnings.warn(
"'Plot' keyword is deprecated in pzmap; use 'plot'", FutureWarning
)
plot = kwargs["Plot"]
# Get parameter values
plot = config._get_param("pzmap", "plot", plot, True)
grid = config._get_param("pzmap", "grid", grid, False)
if not isinstance(sys, LTI):
raise TypeError("Argument ``sys``: must be a linear system.")
poles = sys.pole()
zeros = sys.zero()
if plot:
import matplotlib.pyplot as plt
if grid:
if isdtime(sys, strict=True):
ax, fig = zgrid()
else:
ax, fig = sgrid()
else:
ax, fig = nogrid()
# Plot the locations of the poles and zeros
if len(poles) > 0:
ax.scatter(real(poles), imag(poles), s=50, marker="x", facecolors="k")
if len(zeros) > 0:
ax.scatter(
real(zeros),
imag(zeros),
s=50,
marker="o",
facecolors="none",
edgecolors="k",
)
plt.title(title)
# Return locations of poles and zeros as a tuple
return poles, zeros
|
def pzmap(sys, plot=True, grid=False, title="Pole Zero Map", **kwargs):
"""
Plot a pole/zero map for a linear system.
Parameters
----------
sys: LTI (StateSpace or TransferFunction)
Linear system for which poles and zeros are computed.
plot: bool, optional
If ``True`` a graph is generated with Matplotlib,
otherwise the poles and zeros are only computed and returned.
grid: boolean (default = False)
If True plot omega-damping grid.
Returns
-------
pole: array
The systems poles
zeros: array
The system's zeros.
"""
# Check to see if legacy 'Plot' keyword was used
if "Plot" in kwargs:
import warnings
warnings.warn(
"'Plot' keyword is deprecated in pzmap; use 'plot'", FutureWarning
)
plot = kwargs["Plot"]
# Get parameter values
plot = config._get_param("rlocus", "plot", plot, True)
grid = config._get_param("rlocus", "grid", grid, False)
if not isinstance(sys, LTI):
raise TypeError("Argument ``sys``: must be a linear system.")
poles = sys.pole()
zeros = sys.zero()
if plot:
import matplotlib.pyplot as plt
if grid:
if isdtime(sys, strict=True):
ax, fig = zgrid()
else:
ax, fig = sgrid()
else:
ax, fig = nogrid()
# Plot the locations of the poles and zeros
if len(poles) > 0:
ax.scatter(real(poles), imag(poles), s=50, marker="x", facecolors="k")
if len(zeros) > 0:
ax.scatter(
real(zeros),
imag(zeros),
s=50,
marker="o",
facecolors="none",
edgecolors="k",
)
plt.title(title)
# Return locations of poles and zeros as a tuple
return poles, zeros
|
https://github.com/python-control/python-control/issues/457
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-7-4e6a52bd77da> in <module>
1 import control
2 g = control.tf(1, [1,1])
----> 3 control.pzmap(g, grid=True)
~/src/python-control/control/pzmap.py in pzmap(sys, plot, grid, title, **kwargs)
104 ax, fig = zgrid()
105 else:
--> 106 ax, fig = sgrid()
107 else:
108 ax, fig = nogrid()
~/src/python-control/control/grid.py in sgrid()
80 ax.axis[:].invert_ticklabel_direction()
81
---> 82 ax.axis["wnxneg"] = axis = ax.new_floating_axis(0, 180)
83 axis.set_ticklabel_direction("-")
84 axis.label.set_visible(False)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axislines.py in new_floating_axis(self, nth_coord, value, axis_direction)
571 def new_floating_axis(self, nth_coord, value, axis_direction="bottom"):
572 gh = self.get_grid_helper()
--> 573 axis = gh.new_floating_axis(nth_coord, value,
574 axis_direction=axis_direction,
575 axes=self)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in new_floating_axis(self, nth_coord, value, axes, axis_direction)
348 self, nth_coord, value, axis_direction)
349
--> 350 axisline = AxisArtist(axes, _helper)
351
352 # _helper = FloatingAxisArtistHelper(self, nth_coord,
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axis_artist.py in __init__(self, axes, helper, offset, axis_direction, **kwargs)
744 self._axis_direction = axis_direction
745
--> 746 self._init_line()
747 self._init_ticks(**kwargs)
748 self._init_offsetText(axis_direction)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axis_artist.py in _init_line(self)
885 if axisline_style is None:
886 self.line = PathPatch(
--> 887 self._axis_artist_helper.get_line(self.axes),
888 color=rcParams['axes.edgecolor'],
889 fill=False,
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in get_line(self, axes)
268
269 def get_line(self, axes):
--> 270 self.update_lim(axes)
271 x, y = self.grid_info["line_xy"]
272
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in update_lim(self, axes)
99
100 def update_lim(self, axes):
--> 101 self.grid_helper.update_lim(axes)
102
103 x1, x2 = axes.get_xlim()
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/axislines.py in update_lim(self, axes)
325
326 if self._force_update or self._old_limits != (x1, x2, y1, y2):
--> 327 self._update(x1, x2, y1, y2)
328 self._force_update = False
329 self._old_limits = (x1, x2, y1, y2)
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in _update(self, x1, x2, y1, y2)
317 if self.valid() and self._old_values == (x1, x2, y1, y2):
318 return
--> 319 self._update_grid(x1, y1, x2, y2)
320 self._old_values = (x1, x2, y1, y2)
321 self._force_update = False
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_helper_curvelinear.py in _update_grid(self, x1, y1, x2, y2)
365
366 def _update_grid(self, x1, y1, x2, y2):
--> 367 self.grid_info = self.grid_finder.get_grid_info(x1, y1, x2, y2)
368
369 def get_gridlines(self, which="major", axis="both"):
~/.miniconda3/envs/control-dev/lib/python3.8/site-packages/mpl_toolkits/axisartist/grid_finder.py in get_grid_info(self, x1, y1, x2, y2)
102 """
103
--> 104 extremes = self.extreme_finder(self.inv_transform_xy, x1, y1, x2, y2)
105
106 # min & max rage of lat (or lon) for each grid line will be drawn.
~/src/python-control/control/grid.py in __call__(self, transform_xy, x1, y1, x2, y2)
42
43 lon_min, lon_max, lat_min, lat_max = \
---> 44 self._adjust_extremes(lon_min, lon_max, lat_min, lat_max)
45
46 return lon_min, lon_max, lat_min, lat_max
AttributeError: 'ModifiedExtremeFinderCycle' object has no attribute '_adjust_extremes'
|
AttributeError
|
def __str__(self):
"""String representation of an input/output system"""
str = "System: " + (self.name if self.name else "(None)") + "\n"
str += "Inputs (%s): " % self.ninputs
for key in self.input_index:
str += key + ", "
str += "\nOutputs (%s): " % self.noutputs
for key in self.output_index:
str += key + ", "
str += "\nStates (%s): " % self.nstates
for key in self.state_index:
str += key + ", "
return str
|
def __str__(self):
"""String representation of an input/output system"""
str = "System: " + (self.name if self.name else "(none)") + "\n"
str += "Inputs (%d): " % self.ninputs
for key in self.input_index:
str += key + ", "
str += "\nOutputs (%d): " % self.noutputs
for key in self.output_index:
str += key + ", "
str += "\nStates (%d): " % self.nstates
for key in self.state_index:
str += key + ", "
return str
|
https://github.com/python-control/python-control/issues/329
|
sys = ctl.NonlinearIOSystem(lambda t, x, u, params: x)
print(sys)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-85c64df40808> in <module>()
----> 1 print(sys)
/home/arnold/pythonBox/control_dev/python-control-rabraker/control/iosys.py in __str__(self)
214 """String representation of an input/output system"""
215 str = "System: " + (self.name if self.name else "(none)") + "\n"
--> 216 str += "Inputs (%d): " % self.ninputs
217 for key in self.input_index: str += key + ", "
218 str += "\nOutputs (%d): " % self.noutputs
TypeError: %d format: a number is required, not NoneType
|
TypeError
|
def __add__(sys1, sys2):
"""Add two input/output systems (parallel interconnection)"""
# TODO: Allow addition of scalars and matrices
if not isinstance(sys2, InputOutputSystem):
raise ValueError("Unknown I/O system object ", sys2)
elif isinstance(sys1, StateSpace) and isinstance(sys2, StateSpace):
# Special case: maintain linear systems structure
new_ss_sys = StateSpace.__add__(sys1, sys2)
# TODO: set input and output names
new_io_sys = LinearIOSystem(new_ss_sys)
return new_io_sys
# Make sure number of input and outputs match
if sys1.ninputs != sys2.ninputs or sys1.noutputs != sys2.noutputs:
raise ValueError(
"Can't add systems with different numbers of inputs or outputs."
)
ninputs = sys1.ninputs
noutputs = sys1.noutputs
# Create a new system to handle the composition
newsys = InterconnectedSystem((sys1, sys2))
# Set up the input map
newsys.set_input_map(np.concatenate((np.eye(ninputs), np.eye(ninputs)), axis=0))
# TODO: set up input names
# Set up the output map
newsys.set_output_map(np.concatenate((np.eye(noutputs), np.eye(noutputs)), axis=1))
# TODO: set up output names
# Return the newly created system
return newsys
|
def __add__(sys1, sys2):
"""Add two input/output systems (parallel interconnection)"""
# TODO: Allow addition of scalars and matrices
if not isinstance(sys2, InputOutputSystem):
raise ValueError("Unknown I/O system object ", sys2)
elif isinstance(sys1, StateSpace) and isinstance(sys2, StateSpace):
# Special case: maintain linear systems structure
new_ss_sys = StateSpace.__add__(sys1, sys2)
# TODO: set input and output names
new_io_sys = LinearIOSystem(new_ss_sys)
return new_io_sys
# Make sure number of input and outputs match
if sys1.ninputs != sys2.ninputs or sys1.noutputs != sys2.noutputs:
raise ValueError(
"Can't add systems with different numbers of inputs or outputs."
)
ninputs = sys1.ninputs
noutputs = sys1.noutputs
# Make sure timebase are compatible
dt = _find_timebase(sys1, sys2)
if dt is False:
raise ValueError("System timebases are not compabile")
# Create a new system to handle the composition
newsys = InterconnectedSystem((sys1, sys2), dt=dt)
# Set up the input map
newsys.set_input_map(np.concatenate((np.eye(ninputs), np.eye(ninputs)), axis=0))
# TODO: set up input names
# Set up the output map
newsys.set_output_map(np.concatenate((np.eye(noutputs), np.eye(noutputs)), axis=1))
# TODO: set up output names
# Return the newly created system
return newsys
|
https://github.com/python-control/python-control/issues/329
|
sys = ctl.NonlinearIOSystem(lambda t, x, u, params: x)
print(sys)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-85c64df40808> in <module>()
----> 1 print(sys)
/home/arnold/pythonBox/control_dev/python-control-rabraker/control/iosys.py in __str__(self)
214 """String representation of an input/output system"""
215 str = "System: " + (self.name if self.name else "(none)") + "\n"
--> 216 str += "Inputs (%d): " % self.ninputs
217 for key in self.input_index: str += key + ", "
218 str += "\nOutputs (%d): " % self.noutputs
TypeError: %d format: a number is required, not NoneType
|
TypeError
|
def __init__(
self,
syslist,
connections=[],
inplist=[],
outlist=[],
inputs=None,
outputs=None,
states=None,
params={},
dt=None,
name=None,
):
"""Create an I/O system from a list of systems + connection info.
The InterconnectedSystem class is used to represent an input/output
system that consists of an interconnection between a set of subystems.
The outputs of each subsystem can be summed together to to provide
inputs to other subsystems. The overall system inputs and outputs can
be any subset of subsystem inputs and outputs.
Parameters
----------
syslist : array_like of InputOutputSystems
The list of input/output systems to be connected
connections : tuple of connection specifications, optional
Description of the internal connections between the subsystems.
Each element of the tuple describes an input to one of the
subsystems. The entries are are of the form:
(input-spec, output-spec1, output-spec2, ...)
The input-spec should be a tuple of the form `(subsys_i, inp_j)`
where `subsys_i` is the index into `syslist` and `inp_j` is the
index into the input vector for the subsystem. If `subsys_i` has
a single input, then the subsystem index `subsys_i` can be listed
as the input-spec. If systems and signals are given names, then
the form 'sys.sig' or ('sys', 'sig') are also recognized.
Each output-spec should be a tuple of the form `(subsys_i, out_j,
gain)`. The input will be constructed by summing the listed
outputs after multiplying by the gain term. If the gain term is
omitted, it is assumed to be 1. If the system has a single
output, then the subsystem index `subsys_i` can be listed as the
input-spec. If systems and signals are given names, then the form
'sys.sig', ('sys', 'sig') or ('sys', 'sig', gain) are also
recognized, and the special form '-sys.sig' can be used to specify
a signal with gain -1.
If omitted, the connection map (matrix) can be specified using the
:func:`~control.InterconnectedSystem.set_connect_map` method.
inplist : tuple of input specifications, optional
List of specifications for how the inputs for the overall system
are mapped to the subsystem inputs. The input specification is
the same as the form defined in the connection specification.
Each system input is added to the input for the listed subsystem.
If omitted, the input map can be specified using the
`set_input_map` method.
outlist : tuple of output specifications, optional
List of specifications for how the outputs for the subsystems are
mapped to overall system outputs. The output specification is the
same as the form defined in the connection specification
(including the optional gain term). Numbered outputs must be
chosen from the list of subsystem outputs, but named outputs can
also be contained in the list of subsystem inputs.
If omitted, the output map can be specified using the
`set_output_map` method.
params : dict, optional
Parameter values for the systems. Passed to the evaluation
functions for the system as default values, overriding internal
defaults.
dt : timebase, optional
The timebase for the system, used to specify whether the system is
operating in continuous or discrete time. It can have the
following values:
* dt = None No timebase specified
* dt = 0 Continuous time system
* dt > 0 Discrete time system with sampling time dt
* dt = True Discrete time with unspecified sampling time
name : string, optional
System name (used for specifying signals).
"""
# Convert input and output names to lists if they aren't already
if not isinstance(inplist, (list, tuple)):
inplist = [inplist]
if not isinstance(outlist, (list, tuple)):
outlist = [outlist]
# Check to make sure all systems are consistent
self.syslist = syslist
self.syslist_index = {}
dt = None
nstates = 0
self.state_offset = []
ninputs = 0
self.input_offset = []
noutputs = 0
self.output_offset = []
system_count = 0
for sys in syslist:
# Make sure time bases are consistent
# TODO: Use lti._find_timebase() instead?
if dt is None and sys.dt is not None:
# Timebase was not specified; set to match this system
dt = sys.dt
elif dt != sys.dt:
raise TypeError("System timebases are not compatible")
# Make sure number of inputs, outputs, states is given
if sys.ninputs is None or sys.noutputs is None or sys.nstates is None:
raise TypeError(
"System '%s' must define number of inputs, "
"outputs, states in order to be connected" % sys.name
)
# Keep track of the offsets into the states, inputs, outputs
self.input_offset.append(ninputs)
self.output_offset.append(noutputs)
self.state_offset.append(nstates)
# Keep track of the total number of states, inputs, outputs
nstates += sys.nstates
ninputs += sys.ninputs
noutputs += sys.noutputs
# Store the index to the system for later retrieval
# TODO: look for duplicated system names
self.syslist_index[sys.name] = system_count
system_count += 1
# Check for duplicate systems or duplicate names
sysobj_list = []
sysname_list = []
for sys in syslist:
if sys in sysobj_list:
warn("Duplicate object found in system list: %s" % str(sys))
elif sys.name is not None and sys.name in sysname_list:
warn("Duplicate name found in system list: %s" % sys.name)
sysobj_list.append(sys)
sysname_list.append(sys.name)
# Create the I/O system
super(InterconnectedSystem, self).__init__(
inputs=len(inplist), outputs=len(outlist), states=nstates, params=params, dt=dt
)
# If input or output list was specified, update it
nsignals, self.input_index = self._process_signal_list(inputs, prefix="u")
if nsignals is not None and len(inplist) != nsignals:
raise ValueError("Wrong number/type of inputs given.")
nsignals, self.output_index = self._process_signal_list(outputs, prefix="y")
if nsignals is not None and len(outlist) != nsignals:
raise ValueError("Wrong number/type of outputs given.")
# Convert the list of interconnections to a connection map (matrix)
self.connect_map = np.zeros((ninputs, noutputs))
for connection in connections:
input_index = self._parse_input_spec(connection[0])
for output_spec in connection[1:]:
output_index, gain = self._parse_output_spec(output_spec)
self.connect_map[input_index, output_index] = gain
# Convert the input list to a matrix: maps system to subsystems
self.input_map = np.zeros((ninputs, self.ninputs))
for index, inpspec in enumerate(inplist):
if isinstance(inpspec, (int, str, tuple)):
inpspec = [inpspec]
for spec in inpspec:
self.input_map[self._parse_input_spec(spec), index] = 1
# Convert the output list to a matrix: maps subsystems to system
self.output_map = np.zeros((self.noutputs, noutputs + ninputs))
for index in range(len(outlist)):
ylist_index, gain = self._parse_output_spec(outlist[index])
self.output_map[index, ylist_index] = gain
# Save the parameters for the system
self.params = params.copy()
|
def __init__(
self,
syslist,
connections=[],
inplist=[],
outlist=[],
inputs=None,
outputs=None,
states=None,
params={},
dt=None,
name=None,
):
"""Create an I/O system from a list of systems + connection info.
The InterconnectedSystem class is used to represent an input/output
system that consists of an interconnection between a set of subystems.
The outputs of each subsystem can be summed together to to provide
inputs to other subsystems. The overall system inputs and outputs can
be any subset of subsystem inputs and outputs.
Parameters
----------
syslist : array_like of InputOutputSystems
The list of input/output systems to be connected
connections : tuple of connection specifications, optional
Description of the internal connections between the subsystems.
Each element of the tuple describes an input to one of the
subsystems. The entries are are of the form:
(input-spec, output-spec1, output-spec2, ...)
The input-spec should be a tuple of the form `(subsys_i, inp_j)`
where `subsys_i` is the index into `syslist` and `inp_j` is the
index into the input vector for the subsystem. If `subsys_i` has
a single input, then the subsystem index `subsys_i` can be listed
as the input-spec. If systems and signals are given names, then
the form 'sys.sig' or ('sys', 'sig') are also recognized.
Each output-spec should be a tuple of the form `(subsys_i, out_j,
gain)`. The input will be constructed by summing the listed
outputs after multiplying by the gain term. If the gain term is
omitted, it is assumed to be 1. If the system has a single
output, then the subsystem index `subsys_i` can be listed as the
input-spec. If systems and signals are given names, then the form
'sys.sig', ('sys', 'sig') or ('sys', 'sig', gain) are also
recognized, and the special form '-sys.sig' can be used to specify
a signal with gain -1.
If omitted, the connection map (matrix) can be specified using the
:func:`~control.InterconnectedSystem.set_connect_map` method.
inplist : tuple of input specifications, optional
List of specifications for how the inputs for the overall system
are mapped to the subsystem inputs. The input specification is
the same as the form defined in the connection specification.
Each system input is added to the input for the listed subsystem.
If omitted, the input map can be specified using the
`set_input_map` method.
outlist : tuple of output specifications, optional
List of specifications for how the outputs for the subsystems are
mapped to overall system outputs. The output specification is the
same as the form defined in the connection specification
(including the optional gain term). Numbered outputs must be
chosen from the list of subsystem outputs, but named outputs can
also be contained in the list of subsystem inputs.
If omitted, the output map can be specified using the
`set_output_map` method.
params : dict, optional
Parameter values for the systems. Passed to the evaluation
functions for the system as default values, overriding internal
defaults.
dt : timebase, optional
The timebase for the system, used to specify whether the system is
operating in continuous or discrete time. It can have the
following values:
* dt = None No timebase specified
* dt = 0 Continuous time system
* dt > 0 Discrete time system with sampling time dt
* dt = True Discrete time with unspecified sampling time
name : string, optional
System name (used for specifying signals).
"""
# Convert input and output names to lists if they aren't already
if not isinstance(inplist, (list, tuple)):
inplist = [inplist]
if not isinstance(outlist, (list, tuple)):
outlist = [outlist]
# Check to make sure all systems are consistent
self.syslist = syslist
self.syslist_index = {}
dt = None
nstates = 0
self.state_offset = []
ninputs = 0
self.input_offset = []
noutputs = 0
self.output_offset = []
system_count = 0
for sys in syslist:
# Make sure time bases are consistent
if dt is None and sys.dt is not None:
# Timebase was not specified; set to match this system
dt = sys.dt
elif dt != sys.dt:
raise TypeError("System timebases are not compatible")
# Make sure number of inputs, outputs, states is given
if sys.ninputs is None or sys.noutputs is None or sys.nstates is None:
raise TypeError(
"System '%s' must define number of inputs, "
"outputs, states in order to be connected" % sys
)
# Keep track of the offsets into the states, inputs, outputs
self.input_offset.append(ninputs)
self.output_offset.append(noutputs)
self.state_offset.append(nstates)
# Keep track of the total number of states, inputs, outputs
nstates += sys.nstates
ninputs += sys.ninputs
noutputs += sys.noutputs
# Store the index to the system for later retrieval
# TODO: look for duplicated system names
self.syslist_index[sys.name] = system_count
system_count += 1
# Check for duplicate systems or duplicate names
sysobj_list = []
sysname_list = []
for sys in syslist:
if sys in sysobj_list:
warn("Duplicate object found in system list: %s" % str(sys))
elif sys.name is not None and sys.name in sysname_list:
warn("Duplicate name found in system list: %s" % sys.name)
sysobj_list.append(sys)
sysname_list.append(sys.name)
# Create the I/O system
super(InterconnectedSystem, self).__init__(
inputs=len(inplist), outputs=len(outlist), states=nstates, params=params, dt=dt
)
# If input or output list was specified, update it
nsignals, self.input_index = self._process_signal_list(inputs, prefix="u")
if nsignals is not None and len(inplist) != nsignals:
raise ValueError("Wrong number/type of inputs given.")
nsignals, self.output_index = self._process_signal_list(outputs, prefix="y")
if nsignals is not None and len(outlist) != nsignals:
raise ValueError("Wrong number/type of outputs given.")
# Convert the list of interconnections to a connection map (matrix)
self.connect_map = np.zeros((ninputs, noutputs))
for connection in connections:
input_index = self._parse_input_spec(connection[0])
for output_spec in connection[1:]:
output_index, gain = self._parse_output_spec(output_spec)
self.connect_map[input_index, output_index] = gain
# Convert the input list to a matrix: maps system to subsystems
self.input_map = np.zeros((ninputs, self.ninputs))
for index, inpspec in enumerate(inplist):
if isinstance(inpspec, (int, str, tuple)):
inpspec = [inpspec]
for spec in inpspec:
self.input_map[self._parse_input_spec(spec), index] = 1
# Convert the output list to a matrix: maps subsystems to system
self.output_map = np.zeros((self.noutputs, noutputs + ninputs))
for index in range(len(outlist)):
ylist_index, gain = self._parse_output_spec(outlist[index])
self.output_map[index, ylist_index] = gain
# Save the parameters for the system
self.params = params.copy()
|
https://github.com/python-control/python-control/issues/329
|
sys = ctl.NonlinearIOSystem(lambda t, x, u, params: x)
print(sys)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-85c64df40808> in <module>()
----> 1 print(sys)
/home/arnold/pythonBox/control_dev/python-control-rabraker/control/iosys.py in __str__(self)
214 """String representation of an input/output system"""
215 str = "System: " + (self.name if self.name else "(none)") + "\n"
--> 216 str += "Inputs (%d): " % self.ninputs
217 for key in self.input_index: str += key + ", "
218 str += "\nOutputs (%d): " % self.noutputs
TypeError: %d format: a number is required, not NoneType
|
TypeError
|
def __init__(
self,
node_ip_address,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None,
):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.ip = node_ip_address
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
assert self.node_id, "Empty node id (RAY_NODE_ID)."
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port(f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip, self.grpc_port)
self.aioredis_client = None
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
f"{self.ip}:{self.node_manager_port}"
)
self.http_session = None
|
def __init__(
self,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None,
):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
assert self.node_id, "Empty node id (RAY_NODE_ID)."
self.ip = ray._private.services.get_node_ip_address()
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port(f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip, self.grpc_port)
self.aioredis_client = None
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
"{}:{}".format(self.ip, self.node_manager_port)
)
self.http_session = None
|
https://github.com/ray-project/ray/issues/11940
|
2020-11-11 14:13:37,114 WARNING worker.py:1111 -- The agent on node *** failed with the following error:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/new_dashboard/agent.py", line 298, in <module>
loop.run_until_complete(agent.run())
File "/usr/lib/python3.6/asyncio/base_events.py", line 484, in run_until_complete
return future.result()
File "/usr/local/lib/python3.6/dist-packages/ray/new_dashboard/agent.py", line 172, in run
agent_ip_address=self.ip))
File "/usr/local/lib/python3.6/dist-packages/grpc/experimental/aio/_call.py", line 286, in __await__
self._cython_call._status)
grpc.experimental.aio._call.AioRpcError: <AioRpcError of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "failed to connect to all addresses"
debug_error_string = "{"created":"@1605096817.110308830","description":"Failed to pick subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":4090,"referenced_errors":[{"created":"@1605096817.110303917","description":"failed to connect to all addresses","file":"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc","file_line":394,"grpc_status":14}]}"
|
grpc.experimental.aio._call.AioRpcError
|
def start_raylet(
redis_address,
node_ip_address,
node_manager_port,
raylet_name,
plasma_store_name,
worker_path,
temp_dir,
session_dir,
log_dir,
resource_spec,
plasma_directory,
object_store_memory,
min_worker_port=None,
max_worker_port=None,
worker_port_list=None,
object_manager_port=None,
redis_password=None,
metrics_agent_port=None,
metrics_export_port=None,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
config=None,
java_worker_options=None,
load_code_from_local=False,
huge_pages=False,
fate_share=None,
socket_to_use=None,
head_node=False,
start_initial_python_workers_for_first_job=False,
code_search_path=None,
):
"""Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
node_manager_port(int): The port to use for the node manager. This must
not be 0.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
session_dir (str): The path of this session.
log_dir (str): The path of the dir where log files are created.
resource_spec (ResourceSpec): Resources for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
min_worker_port (int): The lowest port number that workers will bind
on. If not set, random ports will be chosen.
max_worker_port (int): The highest port number that workers will bind
on. If set, min_worker_port must also be set.
redis_password: The password to use when connecting to Redis.
metrics_agent_port(int): The port where metrics agent is bound to.
metrics_export_port(int): The port at which metrics are exposed to.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
java_worker_options (list): The command options for Java worker.
code_search_path (list): Code search path for worker. code_search_path
is added to worker command in non-multi-tenancy mode and job_config
in multi-tenancy mode.
Returns:
ProcessInfo for the process that was started.
"""
# The caller must provide a node manager port so that we can correctly
# populate the command to start a worker.
assert node_manager_port is not None and node_manager_port != 0
config_str = serialize_config(config)
if use_valgrind and use_profiler:
raise ValueError("Cannot use valgrind and profiler at the same time.")
assert resource_spec.resolved()
num_initial_workers = resource_spec.num_cpus
static_resources = resource_spec.to_resource_dict()
# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
1, min(multiprocessing.cpu_count(), num_cpus_static)
)
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()]
)
gcs_ip_address, gcs_port = redis_address.split(":")
has_java_command = False
if shutil.which("java") is not None:
has_java_command = True
ray_java_installed = False
try:
jars_dir = get_ray_jars_dir()
if os.path.exists(jars_dir):
ray_java_installed = True
except Exception:
pass
include_java = has_java_command and ray_java_installed
if include_java is True:
java_worker_command = build_java_worker_command(
json.loads(java_worker_options) if java_worker_options else [],
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
code_search_path,
)
else:
java_worker_command = []
if os.path.exists(DEFAULT_WORKER_EXECUTABLE):
cpp_worker_command = build_cpp_worker_command(
"",
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
)
else:
cpp_worker_command = []
# Create the command that the Raylet will use to start workers.
start_worker_command = [
sys.executable,
worker_path,
f"--node-ip-address={node_ip_address}",
f"--node-manager-port={node_manager_port}",
f"--object-store-name={plasma_store_name}",
f"--raylet-name={raylet_name}",
f"--redis-address={redis_address}",
f"--config-list={config_str}",
f"--temp-dir={temp_dir}",
f"--metrics-agent-port={metrics_agent_port}",
]
if code_search_path:
start_worker_command.append(f"--code-search-path={code_search_path}")
if redis_password:
start_worker_command += [f"--redis-password={redis_password}"]
# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
object_manager_port = 0
if min_worker_port is None:
min_worker_port = 0
if max_worker_port is None:
max_worker_port = 0
if code_search_path is not None and len(code_search_path) > 0:
load_code_from_local = True
if load_code_from_local:
start_worker_command += ["--load-code-from-local"]
# Create agent command
agent_command = [
sys.executable,
"-u",
os.path.join(RAY_PATH, "new_dashboard/agent.py"),
f"--node-ip-address={node_ip_address}",
f"--redis-address={redis_address}",
f"--metrics-export-port={metrics_export_port}",
f"--dashboard-agent-port={metrics_agent_port}",
f"--node-manager-port={node_manager_port}",
f"--object-store-name={plasma_store_name}",
f"--raylet-name={raylet_name}",
f"--temp-dir={temp_dir}",
f"--log-dir={log_dir}",
]
if redis_password is not None and len(redis_password) != 0:
agent_command.append("--redis-password={}".format(redis_password))
command = [
RAYLET_EXECUTABLE,
f"--raylet_socket_name={raylet_name}",
f"--store_socket_name={plasma_store_name}",
f"--object_manager_port={object_manager_port}",
f"--min_worker_port={min_worker_port}",
f"--max_worker_port={max_worker_port}",
f"--node_manager_port={node_manager_port}",
f"--node_ip_address={node_ip_address}",
f"--redis_address={gcs_ip_address}",
f"--redis_port={gcs_port}",
f"--num_initial_workers={num_initial_workers}",
f"--maximum_startup_concurrency={maximum_startup_concurrency}",
f"--static_resource_list={resource_argument}",
f"--config_list={config_str}",
f"--python_worker_command={subprocess.list2cmdline(start_worker_command)}", # noqa
f"--java_worker_command={subprocess.list2cmdline(java_worker_command)}", # noqa
f"--cpp_worker_command={subprocess.list2cmdline(cpp_worker_command)}", # noqa
f"--redis_password={redis_password or ''}",
f"--temp_dir={temp_dir}",
f"--session_dir={session_dir}",
f"--metrics-agent-port={metrics_agent_port}",
f"--metrics_export_port={metrics_export_port}",
]
if worker_port_list is not None:
command.append(f"--worker_port_list={worker_port_list}")
if start_initial_python_workers_for_first_job:
command.append(
"--num_initial_python_workers_for_first_job={}".format(
resource_spec.num_cpus
)
)
command.append("--agent_command={}".format(subprocess.list2cmdline(agent_command)))
if config.get("plasma_store_as_thread"):
# command related to the plasma store
command += [
f"--object_store_memory={object_store_memory}",
f"--plasma_directory={plasma_directory}",
]
if huge_pages:
command.append("--huge_pages")
if socket_to_use:
socket_to_use.close()
if head_node:
command.append("--head_node")
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET,
use_valgrind=use_valgrind,
use_gdb=False,
use_valgrind_profiler=use_profiler,
use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share,
)
return process_info
|
def start_raylet(
redis_address,
node_ip_address,
node_manager_port,
raylet_name,
plasma_store_name,
worker_path,
temp_dir,
session_dir,
log_dir,
resource_spec,
plasma_directory,
object_store_memory,
min_worker_port=None,
max_worker_port=None,
worker_port_list=None,
object_manager_port=None,
redis_password=None,
metrics_agent_port=None,
metrics_export_port=None,
use_valgrind=False,
use_profiler=False,
stdout_file=None,
stderr_file=None,
config=None,
java_worker_options=None,
load_code_from_local=False,
huge_pages=False,
fate_share=None,
socket_to_use=None,
head_node=False,
start_initial_python_workers_for_first_job=False,
code_search_path=None,
):
"""Start a raylet, which is a combined local scheduler and object manager.
Args:
redis_address (str): The address of the primary Redis server.
node_ip_address (str): The IP address of this node.
node_manager_port(int): The port to use for the node manager. This must
not be 0.
raylet_name (str): The name of the raylet socket to create.
plasma_store_name (str): The name of the plasma store socket to connect
to.
worker_path (str): The path of the Python file that new worker
processes will execute.
temp_dir (str): The path of the temporary directory Ray will use.
session_dir (str): The path of this session.
log_dir (str): The path of the dir where log files are created.
resource_spec (ResourceSpec): Resources for this raylet.
object_manager_port: The port to use for the object manager. If this is
None, then the object manager will choose its own port.
min_worker_port (int): The lowest port number that workers will bind
on. If not set, random ports will be chosen.
max_worker_port (int): The highest port number that workers will bind
on. If set, min_worker_port must also be set.
redis_password: The password to use when connecting to Redis.
metrics_agent_port(int): The port where metrics agent is bound to.
metrics_export_port(int): The port at which metrics are exposed to.
use_valgrind (bool): True if the raylet should be started inside
of valgrind. If this is True, use_profiler must be False.
use_profiler (bool): True if the raylet should be started inside
a profiler. If this is True, use_valgrind must be False.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
config (dict|None): Optional Raylet configuration that will
override defaults in RayConfig.
java_worker_options (list): The command options for Java worker.
code_search_path (list): Code search path for worker. code_search_path
is added to worker command in non-multi-tenancy mode and job_config
in multi-tenancy mode.
Returns:
ProcessInfo for the process that was started.
"""
# The caller must provide a node manager port so that we can correctly
# populate the command to start a worker.
assert node_manager_port is not None and node_manager_port != 0
config_str = serialize_config(config)
if use_valgrind and use_profiler:
raise ValueError("Cannot use valgrind and profiler at the same time.")
assert resource_spec.resolved()
num_initial_workers = resource_spec.num_cpus
static_resources = resource_spec.to_resource_dict()
# Limit the number of workers that can be started in parallel by the
# raylet. However, make sure it is at least 1.
num_cpus_static = static_resources.get("CPU", 0)
maximum_startup_concurrency = max(
1, min(multiprocessing.cpu_count(), num_cpus_static)
)
# Format the resource argument in a form like 'CPU,1.0,GPU,0,Custom,3'.
resource_argument = ",".join(
["{},{}".format(*kv) for kv in static_resources.items()]
)
gcs_ip_address, gcs_port = redis_address.split(":")
has_java_command = False
if shutil.which("java") is not None:
has_java_command = True
ray_java_installed = False
try:
jars_dir = get_ray_jars_dir()
if os.path.exists(jars_dir):
ray_java_installed = True
except Exception:
pass
include_java = has_java_command and ray_java_installed
if include_java is True:
java_worker_command = build_java_worker_command(
json.loads(java_worker_options) if java_worker_options else [],
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
code_search_path,
)
else:
java_worker_command = []
if os.path.exists(DEFAULT_WORKER_EXECUTABLE):
cpp_worker_command = build_cpp_worker_command(
"",
redis_address,
node_manager_port,
plasma_store_name,
raylet_name,
redis_password,
session_dir,
)
else:
cpp_worker_command = []
# Create the command that the Raylet will use to start workers.
start_worker_command = [
sys.executable,
worker_path,
f"--node-ip-address={node_ip_address}",
f"--node-manager-port={node_manager_port}",
f"--object-store-name={plasma_store_name}",
f"--raylet-name={raylet_name}",
f"--redis-address={redis_address}",
f"--config-list={config_str}",
f"--temp-dir={temp_dir}",
f"--metrics-agent-port={metrics_agent_port}",
]
if code_search_path:
start_worker_command.append(f"--code-search-path={code_search_path}")
if redis_password:
start_worker_command += [f"--redis-password={redis_password}"]
# If the object manager port is None, then use 0 to cause the object
# manager to choose its own port.
if object_manager_port is None:
object_manager_port = 0
if min_worker_port is None:
min_worker_port = 0
if max_worker_port is None:
max_worker_port = 0
if code_search_path is not None and len(code_search_path) > 0:
load_code_from_local = True
if load_code_from_local:
start_worker_command += ["--load-code-from-local"]
# Create agent command
agent_command = [
sys.executable,
"-u",
os.path.join(RAY_PATH, "new_dashboard/agent.py"),
f"--redis-address={redis_address}",
f"--metrics-export-port={metrics_export_port}",
f"--dashboard-agent-port={metrics_agent_port}",
f"--node-manager-port={node_manager_port}",
f"--object-store-name={plasma_store_name}",
f"--raylet-name={raylet_name}",
f"--temp-dir={temp_dir}",
f"--log-dir={log_dir}",
]
if redis_password is not None and len(redis_password) != 0:
agent_command.append("--redis-password={}".format(redis_password))
command = [
RAYLET_EXECUTABLE,
f"--raylet_socket_name={raylet_name}",
f"--store_socket_name={plasma_store_name}",
f"--object_manager_port={object_manager_port}",
f"--min_worker_port={min_worker_port}",
f"--max_worker_port={max_worker_port}",
f"--node_manager_port={node_manager_port}",
f"--node_ip_address={node_ip_address}",
f"--redis_address={gcs_ip_address}",
f"--redis_port={gcs_port}",
f"--num_initial_workers={num_initial_workers}",
f"--maximum_startup_concurrency={maximum_startup_concurrency}",
f"--static_resource_list={resource_argument}",
f"--config_list={config_str}",
f"--python_worker_command={subprocess.list2cmdline(start_worker_command)}", # noqa
f"--java_worker_command={subprocess.list2cmdline(java_worker_command)}", # noqa
f"--cpp_worker_command={subprocess.list2cmdline(cpp_worker_command)}", # noqa
f"--redis_password={redis_password or ''}",
f"--temp_dir={temp_dir}",
f"--session_dir={session_dir}",
f"--metrics-agent-port={metrics_agent_port}",
f"--metrics_export_port={metrics_export_port}",
]
if worker_port_list is not None:
command.append(f"--worker_port_list={worker_port_list}")
if start_initial_python_workers_for_first_job:
command.append(
"--num_initial_python_workers_for_first_job={}".format(
resource_spec.num_cpus
)
)
command.append("--agent_command={}".format(subprocess.list2cmdline(agent_command)))
if config.get("plasma_store_as_thread"):
# command related to the plasma store
command += [
f"--object_store_memory={object_store_memory}",
f"--plasma_directory={plasma_directory}",
]
if huge_pages:
command.append("--huge_pages")
if socket_to_use:
socket_to_use.close()
if head_node:
command.append("--head_node")
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_RAYLET,
use_valgrind=use_valgrind,
use_gdb=False,
use_valgrind_profiler=use_profiler,
use_perftools_profiler=("RAYLET_PERFTOOLS_PATH" in os.environ),
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share,
)
return process_info
|
https://github.com/ray-project/ray/issues/11940
|
2020-11-11 14:13:37,114 WARNING worker.py:1111 -- The agent on node *** failed with the following error:
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/new_dashboard/agent.py", line 298, in <module>
loop.run_until_complete(agent.run())
File "/usr/lib/python3.6/asyncio/base_events.py", line 484, in run_until_complete
return future.result()
File "/usr/local/lib/python3.6/dist-packages/ray/new_dashboard/agent.py", line 172, in run
agent_ip_address=self.ip))
File "/usr/local/lib/python3.6/dist-packages/grpc/experimental/aio/_call.py", line 286, in __await__
self._cython_call._status)
grpc.experimental.aio._call.AioRpcError: <AioRpcError of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "failed to connect to all addresses"
debug_error_string = "{"created":"@1605096817.110308830","description":"Failed to pick subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":4090,"referenced_errors":[{"created":"@1605096817.110303917","description":"failed to connect to all addresses","file":"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc","file_line":394,"grpc_status":14}]}"
|
grpc.experimental.aio._call.AioRpcError
|
def __init__(
self,
redis_address,
autoscaling_config,
redis_password=None,
prefix_cluster_info=False,
):
# Initialize the Redis clients.
ray.state.state._initialize_global_state(
redis_address, redis_password=redis_password
)
self.redis = ray._private.services.create_redis_client(
redis_address, password=redis_password
)
# Initialize the gcs stub for getting all node resource usage.
gcs_address = self.redis.get("GcsServerAddress").decode("utf-8")
options = (("grpc.enable_http_proxy", 0),)
gcs_channel = grpc.insecure_channel(gcs_address, options=options)
self.gcs_node_resources_stub = gcs_service_pb2_grpc.NodeResourceInfoGcsServiceStub(
gcs_channel
)
# Set the redis client and mode so _internal_kv works for autoscaler.
worker = ray.worker.global_worker
worker.redis_client = self.redis
worker.mode = 0
head_node_ip = redis_address.split(":")[0]
self.load_metrics = LoadMetrics(local_ip=head_node_ip)
self.last_avail_resources = None
self.event_summarizer = EventSummarizer()
self.prefix_cluster_info = prefix_cluster_info
self.autoscaling_config = autoscaling_config
self.autoscaler = None
logger.info("Monitor: Started")
|
def __init__(
self,
redis_address,
autoscaling_config,
redis_password=None,
prefix_cluster_info=False,
):
# Initialize the Redis clients.
ray.state.state._initialize_global_state(
redis_address, redis_password=redis_password
)
self.redis = ray._private.services.create_redis_client(
redis_address, password=redis_password
)
# Initialize the gcs stub for getting all node resource usage.
gcs_address = self.redis.get("GcsServerAddress").decode("utf-8")
gcs_channel = grpc.insecure_channel(gcs_address)
self.gcs_node_resources_stub = gcs_service_pb2_grpc.NodeResourceInfoGcsServiceStub(
gcs_channel
)
# Set the redis client and mode so _internal_kv works for autoscaler.
worker = ray.worker.global_worker
worker.redis_client = self.redis
worker.mode = 0
head_node_ip = redis_address.split(":")[0]
self.load_metrics = LoadMetrics(local_ip=head_node_ip)
self.last_avail_resources = None
self.event_summarizer = EventSummarizer()
self.prefix_cluster_info = prefix_cluster_info
self.autoscaling_config = autoscaling_config
self.autoscaler = None
logger.info("Monitor: Started")
|
https://github.com/ray-project/ray/issues/14350
|
In [1]: import ray
In [2]: ray.init()
2021-02-25 22:41:24,961 INFO services.py:1226 -- View the Ray dashboard at http://127.0.0.1:8265
2021-02-25 22:41:26,775 WARNING worker.py:1063 -- The autoscaler failed with the following error:
Traceback (most recent call last):
File "/home/lxy/git_repository/ray/python/ray/monitor.py", line 272, in run
self._run()
File "/home/lxy/git_repository/ray/python/ray/monitor.py", line 166, in _run
self.update_load_metrics()
File "/home/lxy/git_repository/ray/python/ray/monitor.py", line 131, in update_load_metrics
request, timeout=4)
File "/home/lxy/miniconda3/envs/ray/lib/python3.7/site-packages/grpc/_channel.py", line 826, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/home/lxy/miniconda3/envs/ray/lib/python3.7/site-packages/grpc/_channel.py", line 729, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Socket closed"
debug_error_string = "{"created":"@1614264084.283816946","description":"Error received from peer ipv4:10.239.4.80:913","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Socket closed","grpc_status":14}"
|
grpc._channel._InactiveRpcError
|
def terminate_node(self, node_id):
node = self._get_cached_node(node_id)
if self.cache_stopped_nodes:
if node.spot_instance_request_id:
cli_logger.print(
"Terminating instance {} "
+ cf.dimmed("(cannot stop spot instances, only terminate)"),
node_id,
) # todo: show node name?
node.terminate()
else:
cli_logger.print(
"Stopping instance {} "
+ cf.dimmed(
"(to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)"
),
node_id,
) # todo: show node name?
node.stop()
else:
node.terminate()
# TODO (Alex): We are leaking the tag cache here. Naively, we would
# want to just remove the cache entry here, but terminating can be
# asyncrhonous or error, which would result in a use after free error.
# If this leak becomes bad, we can garbage collect the tag cache when
# the node cache is updated.
pass
|
def terminate_node(self, node_id):
node = self._get_cached_node(node_id)
if self.cache_stopped_nodes:
if node.spot_instance_request_id:
cli_logger.print(
"Terminating instance {} "
+ cf.dimmed("(cannot stop spot instances, only terminate)"),
node_id,
) # todo: show node name?
node.terminate()
else:
cli_logger.print(
"Stopping instance {} "
+ cf.dimmed(
"(to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)"
),
node_id,
) # todo: show node name?
node.stop()
else:
node.terminate()
self.tag_cache.pop(node_id, None)
self.tag_cache_pending.pop(node_id, None)
|
https://github.com/ray-project/ray/issues/14264
|
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Removing 1 nodes of type cpu_48_spot (idle).
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Adding 1 nodes of type cpu_48_spot.
2021-02-17 14:55:40,430 INFO load_metrics.py:102 – LoadMetrics: Removed mapping: 172.31.23.116 - 1613573430.7000167
2021-02-17 14:55:40,430 INFO load_metrics.py:109 – LoadMetrics: Removed 1 stale ip mappings: {‘172.31.23.116’} not in {‘172.31.16.240’, ‘172.31.27.173’, ‘172.31.26.163’, ‘172.31.20.177’, ‘172.31.25.79’, ‘172.31.28.159’, ‘172.31.21.227’, ‘172.31.24.131’, ‘172.31.31.164’, ‘172.31.22.24’, ‘172.31.26.41’, ‘172.31.19.126’, ‘172.31.22.66’, ‘172.31.26.13’, ‘172.31.30.105’, ‘172.31.25.157’, ‘172.31.27.26’}
2021-02-17 14:55:40,744 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:40,744 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:46,909 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:46,909 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:47,082 INFO monitor.py:207 – :event_summary:Resized to 724 CPUs.
2021-02-17 14:55:52,997 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:52,998 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:58,965 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:58,965 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:05,002 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:05,003 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:10,999 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:11,000 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,001 CRITICAL autoscaler.py:152 – StandardAutoscaler: Too many errors, abort.
2021-02-17 14:56:11,001 ERROR monitor.py:271 – Error in monitor loop
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 269, in run
self._run()
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 202, in _run
self.autoscaler.update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 154, in update
raise e
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,002 ERROR autoscaler.py:724 – StandardAutoscaler: kill_workers triggered
2021-02-17 14:56:11,453 ERROR autoscaler.py:729 – StandardAutoscaler: terminated 16 node(s)
2021-02-17 14:56:11,453 INFO monitor.py:250 – Monitor: Exception caught. Taking down workers…
2021-02-17 14:56:11,680 INFO monitor.py:262 – Monitor: Workers taken down.
|
KeyError
|
def terminate_nodes(self, node_ids):
if not node_ids:
return
if self.cache_stopped_nodes:
spot_ids = []
on_demand_ids = []
for node_id in node_ids:
if self._get_cached_node(node_id).spot_instance_request_id:
spot_ids += [node_id]
else:
on_demand_ids += [node_id]
if on_demand_ids:
# todo: show node names?
cli_logger.print(
"Stopping instances {} "
+ cf.dimmed(
"(to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)"
),
cli_logger.render_list(on_demand_ids),
)
self.ec2.meta.client.stop_instances(InstanceIds=on_demand_ids)
if spot_ids:
cli_logger.print(
"Terminating instances {} "
+ cf.dimmed("(cannot stop spot instances, only terminate)"),
cli_logger.render_list(spot_ids),
)
self.ec2.meta.client.terminate_instances(InstanceIds=spot_ids)
else:
self.ec2.meta.client.terminate_instances(InstanceIds=node_ids)
|
def terminate_nodes(self, node_ids):
if not node_ids:
return
if self.cache_stopped_nodes:
spot_ids = []
on_demand_ids = []
for node_id in node_ids:
if self._get_cached_node(node_id).spot_instance_request_id:
spot_ids += [node_id]
else:
on_demand_ids += [node_id]
if on_demand_ids:
# todo: show node names?
cli_logger.print(
"Stopping instances {} "
+ cf.dimmed(
"(to terminate instead, "
"set `cache_stopped_nodes: False` "
"under `provider` in the cluster configuration)"
),
cli_logger.render_list(on_demand_ids),
)
self.ec2.meta.client.stop_instances(InstanceIds=on_demand_ids)
if spot_ids:
cli_logger.print(
"Terminating instances {} "
+ cf.dimmed("(cannot stop spot instances, only terminate)"),
cli_logger.render_list(spot_ids),
)
self.ec2.meta.client.terminate_instances(InstanceIds=spot_ids)
else:
self.ec2.meta.client.terminate_instances(InstanceIds=node_ids)
for node_id in node_ids:
self.tag_cache.pop(node_id, None)
self.tag_cache_pending.pop(node_id, None)
|
https://github.com/ray-project/ray/issues/14264
|
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Removing 1 nodes of type cpu_48_spot (idle).
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Adding 1 nodes of type cpu_48_spot.
2021-02-17 14:55:40,430 INFO load_metrics.py:102 – LoadMetrics: Removed mapping: 172.31.23.116 - 1613573430.7000167
2021-02-17 14:55:40,430 INFO load_metrics.py:109 – LoadMetrics: Removed 1 stale ip mappings: {‘172.31.23.116’} not in {‘172.31.16.240’, ‘172.31.27.173’, ‘172.31.26.163’, ‘172.31.20.177’, ‘172.31.25.79’, ‘172.31.28.159’, ‘172.31.21.227’, ‘172.31.24.131’, ‘172.31.31.164’, ‘172.31.22.24’, ‘172.31.26.41’, ‘172.31.19.126’, ‘172.31.22.66’, ‘172.31.26.13’, ‘172.31.30.105’, ‘172.31.25.157’, ‘172.31.27.26’}
2021-02-17 14:55:40,744 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:40,744 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:46,909 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:46,909 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:47,082 INFO monitor.py:207 – :event_summary:Resized to 724 CPUs.
2021-02-17 14:55:52,997 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:52,998 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:58,965 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:58,965 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:05,002 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:05,003 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:10,999 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:11,000 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,001 CRITICAL autoscaler.py:152 – StandardAutoscaler: Too many errors, abort.
2021-02-17 14:56:11,001 ERROR monitor.py:271 – Error in monitor loop
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 269, in run
self._run()
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 202, in _run
self.autoscaler.update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 154, in update
raise e
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,002 ERROR autoscaler.py:724 – StandardAutoscaler: kill_workers triggered
2021-02-17 14:56:11,453 ERROR autoscaler.py:729 – StandardAutoscaler: terminated 16 node(s)
2021-02-17 14:56:11,453 INFO monitor.py:250 – Monitor: Exception caught. Taking down workers…
2021-02-17 14:56:11,680 INFO monitor.py:262 – Monitor: Workers taken down.
|
KeyError
|
def _print(self, msg: str, _level_str: str = "INFO", _linefeed: bool = True):
"""Proxy for printing messages.
Args:
msg (str): Message to print.
linefeed (bool):
If `linefeed` is `False` no linefeed is printed at the
end of the message.
"""
if self.pretty:
rendered_message = " " * self.indent_level + msg
else:
if msg.strip() == "":
return
caller_info = _external_caller_info()
record = logging.LogRecord(
name="cli",
# We override the level name later
# TODO(maximsmol): give approximate level #s to our log levels
level=0,
# The user-facing logs do not need this information anyway
# and it would be very tedious to extract since _print
# can be at varying depths in the call stack
# TODO(maximsmol): do it anyway to be extra
pathname=caller_info["filename"],
lineno=caller_info["lineno"],
msg=msg,
args={},
# No exception
exc_info=None,
)
record.levelname = _level_str
rendered_message = self._formatter.format(record)
# We aren't using standard python logging convention, so we hardcode
# the log levels for now.
if _level_str in ["WARNING", "ERROR", "PANIC"]:
stream = sys.stderr
else:
stream = sys.stdout
if not _linefeed:
stream.write(rendered_message)
stream.flush()
return
print(rendered_message, file=stream)
|
def _print(self, msg: str, _level_str: str = "INFO", _linefeed: bool = True):
"""Proxy for printing messages.
Args:
msg (str): Message to print.
linefeed (bool):
If `linefeed` is `False` no linefeed is printed at the
end of the message.
"""
if self.pretty:
rendered_message = " " * self.indent_level + msg
else:
if msg.strip() == "":
return
caller_info = _external_caller_info()
record = logging.LogRecord(
name="cli",
# We override the level name later
# TODO(maximsmol): give approximate level #s to our log levels
level=0,
# The user-facing logs do not need this information anyway
# and it would be very tedious to extract since _print
# can be at varying depths in the call stack
# TODO(maximsmol): do it anyway to be extra
pathname=caller_info["filename"],
lineno=caller_info["lineno"],
msg=msg,
args={},
# No exception
exc_info=None,
)
record.levelname = _level_str
rendered_message = self._formatter.format(record)
if not _linefeed:
sys.stdout.write(rendered_message)
sys.stdout.flush()
return
print(rendered_message)
|
https://github.com/ray-project/ray/issues/14264
|
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Removing 1 nodes of type cpu_48_spot (idle).
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Adding 1 nodes of type cpu_48_spot.
2021-02-17 14:55:40,430 INFO load_metrics.py:102 – LoadMetrics: Removed mapping: 172.31.23.116 - 1613573430.7000167
2021-02-17 14:55:40,430 INFO load_metrics.py:109 – LoadMetrics: Removed 1 stale ip mappings: {‘172.31.23.116’} not in {‘172.31.16.240’, ‘172.31.27.173’, ‘172.31.26.163’, ‘172.31.20.177’, ‘172.31.25.79’, ‘172.31.28.159’, ‘172.31.21.227’, ‘172.31.24.131’, ‘172.31.31.164’, ‘172.31.22.24’, ‘172.31.26.41’, ‘172.31.19.126’, ‘172.31.22.66’, ‘172.31.26.13’, ‘172.31.30.105’, ‘172.31.25.157’, ‘172.31.27.26’}
2021-02-17 14:55:40,744 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:40,744 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:46,909 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:46,909 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:47,082 INFO monitor.py:207 – :event_summary:Resized to 724 CPUs.
2021-02-17 14:55:52,997 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:52,998 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:58,965 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:58,965 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:05,002 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:05,003 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:10,999 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:11,000 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,001 CRITICAL autoscaler.py:152 – StandardAutoscaler: Too many errors, abort.
2021-02-17 14:56:11,001 ERROR monitor.py:271 – Error in monitor loop
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 269, in run
self._run()
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 202, in _run
self.autoscaler.update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 154, in update
raise e
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,002 ERROR autoscaler.py:724 – StandardAutoscaler: kill_workers triggered
2021-02-17 14:56:11,453 ERROR autoscaler.py:729 – StandardAutoscaler: terminated 16 node(s)
2021-02-17 14:56:11,453 INFO monitor.py:250 – Monitor: Exception caught. Taking down workers…
2021-02-17 14:56:11,680 INFO monitor.py:262 – Monitor: Workers taken down.
|
KeyError
|
def _handle_failure(self, error):
logger.exception("Error in monitor loop")
if (
self.autoscaler is not None
and os.environ.get("RAY_AUTOSCALER_FATESHARE_WORKERS", "") == "1"
):
self.autoscaler.kill_workers()
# Take down autoscaler workers if necessary.
self.destroy_autoscaler_workers()
# Something went wrong, so push an error to all current and future
# drivers.
message = f"The autoscaler failed with the following error:\n{error}"
if _internal_kv_initialized():
_internal_kv_put(DEBUG_AUTOSCALING_ERROR, message, overwrite=True)
redis_client = ray._private.services.create_redis_client(
args.redis_address, password=args.redis_password
)
from ray.utils import push_error_to_driver_through_redis
push_error_to_driver_through_redis(
redis_client, ray_constants.MONITOR_DIED_ERROR, message
)
|
def _handle_failure(self, error):
logger.exception("Error in monitor loop")
if self.autoscaler is not None:
self.autoscaler.kill_workers()
# Take down autoscaler workers if necessary.
self.destroy_autoscaler_workers()
# Something went wrong, so push an error to all current and future
# drivers.
message = f"The autoscaler failed with the following error:\n{error}"
if _internal_kv_initialized():
_internal_kv_put(DEBUG_AUTOSCALING_ERROR, message, overwrite=True)
redis_client = ray._private.services.create_redis_client(
args.redis_address, password=args.redis_password
)
from ray.utils import push_error_to_driver_through_redis
push_error_to_driver_through_redis(
redis_client, ray_constants.MONITOR_DIED_ERROR, message
)
|
https://github.com/ray-project/ray/issues/14264
|
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Removing 1 nodes of type cpu_48_spot (idle).
2021-02-17 14:55:34,817 INFO monitor.py:207 – :event_summary:Adding 1 nodes of type cpu_48_spot.
2021-02-17 14:55:40,430 INFO load_metrics.py:102 – LoadMetrics: Removed mapping: 172.31.23.116 - 1613573430.7000167
2021-02-17 14:55:40,430 INFO load_metrics.py:109 – LoadMetrics: Removed 1 stale ip mappings: {‘172.31.23.116’} not in {‘172.31.16.240’, ‘172.31.27.173’, ‘172.31.26.163’, ‘172.31.20.177’, ‘172.31.25.79’, ‘172.31.28.159’, ‘172.31.21.227’, ‘172.31.24.131’, ‘172.31.31.164’, ‘172.31.22.24’, ‘172.31.26.41’, ‘172.31.19.126’, ‘172.31.22.66’, ‘172.31.26.13’, ‘172.31.30.105’, ‘172.31.25.157’, ‘172.31.27.26’}
2021-02-17 14:55:40,744 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:40,744 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:46,909 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:46,909 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:47,082 INFO monitor.py:207 – :event_summary:Resized to 724 CPUs.
2021-02-17 14:55:52,997 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:52,998 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:55:58,965 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:55:58,965 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:05,002 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:05,003 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:10,999 ERROR autoscaler.py:266 – StandardAutoscaler: i-02b77234ffad2072c: Terminating failed to setup/initialize node.
2021-02-17 14:56:11,000 ERROR autoscaler.py:139 – StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,001 CRITICAL autoscaler.py:152 – StandardAutoscaler: Too many errors, abort.
2021-02-17 14:56:11,001 ERROR monitor.py:271 – Error in monitor loop
Traceback (most recent call last):
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 269, in run
self._run()
File “/home/centos/.local/lib/python3.7/site-packages/ray/monitor.py”, line 202, in _run
self.autoscaler.update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 154, in update
raise e
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 137, in update
self._update()
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 270, in _update
self._get_node_type(node_id) + " (launch failed).",
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/autoscaler.py”, line 598, in _get_node_type
node_tags = self.provider.node_tags(node_id)
File “/home/centos/.local/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py”, line 170, in node_tags
d1 = self.tag_cache[node_id]
KeyError: ‘i-02b77234ffad2072c’
2021-02-17 14:56:11,002 ERROR autoscaler.py:724 – StandardAutoscaler: kill_workers triggered
2021-02-17 14:56:11,453 ERROR autoscaler.py:729 – StandardAutoscaler: terminated 16 node(s)
2021-02-17 14:56:11,453 INFO monitor.py:250 – Monitor: Exception caught. Taking down workers…
2021-02-17 14:56:11,680 INFO monitor.py:262 – Monitor: Workers taken down.
|
KeyError
|
def close_all_files(self):
"""Close all open files (so that we can open more)."""
while len(self.open_file_infos) > 0:
file_info = self.open_file_infos.pop(0)
file_info.file_handle.close()
file_info.file_handle = None
try:
# Test if the worker process that generated the log file
# is still alive. Only applies to worker processes.
if (
file_info.worker_pid != "raylet"
and file_info.worker_pid != "gcs_server"
and file_info.worker_pid != "autoscaler"
):
os.kill(file_info.worker_pid, 0)
except OSError:
# The process is not alive any more, so move the log file
# out of the log directory so glob.glob will not be slowed
# by it.
target = os.path.join(
self.logs_dir, "old", os.path.basename(file_info.filename)
)
try:
shutil.move(file_info.filename, target)
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
logger.warning(
f"Warning: The file {file_info.filename} was not found."
)
else:
raise e
else:
self.closed_file_infos.append(file_info)
self.can_open_more_files = True
|
def close_all_files(self):
"""Close all open files (so that we can open more)."""
while len(self.open_file_infos) > 0:
file_info = self.open_file_infos.pop(0)
file_info.file_handle.close()
file_info.file_handle = None
try:
# Test if the worker process that generated the log file
# is still alive. Only applies to worker processes.
if (
file_info.worker_pid != "raylet"
and file_info.worker_pid != "gcs_server"
):
os.kill(file_info.worker_pid, 0)
except OSError:
# The process is not alive any more, so move the log file
# out of the log directory so glob.glob will not be slowed
# by it.
target = os.path.join(
self.logs_dir, "old", os.path.basename(file_info.filename)
)
try:
shutil.move(file_info.filename, target)
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
logger.warning(
f"Warning: The file {file_info.filename} was not found."
)
else:
raise e
else:
self.closed_file_infos.append(file_info)
self.can_open_more_files = True
|
https://github.com/ray-project/ray/issues/12565
|
2020-12-02 07:26:37,751 WARNING worker.py:1011 -- The log monitor on node ip-172-31-18-179 failed with the following error:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 354, in <module>
log_monitor.run()
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 275, in run
self.open_closed_files()
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 164, in open_closed_files
self.close_all_files()
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 102, in close_all_files
os.kill(file_info.worker_pid, 0)
TypeError: an integer is required (got type str)
|
TypeError
|
def update_log_filenames(self):
"""Update the list of log files to monitor."""
# output of user code is written here
log_file_paths = glob.glob(f"{self.logs_dir}/worker*[.out|.err]")
# segfaults and other serious errors are logged here
raylet_err_paths = glob.glob(f"{self.logs_dir}/raylet*.err")
# If gcs server restarts, there can be multiple log files.
gcs_err_path = glob.glob(f"{self.logs_dir}/gcs_server*.err")
for file_path in log_file_paths + raylet_err_paths + gcs_err_path:
if os.path.isfile(file_path) and file_path not in self.log_filenames:
job_match = JOB_LOG_PATTERN.match(file_path)
if job_match:
job_id = job_match.group(2)
worker_pid = int(job_match.group(3))
else:
job_id = None
worker_pid = None
is_err_file = file_path.endswith("err")
self.log_filenames.add(file_path)
self.closed_file_infos.append(
LogFileInfo(
filename=file_path,
size_when_last_opened=0,
file_position=0,
file_handle=None,
is_err_file=is_err_file,
job_id=job_id,
worker_pid=worker_pid,
)
)
log_filename = os.path.basename(file_path)
logger.info(f"Beginning to track file {log_filename}")
|
def update_log_filenames(self):
"""Update the list of log files to monitor."""
# output of user code is written here
log_file_paths = glob.glob(f"{self.logs_dir}/worker*[.out|.err]")
# segfaults and other serious errors are logged here
raylet_err_paths = glob.glob(f"{self.logs_dir}/raylet*.err")
# If gcs server restarts, there can be multiple log files.
gcs_err_path = glob.glob(f"{self.logs_dir}/gcs_server*.err")
for file_path in log_file_paths + raylet_err_paths + gcs_err_path:
if os.path.isfile(file_path) and file_path not in self.log_filenames:
job_match = JOB_LOG_PATTERN.match(file_path)
if job_match:
job_id = job_match.group(2)
worker_pid = job_match.group(3)
else:
job_id = None
worker_pid = None
is_err_file = file_path.endswith("err")
self.log_filenames.add(file_path)
self.closed_file_infos.append(
LogFileInfo(
filename=file_path,
size_when_last_opened=0,
file_position=0,
file_handle=None,
is_err_file=is_err_file,
job_id=job_id,
worker_pid=worker_pid,
)
)
log_filename = os.path.basename(file_path)
logger.info(f"Beginning to track file {log_filename}")
|
https://github.com/ray-project/ray/issues/12565
|
2020-12-02 07:26:37,751 WARNING worker.py:1011 -- The log monitor on node ip-172-31-18-179 failed with the following error:
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 354, in <module>
log_monitor.run()
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 275, in run
self.open_closed_files()
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 164, in open_closed_files
self.close_all_files()
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/log_monitor.py", line 102, in close_all_files
os.kill(file_info.worker_pid, 0)
TypeError: an integer is required (got type str)
|
TypeError
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if parent is None or parent.pid == 1 or self.ppid != parent.pid:
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS
)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
if sys.platform not in ["win32", "cygwin"]:
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
},
)
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port, *_ = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host, http_port)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]),
)
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel
)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(), agent_port=self.grpc_port, agent_ip_address=self.ip
)
)
tasks = [m.run(self.server) for m in modules]
if sys.platform not in ["win32", "cygwin"]:
tasks.append(check_parent_task)
await asyncio.gather(*tasks)
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if parent is None or parent.pid == 1 or self.ppid != parent.pid:
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS
)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
if sys.platform not in ["win32", "cygwin"]:
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
},
)
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host, http_port)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]),
)
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel
)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(), agent_port=self.grpc_port, agent_ip_address=self.ip
)
)
tasks = [m.run(self.server) for m in modules]
if sys.platform not in ["win32", "cygwin"]:
tasks.append(check_parent_task)
await asyncio.gather(*tasks)
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
https://github.com/ray-project/ray/issues/13351
|
2021-01-07 03:57:35,395 WARNING worker.py:1044 -- The dashboard on node travis-job-a2c3f054-a588-45a5-b10a-2456ba486c28 failed with the following error:
Traceback (most recent call last):
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1073, in create_server
sock.bind(sa)
OSError: [Errno 98] Address already in use
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 197, in <module>
loop.run_until_complete(dashboard.run())
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 488, in run_until_complete
return future.result()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 100, in run
await self.dashboard_head.run()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/head.py", line 220, in run
*(m.run(self.server) for m in modules))
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web.py", line 369, in _run_app
await site.start()
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web_runner.py", line 103, in start
reuse_port=self._reuse_port)
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1077, in create_server
% (sa, err.strerror.lower()))
OSError: [Errno 98] error while attempting to bind on address ('127.0.0.1', 8265): address already in use
|
OSError
|
def setup_static_dir():
build_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "client", "build"
)
module_name = os.path.basename(os.path.dirname(__file__))
if not os.path.isdir(build_dir):
raise FrontendNotFoundError(
errno.ENOENT,
"Dashboard build directory not found. If installing "
"from source, please follow the additional steps "
"required to build the dashboard"
f"(cd python/ray/{module_name}/client "
"&& npm install "
"&& npm ci "
"&& npm run build)",
build_dir,
)
static_dir = os.path.join(build_dir, "static")
routes.static("/static", static_dir, follow_symlinks=True)
return build_dir
|
def setup_static_dir():
build_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "client", "build"
)
module_name = os.path.basename(os.path.dirname(__file__))
if not os.path.isdir(build_dir):
raise OSError(
errno.ENOENT,
"Dashboard build directory not found. If installing "
"from source, please follow the additional steps "
"required to build the dashboard"
f"(cd python/ray/{module_name}/client "
"&& npm install "
"&& npm ci "
"&& npm run build)",
build_dir,
)
static_dir = os.path.join(build_dir, "static")
routes.static("/static", static_dir, follow_symlinks=True)
return build_dir
|
https://github.com/ray-project/ray/issues/13351
|
2021-01-07 03:57:35,395 WARNING worker.py:1044 -- The dashboard on node travis-job-a2c3f054-a588-45a5-b10a-2456ba486c28 failed with the following error:
Traceback (most recent call last):
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1073, in create_server
sock.bind(sa)
OSError: [Errno 98] Address already in use
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 197, in <module>
loop.run_until_complete(dashboard.run())
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 488, in run_until_complete
return future.result()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 100, in run
await self.dashboard_head.run()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/head.py", line 220, in run
*(m.run(self.server) for m in modules))
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web.py", line 369, in _run_app
await site.start()
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web_runner.py", line 103, in start
reuse_port=self._reuse_port)
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1077, in create_server
% (sa, err.strerror.lower()))
OSError: [Errno 98] error while attempting to bind on address ('127.0.0.1', 8265): address already in use
|
OSError
|
def __init__(
self, host, port, port_retries, redis_address, redis_password=None, log_dir=None
):
self.dashboard_head = dashboard_head.DashboardHead(
http_host=host,
http_port=port,
http_port_retries=port_retries,
redis_address=redis_address,
redis_password=redis_password,
log_dir=log_dir,
)
# Setup Dashboard Routes
try:
build_dir = setup_static_dir()
logger.info("Setup static dir for dashboard: %s", build_dir)
except FrontendNotFoundError as ex:
# Not to raise FrontendNotFoundError due to NPM incompatibilities
# with Windows.
# Please refer to ci.sh::build_dashboard_front_end()
if sys.platform in ["win32", "cygwin"]:
logger.warning(ex)
else:
raise ex
dashboard_utils.ClassMethodRouteTable.bind(self)
|
def __init__(self, host, port, redis_address, redis_password=None, log_dir=None):
self.dashboard_head = dashboard_head.DashboardHead(
http_host=host,
http_port=port,
redis_address=redis_address,
redis_password=redis_password,
log_dir=log_dir,
)
# Setup Dashboard Routes
build_dir = setup_static_dir()
logger.info("Setup static dir for dashboard: %s", build_dir)
dashboard_utils.ClassMethodRouteTable.bind(self)
|
https://github.com/ray-project/ray/issues/13351
|
2021-01-07 03:57:35,395 WARNING worker.py:1044 -- The dashboard on node travis-job-a2c3f054-a588-45a5-b10a-2456ba486c28 failed with the following error:
Traceback (most recent call last):
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1073, in create_server
sock.bind(sa)
OSError: [Errno 98] Address already in use
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 197, in <module>
loop.run_until_complete(dashboard.run())
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 488, in run_until_complete
return future.result()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 100, in run
await self.dashboard_head.run()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/head.py", line 220, in run
*(m.run(self.server) for m in modules))
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web.py", line 369, in _run_app
await site.start()
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web_runner.py", line 103, in start
reuse_port=self._reuse_port)
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1077, in create_server
% (sa, err.strerror.lower()))
OSError: [Errno 98] error while attempting to bind on address ('127.0.0.1', 8265): address already in use
|
OSError
|
def __init__(
self,
http_host,
http_port,
http_port_retries,
redis_address,
redis_password,
log_dir,
):
# NodeInfoGcsService
self._gcs_node_info_stub = None
self._gcs_rpc_error_counter = 0
# Public attributes are accessible for all head modules.
# Walkaround for issue: https://github.com/ray-project/ray/issues/7084
self.http_host = "127.0.0.1" if http_host == "localhost" else http_host
self.http_port = http_port
self.http_port_retries = http_port_retries
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.log_dir = log_dir
self.aioredis_client = None
self.aiogrpc_gcs_channel = None
self.http_session = None
self.ip = ray._private.services.get_node_ip_address()
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port("[::]:0")
logger.info("Dashboard head grpc address: %s:%s", self.ip, self.grpc_port)
|
def __init__(self, http_host, http_port, redis_address, redis_password, log_dir):
# NodeInfoGcsService
self._gcs_node_info_stub = None
self._gcs_rpc_error_counter = 0
# Public attributes are accessible for all head modules.
self.http_host = http_host
self.http_port = http_port
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.log_dir = log_dir
self.aioredis_client = None
self.aiogrpc_gcs_channel = None
self.http_session = None
self.ip = ray._private.services.get_node_ip_address()
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port("[::]:0")
logger.info("Dashboard head grpc address: %s:%s", self.ip, self.grpc_port)
logger.info("Dashboard head http address: %s:%s", self.http_host, self.http_port)
|
https://github.com/ray-project/ray/issues/13351
|
2021-01-07 03:57:35,395 WARNING worker.py:1044 -- The dashboard on node travis-job-a2c3f054-a588-45a5-b10a-2456ba486c28 failed with the following error:
Traceback (most recent call last):
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1073, in create_server
sock.bind(sa)
OSError: [Errno 98] Address already in use
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 197, in <module>
loop.run_until_complete(dashboard.run())
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 488, in run_until_complete
return future.result()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 100, in run
await self.dashboard_head.run()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/head.py", line 220, in run
*(m.run(self.server) for m in modules))
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web.py", line 369, in _run_app
await site.start()
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web_runner.py", line 103, in start
reuse_port=self._reuse_port)
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1077, in create_server
% (sa, err.strerror.lower()))
OSError: [Errno 98] error while attempting to bind on address ('127.0.0.1', 8265): address already in use
|
OSError
|
async def run(self):
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionError):
logger.error(
"Dashboard head exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Waiting for GCS is ready.
while True:
try:
gcs_address = await self.aioredis_client.get(
dashboard_consts.REDIS_KEY_GCS_SERVER_ADDRESS
)
if not gcs_address:
raise Exception("GCS address not found.")
logger.info("Connect to GCS at %s", gcs_address)
options = (("grpc.enable_http_proxy", 0),)
channel = aiogrpc.insecure_channel(gcs_address, options=options)
except Exception as ex:
logger.error("Connect to GCS failed: %s, retry...", ex)
await asyncio.sleep(dashboard_consts.CONNECT_GCS_INTERVAL_SECONDS)
else:
self.aiogrpc_gcs_channel = channel
break
# Create a NodeInfoGcsServiceStub.
self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
self.aiogrpc_gcs_channel
)
# Start a grpc asyncio server.
await self.server.start()
async def _async_notify():
"""Notify signals from queue."""
while True:
co = await dashboard_utils.NotifyQueue.get()
try:
await co
except Exception:
logger.exception(f"Error notifying coroutine {co}")
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
runner = aiohttp.web.AppRunner(app)
await runner.setup()
last_ex = None
for i in range(1 + self.http_port_retries):
try:
site = aiohttp.web.TCPSite(runner, self.http_host, self.http_port)
await site.start()
break
except OSError as e:
last_ex = e
self.http_port += 1
logger.warning("Try to use port %s: %s", self.http_port, e)
else:
raise Exception(
f"Failed to find a valid port for dashboard after "
f"{self.http_port_retries} retries: {last_ex}"
)
http_host, http_port, *_ = site._server.sockets[0].getsockname()
logger.info("Dashboard head http address: %s:%s", http_host, http_port)
# Write the dashboard head port to redis.
await self.aioredis_client.set(
ray_constants.REDIS_KEY_DASHBOARD, f"{http_host}:{http_port}"
)
await self.aioredis_client.set(
dashboard_consts.REDIS_KEY_DASHBOARD_RPC, f"{self.ip}:{self.grpc_port}"
)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Freeze signal after all modules loaded.
dashboard_utils.SignalManager.freeze()
concurrent_tasks = [
self._update_nodes(),
_async_notify(),
DataOrganizer.purge(),
DataOrganizer.organize(),
]
await asyncio.gather(*concurrent_tasks, *(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
|
async def run(self):
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionError):
logger.error(
"Dashboard head exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Waiting for GCS is ready.
while True:
try:
gcs_address = await self.aioredis_client.get(
dashboard_consts.REDIS_KEY_GCS_SERVER_ADDRESS
)
if not gcs_address:
raise Exception("GCS address not found.")
logger.info("Connect to GCS at %s", gcs_address)
options = (("grpc.enable_http_proxy", 0),)
channel = aiogrpc.insecure_channel(gcs_address, options=options)
except Exception as ex:
logger.error("Connect to GCS failed: %s, retry...", ex)
await asyncio.sleep(dashboard_consts.CONNECT_GCS_INTERVAL_SECONDS)
else:
self.aiogrpc_gcs_channel = channel
break
# Create a NodeInfoGcsServiceStub.
self._gcs_node_info_stub = gcs_service_pb2_grpc.NodeInfoGcsServiceStub(
self.aiogrpc_gcs_channel
)
# Start a grpc asyncio server.
await self.server.start()
# Write the dashboard head port to redis.
await self.aioredis_client.set(
dashboard_consts.REDIS_KEY_DASHBOARD, self.ip + ":" + str(self.http_port)
)
await self.aioredis_client.set(
dashboard_consts.REDIS_KEY_DASHBOARD_RPC, self.ip + ":" + str(self.grpc_port)
)
async def _async_notify():
"""Notify signals from queue."""
while True:
co = await dashboard_utils.NotifyQueue.get()
try:
await co
except Exception:
logger.exception(f"Error notifying coroutine {co}")
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
web_server = aiohttp.web._run_app(app, host=self.http_host, port=self.http_port)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Freeze signal after all modules loaded.
dashboard_utils.SignalManager.freeze()
concurrent_tasks = [
self._update_nodes(),
_async_notify(),
DataOrganizer.purge(),
DataOrganizer.organize(),
web_server,
]
await asyncio.gather(*concurrent_tasks, *(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
|
https://github.com/ray-project/ray/issues/13351
|
2021-01-07 03:57:35,395 WARNING worker.py:1044 -- The dashboard on node travis-job-a2c3f054-a588-45a5-b10a-2456ba486c28 failed with the following error:
Traceback (most recent call last):
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1073, in create_server
sock.bind(sa)
OSError: [Errno 98] Address already in use
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 197, in <module>
loop.run_until_complete(dashboard.run())
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 488, in run_until_complete
return future.result()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 100, in run
await self.dashboard_head.run()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/head.py", line 220, in run
*(m.run(self.server) for m in modules))
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web.py", line 369, in _run_app
await site.start()
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web_runner.py", line 103, in start
reuse_port=self._reuse_port)
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1077, in create_server
% (sa, err.strerror.lower()))
OSError: [Errno 98] error while attempting to bind on address ('127.0.0.1', 8265): address already in use
|
OSError
|
def start_dashboard(
require_dashboard,
host,
redis_address,
temp_dir,
logdir,
port=ray_constants.DEFAULT_DASHBOARD_PORT,
stdout_file=None,
stderr_file=None,
redis_password=None,
fate_share=None,
max_bytes=0,
backup_count=0,
):
"""Start a dashboard process.
Args:
require_dashboard (bool): If true, this will raise an exception if we
fail to start the dashboard. Otherwise it will print a warning if
we fail to start the dashboard.
host (str): The host to bind the dashboard web server to.
port (str): The port to bind the dashboard web server to.
Defaults to 8265.
redis_address (str): The address of the Redis instance.
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
logdir (str): The log directory used to generate dashboard log.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
max_bytes (int): Log rotation parameter. Corresponding to
RotatingFileHandler's maxBytes.
backup_count (int): Log rotation parameter. Corresponding to
RotatingFileHandler's backupCount.
Returns:
ProcessInfo for the process that was started.
"""
port_retries = 10
if port != ray_constants.DEFAULT_DASHBOARD_PORT:
port_test_socket = socket.socket()
port_test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
except socket.error:
raise ValueError(f"The given dashboard port {port} is already in use")
port_retries = 0
dashboard_dir = "new_dashboard"
dashboard_filepath = os.path.join(RAY_PATH, dashboard_dir, "dashboard.py")
command = [
sys.executable,
"-u",
dashboard_filepath,
f"--host={host}",
f"--port={port}",
f"--port-retries={port_retries}",
f"--redis-address={redis_address}",
f"--temp-dir={temp_dir}",
f"--log-dir={logdir}",
f"--logging-rotate-bytes={max_bytes}",
f"--logging-rotate-backup-count={backup_count}",
]
if redis_password:
command += ["--redis-password", redis_password]
dashboard_dependencies_present = True
try:
import aiohttp # noqa: F401
import grpc # noqa: F401
except ImportError:
dashboard_dependencies_present = False
warning_message = (
"Failed to start the dashboard. The dashboard requires Python 3 "
"as well as 'pip install aiohttp grpcio'."
)
if require_dashboard:
raise ImportError(warning_message)
else:
logger.warning(warning_message)
if dashboard_dependencies_present:
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_DASHBOARD,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share,
)
redis_client = ray._private.services.create_redis_client(
redis_address, redis_password
)
dashboard_url = None
dashboard_returncode = None
for _ in range(20):
dashboard_url = redis_client.get(ray_constants.REDIS_KEY_DASHBOARD)
if dashboard_url is not None:
dashboard_url = dashboard_url.decode("utf-8")
break
dashboard_returncode = process_info.process.poll()
if dashboard_returncode is not None:
break
time.sleep(1)
if dashboard_url is None:
dashboard_log = os.path.join(logdir, "dashboard.log")
returncode_str = (
f", return code {dashboard_returncode}"
if dashboard_returncode is not None
else ""
)
# Read last n lines of dashboard log. The log file may be large.
n = 10
lines = []
try:
with open(dashboard_log, "rb") as f:
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
end = mm.size()
for _ in range(n):
sep = mm.rfind(b"\n", 0, end - 1)
if sep == -1:
break
lines.append(mm[sep + 1 : end].decode("utf-8"))
end = sep
lines.append(f" The last {n} lines of {dashboard_log}:")
except Exception:
pass
last_log_str = "\n".join(reversed(lines[-n:]))
raise Exception(
f"Failed to start the dashboard{returncode_str}.{last_log_str}"
)
logger.info(
"View the Ray dashboard at %s%shttp://%s%s%s",
colorama.Style.BRIGHT,
colorama.Fore.GREEN,
dashboard_url,
colorama.Fore.RESET,
colorama.Style.NORMAL,
)
return dashboard_url, process_info
else:
return None, None
|
def start_dashboard(
require_dashboard,
host,
redis_address,
temp_dir,
logdir,
port=ray_constants.DEFAULT_DASHBOARD_PORT,
stdout_file=None,
stderr_file=None,
redis_password=None,
fate_share=None,
max_bytes=0,
backup_count=0,
):
"""Start a dashboard process.
Args:
require_dashboard (bool): If true, this will raise an exception if we
fail to start the dashboard. Otherwise it will print a warning if
we fail to start the dashboard.
host (str): The host to bind the dashboard web server to.
port (str): The port to bind the dashboard web server to.
Defaults to 8265.
redis_address (str): The address of the Redis instance.
temp_dir (str): The temporary directory used for log files and
information for this Ray session.
logdir (str): The log directory used to generate dashboard log.
stdout_file: A file handle opened for writing to redirect stdout to. If
no redirection should happen, then this should be None.
stderr_file: A file handle opened for writing to redirect stderr to. If
no redirection should happen, then this should be None.
redis_password (str): The password of the redis server.
max_bytes (int): Log rotation parameter. Corresponding to
RotatingFileHandler's maxBytes.
backup_count (int): Log rotation parameter. Corresponding to
RotatingFileHandler's backupCount.
Returns:
ProcessInfo for the process that was started.
"""
port_test_socket = socket.socket()
port_test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if port == ray_constants.DEFAULT_DASHBOARD_PORT:
while True:
try:
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
break
except socket.error:
port += 1
else:
try:
port_test_socket.bind(("127.0.0.1", port))
port_test_socket.close()
except socket.error:
raise ValueError(f"The given dashboard port {port} is already in use")
dashboard_dir = "new_dashboard"
dashboard_filepath = os.path.join(RAY_PATH, dashboard_dir, "dashboard.py")
command = [
sys.executable,
"-u",
dashboard_filepath,
f"--host={host}",
f"--port={port}",
f"--redis-address={redis_address}",
f"--temp-dir={temp_dir}",
f"--log-dir={logdir}",
f"--logging-rotate-bytes={max_bytes}",
f"--logging-rotate-backup-count={backup_count}",
]
if redis_password:
command += ["--redis-password", redis_password]
dashboard_dependencies_present = True
try:
import aiohttp # noqa: F401
import grpc # noqa: F401
except ImportError:
dashboard_dependencies_present = False
warning_message = (
"Failed to start the dashboard. The dashboard requires Python 3 "
"as well as 'pip install aiohttp grpcio'."
)
if require_dashboard:
raise ImportError(warning_message)
else:
logger.warning(warning_message)
if dashboard_dependencies_present:
process_info = start_ray_process(
command,
ray_constants.PROCESS_TYPE_DASHBOARD,
stdout_file=stdout_file,
stderr_file=stderr_file,
fate_share=fate_share,
)
dashboard_url = f"{host if host != '0.0.0.0' else get_node_ip_address()}:{port}"
logger.info(
"View the Ray dashboard at {}{}http://{}{}{}".format(
colorama.Style.BRIGHT,
colorama.Fore.GREEN,
dashboard_url,
colorama.Fore.RESET,
colorama.Style.NORMAL,
)
)
return dashboard_url, process_info
else:
return None, None
|
https://github.com/ray-project/ray/issues/13351
|
2021-01-07 03:57:35,395 WARNING worker.py:1044 -- The dashboard on node travis-job-a2c3f054-a588-45a5-b10a-2456ba486c28 failed with the following error:
Traceback (most recent call last):
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1073, in create_server
sock.bind(sa)
OSError: [Errno 98] Address already in use
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 197, in <module>
loop.run_until_complete(dashboard.run())
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 488, in run_until_complete
return future.result()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/dashboard.py", line 100, in run
await self.dashboard_head.run()
File "/home/travis/build/ray-project/ray/python/ray/new_dashboard/head.py", line 220, in run
*(m.run(self.server) for m in modules))
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web.py", line 369, in _run_app
await site.start()
File "/opt/miniconda/lib/python3.6/site-packages/aiohttp/web_runner.py", line 103, in start
reuse_port=self._reuse_port)
File "/opt/miniconda/lib/python3.6/asyncio/base_events.py", line 1077, in create_server
% (sa, err.strerror.lower()))
OSError: [Errno 98] error while attempting to bind on address ('127.0.0.1', 8265): address already in use
|
OSError
|
def learn_on_batch(self, samples: SampleBatchType) -> dict:
"""Update policies based on the given batch.
This is the equivalent to apply_gradients(compute_gradients(samples)),
but can be optimized to avoid pulling gradients into CPU memory.
Returns:
info: dictionary of extra metadata from compute_gradients().
Examples:
>>> batch = worker.sample()
>>> worker.learn_on_batch(samples)
"""
if log_once("learn_on_batch"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize(samples)
)
)
if isinstance(samples, MultiAgentBatch):
info_out = {}
to_fetch = {}
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "learn_on_batch")
else:
builder = None
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
# Decompress SampleBatch, in case some columns are compressed.
batch.decompress_if_needed()
policy = self.policy_map[pid]
if builder and hasattr(policy, "_build_learn_on_batch"):
to_fetch[pid] = policy._build_learn_on_batch(builder, batch)
else:
info_out[pid] = policy.learn_on_batch(batch)
info_out.update({k: builder.get(v) for k, v in to_fetch.items()})
else:
info_out = {
DEFAULT_POLICY_ID: self.policy_map[DEFAULT_POLICY_ID].learn_on_batch(
samples
)
}
if log_once("learn_out"):
logger.debug("Training out:\n\n{}\n".format(summarize(info_out)))
return info_out
|
def learn_on_batch(self, samples: SampleBatchType) -> dict:
"""Update policies based on the given batch.
This is the equivalent to apply_gradients(compute_gradients(samples)),
but can be optimized to avoid pulling gradients into CPU memory.
Returns:
info: dictionary of extra metadata from compute_gradients().
Examples:
>>> batch = worker.sample()
>>> worker.learn_on_batch(samples)
"""
if log_once("learn_on_batch"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize(samples)
)
)
if isinstance(samples, MultiAgentBatch):
info_out = {}
to_fetch = {}
if self.tf_sess is not None:
builder = TFRunBuilder(self.tf_sess, "learn_on_batch")
else:
builder = None
for pid, batch in samples.policy_batches.items():
if pid not in self.policies_to_train:
continue
policy = self.policy_map[pid]
if builder and hasattr(policy, "_build_learn_on_batch"):
to_fetch[pid] = policy._build_learn_on_batch(builder, batch)
else:
info_out[pid] = policy.learn_on_batch(batch)
info_out.update({k: builder.get(v) for k, v in to_fetch.items()})
else:
info_out = {
DEFAULT_POLICY_ID: self.policy_map[DEFAULT_POLICY_ID].learn_on_batch(
samples
)
}
if log_once("learn_out"):
logger.debug("Training out:\n\n{}\n".format(summarize(info_out)))
return info_out
|
https://github.com/ray-project/ray/issues/13824
|
Traceback (most recent call last):
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 519, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/tune/ray_trial_executor.py", line 497, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/worker.py", line 1379, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::PPO.train() (pid=50904, ip=129.2.189.150)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 415, in ray._raylet.execute_task.function_executor
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 508, in train
raise e
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 494, in train
result = Trainable.train(self)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/tune/trainable.py", line 183, in train
result = self.step()
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/agents/trainer_template.py", line 147, in step
res = next(self.train_exec_impl)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 843, in apply_filter
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 843, in apply_filter
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/execution/train_ops.py", line 191, in __call__
[tuples[k] for k in state_keys]))
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 218, in load_data
sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
File "/home/justinkterry/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 950, in run
run_metadata_ptr)
File "/home/justinkterry/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1142, in _run
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
File "/home/justinkterry/.local/lib/python3.6/site-packages/numpy/core/_asarray.py", line 83, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: could not convert string to float: 'BCJNGGhAv0cAAAAAAAA71wUAAGGABZW0RwABAPMZjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWQC8ATwAAgD8EAP///////////////////////7u/jpNnPxXgcz+cPX/IEjIx4N1dBAB/QGxlP+DKe1QALgBMAAQEAH9+LGI/Rnx9VAAqBEwABQgAL6lqUAAqOeHdXQQAf9QcRz/SPnVUACb/CNgYWj/0OTo/FXIRP267Dz+4oiY/vgVuUAAmsXaAEz9aXzM/OSdcmAEA+AAv7+5QACcAQAAMBAA/1LlwoAAmCDwBBAwAP6kteVAAJgycAD/OZGNMACYA2BUIUAB/I8BeP/YheFAAKgTkAH9Tw14/m3FyTAAStRDyfj+/aGY/62VjBADxCKqHYz9zSng/57RaP288TT8VSlw/LiljBAB/wjJmP1DuflwABuKGTlQ/uqsDP6mYtT6KiwQA8QTfgdg+CT8yP8McMT+IO90+zzy1BAD5BK8gtT5mRAE/nj9UP4SYbj9x7GIEAAAUAPEAHKt+P6WfWT9qWUI/o4g/BADxBL/LTT+shHQ/5Sp3P0/iTj+f2T8EAPEI1qxCP8yPVj/9tXA/U5AZP6rsuz4U7rQEALGZsLc+0TMZPxzxcbwANTqOY7QCMahPehQAAJQBAAwABEgBACQAABQA8QB4+n8/x4loP5uwSD+bKkAEAHFCsUg/P0hlHAAAJAAALAAEOAAARAAAFAAPBAD///////////////////////+/r8GFaD/j7XI/NnvwEwcP5BIZBPAUdaxzYT/wdnZ0Ew9EABkILAAEUAAACABvfApeP2BLHBcnAFgAAEgACAQAfx0VYD+1g3yoACYEEBQICAAvCkYgFy//ANBTVj+14jU/TAQSP3O0IqAAJvEEOEYRP2+7Dz9+RRc/mbY3PwOVWxQVAAwBDwQAISYXUwgYBEgBP5O2XqAAJgg4AQQMAD+CTmZQACYElAAICAAvK3soFxMPEAEBCEgAAAwAb3BNZD+w+iwDJwTkAAAIAG9pgGE/OhXkGisAlAAABABvC/FpPywD3BoT4rtVaT/fSSk/3UsXP/xSBADwBd8MHj9Ep1E/rZQ6P6oOAj87bvI+BAC//F/yPq7jET+0uF+UAQbi95VoP4uhGj/sPO8+biAEAPEESaUCP0N5TT+sL18/1WcgP27EDwQA8QhQrBA/RiMsP6c8az9KYTs/AQsXP8iKFgQAAAwAABQA8QCJAnk/tMh/P3xgYz+Ctl0EAGJrCHo/ou2IFwB8FwBAAQAEAACUFwC4AfEACul4P1VRNT9FGPY+oBXwBABxhN3xPkqzMRwAACQAACwAADQAAAQAAEQAABQADwQA////////////////////////b2+Z13U/l4FAFCcMsBIAzBJ/BZxfP+wyboQvMgBMFAQEAACELy86SKwAKwRMAAQIAC9JFzAvKwRIAAgIACLtuuATDBwBDxAAEQBAAQQEAL94J1o/r5E6P0klP+wUBg9UAA0EtBcAfC+v13ETP58HMz/hyHAXJwi0FwT0AADIFA+IAA0PIAAFBEQACAgAP2gIafAAJgQ8AQgIAD9MfXNQACYImAAADABvm3JiP3/3hAInAEQABAQAAIAvLygUHDAnAFQBBOgAAJgAL/gQTAArAFAA/wxEbU0/R6ouP1HyMz/ACjU/6AM1PyhPQD+nuHJEAQYAnC8ApC8EsC8ACADxBGRLYz+8OHg/VoNBP2Vu7D7JEssEAL/5A8s+WoIIPyo6X1AABqKeP1Q/kqMDP05KtC8AvC/xBJJT2D6jKzI/SWZ/P/tQbz+9slcEAHGEalw/cTV1pBcArBcAtBcMwBe1TMB3P8rdWT9lrUK4L3H7G04/yql0hAAAfBcESAEEzAIApBcPwBchMZSMBXBH8RaUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////BQDwBEsAdJRiTdARhZSMAUOUdJRSlC4AAAAA'
Result for PPO_pistonball_v3_f4d39_00000:
{}
|
ValueError
|
def __call__(self, samples: SampleBatchType) -> (SampleBatchType, List[dict]):
_check_sample_batch_type(samples)
# Handle everything as if multiagent
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({DEFAULT_POLICY_ID: samples}, samples.count)
metrics = _get_shared_metrics()
load_timer = metrics.timers[LOAD_BATCH_TIMER]
learn_timer = metrics.timers[LEARN_ON_BATCH_TIMER]
with load_timer:
# (1) Load data into GPUs.
num_loaded_tuples = {}
for policy_id, batch in samples.policy_batches.items():
# Not a policy-to-train.
if policy_id not in self.policies:
continue
# Decompress SampleBatch, in case some columns are compressed.
batch.decompress_if_needed()
policy = self.workers.local_worker().get_policy(policy_id)
policy._debug_vars()
tuples = policy._get_loss_inputs_dict(batch, shuffle=self.shuffle_sequences)
data_keys = list(policy._loss_input_dict_no_rnn.values())
if policy._state_inputs:
state_keys = policy._state_inputs + [policy._seq_lens]
else:
state_keys = []
num_loaded_tuples[policy_id] = self.optimizers[policy_id].load_data(
self.sess,
[tuples[k] for k in data_keys],
[tuples[k] for k in state_keys],
)
with learn_timer:
# (2) Execute minibatch SGD on loaded data.
fetches = {}
for policy_id, tuples_per_device in num_loaded_tuples.items():
optimizer = self.optimizers[policy_id]
num_batches = max(
1, int(tuples_per_device) // int(self.per_device_batch_size)
)
logger.debug("== sgd epochs for {} ==".format(policy_id))
for i in range(self.num_sgd_iter):
iter_extra_fetches = defaultdict(list)
permutation = np.random.permutation(num_batches)
for batch_index in range(num_batches):
batch_fetches = optimizer.optimize(
self.sess, permutation[batch_index] * self.per_device_batch_size
)
for k, v in batch_fetches[LEARNER_STATS_KEY].items():
iter_extra_fetches[k].append(v)
if logger.getEffectiveLevel() <= logging.DEBUG:
avg = averaged(iter_extra_fetches)
logger.debug("{} {}".format(i, avg))
fetches[policy_id] = averaged(iter_extra_fetches, axis=0)
load_timer.push_units_processed(samples.count)
learn_timer.push_units_processed(samples.count)
metrics.counters[STEPS_TRAINED_COUNTER] += samples.count
metrics.info[LEARNER_INFO] = fetches
if self.workers.remote_workers():
with metrics.timers[WORKER_UPDATE_TIMER]:
weights = ray.put(self.workers.local_worker().get_weights(self.policies))
for e in self.workers.remote_workers():
e.set_weights.remote(weights, _get_global_vars())
# Also update global vars of the local worker.
self.workers.local_worker().set_global_vars(_get_global_vars())
return samples, fetches
|
def __call__(self, samples: SampleBatchType) -> (SampleBatchType, List[dict]):
_check_sample_batch_type(samples)
# Handle everything as if multiagent
if isinstance(samples, SampleBatch):
samples = MultiAgentBatch({DEFAULT_POLICY_ID: samples}, samples.count)
metrics = _get_shared_metrics()
load_timer = metrics.timers[LOAD_BATCH_TIMER]
learn_timer = metrics.timers[LEARN_ON_BATCH_TIMER]
with load_timer:
# (1) Load data into GPUs.
num_loaded_tuples = {}
for policy_id, batch in samples.policy_batches.items():
if policy_id not in self.policies:
continue
policy = self.workers.local_worker().get_policy(policy_id)
policy._debug_vars()
tuples = policy._get_loss_inputs_dict(batch, shuffle=self.shuffle_sequences)
data_keys = list(policy._loss_input_dict_no_rnn.values())
if policy._state_inputs:
state_keys = policy._state_inputs + [policy._seq_lens]
else:
state_keys = []
num_loaded_tuples[policy_id] = self.optimizers[policy_id].load_data(
self.sess,
[tuples[k] for k in data_keys],
[tuples[k] for k in state_keys],
)
with learn_timer:
# (2) Execute minibatch SGD on loaded data.
fetches = {}
for policy_id, tuples_per_device in num_loaded_tuples.items():
optimizer = self.optimizers[policy_id]
num_batches = max(
1, int(tuples_per_device) // int(self.per_device_batch_size)
)
logger.debug("== sgd epochs for {} ==".format(policy_id))
for i in range(self.num_sgd_iter):
iter_extra_fetches = defaultdict(list)
permutation = np.random.permutation(num_batches)
for batch_index in range(num_batches):
batch_fetches = optimizer.optimize(
self.sess, permutation[batch_index] * self.per_device_batch_size
)
for k, v in batch_fetches[LEARNER_STATS_KEY].items():
iter_extra_fetches[k].append(v)
if logger.getEffectiveLevel() <= logging.DEBUG:
avg = averaged(iter_extra_fetches)
logger.debug("{} {}".format(i, avg))
fetches[policy_id] = averaged(iter_extra_fetches, axis=0)
load_timer.push_units_processed(samples.count)
learn_timer.push_units_processed(samples.count)
metrics.counters[STEPS_TRAINED_COUNTER] += samples.count
metrics.info[LEARNER_INFO] = fetches
if self.workers.remote_workers():
with metrics.timers[WORKER_UPDATE_TIMER]:
weights = ray.put(self.workers.local_worker().get_weights(self.policies))
for e in self.workers.remote_workers():
e.set_weights.remote(weights, _get_global_vars())
# Also update global vars of the local worker.
self.workers.local_worker().set_global_vars(_get_global_vars())
return samples, fetches
|
https://github.com/ray-project/ray/issues/13824
|
Traceback (most recent call last):
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 519, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/tune/ray_trial_executor.py", line 497, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/worker.py", line 1379, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(ValueError): ray::PPO.train() (pid=50904, ip=129.2.189.150)
File "python/ray/_raylet.pyx", line 463, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 415, in ray._raylet.execute_task.function_executor
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 508, in train
raise e
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/agents/trainer.py", line 494, in train
result = Trainable.train(self)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/tune/trainable.py", line 183, in train
result = self.step()
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/agents/trainer_template.py", line 147, in step
res = next(self.train_exec_impl)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 843, in apply_filter
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 843, in apply_filter
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/execution/train_ops.py", line 191, in __call__
[tuples[k] for k in state_keys]))
File "/home/justinkterry/.local/lib/python3.6/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 218, in load_data
sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
File "/home/justinkterry/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 950, in run
run_metadata_ptr)
File "/home/justinkterry/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1142, in _run
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
File "/home/justinkterry/.local/lib/python3.6/site-packages/numpy/core/_asarray.py", line 83, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: could not convert string to float: 'BCJNGGhAv0cAAAAAAAA71wUAAGGABZW0RwABAPMZjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWQC8ATwAAgD8EAP///////////////////////7u/jpNnPxXgcz+cPX/IEjIx4N1dBAB/QGxlP+DKe1QALgBMAAQEAH9+LGI/Rnx9VAAqBEwABQgAL6lqUAAqOeHdXQQAf9QcRz/SPnVUACb/CNgYWj/0OTo/FXIRP267Dz+4oiY/vgVuUAAmsXaAEz9aXzM/OSdcmAEA+AAv7+5QACcAQAAMBAA/1LlwoAAmCDwBBAwAP6kteVAAJgycAD/OZGNMACYA2BUIUAB/I8BeP/YheFAAKgTkAH9Tw14/m3FyTAAStRDyfj+/aGY/62VjBADxCKqHYz9zSng/57RaP288TT8VSlw/LiljBAB/wjJmP1DuflwABuKGTlQ/uqsDP6mYtT6KiwQA8QTfgdg+CT8yP8McMT+IO90+zzy1BAD5BK8gtT5mRAE/nj9UP4SYbj9x7GIEAAAUAPEAHKt+P6WfWT9qWUI/o4g/BADxBL/LTT+shHQ/5Sp3P0/iTj+f2T8EAPEI1qxCP8yPVj/9tXA/U5AZP6rsuz4U7rQEALGZsLc+0TMZPxzxcbwANTqOY7QCMahPehQAAJQBAAwABEgBACQAABQA8QB4+n8/x4loP5uwSD+bKkAEAHFCsUg/P0hlHAAAJAAALAAEOAAARAAAFAAPBAD///////////////////////+/r8GFaD/j7XI/NnvwEwcP5BIZBPAUdaxzYT/wdnZ0Ew9EABkILAAEUAAACABvfApeP2BLHBcnAFgAAEgACAQAfx0VYD+1g3yoACYEEBQICAAvCkYgFy//ANBTVj+14jU/TAQSP3O0IqAAJvEEOEYRP2+7Dz9+RRc/mbY3PwOVWxQVAAwBDwQAISYXUwgYBEgBP5O2XqAAJgg4AQQMAD+CTmZQACYElAAICAAvK3soFxMPEAEBCEgAAAwAb3BNZD+w+iwDJwTkAAAIAG9pgGE/OhXkGisAlAAABABvC/FpPywD3BoT4rtVaT/fSSk/3UsXP/xSBADwBd8MHj9Ep1E/rZQ6P6oOAj87bvI+BAC//F/yPq7jET+0uF+UAQbi95VoP4uhGj/sPO8+biAEAPEESaUCP0N5TT+sL18/1WcgP27EDwQA8QhQrBA/RiMsP6c8az9KYTs/AQsXP8iKFgQAAAwAABQA8QCJAnk/tMh/P3xgYz+Ctl0EAGJrCHo/ou2IFwB8FwBAAQAEAACUFwC4AfEACul4P1VRNT9FGPY+oBXwBABxhN3xPkqzMRwAACQAACwAADQAAAQAAEQAABQADwQA////////////////////////b2+Z13U/l4FAFCcMsBIAzBJ/BZxfP+wyboQvMgBMFAQEAACELy86SKwAKwRMAAQIAC9JFzAvKwRIAAgIACLtuuATDBwBDxAAEQBAAQQEAL94J1o/r5E6P0klP+wUBg9UAA0EtBcAfC+v13ETP58HMz/hyHAXJwi0FwT0AADIFA+IAA0PIAAFBEQACAgAP2gIafAAJgQ8AQgIAD9MfXNQACYImAAADABvm3JiP3/3hAInAEQABAQAAIAvLygUHDAnAFQBBOgAAJgAL/gQTAArAFAA/wxEbU0/R6ouP1HyMz/ACjU/6AM1PyhPQD+nuHJEAQYAnC8ApC8EsC8ACADxBGRLYz+8OHg/VoNBP2Vu7D7JEssEAL/5A8s+WoIIPyo6X1AABqKeP1Q/kqMDP05KtC8AvC/xBJJT2D6jKzI/SWZ/P/tQbz+9slcEAHGEalw/cTV1pBcArBcAtBcMwBe1TMB3P8rdWT9lrUK4L3H7G04/yql0hAAAfBcESAEEzAIApBcPwBchMZSMBXBH8RaUjAVkdHlwZZSTlIwCZjSUiYiHlFKUKEsDjAE8lE5OTkr/////BQDwBEsAdJRiTdARhZSMAUOUdJRSlC4AAAAA'
Result for PPO_pistonball_v3_f4d39_00000:
{}
|
ValueError
|
def _get(self, ref: ClientObjectRef, timeout: float):
req = ray_client_pb2.GetRequest(id=ref.id, timeout=timeout)
try:
data = self.data_client.GetObject(req)
except grpc.RpcError as e:
raise e.details()
if not data.valid:
try:
err = cloudpickle.loads(data.error)
except pickle.UnpicklingError:
logger.exception("Failed to deserialize {}".format(data.error))
raise
logger.error(err)
raise err
return loads_from_server(data.data)
|
def _get(self, ref: ClientObjectRef, timeout: float):
req = ray_client_pb2.GetRequest(id=ref.id, timeout=timeout)
try:
data = self.data_client.GetObject(req)
except grpc.RpcError as e:
raise e.details()
if not data.valid:
try:
err = cloudpickle.loads(data.error)
except Exception:
logger.exception("Failed to deserialize {}".format(data.error))
raise
logger.error(err)
raise err
return loads_from_server(data.data)
|
https://github.com/ray-project/ray/issues/14161
|
Failed to deserialize b"\x80\x05\x95\xf1\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\nValueError\x94\x93\x94\x8c\xcfFailed to look up actor with name 'abc'. You are either trying to look up a named actor you didn't create, the named actor died, or the actor hasn't been created because named actor creation is asynchronous.\x94\x85\x94R\x94."
Traceback (most recent call last):
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 275, in _call_schedule_for_task
raise cloudpickle.loads(ticket.error)
ValueError: Failed to look up actor with name 'abc'. You are either trying to look up a named actor you didn't create, the named actor died, or the actor hasn't been created because named actor creation is asynchronous.
|
ValueError
|
def _call_schedule_for_task(self, task: ray_client_pb2.ClientTask) -> List[bytes]:
logger.debug("Scheduling %s" % task)
task.client_id = self._client_id
try:
ticket = self.server.Schedule(task, metadata=self.metadata)
except grpc.RpcError as e:
raise decode_exception(e.details)
if not ticket.valid:
try:
raise cloudpickle.loads(ticket.error)
except pickle.UnpicklingError:
logger.exception("Failed to deserialize {}".format(ticket.error))
raise
return ticket.return_ids
|
def _call_schedule_for_task(self, task: ray_client_pb2.ClientTask) -> List[bytes]:
logger.debug("Scheduling %s" % task)
task.client_id = self._client_id
try:
ticket = self.server.Schedule(task, metadata=self.metadata)
except grpc.RpcError as e:
raise decode_exception(e.details)
if not ticket.valid:
try:
raise cloudpickle.loads(ticket.error)
except Exception:
logger.exception("Failed to deserialize {}".format(ticket.error))
raise
return ticket.return_ids
|
https://github.com/ray-project/ray/issues/14161
|
Failed to deserialize b"\x80\x05\x95\xf1\x00\x00\x00\x00\x00\x00\x00\x8c\x08builtins\x94\x8c\nValueError\x94\x93\x94\x8c\xcfFailed to look up actor with name 'abc'. You are either trying to look up a named actor you didn't create, the named actor died, or the actor hasn't been created because named actor creation is asynchronous.\x94\x85\x94R\x94."
Traceback (most recent call last):
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 275, in _call_schedule_for_task
raise cloudpickle.loads(ticket.error)
ValueError: Failed to look up actor with name 'abc'. You are either trying to look up a named actor you didn't create, the named actor died, or the actor hasn't been created because named actor creation is asynchronous.
|
ValueError
|
def normalize(data, wrt):
"""Normalize data to be in range (0,1), with respect to (wrt) boundaries,
which can be specified.
"""
return (data - np.min(wrt, axis=0)) / (
np.max(wrt, axis=0) - np.min(wrt, axis=0) + 1e-8
)
|
def normalize(data, wrt):
"""Normalize data to be in range (0,1), with respect to (wrt) boundaries,
which can be specified.
"""
return (data - np.min(wrt, axis=0)) / (np.max(wrt, axis=0) - np.min(wrt, axis=0))
|
https://github.com/ray-project/ray/issues/14069
|
Traceback (most recent call last):
File "./tune_pb2.py", line 303, in <module>
raise_on_failed_trial=False)
File "/home/john/anaconda3/envs/python3.7/lib/python3.7/site-packages/ray/tune/tune.py", line 411, in run
runner.step()
File "/home/john/anaconda3/envs/python3.7/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 572, in step
self.trial_executor.on_no_available_trials(self)
File "/home/john/anaconda3/envs/python3.7/lib/python3.7/site-packages/ray/tune/trial_executor.py", line 183, in on_no_available_trials
raise TuneError("There are paused trials, but no more pending "
ray.tune.error.TuneError: There are paused trials, but no more pending trials with sufficient resources.
|
ray.tune.error.TuneError
|
def on_step_begin(self, **info):
import click
from ray.autoscaler._private.commands import kill_node
failures = 0
max_failures = 3
# With 10% probability inject failure to a worker.
if random.random() < self.probability and not self.disable:
# With 10% probability fully terminate the node.
should_terminate = random.random() < self.probability
while failures < max_failures:
try:
kill_node(
self.config_path,
yes=True,
hard=should_terminate,
override_cluster_name=None,
)
except click.exceptions.ClickException:
failures += 1
logger.exception(
"Killing random node failed in attempt "
"{}. "
"Retrying {} more times".format(
str(failures), str(max_failures - failures)
)
)
|
def on_step_begin(self, **info):
from ray.autoscaler._private.commands import kill_node
# With 10% probability inject failure to a worker.
if random.random() < self.probability and not self.disable:
# With 10% probability fully terminate the node.
should_terminate = random.random() < self.probability
kill_node(
self.config_path,
yes=True,
hard=should_terminate,
override_cluster_name=None,
)
|
https://github.com/ray-project/ray/issues/13923
|
2021-02-04 17:58:07,590 INFO commands.py:283 -- Checking AWS environment settings
2021-02-04 17:58:08,874 INFO commands.py:431 -- A random node will be killed. Confirm [y/N]: y [automatic, due to --yes]
2021-02-04 17:58:09,027 INFO commands.py:441 -- Shutdown i-03aa1f3b86602ada0
2021-02-04 17:58:09,028 INFO command_runner.py:356 -- Fetched IP: 52.36.104.14
2021-02-04 17:58:09,028 INFO log_timer.py:27 -- NodeUpdater: i-03aa1f3b86602ada0: Got IP [LogTimer=0ms]
Warning: Permanently added '52.36.104.14' (ECDSA) to the list of known hosts.
Error: No such container: ray_container
Shared connection to 52.36.104.14 closed.
2021-02-04 17:59:20,400 WARNING util.py:152 -- The `callbacks.on_step_begin` operation took 72.837 s, which may be a performance bottleneck.
Traceback (most recent call last):
File "/home/ray/pytorch_pbt_failure.py", line 136, in <module>
stop={"training_iteration": 1} if args.smoke_test else None)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/tune.py", line 421, in run
runner.step()
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 360, in step
iteration=self._iteration, trials=self._trials)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/callback.py", line 172, in on_step_begin
callback.on_step_begin(**info)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/tune/utils/mock.py", line 122, in on_step_begin
override_cluster_name=None)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 460, in kill_node
_exec(updater, "ray stop", False, False)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 912, in _exec
shutdown_after_run=shutdown_after_run)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 627, in run
ssh_options_override_ssh_key=ssh_options_override_ssh_key)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 519, in run
final_cmd, with_output, exit_on_fail, silent=silent)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 445, in _run_helper
"Command failed:\n\n {}\n".format(joined_cmd)) from None
click.exceptions.ClickException: Command failed:
ssh -tt -i ~/ray_bootstrap_key.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_070dd72385/3d9ed41da7/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@52.36.104.14 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (docker exec -it ray_container /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (ray stop)'"'"'"'"'"'"'"'"''"'"' )'
|
click.exceptions.ClickException
|
def _check_ami(config):
"""Provide helpful message for missing ImageId for node configuration."""
_set_config_info(head_ami_src="config", workers_ami_src="config")
region = config["provider"]["region"]
default_ami = DEFAULT_AMI.get(region)
if not default_ami:
# If we do not provide a default AMI for the given region, noop.
return
head_ami = config["head_node"].get("ImageId", "").lower()
if head_ami in ["", "latest_dlami"]:
config["head_node"]["ImageId"] = default_ami
_set_config_info(head_ami_src="dlami")
worker_ami = config["worker_nodes"].get("ImageId", "").lower()
if worker_ami in ["", "latest_dlami"]:
config["worker_nodes"]["ImageId"] = default_ami
_set_config_info(workers_ami_src="dlami")
|
def _check_ami(config):
"""Provide helpful message for missing ImageId for node configuration."""
_set_config_info(head_ami_src="config", workers_ami_src="config")
region = config["provider"]["region"]
default_ami = DEFAULT_AMI.get(region)
if not default_ami:
# If we do not provide a default AMI for the given region, noop.
return
if config["head_node"].get("ImageId", "").lower() == "latest_dlami":
config["head_node"]["ImageId"] = default_ami
_set_config_info(head_ami_src="dlami")
if config["worker_nodes"].get("ImageId", "").lower() == "latest_dlami":
config["worker_nodes"]["ImageId"] = default_ami
_set_config_info(workers_ami_src="dlami")
|
https://github.com/ray-project/ray/issues/13800
|
Traceback (most recent call last):
File "/home/ubuntu/.local/bin/ray", line 11, in <module>
load_entry_point('ray', 'console_scripts', 'ray')()
File "/home/ubuntu/ray/python/ray/scripts/scripts.py", line 1519, in main
return cli()
File "/home/ubuntu/.local/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/.local/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/ubuntu/.local/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/.local/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/.local/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/ray/python/ray/scripts/scripts.py", line 1171, in submit
create_or_update_cluster(
File "/home/ubuntu/ray/python/ray/autoscaler/_private/commands.py", line 226, in create_or_update_cluster
try_logging_config(config)
File "/home/ubuntu/ray/python/ray/autoscaler/_private/commands.py", line 73, in try_logging_config
log_to_cli(config)
File "/home/ubuntu/ray/python/ray/autoscaler/_private/aws/config.py", line 179, in log_to_cli
print_info(
File "/home/ubuntu/ray/python/ray/autoscaler/_private/aws/config.py", line 129, in print_info
head_value_str = config["head_node"][key]
KeyError: 'ImageId'
|
KeyError
|
def on_checkpoint(self, checkpoint):
"""Starts tracking checkpoint metadata on checkpoint.
Sets the newest checkpoint. For PERSISTENT checkpoints: Deletes
previous checkpoint as long as it isn't one of the best ones. Also
deletes the worst checkpoint if at capacity.
Args:
checkpoint (Checkpoint): Trial state checkpoint.
"""
if checkpoint.storage == Checkpoint.MEMORY:
self.newest_memory_checkpoint = checkpoint
return
old_checkpoint = self.newest_persistent_checkpoint
if old_checkpoint.value == checkpoint.value:
return
self.newest_persistent_checkpoint = checkpoint
# Remove the old checkpoint if it isn't one of the best ones.
if old_checkpoint.value and old_checkpoint not in self._membership:
self.delete(old_checkpoint)
try:
queue_item = QueueItem(self._priority(checkpoint), checkpoint)
except KeyError:
logger.error(
"Result dict has no key: {}. "
"checkpoint_score_attr must be set to a key in the "
"result dict.".format(self._checkpoint_score_attr)
)
return
if len(self._best_checkpoints) < self.keep_checkpoints_num:
heapq.heappush(self._best_checkpoints, queue_item)
self._membership.add(checkpoint)
elif queue_item.priority >= self._best_checkpoints[0].priority:
worst = heapq.heappushpop(self._best_checkpoints, queue_item).value
self._membership.add(checkpoint)
if worst in self._membership:
self._membership.remove(worst)
# Don't delete the newest checkpoint. It will be deleted on the
# next on_checkpoint() call since it isn't in self._membership.
if worst != checkpoint:
self.delete(worst)
|
def on_checkpoint(self, checkpoint):
"""Starts tracking checkpoint metadata on checkpoint.
Sets the newest checkpoint. For PERSISTENT checkpoints: Deletes
previous checkpoint as long as it isn't one of the best ones. Also
deletes the worst checkpoint if at capacity.
Args:
checkpoint (Checkpoint): Trial state checkpoint.
"""
if checkpoint.storage == Checkpoint.MEMORY:
self.newest_memory_checkpoint = checkpoint
return
old_checkpoint = self.newest_persistent_checkpoint
self.newest_persistent_checkpoint = checkpoint
# Remove the old checkpoint if it isn't one of the best ones.
if old_checkpoint.value and old_checkpoint not in self._membership:
self.delete(old_checkpoint)
try:
queue_item = QueueItem(self._priority(checkpoint), checkpoint)
except KeyError:
logger.error(
"Result dict has no key: {}. "
"checkpoint_score_attr must be set to a key in the "
"result dict.".format(self._checkpoint_score_attr)
)
return
if len(self._best_checkpoints) < self.keep_checkpoints_num:
heapq.heappush(self._best_checkpoints, queue_item)
self._membership.add(checkpoint)
elif queue_item.priority >= self._best_checkpoints[0].priority:
worst = heapq.heappushpop(self._best_checkpoints, queue_item).value
self._membership.add(checkpoint)
if worst in self._membership:
self._membership.remove(worst)
# Don't delete the newest checkpoint. It will be deleted on the
# next on_checkpoint() call since it isn't in self._membership.
if worst != checkpoint:
self.delete(worst)
|
https://github.com/ray-project/ray/issues/9036
|
Failure # 1 (occurred at 2020-06-19_11-26-36)
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 294, in start_trial
self._start_trial(trial, checkpoint, train=train)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 235, in _start_trial
self.restore(trial, checkpoint)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 673, in restore
data_dict = TrainableUtil.pickle_checkpoint(value)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 62, in pickle_checkpoint
checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 87, in find_checkpoint_dir
raise FileNotFoundError("Path does not exist", checkpoint_path)
FileNotFoundError: [Errno Path does not exist] /content/TRASH_TUNE_PBT_oversampling_mimic_densenet121/TUNE_Model_0_2020-06-19_11-24-215xncry9c/checkpoint_6/
|
FileNotFoundError
|
def __init__(self, controller_handle, sync: bool):
self.controller_handle = controller_handle
self.sync = sync
self.router = Router(controller_handle)
if sync:
self.async_loop = create_or_get_async_loop_in_thread()
asyncio.run_coroutine_threadsafe(
self.router.setup_in_async_loop(),
self.async_loop,
)
else:
self.async_loop = asyncio.get_event_loop()
self.async_loop.create_task(self.router.setup_in_async_loop())
|
def __init__(self, controller_handle, sync: bool):
self.router = Router(controller_handle)
if sync:
self.async_loop = create_or_get_async_loop_in_thread()
asyncio.run_coroutine_threadsafe(
self.router.setup_in_async_loop(),
self.async_loop,
)
else:
self.async_loop = asyncio.get_event_loop()
self.async_loop.create_task(self.router.setup_in_async_loop())
|
https://github.com/ray-project/ray/issues/13180
|
2021-01-04 21:15:59,360 INFO services.py:1173 -- View the Ray dashboard at http://127.0.0.1:8265
(pid=6369) 2021-01-04 21:16:01,432 INFO controller.py:346 -- Starting router with name 'hRhwaS:SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:192.168.31.141-0' on node 'node:192.168.31.141-0' listening on '127.0.0.1:8000'
(pid=6366) INFO: Started server process [6366]
Traceback (most recent call last):
File "a.py", line 11, in <module>
ray.put(handle)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/worker.py", line 1411, in put
object_ref = worker.put_object(value, pin_object=True)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/worker.py", line 262, in put_object
serialized_value = self.get_serialization_context().serialize(value)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 406, in serialize
return self._serialize_to_msgpack(value)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 386, in _serialize_to_msgpack
self._serialize_to_pickle5(metadata, python_objects)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 346, in _serialize_to_pickle5
raise e
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 343, in _serialize_to_pickle5
value, protocol=5, buffer_callback=writer.buffer_callback)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 73, in dumps
cp.dump(obj)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 563, in dump
return Pickler.dump(self, obj)
File "stringsource", line 2, in ray._raylet.Count.__reduce_cython__
TypeError: self.c_tag_keys,self.metric cannot be converted to a Python object for pickling
(pid=6369) 2021-01-04 21:16:03,924 INFO controller.py:753 -- Registering route 'f' to endpoint 'f' with methods '['GET']'.
2021-01-04 21:16:06,021 INFO api.py:65 -- Shutting down Ray Serve because client went out of scope. To prevent this, either keep a reference to the client object or use serve.start(detached=True).
|
TypeError
|
def __init__(
self,
router, # ThreadProxiedRouter
endpoint_name,
handle_options: Optional[HandleOptions] = None,
):
self.router = router
self.endpoint_name = endpoint_name
self.handle_options = handle_options or HandleOptions()
|
def __init__(
self, router: Router, endpoint_name, handle_options: Optional[HandleOptions] = None
):
self.router = router
self.endpoint_name = endpoint_name
self.handle_options = handle_options or HandleOptions()
|
https://github.com/ray-project/ray/issues/13180
|
2021-01-04 21:15:59,360 INFO services.py:1173 -- View the Ray dashboard at http://127.0.0.1:8265
(pid=6369) 2021-01-04 21:16:01,432 INFO controller.py:346 -- Starting router with name 'hRhwaS:SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:192.168.31.141-0' on node 'node:192.168.31.141-0' listening on '127.0.0.1:8000'
(pid=6366) INFO: Started server process [6366]
Traceback (most recent call last):
File "a.py", line 11, in <module>
ray.put(handle)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/worker.py", line 1411, in put
object_ref = worker.put_object(value, pin_object=True)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/worker.py", line 262, in put_object
serialized_value = self.get_serialization_context().serialize(value)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 406, in serialize
return self._serialize_to_msgpack(value)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 386, in _serialize_to_msgpack
self._serialize_to_pickle5(metadata, python_objects)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 346, in _serialize_to_pickle5
raise e
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 343, in _serialize_to_pickle5
value, protocol=5, buffer_callback=writer.buffer_callback)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 73, in dumps
cp.dump(obj)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 563, in dump
return Pickler.dump(self, obj)
File "stringsource", line 2, in ray._raylet.Count.__reduce_cython__
TypeError: self.c_tag_keys,self.metric cannot be converted to a Python object for pickling
(pid=6369) 2021-01-04 21:16:03,924 INFO controller.py:753 -- Registering route 'f' to endpoint 'f' with methods '['GET']'.
2021-01-04 21:16:06,021 INFO api.py:65 -- Shutting down Ray Serve because client went out of scope. To prevent this, either keep a reference to the client object or use serve.start(detached=True).
|
TypeError
|
async def remote(self, request_data: Optional[Union[Dict, Any]] = None, **kwargs):
"""Issue an asynchronous request to the endpoint.
Returns a Ray ObjectRef whose results can be waited for or retrieved
using ray.wait or ray.get (or ``await object_ref``), respectively.
Returns:
ray.ObjectRef
Args:
request_data(dict, Any): If it's a dictionary, the data will be
available in ``request.json()`` or ``request.form()``.
Otherwise, it will be available in ``request.body()``.
``**kwargs``: All keyword arguments will be available in
``request.query_params``.
"""
return await self.router._remote(
self.endpoint_name, self.handle_options, request_data, kwargs
)
|
async def remote(self, request_data: Optional[Union[Dict, Any]] = None, **kwargs):
"""Issue an asynchrounous request to the endpoint.
Returns a Ray ObjectRef whose results can be waited for or retrieved
using ray.wait or ray.get (or ``await object_ref``), respectively.
Returns:
ray.ObjectRef
Args:
request_data(dict, Any): If it's a dictionary, the data will be
available in ``request.json()`` or ``request.form()``.
Otherwise, it will be available in ``request.body()``.
``**kwargs``: All keyword arguments will be available in
``request.query_params``.
"""
return await self.router._remote(
self.endpoint_name, self.handle_options, request_data, kwargs
)
|
https://github.com/ray-project/ray/issues/13180
|
2021-01-04 21:15:59,360 INFO services.py:1173 -- View the Ray dashboard at http://127.0.0.1:8265
(pid=6369) 2021-01-04 21:16:01,432 INFO controller.py:346 -- Starting router with name 'hRhwaS:SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:192.168.31.141-0' on node 'node:192.168.31.141-0' listening on '127.0.0.1:8000'
(pid=6366) INFO: Started server process [6366]
Traceback (most recent call last):
File "a.py", line 11, in <module>
ray.put(handle)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/worker.py", line 1411, in put
object_ref = worker.put_object(value, pin_object=True)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/worker.py", line 262, in put_object
serialized_value = self.get_serialization_context().serialize(value)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 406, in serialize
return self._serialize_to_msgpack(value)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 386, in _serialize_to_msgpack
self._serialize_to_pickle5(metadata, python_objects)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 346, in _serialize_to_pickle5
raise e
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/serialization.py", line 343, in _serialize_to_pickle5
value, protocol=5, buffer_callback=writer.buffer_callback)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 73, in dumps
cp.dump(obj)
File "/Users/simonmo/miniconda3/envs/py37/lib/python3.7/site-packages/ray/cloudpickle/cloudpickle_fast.py", line 563, in dump
return Pickler.dump(self, obj)
File "stringsource", line 2, in ray._raylet.Count.__reduce_cython__
TypeError: self.c_tag_keys,self.metric cannot be converted to a Python object for pickling
(pid=6369) 2021-01-04 21:16:03,924 INFO controller.py:753 -- Registering route 'f' to endpoint 'f' with methods '['GET']'.
2021-01-04 21:16:06,021 INFO api.py:65 -- Shutting down Ray Serve because client went out of scope. To prevent this, either keep a reference to the client object or use serve.start(detached=True).
|
TypeError
|
def __init__(
self,
node_ip_address,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None,
):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.ip = node_ip_address
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
self.ppid = int(os.environ["RAY_RAYLET_PID"])
assert self.ppid > 0
logger.info("Parent pid is %s", self.ppid)
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port(f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip, self.grpc_port)
self.aioredis_client = None
options = (("grpc.enable_http_proxy", 0),)
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
f"{self.ip}:{self.node_manager_port}", options=options
)
self.http_session = None
|
def __init__(
self,
node_ip_address,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None,
):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.ip = node_ip_address
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
# TODO(edoakes): RAY_RAYLET_PID isn't properly set on Windows. This is
# only used for fate-sharing with the raylet and we need a different
# fate-sharing mechanism for Windows anyways.
if sys.platform not in ["win32", "cygwin"]:
self.ppid = int(os.environ["RAY_RAYLET_PID"])
assert self.ppid > 0
logger.info("Parent pid is %s", self.ppid)
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port(f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip, self.grpc_port)
self.aioredis_client = None
options = (("grpc.enable_http_proxy", 0),)
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
f"{self.ip}:{self.node_manager_port}", options=options
)
self.http_session = None
|
https://github.com/ray-project/ray/issues/13199
|
2021-01-05T07:27:22.1039161Z 2021-01-05 07:26:40,486 WARNING worker.py:1044 -- The agent on node fv-az68-689 failed with the following error:
2021-01-05T07:27:22.1039976Z Traceback (most recent call last):
2021-01-05T07:27:22.1040684Z File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 311, in <module>
2021-01-05T07:27:22.1041457Z loop.run_until_complete(agent.run())
2021-01-05T07:27:22.1043175Z File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 587, in run_until_complete
2021-01-05T07:27:22.1044233Z return future.result()
2021-01-05T07:27:22.1044986Z File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 187, in run
2021-01-05T07:27:22.1045759Z await asyncio.gather(check_parent_task,
2021-01-05T07:27:22.1046632Z UnboundLocalError: local variable 'check_parent_task' referenced before assignment
2021-01-05T07:27:22.1047247Z
2021-01-05T07:27:22.1047797Z �[2m�[36m(pid=None)�[0m Traceback (most recent call last):
2021-01-05T07:27:22.1048322Z
2021-01-05T07:27:22.1049013Z �[2m�[36m(pid=None)�[0m File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 322, in <module>
2021-01-05T07:27:22.1049555Z
2021-01-05T07:27:22.1050018Z �[2m�[36m(pid=None)�[0m raise e
2021-01-05T07:27:22.1050352Z
2021-01-05T07:27:22.1050980Z �[2m�[36m(pid=None)�[0m File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 311, in <module>
2021-01-05T07:27:22.1051524Z
2021-01-05T07:27:22.1052129Z �[2m�[36m(pid=None)�[0m loop.run_until_complete(agent.run())
2021-01-05T07:27:22.1052559Z
2021-01-05T07:27:22.1053390Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 587, in run_until_complete
2021-01-05T07:27:22.1054078Z
2021-01-05T07:27:22.1054625Z �[2m�[36m(pid=None)�[0m return future.result()
2021-01-05T07:27:22.1055026Z
2021-01-05T07:27:22.1055681Z �[2m�[36m(pid=None)�[0m File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 187, in run
2021-01-05T07:27:22.1056156Z
2021-01-05T07:27:22.1056751Z �[2m�[36m(pid=None)�[0m await asyncio.gather(check_parent_task,
2021-01-05T07:27:22.1057213Z
2021-01-05T07:27:22.1058008Z �[2m�[36m(pid=None)�[0m UnboundLocalError: local variable 'check_parent_task' referenced before assignment
2021-01-05T07:27:22.1058653Z
2021-01-05T07:27:22.1059146Z �[2m�[36m(pid=None)�[0m --- Logging error ---
2021-01-05T07:27:22.1059512Z
2021-01-05T07:27:22.1060051Z �[2m�[36m(pid=None)�[0m Traceback (most recent call last):
2021-01-05T07:27:22.1060458Z
2021-01-05T07:27:22.1061184Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\logging\handlers.py", line 69, in emit
2021-01-05T07:27:22.1072363Z
2021-01-05T07:27:22.1073081Z �[2m�[36m(pid=None)�[0m if self.shouldRollover(record):
2021-01-05T07:27:22.1073557Z
2021-01-05T07:27:22.1074417Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\logging\handlers.py", line 183, in shouldRollover
2021-01-05T07:27:22.1075116Z
2021-01-05T07:27:22.1075651Z �[2m�[36m(pid=None)�[0m self.stream = self._open()
2021-01-05T07:27:22.1076051Z
2021-01-05T07:27:22.1076745Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\logging\__init__.py", line 1116, in _open
2021-01-05T07:27:22.1077345Z
2021-01-05T07:27:22.1078106Z �[2m�[36m(pid=None)�[0m return open(self.baseFilename, self.mode, encoding=self.encoding)
2021-01-05T07:27:22.1078730Z
2021-01-05T07:27:22.1079292Z �[2m�[36m(pid=None)�[0m NameError: name 'open' is not defined
2021-01-05T07:27:22.1079708Z
2021-01-05T07:27:22.1080172Z �[2m�[36m(pid=None)�[0m Call stack:
2021-01-05T07:27:22.1080503Z
2021-01-05T07:27:22.1082589Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\site-packages\aiohttp\client.py", line 320, in __del__
2021-01-05T07:27:22.1083331Z
2021-01-05T07:27:22.1083952Z �[2m�[36m(pid=None)�[0m self._loop.call_exception_handler(context)
2021-01-05T07:27:22.1084419Z
2021-01-05T07:27:22.1085281Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 1645, in call_exception_handler
2021-01-05T07:27:22.1085985Z
2021-01-05T07:27:22.1086595Z �[2m�[36m(pid=None)�[0m self.default_exception_handler(context)
2021-01-05T07:27:22.1087071Z
2021-01-05T07:27:22.1087942Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 1619, in default_exception_handler
2021-01-05T07:27:22.1088655Z
2021-01-05T07:27:22.1089225Z �[2m�[36m(pid=None)�[0m logger.error('\n'.join(log_lines), exc_info=exc_info)
2021-01-05T07:27:22.1089710Z
2021-01-05T07:27:22.1090674Z �[2m�[36m(pid=None)�[0m Message: 'Unclosed client session\nclient_session: <aiohttp.client.ClientSession object at 0x000002699DFEAE88>'
2021-01-05T07:27:22.1091493Z
2021-01-05T07:27:22.1091965Z �[2m�[36m(pid=None)�[0m Arguments: ()
|
UnboundLocalError
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if parent is None or parent.pid == 1 or self.ppid != parent.pid:
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS
)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
},
)
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host, http_port)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]),
)
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel
)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(), agent_port=self.grpc_port, agent_ip_address=self.ip
)
)
await asyncio.gather(check_parent_task, *(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if parent is None or parent.pid == 1 or self.ppid != parent.pid:
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS
)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
if sys.platform not in ["win32", "cygwin"]:
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
},
)
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host, http_port)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]),
)
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel
)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(), agent_port=self.grpc_port, agent_ip_address=self.ip
)
)
await asyncio.gather(check_parent_task, *(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
https://github.com/ray-project/ray/issues/13199
|
2021-01-05T07:27:22.1039161Z 2021-01-05 07:26:40,486 WARNING worker.py:1044 -- The agent on node fv-az68-689 failed with the following error:
2021-01-05T07:27:22.1039976Z Traceback (most recent call last):
2021-01-05T07:27:22.1040684Z File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 311, in <module>
2021-01-05T07:27:22.1041457Z loop.run_until_complete(agent.run())
2021-01-05T07:27:22.1043175Z File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 587, in run_until_complete
2021-01-05T07:27:22.1044233Z return future.result()
2021-01-05T07:27:22.1044986Z File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 187, in run
2021-01-05T07:27:22.1045759Z await asyncio.gather(check_parent_task,
2021-01-05T07:27:22.1046632Z UnboundLocalError: local variable 'check_parent_task' referenced before assignment
2021-01-05T07:27:22.1047247Z
2021-01-05T07:27:22.1047797Z �[2m�[36m(pid=None)�[0m Traceback (most recent call last):
2021-01-05T07:27:22.1048322Z
2021-01-05T07:27:22.1049013Z �[2m�[36m(pid=None)�[0m File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 322, in <module>
2021-01-05T07:27:22.1049555Z
2021-01-05T07:27:22.1050018Z �[2m�[36m(pid=None)�[0m raise e
2021-01-05T07:27:22.1050352Z
2021-01-05T07:27:22.1050980Z �[2m�[36m(pid=None)�[0m File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 311, in <module>
2021-01-05T07:27:22.1051524Z
2021-01-05T07:27:22.1052129Z �[2m�[36m(pid=None)�[0m loop.run_until_complete(agent.run())
2021-01-05T07:27:22.1052559Z
2021-01-05T07:27:22.1053390Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 587, in run_until_complete
2021-01-05T07:27:22.1054078Z
2021-01-05T07:27:22.1054625Z �[2m�[36m(pid=None)�[0m return future.result()
2021-01-05T07:27:22.1055026Z
2021-01-05T07:27:22.1055681Z �[2m�[36m(pid=None)�[0m File "d:\a\ray\ray\python\ray\new_dashboard/agent.py", line 187, in run
2021-01-05T07:27:22.1056156Z
2021-01-05T07:27:22.1056751Z �[2m�[36m(pid=None)�[0m await asyncio.gather(check_parent_task,
2021-01-05T07:27:22.1057213Z
2021-01-05T07:27:22.1058008Z �[2m�[36m(pid=None)�[0m UnboundLocalError: local variable 'check_parent_task' referenced before assignment
2021-01-05T07:27:22.1058653Z
2021-01-05T07:27:22.1059146Z �[2m�[36m(pid=None)�[0m --- Logging error ---
2021-01-05T07:27:22.1059512Z
2021-01-05T07:27:22.1060051Z �[2m�[36m(pid=None)�[0m Traceback (most recent call last):
2021-01-05T07:27:22.1060458Z
2021-01-05T07:27:22.1061184Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\logging\handlers.py", line 69, in emit
2021-01-05T07:27:22.1072363Z
2021-01-05T07:27:22.1073081Z �[2m�[36m(pid=None)�[0m if self.shouldRollover(record):
2021-01-05T07:27:22.1073557Z
2021-01-05T07:27:22.1074417Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\logging\handlers.py", line 183, in shouldRollover
2021-01-05T07:27:22.1075116Z
2021-01-05T07:27:22.1075651Z �[2m�[36m(pid=None)�[0m self.stream = self._open()
2021-01-05T07:27:22.1076051Z
2021-01-05T07:27:22.1076745Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\logging\__init__.py", line 1116, in _open
2021-01-05T07:27:22.1077345Z
2021-01-05T07:27:22.1078106Z �[2m�[36m(pid=None)�[0m return open(self.baseFilename, self.mode, encoding=self.encoding)
2021-01-05T07:27:22.1078730Z
2021-01-05T07:27:22.1079292Z �[2m�[36m(pid=None)�[0m NameError: name 'open' is not defined
2021-01-05T07:27:22.1079708Z
2021-01-05T07:27:22.1080172Z �[2m�[36m(pid=None)�[0m Call stack:
2021-01-05T07:27:22.1080503Z
2021-01-05T07:27:22.1082589Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\site-packages\aiohttp\client.py", line 320, in __del__
2021-01-05T07:27:22.1083331Z
2021-01-05T07:27:22.1083952Z �[2m�[36m(pid=None)�[0m self._loop.call_exception_handler(context)
2021-01-05T07:27:22.1084419Z
2021-01-05T07:27:22.1085281Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 1645, in call_exception_handler
2021-01-05T07:27:22.1085985Z
2021-01-05T07:27:22.1086595Z �[2m�[36m(pid=None)�[0m self.default_exception_handler(context)
2021-01-05T07:27:22.1087071Z
2021-01-05T07:27:22.1087942Z �[2m�[36m(pid=None)�[0m File "C:\hostedtoolcache\windows\Python\3.7.9\x64\lib\asyncio\base_events.py", line 1619, in default_exception_handler
2021-01-05T07:27:22.1088655Z
2021-01-05T07:27:22.1089225Z �[2m�[36m(pid=None)�[0m logger.error('\n'.join(log_lines), exc_info=exc_info)
2021-01-05T07:27:22.1089710Z
2021-01-05T07:27:22.1090674Z �[2m�[36m(pid=None)�[0m Message: 'Unclosed client session\nclient_session: <aiohttp.client.ClientSession object at 0x000002699DFEAE88>'
2021-01-05T07:27:22.1091493Z
2021-01-05T07:27:22.1091965Z �[2m�[36m(pid=None)�[0m Arguments: ()
|
UnboundLocalError
|
def is_connected(self) -> bool:
if self.client_worker is None:
return False
return self.client_worker.is_connected()
|
def is_connected(self) -> bool:
return self.client_worker is not None
|
https://github.com/ray-project/ray/issues/13353
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def __init__(self, channel: "grpc._channel.Channel", client_id: str, metadata: list):
"""Initializes a thread-safe datapath over a Ray Client gRPC channel.
Args:
channel: connected gRPC channel
client_id: the generated ID representing this client
metadata: metadata to pass to gRPC requests
"""
self.channel = channel
self.request_queue = queue.Queue()
self.data_thread = self._start_datathread()
self.ready_data: Dict[int, Any] = {}
self.cv = threading.Condition()
self._req_id = 0
self._client_id = client_id
self._metadata = metadata
self._in_shutdown = False
self.data_thread.start()
|
def __init__(self, channel: "grpc._channel.Channel", client_id: str, metadata: list):
"""Initializes a thread-safe datapath over a Ray Client gRPC channel.
Args:
channel: connected gRPC channel
client_id: the generated ID representing this client
metadata: metadata to pass to gRPC requests
"""
self.channel = channel
self.request_queue = queue.Queue()
self.data_thread = self._start_datathread()
self.ready_data: Dict[int, Any] = {}
self.cv = threading.Condition()
self._req_id = 0
self._client_id = client_id
self._metadata = metadata
self.data_thread.start()
|
https://github.com/ray-project/ray/issues/13353
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def _data_main(self) -> None:
stub = ray_client_pb2_grpc.RayletDataStreamerStub(self.channel)
resp_stream = stub.Datapath(
iter(self.request_queue.get, None),
metadata=[("client_id", self._client_id)] + self._metadata,
wait_for_ready=True,
)
try:
for response in resp_stream:
if response.req_id == 0:
# This is not being waited for.
logger.debug(f"Got unawaited response {response}")
continue
with self.cv:
self.ready_data[response.req_id] = response
self.cv.notify_all()
except grpc.RpcError as e:
with self.cv:
self._in_shutdown = True
self.cv.notify_all()
if e.code() == grpc.StatusCode.CANCELLED:
# Gracefully shutting down
logger.info("Cancelling data channel")
elif e.code() == grpc.StatusCode.UNAVAILABLE:
# TODO(barakmich): The server may have
# dropped. In theory, we can retry, as per
# https://grpc.github.io/grpc/core/md_doc_statuscodes.html but
# in practice we may need to think about the correct semantics
# here.
logger.info("Server disconnected from data channel")
else:
logger.error(f"Got Error from data channel -- shutting down: {e}")
raise e
|
def _data_main(self) -> None:
stub = ray_client_pb2_grpc.RayletDataStreamerStub(self.channel)
resp_stream = stub.Datapath(
iter(self.request_queue.get, None),
metadata=[("client_id", self._client_id)] + self._metadata,
wait_for_ready=True,
)
try:
for response in resp_stream:
if response.req_id == 0:
# This is not being waited for.
logger.debug(f"Got unawaited response {response}")
continue
with self.cv:
self.ready_data[response.req_id] = response
self.cv.notify_all()
except grpc.RpcError as e:
if grpc.StatusCode.CANCELLED == e.code():
# Gracefully shutting down
logger.info("Cancelling data channel")
else:
logger.error(f"Got Error from data channel -- shutting down: {e}")
raise e
|
https://github.com/ray-project/ray/issues/13353
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def _blocking_send(
self, req: ray_client_pb2.DataRequest
) -> ray_client_pb2.DataResponse:
req_id = self._next_id()
req.req_id = req_id
self.request_queue.put(req)
data = None
with self.cv:
self.cv.wait_for(lambda: req_id in self.ready_data or self._in_shutdown)
if self._in_shutdown:
raise ConnectionError(
f"cannot send request {req}: data channel shutting down"
)
data = self.ready_data[req_id]
del self.ready_data[req_id]
return data
|
def _blocking_send(
self, req: ray_client_pb2.DataRequest
) -> ray_client_pb2.DataResponse:
req_id = self._next_id()
req.req_id = req_id
self.request_queue.put(req)
data = None
with self.cv:
self.cv.wait_for(lambda: req_id in self.ready_data)
data = self.ready_data[req_id]
del self.ready_data[req_id]
return data
|
https://github.com/ray-project/ray/issues/13353
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def _log_main(self) -> None:
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.channel)
log_stream = stub.Logstream(
iter(self.request_queue.get, None), metadata=self._metadata
)
try:
for record in log_stream:
if record.level < 0:
self.stdstream(level=record.level, msg=record.msg)
self.log(level=record.level, msg=record.msg)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.CANCELLED:
# Graceful shutdown. We've cancelled our own connection.
logger.info("Cancelling logs channel")
elif e.code() == grpc.StatusCode.UNAVAILABLE:
# TODO(barakmich): The server may have
# dropped. In theory, we can retry, as per
# https://grpc.github.io/grpc/core/md_doc_statuscodes.html but
# in practice we may need to think about the correct semantics
# here.
logger.info("Server disconnected from logs channel")
else:
# Some other, unhandled, gRPC error
logger.error(f"Got Error from logger channel -- shutting down: {e}")
raise e
|
def _log_main(self) -> None:
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.channel)
log_stream = stub.Logstream(
iter(self.request_queue.get, None), metadata=self._metadata
)
try:
for record in log_stream:
if record.level < 0:
self.stdstream(level=record.level, msg=record.msg)
self.log(level=record.level, msg=record.msg)
except grpc.RpcError as e:
if grpc.StatusCode.CANCELLED != e.code():
# Not just shutting down normally
logger.error(f"Got Error from logger channel -- shutting down: {e}")
raise e
|
https://github.com/ray-project/ray/issues/13353
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def __init__(
self,
conn_str: str = "",
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3,
):
"""Initializes the worker side grpc client.
Args:
conn_str: The host:port connection string for the ray server.
secure: whether to use SSL secure channel or not.
metadata: additional metadata passed in the grpc request headers.
connection_retries: Number of times to attempt to reconnect to the
ray server if it doesn't respond immediately. Setting to 0 tries
at least once. For infinite retries, catch the ConnectionError
exception.
"""
self.metadata = metadata if metadata else []
self.channel = None
self._conn_state = grpc.ChannelConnectivity.IDLE
self._client_id = make_client_id()
if secure:
credentials = grpc.ssl_channel_credentials()
self.channel = grpc.secure_channel(conn_str, credentials)
else:
self.channel = grpc.insecure_channel(conn_str)
self.channel.subscribe(self._on_channel_state_change)
# Retry the connection until the channel responds to something
# looking like a gRPC connection, though it may be a proxy.
conn_attempts = 0
timeout = INITIAL_TIMEOUT_SEC
ray_ready = False
while conn_attempts < max(connection_retries, 1):
conn_attempts += 1
try:
# Let gRPC wait for us to see if the channel becomes ready.
# If it throws, we couldn't connect.
grpc.channel_ready_future(self.channel).result(timeout=timeout)
# The HTTP2 channel is ready. Wrap the channel with the
# RayletDriverStub, allowing for unary requests.
self.server = ray_client_pb2_grpc.RayletDriverStub(self.channel)
# Now the HTTP2 channel is ready, or proxied, but the
# servicer may not be ready. Call is_initialized() and if
# it throws, the servicer is not ready. On success, the
# `ray_ready` result is checked.
ray_ready = self.is_initialized()
if ray_ready:
# Ray is ready! Break out of the retry loop
break
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
except grpc.FutureTimeoutError:
logger.info(f"Couldn't connect channel in {timeout} seconds, retrying")
# Note that channel_ready_future constitutes its own timeout,
# which is why we do not sleep here.
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
# UNAVAILABLE is gRPC's retryable error,
# so we do that here.
logger.info(f"Ray client server unavailable, retrying in {timeout}s...")
logger.debug(f"Received when checking init: {e.details()}")
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
else:
# Any other gRPC error gets a reraise
raise e
# Fallthrough, backoff, and retry at the top of the loop
logger.info(
f"Waiting for Ray to become ready on the server, retry in {timeout}s..."
)
timeout = backoff(timeout)
# If we made it through the loop without ray_ready it means we've used
# up our retries and should error back to the user.
if not ray_ready:
raise ConnectionError("ray client connection timeout")
# Initialize the streams to finish protocol negotiation.
self.data_client = DataClient(self.channel, self._client_id, self.metadata)
self.reference_count: Dict[bytes, int] = defaultdict(int)
self.log_client = LogstreamClient(self.channel, self.metadata)
self.log_client.set_logstream_level(logging.INFO)
self.closed = False
|
def __init__(
self,
conn_str: str = "",
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3,
):
"""Initializes the worker side grpc client.
Args:
conn_str: The host:port connection string for the ray server.
secure: whether to use SSL secure channel or not.
metadata: additional metadata passed in the grpc request headers.
connection_retries: Number of times to attempt to reconnect to the
ray server if it doesn't respond immediately. Setting to 0 tries
at least once. For infinite retries, catch the ConnectionError
exception.
"""
self.metadata = metadata if metadata else []
self.channel = None
self._client_id = make_client_id()
if secure:
credentials = grpc.ssl_channel_credentials()
self.channel = grpc.secure_channel(conn_str, credentials)
else:
self.channel = grpc.insecure_channel(conn_str)
# Retry the connection until the channel responds to something
# looking like a gRPC connection, though it may be a proxy.
conn_attempts = 0
timeout = INITIAL_TIMEOUT_SEC
ray_ready = False
while conn_attempts < max(connection_retries, 1):
conn_attempts += 1
try:
# Let gRPC wait for us to see if the channel becomes ready.
# If it throws, we couldn't connect.
grpc.channel_ready_future(self.channel).result(timeout=timeout)
# The HTTP2 channel is ready. Wrap the channel with the
# RayletDriverStub, allowing for unary requests.
self.server = ray_client_pb2_grpc.RayletDriverStub(self.channel)
# Now the HTTP2 channel is ready, or proxied, but the
# servicer may not be ready. Call is_initialized() and if
# it throws, the servicer is not ready. On success, the
# `ray_ready` result is checked.
ray_ready = self.is_initialized()
if ray_ready:
# Ray is ready! Break out of the retry loop
break
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
except grpc.FutureTimeoutError:
logger.info(f"Couldn't connect channel in {timeout} seconds, retrying")
# Note that channel_ready_future constitutes its own timeout,
# which is why we do not sleep here.
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
# UNAVAILABLE is gRPC's retryable error,
# so we do that here.
logger.info(f"Ray client server unavailable, retrying in {timeout}s...")
logger.debug(f"Received when checking init: {e.details()}")
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
else:
# Any other gRPC error gets a reraise
raise e
# Fallthrough, backoff, and retry at the top of the loop
logger.info(
f"Waiting for Ray to become ready on the server, retry in {timeout}s..."
)
timeout = backoff(timeout)
# If we made it through the loop without ray_ready it means we've used
# up our retries and should error back to the user.
if not ray_ready:
raise ConnectionError("ray client connection timeout")
# Initialize the streams to finish protocol negotiation.
self.data_client = DataClient(self.channel, self._client_id, self.metadata)
self.reference_count: Dict[bytes, int] = defaultdict(int)
self.log_client = LogstreamClient(self.channel, self.metadata)
self.log_client.set_logstream_level(logging.INFO)
self.closed = False
|
https://github.com/ray-project/ray/issues/13353
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def translate(
configuration: Dict[str, Any], dictionary: Dict[str, str]
) -> Dict[str, Any]:
return {
dictionary[field]: configuration[field]
for field in dictionary
if field in configuration
}
|
def translate(
configuration: Dict[str, Any], dictionary: Dict[str, str]
) -> Dict[str, Any]:
return {dictionary[field]: configuration[field] for field in dictionary}
|
https://github.com/ray-project/ray/issues/13667
|
Traceback (most recent call last):
File "/home/ray/anaconda3/bin/ray-operator", line 8, in <module>
sys.exit(main())
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator.py", line 123, in main
cluster_config = operator_utils.cr_to_config(cluster_cr)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 62, in cr_to_config
config["available_node_types" = get_node_types(cluster_resource)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 76, in get_node_types
pod_type_copy, dictionary=NODE_TYPE_FIELDS)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 98, in translate
return {dictionary[field: configuration[field for field in dictionary}
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 98, in <dictcomp>
return {dictionary[field: configuration[field for field in dictionary}
KeyError: 'minWorkers'
stream closed
|
KeyError
|
def __init__(
self,
conn_str: str = "",
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3,
):
"""Initializes the worker side grpc client.
Args:
conn_str: The host:port connection string for the ray server.
secure: whether to use SSL secure channel or not.
metadata: additional metadata passed in the grpc request headers.
connection_retries: Number of times to attempt to reconnect to the
ray server if it doesn't respond immediately. Setting to 0 tries
at least once. For infinite retries, catch the ConnectionError
exception.
"""
self.metadata = metadata if metadata else []
self.channel = None
self._client_id = make_client_id()
if secure:
credentials = grpc.ssl_channel_credentials()
self.channel = grpc.secure_channel(conn_str, credentials)
else:
self.channel = grpc.insecure_channel(conn_str)
# Retry the connection until the channel responds to something
# looking like a gRPC connection, though it may be a proxy.
conn_attempts = 0
timeout = INITIAL_TIMEOUT_SEC
ray_ready = False
while conn_attempts < max(connection_retries, 1):
conn_attempts += 1
try:
# Let gRPC wait for us to see if the channel becomes ready.
# If it throws, we couldn't connect.
grpc.channel_ready_future(self.channel).result(timeout=timeout)
# The HTTP2 channel is ready. Wrap the channel with the
# RayletDriverStub, allowing for unary requests.
self.server = ray_client_pb2_grpc.RayletDriverStub(self.channel)
# Now the HTTP2 channel is ready, or proxied, but the
# servicer may not be ready. Call is_initialized() and if
# it throws, the servicer is not ready. On success, the
# `ray_ready` result is checked.
ray_ready = self.is_initialized()
if ray_ready:
# Ray is ready! Break out of the retry loop
break
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
except grpc.FutureTimeoutError:
logger.info(f"Couldn't connect channel in {timeout} seconds, retrying")
# Note that channel_ready_future constitutes its own timeout,
# which is why we do not sleep here.
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
# UNAVAILABLE is gRPC's retryable error,
# so we do that here.
logger.info(f"Ray client server unavailable, retrying in {timeout}s...")
logger.debug(f"Received when checking init: {e.details()}")
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
else:
# Any other gRPC error gets a reraise
raise e
# Fallthrough, backoff, and retry at the top of the loop
logger.info(
f"Waiting for Ray to become ready on the server, retry in {timeout}s..."
)
timeout = backoff(timeout)
# If we made it through the loop without ray_ready it means we've used
# up our retries and should error back to the user.
if not ray_ready:
raise ConnectionError("ray client connection timeout")
# Initialize the streams to finish protocol negotiation.
self.data_client = DataClient(self.channel, self._client_id, self.metadata)
self.reference_count: Dict[bytes, int] = defaultdict(int)
self.log_client = LogstreamClient(self.channel, self.metadata)
self.log_client.set_logstream_level(logging.INFO)
self.closed = False
|
def __init__(
self,
conn_str: str = "",
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3,
):
"""Initializes the worker side grpc client.
Args:
conn_str: The host:port connection string for the ray server.
secure: whether to use SSL secure channel or not.
metadata: additional metadata passed in the grpc request headers.
connection_retries: Number of times to attempt to reconnect to the
ray server if it doesn't respond immediately. Setting to 0 tries
at least once. For infinite retries, catch the ConnectionError
exception.
"""
self.metadata = metadata if metadata else []
self.channel = None
self._client_id = make_client_id()
if secure:
credentials = grpc.ssl_channel_credentials()
self.channel = grpc.secure_channel(conn_str, credentials)
else:
self.channel = grpc.insecure_channel(conn_str)
conn_attempts = 0
timeout = INITIAL_TIMEOUT_SEC
while conn_attempts < connection_retries + 1:
conn_attempts += 1
try:
grpc.channel_ready_future(self.channel).result(timeout=timeout)
break
except grpc.FutureTimeoutError:
if conn_attempts >= connection_retries:
raise ConnectionError("ray client connection timeout")
logger.info(f"Couldn't connect in {timeout} seconds, retrying")
timeout = timeout + 5
if timeout > MAX_TIMEOUT_SEC:
timeout = MAX_TIMEOUT_SEC
self.server = ray_client_pb2_grpc.RayletDriverStub(self.channel)
self.data_client = DataClient(self.channel, self._client_id, self.metadata)
self.reference_count: Dict[bytes, int] = defaultdict(int)
self.log_client = LogstreamClient(self.channel, self.metadata)
self.log_client.set_logstream_level(logging.INFO)
self.closed = False
|
https://github.com/ray-project/ray/issues/13446
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610606646.739755640","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610606646.741104491","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-5:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610606646.739755640","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def __init__(
self, name: str, description: str = "", tag_keys: Optional[Tuple[str]] = None
):
if len(name) == 0:
raise ValueError("Empty name is not allowed. Please provide a metric name.")
self._name = name
self._description = description
# We don't specify unit because it won't be
# exported to Prometheus anyway.
self._unit = ""
# The default tags key-value pair.
self._default_tags = {}
# Keys of tags.
self._tag_keys = tag_keys or tuple()
# The Cython metric class. This should be set in the child class.
self._metric = None
if not isinstance(self._tag_keys, tuple):
raise TypeError(f"tag_keys should be a tuple type, got: {type(self._tag_keys)}")
for key in self._tag_keys:
if not isinstance(key, str):
raise TypeError(f"Tag keys must be str, got {type(key)}.")
|
def __init__(
self, name: str, description: str = "", tag_keys: Optional[Tuple[str]] = None
):
if len(name) == 0:
raise ValueError("Empty name is not allowed. Please provide a metric name.")
self._name = name
self._description = description
# We don't specify unit because it won't be
# exported to Prometheus anyway.
self._unit = ""
# The default tags key-value pair.
self._default_tags = {}
# Keys of tags.
self._tag_keys = tag_keys or tuple()
# The Cython metric class. This should be set in the child class.
self._metric = None
if not isinstance(self._tag_keys, tuple):
raise ValueError(
f"tag_keys should be a tuple type, got: {type(self._tag_keys)}"
)
|
https://github.com/ray-project/ray/issues/13419
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
count.record(1.0, {"b": 2})
File "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record
self._metric.record(value, tags=final_tags)
File "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record
c_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")
AttributeError: 'int' object has no attribute 'encode'
|
AttributeError
|
def set_default_tags(self, default_tags: Dict[str, str]):
"""Set default tags of metrics.
Example:
>>> # Note that set_default_tags returns the instance itself.
>>> counter = Counter("name")
>>> counter2 = counter.set_default_tags({"a": "b"})
>>> assert counter is counter2
>>> # this means you can instantiate it in this way.
>>> counter = Counter("name").set_default_tags({"a": "b"})
Args:
default_tags(dict): Default tags that are
used for every record method.
Returns:
Metric: it returns the instance itself.
"""
for key, val in default_tags.items():
if key not in self._tag_keys:
raise ValueError(f"Unrecognized tag key {key}.")
if not isinstance(val, str):
raise TypeError(f"Tag values must be str, got {type(val)}.")
self._default_tags = default_tags
return self
|
def set_default_tags(self, default_tags: Dict[str, str]):
"""Set default tags of metrics.
Example:
>>> # Note that set_default_tags returns the instance itself.
>>> counter = Counter("name")
>>> counter2 = counter.set_default_tags({"a": "b"})
>>> assert counter is counter2
>>> # this means you can instantiate it in this way.
>>> counter = Counter("name").set_default_tags({"a": "b"})
Args:
default_tags(dict): Default tags that are
used for every record method.
Returns:
Metric: it returns the instance itself.
"""
self._default_tags = default_tags
return self
|
https://github.com/ray-project/ray/issues/13419
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
count.record(1.0, {"b": 2})
File "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record
self._metric.record(value, tags=final_tags)
File "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record
c_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")
AttributeError: 'int' object has no attribute 'encode'
|
AttributeError
|
def record(self, value: float, tags: dict = None) -> None:
"""Record the metric point of the metric.
Args:
value(float): The value to be recorded as a metric point.
"""
assert self._metric is not None
if tags is not None:
for val in tags.values():
if not isinstance(val, str):
raise TypeError(f"Tag values must be str, got {type(val)}.")
default_tag_copy = self._default_tags.copy()
default_tag_copy.update(tags or {})
self._metric.record(value, tags=default_tag_copy)
|
def record(self, value: float, tags: dict = None) -> None:
"""Record the metric point of the metric.
Args:
value(float): The value to be recorded as a metric point.
"""
assert self._metric is not None
default_tag_copy = self._default_tags.copy()
default_tag_copy.update(tags or {})
self._metric.record(value, tags=default_tag_copy)
|
https://github.com/ray-project/ray/issues/13419
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
count.record(1.0, {"b": 2})
File "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record
self._metric.record(value, tags=final_tags)
File "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record
c_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")
AttributeError: 'int' object has no attribute 'encode'
|
AttributeError
|
def postprocess_advantages(
policy, sample_batch, other_agent_batches=None, episode=None
):
# Stub serving backward compatibility.
deprecation_warning(
old="rllib.agents.a3c.a3c_tf_policy.postprocess_advantages",
new="rllib.evaluation.postprocessing.compute_gae_for_sample_batch",
error=False,
)
return compute_gae_for_sample_batch(
policy, sample_batch, other_agent_batches, episode
)
|
def postprocess_advantages(
policy, sample_batch, other_agent_batches=None, episode=None
):
completed = sample_batch[SampleBatch.DONES][-1]
if completed:
last_r = 0.0
else:
next_state = []
for i in range(policy.num_state_tensors()):
next_state.append(sample_batch["state_out_{}".format(i)][-1])
last_r = policy._value(
sample_batch[SampleBatch.NEXT_OBS][-1],
sample_batch[SampleBatch.ACTIONS][-1],
sample_batch[SampleBatch.REWARDS][-1],
*next_state,
)
return compute_advantages(
sample_batch,
last_r,
policy.config["gamma"],
policy.config["lambda"],
policy.config["use_gae"],
policy.config["use_critic"],
)
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def setup_mixins(policy, obs_space, action_space, config):
ValueNetworkMixin.__init__(policy, obs_space, action_space, config)
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
|
def setup_mixins(policy, obs_space, action_space, config):
ValueNetworkMixin.__init__(policy)
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def __init__(
self,
action_dist,
actions,
advantages,
v_target,
vf,
vf_loss_coeff=0.5,
entropy_coeff=0.01,
):
log_prob = action_dist.logp(actions)
# The "policy gradients" loss
self.pi_loss = -tf.reduce_sum(log_prob * advantages)
delta = vf - v_target
self.vf_loss = 0.5 * tf.reduce_sum(tf.math.square(delta))
self.entropy = tf.reduce_sum(action_dist.entropy())
self.total_loss = (
self.pi_loss + self.vf_loss * vf_loss_coeff - self.entropy * entropy_coeff
)
|
def __init__(self):
@make_tf_callable(self.get_session())
def value(ob, prev_action, prev_reward, *state):
model_out, _ = self.model(
{
SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
SampleBatch.PREV_ACTIONS: tf.convert_to_tensor([prev_action]),
SampleBatch.PREV_REWARDS: tf.convert_to_tensor([prev_reward]),
"is_training": tf.convert_to_tensor(False),
},
[tf.convert_to_tensor([s]) for s in state],
tf.convert_to_tensor([1]),
)
return self.model.value_function()[0]
self._value = value
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def add_advantages(policy, sample_batch, other_agent_batches=None, episode=None):
# Stub serving backward compatibility.
deprecation_warning(
old="rllib.agents.a3c.a3c_torch_policy.add_advantages",
new="rllib.evaluation.postprocessing.compute_gae_for_sample_batch",
error=False,
)
return compute_gae_for_sample_batch(
policy, sample_batch, other_agent_batches, episode
)
|
def add_advantages(policy, sample_batch, other_agent_batches=None, episode=None):
completed = sample_batch[SampleBatch.DONES][-1]
if completed:
last_r = 0.0
else:
last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1])
return compute_advantages(
sample_batch,
last_r,
policy.config["gamma"],
policy.config["lambda"],
policy.config["use_gae"],
policy.config["use_critic"],
)
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def clip_gradients(
policy: Policy, optimizer: "tf.keras.optimizers.Optimizer", loss: TensorType
) -> ModelGradients:
return minimize_and_clip(
optimizer,
loss,
var_list=policy.q_func_vars,
clip_val=policy.config["grad_clip"],
)
|
def clip_gradients(
policy: Policy, optimizer: "tf.keras.optimizers.Optimizer", loss: TensorType
) -> ModelGradients:
if policy.config["grad_clip"] is not None:
grads_and_vars = minimize_and_clip(
optimizer,
loss,
var_list=policy.q_func_vars,
clip_val=policy.config["grad_clip"],
)
else:
grads_and_vars = optimizer.compute_gradients(loss, var_list=policy.q_func_vars)
grads_and_vars = [(g, v) for (g, v) in grads_and_vars if g is not None]
return grads_and_vars
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def get_policy_class(config):
if config["framework"] == "torch":
if config["vtrace"]:
from ray.rllib.agents.impala.vtrace_torch_policy import VTraceTorchPolicy
return VTraceTorchPolicy
else:
from ray.rllib.agents.a3c.a3c_torch_policy import A3CTorchPolicy
return A3CTorchPolicy
else:
if config["vtrace"]:
return VTraceTFPolicy
else:
from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
return A3CTFPolicy
|
def get_policy_class(config):
if config["framework"] == "torch":
if config["vtrace"]:
from ray.rllib.agents.impala.vtrace_torch_policy import VTraceTorchPolicy
return VTraceTorchPolicy
else:
from ray.rllib.agents.a3c.a3c_torch_policy import A3CTorchPolicy
return A3CTorchPolicy
else:
if config["vtrace"]:
return VTraceTFPolicy
else:
return A3CTFPolicy
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def postprocess_trajectory(
policy: Policy,
sample_batch: SampleBatch,
other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
episode: Optional[MultiAgentEpisode] = None,
) -> SampleBatch:
"""Postprocesses a trajectory and returns the processed trajectory.
The trajectory contains only data from one episode and from one agent.
- If `config.batch_mode=truncate_episodes` (default), sample_batch may
contain a truncated (at-the-end) episode, in case the
`config.rollout_fragment_length` was reached by the sampler.
- If `config.batch_mode=complete_episodes`, sample_batch will contain
exactly one episode (no matter how long).
New columns can be added to sample_batch and existing ones may be altered.
Args:
policy (Policy): The Policy used to generate the trajectory
(`sample_batch`)
sample_batch (SampleBatch): The SampleBatch to postprocess.
other_agent_batches (Optional[Dict[PolicyID, SampleBatch]]): Optional
dict of AgentIDs mapping to other agents' trajectory data (from the
same episode). NOTE: The other agents use the same policy.
episode (Optional[MultiAgentEpisode]): Optional multi-agent episode
object in which the agents operated.
Returns:
SampleBatch: The postprocessed, modified SampleBatch (or a new one).
"""
if not policy.config["vtrace"]:
sample_batch = compute_gae_for_sample_batch(
policy, sample_batch, other_agent_batches, episode
)
# TODO: (sven) remove this del once we have trajectory view API fully in
# place.
del sample_batch.data["new_obs"] # not used, so save some bandwidth
return sample_batch
|
def postprocess_trajectory(
policy: Policy,
sample_batch: SampleBatch,
other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
episode: Optional[MultiAgentEpisode] = None,
) -> SampleBatch:
"""Postprocesses a trajectory and returns the processed trajectory.
The trajectory contains only data from one episode and from one agent.
- If `config.batch_mode=truncate_episodes` (default), sample_batch may
contain a truncated (at-the-end) episode, in case the
`config.rollout_fragment_length` was reached by the sampler.
- If `config.batch_mode=complete_episodes`, sample_batch will contain
exactly one episode (no matter how long).
New columns can be added to sample_batch and existing ones may be altered.
Args:
policy (Policy): The Policy used to generate the trajectory
(`sample_batch`)
sample_batch (SampleBatch): The SampleBatch to postprocess.
other_agent_batches (Optional[Dict[PolicyID, SampleBatch]]): Optional
dict of AgentIDs mapping to other agents' trajectory data (from the
same episode). NOTE: The other agents use the same policy.
episode (Optional[MultiAgentEpisode]): Optional multi-agent episode
object in which the agents operated.
Returns:
SampleBatch: The postprocessed, modified SampleBatch (or a new one).
"""
if not policy.config["vtrace"]:
sample_batch = postprocess_ppo_gae(
policy, sample_batch, other_agent_batches, episode
)
# TODO: (sven) remove this del once we have trajectory view API fully in
# place.
del sample_batch.data["new_obs"] # not used, so save some bandwidth
return sample_batch
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def postprocess_ppo_gae(
policy: Policy,
sample_batch: SampleBatch,
other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
episode: Optional[MultiAgentEpisode] = None,
) -> SampleBatch:
# Stub serving backward compatibility.
deprecation_warning(
old="rllib.agents.ppo.ppo_tf_policy.postprocess_ppo_gae",
new="rllib.evaluation.postprocessing.compute_gae_for_sample_batch",
error=False,
)
return compute_gae_for_sample_batch(
policy, sample_batch, other_agent_batches, episode
)
|
def postprocess_ppo_gae(
policy: Policy,
sample_batch: SampleBatch,
other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
episode: Optional[MultiAgentEpisode] = None,
) -> SampleBatch:
"""Postprocesses a trajectory and returns the processed trajectory.
The trajectory contains only data from one episode and from one agent.
- If `config.batch_mode=truncate_episodes` (default), sample_batch may
contain a truncated (at-the-end) episode, in case the
`config.rollout_fragment_length` was reached by the sampler.
- If `config.batch_mode=complete_episodes`, sample_batch will contain
exactly one episode (no matter how long).
New columns can be added to sample_batch and existing ones may be altered.
Args:
policy (Policy): The Policy used to generate the trajectory
(`sample_batch`)
sample_batch (SampleBatch): The SampleBatch to postprocess.
other_agent_batches (Optional[Dict[PolicyID, SampleBatch]]): Optional
dict of AgentIDs mapping to other agents' trajectory data (from the
same episode). NOTE: The other agents use the same policy.
episode (Optional[MultiAgentEpisode]): Optional multi-agent episode
object in which the agents operated.
Returns:
SampleBatch: The postprocessed, modified SampleBatch (or a new one).
"""
# Trajectory is actually complete -> last r=0.0.
if sample_batch[SampleBatch.DONES][-1]:
last_r = 0.0
# Trajectory has been truncated -> last r=VF estimate of last obs.
else:
# Input dict is provided to us automatically via the Model's
# requirements. It's a single-timestep (last one in trajectory)
# input_dict.
if policy.config["_use_trajectory_view_api"]:
# Create an input dict according to the Model's requirements.
input_dict = policy.model.get_input_dict(sample_batch, index="last")
last_r = policy._value(**input_dict)
# TODO: (sven) Remove once trajectory view API is all-algo default.
else:
next_state = []
for i in range(policy.num_state_tensors()):
next_state.append(sample_batch["state_out_{}".format(i)][-1])
last_r = policy._value(
sample_batch[SampleBatch.NEXT_OBS][-1],
sample_batch[SampleBatch.ACTIONS][-1],
sample_batch[SampleBatch.REWARDS][-1],
*next_state,
)
# Adds the policy logits, VF preds, and advantages to the batch,
# using GAE ("generalized advantage estimation") or not.
batch = compute_advantages(
sample_batch,
last_r,
policy.config["gamma"],
policy.config["lambda"],
use_gae=policy.config["use_gae"],
use_critic=policy.config.get("use_critic", True),
)
return batch
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def gradients(self, optimizer, loss):
self.gvs = {
k: minimize_and_clip(
optimizer, self.losses[k], self.vars[k], self.config["grad_norm_clipping"]
)
for k, optimizer in self.optimizers.items()
}
return self.gvs["critic"] + self.gvs["actor"]
|
def gradients(self, optimizer, loss):
if self.config["grad_norm_clipping"] is not None:
self.gvs = {
k: minimize_and_clip(
optimizer,
self.losses[k],
self.vars[k],
self.config["grad_norm_clipping"],
)
for k, optimizer in self.optimizers.items()
}
else:
self.gvs = {
k: optimizer.compute_gradients(self.losses[k], self.vars[k])
for k, optimizer in self.optimizers.items()
}
return self.gvs["critic"] + self.gvs["actor"]
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def minimize_and_clip(optimizer, objective, var_list, clip_val=10.0):
"""Minimized `objective` using `optimizer` w.r.t. variables in
`var_list` while ensure the norm of the gradients for each
variable is clipped to `clip_val`
"""
# Accidentally passing values < 0.0 will break all gradients.
assert clip_val is None or clip_val > 0.0, clip_val
if tf.executing_eagerly():
tape = optimizer.tape
grads_and_vars = list(zip(list(tape.gradient(objective, var_list)), var_list))
else:
grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)
return [
(tf.clip_by_norm(g, clip_val) if clip_val is not None else g, v)
for (g, v) in grads_and_vars
if g is not None
]
|
def minimize_and_clip(optimizer, objective, var_list, clip_val=10.0):
"""Minimized `objective` using `optimizer` w.r.t. variables in
`var_list` while ensure the norm of the gradients for each
variable is clipped to `clip_val`
"""
# Accidentally passing values < 0.0 will break all gradients.
assert clip_val > 0.0, clip_val
if tf.executing_eagerly():
tape = optimizer.tape
grads_and_vars = list(zip(list(tape.gradient(objective, var_list)), var_list))
else:
grads_and_vars = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(grads_and_vars):
if grad is not None:
grads_and_vars[i] = (tf.clip_by_norm(grad, clip_val), var)
return grads_and_vars
|
https://github.com/ray-project/ray/issues/9071
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def persistent_id(self, obj):
if isinstance(obj, ray.ObjectRef):
obj_id = obj.binary()
if obj_id not in self.server.object_refs[self.client_id]:
# We're passing back a reference, probably inside a reference.
# Let's hold onto it.
self.server.object_refs[self.client_id][obj_id] = obj
return PickleStub(
type="Object",
client_id=self.client_id,
ref_id=obj_id,
name=None,
baseline_options=None,
)
elif isinstance(obj, ray.actor.ActorHandle):
actor_id = obj._actor_id.binary()
if actor_id not in self.server.actor_refs:
# We're passing back a handle, probably inside a reference.
self.server.actor_refs[actor_id] = obj
if actor_id not in self.server.actor_owners[self.client_id]:
self.server.actor_owners[self.client_id].add(actor_id)
return PickleStub(
type="Actor",
client_id=self.client_id,
ref_id=obj._actor_id.binary(),
name=None,
baseline_options=None,
)
return None
|
def persistent_id(self, obj):
if isinstance(obj, ray.ObjectRef):
obj_id = obj.binary()
if obj_id not in self.server.object_refs[self.client_id]:
# We're passing back a reference, probably inside a reference.
# Let's hold onto it.
self.server.object_refs[self.client_id][obj_id] = obj
return PickleStub(
type="Object",
client_id=self.client_id,
ref_id=obj_id,
name=None,
baseline_options=None,
)
elif isinstance(obj, ray.actor.ActorHandle):
actor_id = obj._actor_id.binary()
if actor_id not in self.server.actor_refs:
# We're passing back a handle, probably inside a reference.
self.actor_refs[actor_id] = obj
if actor_id not in self.actor_owners[self.client_id]:
self.actor_owners[self.client_id].add(actor_id)
return PickleStub(
type="Actor",
client_id=self.client_id,
ref_id=obj._actor_id.binary(),
name=None,
baseline_options=None,
)
return None
|
https://github.com/ray-project/ray/issues/13463
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNKNOWN
details = "Exception iterating responses: 'ServerPickler' object has no attribute 'actor_refs'"
debug_error_string = "{"created":"@1610645797.119742000","description":"Error received from peer ipv6:[
::1]:61234","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Exception iterating response
s: 'ServerPickler' object has no attribute 'actor_refs'","grpc_status":2}"
Exception in thread Thread-5:
Traceback (most recent call last):
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/Users/eoakes/anaconda3/lib/python3.8/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/Users/eoakes/anaconda3/lib/python3.8/site-packages/grpc/_channel.py", line 803, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNKNOWN
details = "Exception iterating responses: 'ServerPickler' object has no attribute 'actor_refs'"
debug_error_string = "{"created":"@1610645797.119742000","description":"Error received from peer ipv6:[::1]:61234","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Exception iterating responses: 'ServerPickler' object has no attribute 'actor_refs'","grpc_status":2}"
^CTraceback (most recent call last):
File "test.py", line 17, in <module>
ray.get(a.get_actor_ref.remote())
File "/Users/eoakes/code/ray/python/ray/_private/client_mode_hook.py", line 46, in wrapper
return getattr(ray, func.__name__)(*args, **kwargs)
File "/Users/eoakes/code/ray/python/ray/util/client/api.py", line 32, in get
return self.worker.get(vals, timeout=timeout)
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 107, in get
out = [self._get(x, timeout) for x in to_get]
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 107, in <listcomp>
out = [self._get(x, timeout) for x in to_get]
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 115, in _get
data = self.data_client.GetObject(req)
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 106, in GetObject
resp = self._blocking_send(datareq)
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 91, in _blocking_send
self.cv.wait_for(lambda: req_id in self.ready_data)
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 337, in wait_for
self.wait(waittime)
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 302, in wait
waiter.acquire()
KeyboardInterrupt
|
debug_error
|
def ClusterInfo(self, request, context=None) -> ray_client_pb2.ClusterInfoResponse:
resp = ray_client_pb2.ClusterInfoResponse()
resp.type = request.type
if request.type == ray_client_pb2.ClusterInfoType.CLUSTER_RESOURCES:
with disable_client_hook():
resources = ray.cluster_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(table=float_resources)
)
elif request.type == ray_client_pb2.ClusterInfoType.AVAILABLE_RESOURCES:
with disable_client_hook():
resources = ray.available_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(table=float_resources)
)
elif request.type == ray_client_pb2.ClusterInfoType.RUNTIME_CONTEXT:
ctx = ray_client_pb2.ClusterInfoResponse.RuntimeContext()
with disable_client_hook():
rtc = ray.get_runtime_context()
ctx.job_id = rtc.job_id.binary()
ctx.node_id = rtc.node_id.binary()
ctx.capture_client_tasks = rtc.should_capture_child_tasks_in_placement_group
resp.runtime_context.CopyFrom(ctx)
else:
with disable_client_hook():
resp.json = self._return_debug_cluster_info(request, context)
return resp
|
def ClusterInfo(self, request, context=None) -> ray_client_pb2.ClusterInfoResponse:
resp = ray_client_pb2.ClusterInfoResponse()
resp.type = request.type
if request.type == ray_client_pb2.ClusterInfoType.CLUSTER_RESOURCES:
with disable_client_hook():
resources = ray.cluster_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(table=float_resources)
)
elif request.type == ray_client_pb2.ClusterInfoType.AVAILABLE_RESOURCES:
with disable_client_hook():
resources = ray.available_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(table=float_resources)
)
else:
with disable_client_hook():
resp.json = self._return_debug_cluster_info(request, context)
return resp
|
https://github.com/ray-project/ray/issues/13414
|
Traceback (most recent call last):
File "test.py", line 4, in <module>
print(ray.get_runtime_context().node_id.hex())
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 58, in node_id
node_id = self.worker.current_node_id
File "/Users/eoakes/code/ray/python/ray/worker.py", line 148, in current_node_id
return self.core_worker.get_current_node_id()
AttributeError: 'Worker' object has no attribute 'core_worker'
|
AttributeError
|
def get_cluster_info(self, type: ray_client_pb2.ClusterInfoType.TypeEnum):
req = ray_client_pb2.ClusterInfoRequest()
req.type = type
resp = self.server.ClusterInfo(req, metadata=self.metadata)
if resp.WhichOneof("response_type") == "resource_table":
# translate from a proto map to a python dict
output_dict = {k: v for k, v in resp.resource_table.table.items()}
return output_dict
elif resp.WhichOneof("response_type") == "runtime_context":
return resp.runtime_context
return json.loads(resp.json)
|
def get_cluster_info(self, type: ray_client_pb2.ClusterInfoType.TypeEnum):
req = ray_client_pb2.ClusterInfoRequest()
req.type = type
resp = self.server.ClusterInfo(req, metadata=self.metadata)
if resp.WhichOneof("response_type") == "resource_table":
# translate from a proto map to a python dict
output_dict = {k: v for k, v in resp.resource_table.table.items()}
return output_dict
return json.loads(resp.json)
|
https://github.com/ray-project/ray/issues/13414
|
Traceback (most recent call last):
File "test.py", line 4, in <module>
print(ray.get_runtime_context().node_id.hex())
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 58, in node_id
node_id = self.worker.current_node_id
File "/Users/eoakes/code/ray/python/ray/worker.py", line 148, in current_node_id
return self.core_worker.get_current_node_id()
AttributeError: 'Worker' object has no attribute 'core_worker'
|
AttributeError
|
def get(self):
"""Get a dictionary of the current context.
Fields that are not available (e.g., actor ID inside a task) won't be
included in the field.
Returns:
dict: Dictionary of the current context.
"""
context = {
"job_id": self.job_id,
"node_id": self.node_id,
}
if self.worker.mode == ray.worker.WORKER_MODE:
if self.task_id is not None:
context["task_id"] = self.task_id
if self.actor_id is not None:
context["actor_id"] = self.actor_id
return context
|
def get(self):
"""Get a dictionary of the current_context.
For fields that are not available (for example actor id inside a task)
won't be included in the field.
Returns:
dict: Dictionary of the current context.
"""
context = {
"job_id": self.job_id,
"node_id": self.node_id,
"task_id": self.task_id,
"actor_id": self.actor_id,
}
# Remove fields that are None.
return {key: value for key, value in context.items() if value is not None}
|
https://github.com/ray-project/ray/issues/13415
|
2021-01-13 14:15:29,011 INFO services.py:1169 -- View the Ray dashboard at http://127.0.0.1:8266
Traceback (most recent call last):
File "test.py", line 4, in <module>
print(ray.get_runtime_context().get())
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 26, in get
"task_id": self.task_id,
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 92, in task_id
assert self.worker.mode == ray.worker.WORKER_MODE, (
AssertionError: This method is only available when the process is a worker. Current mode: 0
|
AssertionError
|
def __init__(
self,
node_ip_address,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None,
):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.ip = node_ip_address
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
# TODO(edoakes): RAY_RAYLET_PID isn't properly set on Windows. This is
# only used for fate-sharing with the raylet and we need a different
# fate-sharing mechanism for Windows anyways.
if sys.platform not in ["win32", "cygwin"]:
self.ppid = int(os.environ["RAY_RAYLET_PID"])
assert self.ppid > 0
logger.info("Parent pid is %s", self.ppid)
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port(f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip, self.grpc_port)
self.aioredis_client = None
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
f"{self.ip}:{self.node_manager_port}"
)
self.http_session = None
|
def __init__(
self,
node_ip_address,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None,
):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.ip = node_ip_address
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
self.ppid = int(os.environ["RAY_RAYLET_PID"])
assert self.ppid > 0
logger.info("Parent pid is %s", self.ppid)
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0),))
self.grpc_port = self.server.add_insecure_port(f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip, self.grpc_port)
self.aioredis_client = None
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
f"{self.ip}:{self.node_manager_port}"
)
self.http_session = None
|
https://github.com/ray-project/ray/issues/12947
|
(pid=None) Traceback (most recent call last):
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\site-packages\ray\new_dashboard/agent.py", line 317, in <module>
(pid=None) raise e
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\site-packages\ray\new_dashboard/agent.py", line 293, in <module>
(pid=None) agent = DashboardAgent(
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\site-packages\ray\new_dashboard/agent.py", line 65, in __init__
(pid=None) self.ppid = int(os.environ["RAY_RAYLET_PID"])
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\os.py", line 675, in __getitem__
(pid=None) raise KeyError(key) from None
(pid=None) KeyError: 'RAY_RAYLET_PID'
|
KeyError
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if parent is None or parent.pid == 1 or self.ppid != parent.pid:
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS
)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
if sys.platform not in ["win32", "cygwin"]:
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
},
)
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host, http_port)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]),
)
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel
)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(), agent_port=self.grpc_port, agent_ip_address=self.ip
)
)
await asyncio.gather(check_parent_task, *(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if parent is None or parent.pid == 1 or self.ppid != parent.pid:
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS
)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address,
self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES,
)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: Failed to connect to redis at %s",
self.redis_address,
)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
},
)
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host, http_port)
# Dump registered http routes.
dump_routes = [r for r in app.router.routes() if r.method != hdrs.METH_HEAD]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]),
)
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel
)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(), agent_port=self.grpc_port, agent_ip_address=self.ip
)
)
await asyncio.gather(check_parent_task, *(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
https://github.com/ray-project/ray/issues/12947
|
(pid=None) Traceback (most recent call last):
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\site-packages\ray\new_dashboard/agent.py", line 317, in <module>
(pid=None) raise e
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\site-packages\ray\new_dashboard/agent.py", line 293, in <module>
(pid=None) agent = DashboardAgent(
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\site-packages\ray\new_dashboard/agent.py", line 65, in __init__
(pid=None) self.ppid = int(os.environ["RAY_RAYLET_PID"])
(pid=None) File "D:\Programs\Anaconda3\envs\ray\lib\os.py", line 675, in __getitem__
(pid=None) raise KeyError(key) from None
(pid=None) KeyError: 'RAY_RAYLET_PID'
|
KeyError
|
def __str__(self):
return (
"The worker died unexpectedly while executing this task. "
"Check python-core-worker-*.log files for more information."
)
|
def __str__(self):
return "The worker died unexpectedly while executing this task."
|
https://github.com/ray-project/ray/issues/11239
|
2020-10-05 01:55:09,393\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \u001b[36mray::PPO.train()\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def __str__(self):
return (
"The actor died unexpectedly before finishing this task. "
"Check python-core-worker-*.log files for more information."
)
|
def __str__(self):
return "The actor died unexpectedly before finishing this task."
|
https://github.com/ray-project/ray/issues/11239
|
2020-10-05 01:55:09,393\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \u001b[36mray::PPO.train()\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def _try_to_compute_deterministic_class_id(cls, depth=5):
"""Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
"""
# Pickling, loading, and pickling again seems to produce more consistent
# results than simply pickling. This is a bit
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if new_class_id == class_id:
# We appear to have reached a fix point, so use this as the ID.
return hashlib.shake_128(new_class_id).digest(ray_constants.ID_SIZE)
class_id = new_class_id
# We have not reached a fixed point, so we may end up with a different
# class ID for this custom class on each worker, which could lead to the
# same class definition being exported many many times.
logger.warning(
f"WARNING: Could not produce a deterministic class ID for class {cls}"
)
return hashlib.shake_128(new_class_id).digest(ray_constants.ID_SIZE)
|
def _try_to_compute_deterministic_class_id(cls, depth=5):
"""Attempt to produce a deterministic class ID for a given class.
The goal here is for the class ID to be the same when this is run on
different worker processes. Pickling, loading, and pickling again seems to
produce more consistent results than simply pickling. This is a bit crazy
and could cause problems, in which case we should revert it and figure out
something better.
Args:
cls: The class to produce an ID for.
depth: The number of times to repeatedly try to load and dump the
string while trying to reach a fixed point.
Returns:
A class ID for this class. We attempt to make the class ID the same
when this function is run on different workers, but that is not
guaranteed.
Raises:
Exception: This could raise an exception if cloudpickle raises an
exception.
"""
# Pickling, loading, and pickling again seems to produce more consistent
# results than simply pickling. This is a bit
class_id = pickle.dumps(cls)
for _ in range(depth):
new_class_id = pickle.dumps(pickle.loads(class_id))
if new_class_id == class_id:
# We appear to have reached a fix point, so use this as the ID.
return hashlib.sha1(new_class_id).digest()
class_id = new_class_id
# We have not reached a fixed point, so we may end up with a different
# class ID for this custom class on each worker, which could lead to the
# same class definition being exported many many times.
logger.warning(
f"WARNING: Could not produce a deterministic class ID for class {cls}"
)
return hashlib.sha1(new_class_id).digest()
|
https://github.com/ray-project/ray/issues/11239
|
2020-10-05 01:55:09,393\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \u001b[36mray::PPO.train()\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def _random_string():
id_hash = hashlib.shake_128()
id_hash.update(uuid.uuid4().bytes)
id_bytes = id_hash.digest(ray_constants.ID_SIZE)
assert len(id_bytes) == ray_constants.ID_SIZE
return id_bytes
|
def _random_string():
id_hash = hashlib.sha1()
id_hash.update(uuid.uuid4().bytes)
id_bytes = id_hash.digest()
assert len(id_bytes) == ray_constants.ID_SIZE
return id_bytes
|
https://github.com/ray-project/ray/issues/11239
|
2020-10-05 01:55:09,393\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \u001b[36mray::PPO.train()\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def run_function_on_all_workers(self, function, run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: The boolean that indicates whether we want to
run this function on other drivers. One case is we may need to
share objects across drivers.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.shake_128(pickled_function).digest(
ray_constants.ID_SIZE
)
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_pickle(pickled_function, function.__name__, "function", self)
# Run the function on all workers.
self.redis_client.hset(
key,
mapping={
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers),
},
)
self.redis_client.rpush("Exports", key)
|
def run_function_on_all_workers(self, function, run_on_other_drivers=False):
"""Run arbitrary code on all of the workers.
This function will first be run on the driver, and then it will be
exported to all of the workers to be run. It will also be run on any
new workers that register later. If ray.init has not been called yet,
then cache the function and export it later.
Args:
function (Callable): The function to run on all of the workers. It
takes only one argument, a worker info dict. If it returns
anything, its return values will not be used.
run_on_other_drivers: The boolean that indicates whether we want to
run this function on other drivers. One case is we may need to
share objects across drivers.
"""
# If ray.init has not been called yet, then cache the function and
# export it when connect is called. Otherwise, run the function on all
# workers.
if self.mode is None:
self.cached_functions_to_run.append(function)
else:
# Attempt to pickle the function before we need it. This could
# fail, and it is more convenient if the failure happens before we
# actually run the function locally.
pickled_function = pickle.dumps(function)
function_to_run_id = hashlib.sha1(pickled_function).digest()
key = b"FunctionsToRun:" + function_to_run_id
# First run the function on the driver.
# We always run the task locally.
function({"worker": self})
# Check if the function has already been put into redis.
function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
if not function_exported:
# In this case, the function has already been exported, so
# we don't need to export it again.
return
check_oversized_pickle(pickled_function, function.__name__, "function", self)
# Run the function on all workers.
self.redis_client.hset(
key,
mapping={
"job_id": self.current_job_id.binary(),
"function_id": function_to_run_id,
"function": pickled_function,
"run_on_other_drivers": str(run_on_other_drivers),
},
)
self.redis_client.rpush("Exports", key)
|
https://github.com/ray-project/ray/issues/11239
|
2020-10-05 01:55:09,393\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \u001b[36mray::PPO.train()\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def _home(self):
if self._home_cached is not None:
return self._home_cached
for _ in range(MAX_HOME_RETRIES - 1):
try:
self._home_cached = self._try_to_get_home()
return self._home_cached
except Exception:
# TODO (Dmitri): Identify the exception we're trying to avoid.
logger.info(
"Error reading container's home directory. "
f"Retrying in {HOME_RETRY_DELAY_S} seconds."
)
time.sleep(HOME_RETRY_DELAY_S)
# Last try
self._home_cached = self._try_to_get_home()
return self._home_cached
|
def _home(self):
# TODO (Dmitri): Think about how to use the node's HOME variable
# without making an extra kubectl exec call.
if self._home_cached is None:
cmd = self.kubectl + ["exec", "-it", self.node_id, "--", "printenv", "HOME"]
joined_cmd = " ".join(cmd)
raw_out = self.process_runner.check_output(joined_cmd, shell=True)
self._home_cached = raw_out.decode().strip("\n\r")
return self._home_cached
|
https://github.com/ray-project/ray/issues/12883
|
Updating cluster configuration. [hash=0194a452ebd82e0ab6eade7b4dd3a4f4f775d5de]
New status: syncing-files
[2/7] Processing file mounts
2020-12-15 09:03:24,116 INFO command_runner.py:169 -- NodeUpdater: ray-head-m5nvd: Running kubectl -n ray exec -it ray-head-m5nvd -- bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (mkdir -p ~)'
Error from server: error dialing backend: EOF
New status: update-failed
!!!
Setup command `kubectl -n ray exec -it ray-head-m5nvd -- printenv HOME` failed with exit code 1. stderr:
!!!
Exception in thread Thread-1:
Traceback (most recent call last):
File "/Users/rliaw/miniconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 124, in run
self.do_update()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 312, in do_update
self.rsync_up, step_numbers=(1, NUM_SETUP_STEPS))
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 210, in sync_file_mounts
do_sync(remote_path, local_path)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 198, in do_sync
local_path, remote_path, docker_mount_if_possible=True)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 446, in rsync_up
self.cmd_runner.run_rsync_up(source, target, options=options)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 197, in run_rsync_up
target = self._home + target[1:]
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 258, in _home
raw_out = self.process_runner.check_output(joined_cmd, shell=True)
File "/Users/rliaw/miniconda3/lib/python3.7/subprocess.py", line 395, in check_output
**kwargs).stdout
File "/Users/rliaw/miniconda3/lib/python3.7/subprocess.py", line 487, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command 'kubectl -n ray exec -it ray-head-m5nvd -- printenv HOME' returned non-zero exit status 1.
Failed to setup head node.
|
subprocess.CalledProcessError
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_temp_dir=None,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None,
):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_java_worker_options: Overwrite the options to start Java workers.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug(
"Automatically increasing RLIMIT_NOFILE to max value of {}".format(hard)
)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft)
)
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
'please call ray.init() or ray.init(address="auto") on the '
"driver."
)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info("Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError(
"Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'."
)
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
java_worker_options=_java_worker_options,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, spawn_reaper=True, ray_params=ray_params
)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided."
)
if resources is not None:
raise ValueError(
"When connecting to an existing cluster, "
"resources must not be provided."
)
if object_store_memory is not None:
raise ValueError(
"When connecting to an existing cluster, "
"object_store_memory must not be provided."
)
if _system_config is not None and len(_system_config) != 0:
raise ValueError(
"When connecting to an existing cluster, "
"_system_config must not be provided."
)
if _lru_evict:
raise ValueError(
"When connecting to an existing cluster, "
"_lru_evict must not be provided."
)
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided."
)
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True,
)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config,
)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_temp_dir=None,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None,
):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_java_worker_options: Overwrite the options to start Java workers.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
logger.debug(
"Automatically increasing RLIMIT_NOFILE to max value of {}".format(hard)
)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft)
)
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
'please call ray.init() or ray.init(address="auto") on the '
"driver."
)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info("Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError(
"Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'."
)
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
java_worker_options=_java_worker_options,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, spawn_reaper=True, ray_params=ray_params
)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided."
)
if resources is not None:
raise ValueError(
"When connecting to an existing cluster, "
"resources must not be provided."
)
if object_store_memory is not None:
raise ValueError(
"When connecting to an existing cluster, "
"object_store_memory must not be provided."
)
if _system_config is not None and len(_system_config) != 0:
raise ValueError(
"When connecting to an existing cluster, "
"_system_config must not be provided."
)
if _lru_evict:
raise ValueError(
"When connecting to an existing cluster, "
"_lru_evict must not be provided."
)
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided."
)
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True,
)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config,
)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
https://github.com/ray-project/ray/issues/12059
|
Traceback (most recent call last):
File "debugging.py", line 2, in <module>
ray.init()
File "/Users/haochen/code/ant_ray/python/ray/worker.py", line 740, in init
ray_params=ray_params)
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 200, in __init__
self.start_head_processes()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 801, in start_head_processes
self.start_redis()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 580, in start_redis
fate_share=self.kernel_fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 720, in start_redis
fate_share=fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 902, in _start_redis_instance
ulimit_n - redis_client_buffer)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 1243, in config_set
return self.execute_command('CONFIG SET', name, value)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 901, in execute_command
return self.parse_response(conn, command_name, **options)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 915, in parse_response
response = connection.read_response()
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/connection.py", line 747, in read_response
raise response
redis.exceptions.ResponseError: The operating system is not able to handle the specified number of clients, try with -33
|
redis.exceptions.ResponseError
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_temp_dir=None,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None,
):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_java_worker_options: Overwrite the options to start Java workers.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug(
"Automatically increasing RLIMIT_NOFILE to max value of {}".format(hard)
)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft)
)
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
'please call ray.init() or ray.init(address="auto") on the '
"driver."
)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info("Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError(
"Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'."
)
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
java_worker_options=_java_worker_options,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, spawn_reaper=True, ray_params=ray_params
)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided."
)
if resources is not None:
raise ValueError(
"When connecting to an existing cluster, "
"resources must not be provided."
)
if object_store_memory is not None:
raise ValueError(
"When connecting to an existing cluster, "
"object_store_memory must not be provided."
)
if _system_config is not None and len(_system_config) != 0:
raise ValueError(
"When connecting to an existing cluster, "
"_system_config must not be provided."
)
if _lru_evict:
raise ValueError(
"When connecting to an existing cluster, "
"_lru_evict must not be provided."
)
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided."
)
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True,
)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config,
)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_temp_dir=None,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None,
):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_java_worker_options: Overwrite the options to start Java workers.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug(
"Automatically increasing RLIMIT_NOFILE to max value of {}".format(hard)
)
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft)
)
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
'please call ray.init() or ray.init(address="auto") on the '
"driver."
)
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info("Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError(
"Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'."
)
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
java_worker_options=_java_worker_options,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True, shutdown_at_exit=False, spawn_reaper=True, ray_params=ray_params
)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided."
)
if resources is not None:
raise ValueError(
"When connecting to an existing cluster, "
"resources must not be provided."
)
if object_store_memory is not None:
raise ValueError(
"When connecting to an existing cluster, "
"object_store_memory must not be provided."
)
if _system_config is not None and len(_system_config) != 0:
raise ValueError(
"When connecting to an existing cluster, "
"_system_config must not be provided."
)
if _lru_evict:
raise ValueError(
"When connecting to an existing cluster, "
"_lru_evict must not be provided."
)
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided."
)
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port,
)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True,
)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config,
)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
https://github.com/ray-project/ray/issues/12059
|
Traceback (most recent call last):
File "debugging.py", line 2, in <module>
ray.init()
File "/Users/haochen/code/ant_ray/python/ray/worker.py", line 740, in init
ray_params=ray_params)
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 200, in __init__
self.start_head_processes()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 801, in start_head_processes
self.start_redis()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 580, in start_redis
fate_share=self.kernel_fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 720, in start_redis
fate_share=fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 902, in _start_redis_instance
ulimit_n - redis_client_buffer)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 1243, in config_set
return self.execute_command('CONFIG SET', name, value)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 901, in execute_command
return self.parse_response(conn, command_name, **options)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 915, in parse_response
response = connection.read_response()
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/connection.py", line 747, in read_response
raise response
redis.exceptions.ResponseError: The operating system is not able to handle the specified number of clients, try with -33
|
redis.exceptions.ResponseError
|
def shutdown(self) -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
if (not self._shutdown) and ray.is_initialized():
ray.get(self._controller.shutdown.remote())
ray.kill(self._controller, no_restart=True)
# Wait for the named actor entry gets removed as well.
started = time.time()
while True:
try:
ray.get_actor(self._controller_name)
if time.time() - started > 5:
logger.warning(
"Waited 5s for Serve to shutdown gracefully but "
"the controller is still not cleaned up. "
"You can ignore this warning if you are shutting "
"down the Ray cluster."
)
break
except ValueError: # actor name is removed
break
self._shutdown = True
|
def shutdown(self) -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
if not self._shutdown:
ray.get(self._controller.shutdown.remote())
ray.kill(self._controller, no_restart=True)
self._shutdown = True
|
https://github.com/ray-project/ray/issues/12214
|
File descriptor limit 256 is too low for production servers and may result in connection errors. At least 8192 is recommended. --- Fix with 'ulimit -n 8192'
2020-11-20 10:38:27,065 INFO services.py:1173 -- View the Ray dashboard at http://127.0.0.1:8265
(pid=raylet) 2020-11-20 10:38:29,628 INFO controller.py:313 -- Starting router with name 'SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:192.168.31.141-0' on node 'node:192.168.31.141-0' listening on '127.0.0.1:8000'
(pid=32847) INFO: Started server process [32847]
Traceback (most recent call last):
File "detached.py", line 5, in <module>
client = serve.start(detached=True)
File "/Users/simonmo/Desktop/ray/ray/python/ray/serve/api.py", line 414, in start
raise RayServeException("Called serve.start(detached=True) but a "
ray.serve.exceptions.RayServeException: Called serve.start(detached=True) but a detached instance is already running. Please use serve.connect() to connect to the running instance instead.
|
ray.serve.exceptions.RayServeException
|
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
loss_fn: Callable[
[Policy, ModelV2, Type[TFActionDistribution], SampleBatch], TensorType
],
*,
stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[str, TensorType]]] = None,
grad_stats_fn: Optional[
Callable[[Policy, SampleBatch, ModelGradients], Dict[str, TensorType]]
] = None,
before_loss_init: Optional[
Callable[[Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict], None]
] = None,
make_model: Optional[
Callable[
[Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict], ModelV2
]
] = None,
action_sampler_fn: Optional[
Callable[[TensorType, List[TensorType]], Tuple[TensorType, TensorType]]
] = None,
action_distribution_fn: Optional[
Callable[
[Policy, ModelV2, TensorType, TensorType, TensorType],
Tuple[TensorType, type, List[TensorType]],
]
] = None,
existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
existing_model: Optional[ModelV2] = None,
get_batch_divisibility_req: Optional[Callable[[Policy], int]] = None,
obs_include_prev_action_reward: bool = True,
):
"""Initialize a dynamic TF policy.
Args:
observation_space (gym.spaces.Space): Observation space of the
policy.
action_space (gym.spaces.Space): Action space of the policy.
config (TrainerConfigDict): Policy-specific configuration data.
loss_fn (Callable[[Policy, ModelV2, Type[TFActionDistribution],
SampleBatch], TensorType]): Function that returns a loss tensor
for the policy graph.
stats_fn (Optional[Callable[[Policy, SampleBatch],
Dict[str, TensorType]]]): Optional function that returns a dict
of TF fetches given the policy and batch input tensors.
grad_stats_fn (Optional[Callable[[Policy, SampleBatch,
ModelGradients], Dict[str, TensorType]]]):
Optional function that returns a dict of TF fetches given the
policy, sample batch, and loss gradient tensors.
before_loss_init (Optional[Callable[
[Policy, gym.spaces.Space, gym.spaces.Space,
TrainerConfigDict], None]]): Optional function to run prior to
loss init that takes the same arguments as __init__.
make_model (Optional[Callable[[Policy, gym.spaces.Space,
gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional
function that returns a ModelV2 object given
policy, obs_space, action_space, and policy config.
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (Optional[Callable[[Policy, ModelV2, Dict[
str, TensorType], TensorType, TensorType], Tuple[TensorType,
TensorType]]]): A callable returning a sampled action and its
log-likelihood given Policy, ModelV2, input_dict, explore,
timestep, and is_training.
action_distribution_fn (Optional[Callable[[Policy, ModelV2,
Dict[str, TensorType], TensorType, TensorType],
Tuple[TensorType, type, List[TensorType]]]]): A callable
returning distribution inputs (parameters), a dist-class to
generate an action distribution object from, and
internal-state outputs (or an empty list if not applicable).
Note: No Exploration hooks have to be called from within
`action_distribution_fn`. It's should only perform a simple
forward pass through some model.
If None, pass inputs through `self.model()` to get distribution
inputs.
The callable takes as inputs: Policy, ModelV2, input_dict,
explore, timestep, is_training.
existing_inputs (Optional[Dict[str, tf1.placeholder]]): When
copying a policy, this specifies an existing dict of
placeholders to use instead of defining new ones.
existing_model (Optional[ModelV2]): When copying a policy, this
specifies an existing model to clone and share weights with.
get_batch_divisibility_req (Optional[Callable[[Policy], int]]):
Optional callable that returns the divisibility requirement for
sample batches. If None, will assume a value of 1.
obs_include_prev_action_reward (bool): Whether to include the
previous action and reward in the model input (default: True).
"""
self.observation_space = obs_space
self.action_space = action_space
self.config = config
self.framework = "tf"
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._obs_include_prev_action_reward = obs_include_prev_action_reward
dist_class = dist_inputs = None
if action_sampler_fn or action_distribution_fn:
if not make_model:
raise ValueError(
"`make_model` is required if `action_sampler_fn` OR "
"`action_distribution_fn` is given"
)
else:
dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"]
)
# Setup self.model.
if existing_model:
self.model = existing_model
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=logit_dim,
model_config=self.config["model"],
framework="tf",
)
# Auto-update model's inference view requirements, if recurrent.
self._update_model_inference_view_requirements_from_init_state()
if existing_inputs:
self._state_inputs = [
v for k, v in existing_inputs.items() if k.startswith("state_in_")
]
if self._state_inputs:
self._seq_lens = existing_inputs["seq_lens"]
else:
if self.config["_use_trajectory_view_api"]:
self._state_inputs = [
tf1.placeholder(shape=(None,) + vr.space.shape, dtype=vr.space.dtype)
for k, vr in self.model.inference_view_requirements.items()
if k[:9] == "state_in_"
]
else:
self._state_inputs = [
tf1.placeholder(shape=(None,) + s.shape, dtype=s.dtype)
for s in self.model.get_initial_state()
]
# Use default settings.
# Add NEXT_OBS, STATE_IN_0.., and others.
self.view_requirements = self._get_default_view_requirements()
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.inference_view_requirements)
# Setup standard placeholders.
if existing_inputs is not None:
timestep = existing_inputs["timestep"]
explore = existing_inputs["is_exploring"]
self._input_dict, self._dummy_batch = self._get_input_dict_and_dummy_batch(
self.view_requirements, existing_inputs
)
else:
action_ph = ModelCatalog.get_action_placeholder(action_space)
prev_action_ph = ModelCatalog.get_action_placeholder(
action_space, "prev_action"
)
if self.config["_use_trajectory_view_api"]:
self._input_dict, self._dummy_batch = self._get_input_dict_and_dummy_batch(
self.view_requirements,
{
SampleBatch.ACTIONS: action_ph,
SampleBatch.PREV_ACTIONS: prev_action_ph,
},
)
else:
self._input_dict = {
SampleBatch.CUR_OBS: tf1.placeholder(
tf.float32, shape=[None] + list(obs_space.shape), name="observation"
)
}
self._input_dict[SampleBatch.ACTIONS] = action_ph
if self._obs_include_prev_action_reward:
self._input_dict.update(
{
SampleBatch.PREV_ACTIONS: prev_action_ph,
SampleBatch.PREV_REWARDS: tf1.placeholder(
tf.float32, [None], name="prev_reward"
),
}
)
# Placeholder for (sampling steps) timestep (int).
timestep = tf1.placeholder_with_default(
tf.zeros((), dtype=tf.int64), (), name="timestep"
)
# Placeholder for `is_exploring` flag.
explore = tf1.placeholder_with_default(True, (), name="is_exploring")
# Placeholder for RNN time-chunk valid lengths.
self._seq_lens = tf1.placeholder(dtype=tf.int32, shape=[None], name="seq_lens")
# Placeholder for `is_training` flag.
self._input_dict["is_training"] = self._get_is_training_placeholder()
# Create the Exploration object to use for this Policy.
self.exploration = self._create_exploration()
# Fully customized action generation (e.g., custom policy).
if action_sampler_fn:
sampled_action, sampled_action_logp = action_sampler_fn(
self,
self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict["is_training"],
)
else:
# Distribution generation is customized, e.g., DQN, DDPG.
if action_distribution_fn:
dist_inputs, dist_class, self._state_out = action_distribution_fn(
self,
self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict["is_training"],
)
# Default distribution generation behavior:
# Pass through model. E.g., PG, PPO.
else:
dist_inputs, self._state_out = self.model(
self._input_dict, self._state_inputs, self._seq_lens
)
action_dist = dist_class(dist_inputs, self.model)
# Using exploration to get final action (e.g. via sampling).
sampled_action, sampled_action_logp = self.exploration.get_exploration_action(
action_distribution=action_dist, timestep=timestep, explore=explore
)
# Phase 1 init.
sess = tf1.get_default_session() or tf1.Session()
batch_divisibility_req = (
get_batch_divisibility_req(self)
if callable(get_batch_divisibility_req)
else (get_batch_divisibility_req or 1)
)
super().__init__(
observation_space=obs_space,
action_space=action_space,
config=config,
sess=sess,
obs_input=self._input_dict[SampleBatch.OBS],
action_input=self._input_dict[SampleBatch.ACTIONS],
sampled_action=sampled_action,
sampled_action_logp=sampled_action_logp,
dist_inputs=dist_inputs,
dist_class=dist_class,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_inputs,
state_outputs=self._state_out,
prev_action_input=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_input=self._input_dict.get(SampleBatch.PREV_REWARDS),
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req,
explore=explore,
timestep=timestep,
)
# Phase 2 init.
if before_loss_init is not None:
before_loss_init(self, obs_space, action_space, config)
# Loss initialization and model/postprocessing test calls.
if not existing_inputs:
self._initialize_loss_from_dummy_batch(auto_remove_unneeded_view_reqs=True)
|
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
loss_fn: Callable[
[Policy, ModelV2, Type[TFActionDistribution], SampleBatch], TensorType
],
*,
stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[str, TensorType]]] = None,
grad_stats_fn: Optional[
Callable[[Policy, SampleBatch, ModelGradients], Dict[str, TensorType]]
] = None,
before_loss_init: Optional[
Callable[[Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict], None]
] = None,
make_model: Optional[
Callable[
[Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict], ModelV2
]
] = None,
action_sampler_fn: Optional[
Callable[[TensorType, List[TensorType]], Tuple[TensorType, TensorType]]
] = None,
action_distribution_fn: Optional[
Callable[
[Policy, ModelV2, TensorType, TensorType, TensorType],
Tuple[TensorType, type, List[TensorType]],
]
] = None,
existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
existing_model: Optional[ModelV2] = None,
get_batch_divisibility_req: Optional[Callable[[Policy], int]] = None,
obs_include_prev_action_reward: bool = True,
):
"""Initialize a dynamic TF policy.
Args:
observation_space (gym.spaces.Space): Observation space of the
policy.
action_space (gym.spaces.Space): Action space of the policy.
config (TrainerConfigDict): Policy-specific configuration data.
loss_fn (Callable[[Policy, ModelV2, Type[TFActionDistribution],
SampleBatch], TensorType]): Function that returns a loss tensor
for the policy graph.
stats_fn (Optional[Callable[[Policy, SampleBatch],
Dict[str, TensorType]]]): Optional function that returns a dict
of TF fetches given the policy and batch input tensors.
grad_stats_fn (Optional[Callable[[Policy, SampleBatch,
ModelGradients], Dict[str, TensorType]]]):
Optional function that returns a dict of TF fetches given the
policy, sample batch, and loss gradient tensors.
before_loss_init (Optional[Callable[
[Policy, gym.spaces.Space, gym.spaces.Space,
TrainerConfigDict], None]]): Optional function to run prior to
loss init that takes the same arguments as __init__.
make_model (Optional[Callable[[Policy, gym.spaces.Space,
gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional
function that returns a ModelV2 object given
policy, obs_space, action_space, and policy config.
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (Optional[Callable[[Policy, ModelV2, Dict[
str, TensorType], TensorType, TensorType], Tuple[TensorType,
TensorType]]]): A callable returning a sampled action and its
log-likelihood given Policy, ModelV2, input_dict, explore,
timestep, and is_training.
action_distribution_fn (Optional[Callable[[Policy, ModelV2,
Dict[str, TensorType], TensorType, TensorType],
Tuple[TensorType, type, List[TensorType]]]]): A callable
returning distribution inputs (parameters), a dist-class to
generate an action distribution object from, and
internal-state outputs (or an empty list if not applicable).
Note: No Exploration hooks have to be called from within
`action_distribution_fn`. It's should only perform a simple
forward pass through some model.
If None, pass inputs through `self.model()` to get distribution
inputs.
The callable takes as inputs: Policy, ModelV2, input_dict,
explore, timestep, is_training.
existing_inputs (Optional[Dict[str, tf1.placeholder]]): When
copying a policy, this specifies an existing dict of
placeholders to use instead of defining new ones.
existing_model (Optional[ModelV2]): When copying a policy, this
specifies an existing model to clone and share weights with.
get_batch_divisibility_req (Optional[Callable[[Policy], int]]):
Optional callable that returns the divisibility requirement for
sample batches. If None, will assume a value of 1.
obs_include_prev_action_reward (bool): Whether to include the
previous action and reward in the model input (default: True).
"""
self.observation_space = obs_space
self.action_space = action_space
self.config = config
self.framework = "tf"
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._obs_include_prev_action_reward = obs_include_prev_action_reward
dist_class = dist_inputs = None
if action_sampler_fn or action_distribution_fn:
if not make_model:
raise ValueError(
"`make_model` is required if `action_sampler_fn` OR "
"`action_distribution_fn` is given"
)
else:
dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"]
)
# Setup self.model.
if existing_model:
self.model = existing_model
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=logit_dim,
model_config=self.config["model"],
framework="tf",
)
# Auto-update model's inference view requirements, if recurrent.
self._update_model_inference_view_requirements_from_init_state()
if existing_inputs:
self._state_inputs = [
v for k, v in existing_inputs.items() if k.startswith("state_in_")
]
if self._state_inputs:
self._seq_lens = existing_inputs["seq_lens"]
else:
if self.config["_use_trajectory_view_api"]:
self._state_inputs = [
tf1.placeholder(shape=(None,) + vr.space.shape, dtype=vr.space.dtype)
for k, vr in self.model.inference_view_requirements.items()
if k[:9] == "state_in_"
]
else:
self._state_inputs = [
tf1.placeholder(shape=(None,) + s.shape, dtype=s.dtype)
for s in self.model.get_initial_state()
]
# Use default settings.
# Add NEXT_OBS, STATE_IN_0.., and others.
self.view_requirements = self._get_default_view_requirements()
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.inference_view_requirements)
# Setup standard placeholders.
if existing_inputs is not None:
timestep = existing_inputs["timestep"]
explore = existing_inputs["is_exploring"]
self._input_dict, self._dummy_batch = self._get_input_dict_and_dummy_batch(
self.view_requirements, existing_inputs
)
else:
action_ph = ModelCatalog.get_action_placeholder(action_space)
prev_action_ph = ModelCatalog.get_action_placeholder(
action_space, "prev_action"
)
if self.config["_use_trajectory_view_api"]:
self._input_dict, self._dummy_batch = self._get_input_dict_and_dummy_batch(
self.view_requirements,
{
SampleBatch.ACTIONS: action_ph,
SampleBatch.PREV_ACTIONS: prev_action_ph,
},
)
else:
self._input_dict = {
SampleBatch.CUR_OBS: tf1.placeholder(
tf.float32, shape=[None] + list(obs_space.shape), name="observation"
)
}
self._input_dict[SampleBatch.ACTIONS] = action_ph
if self._obs_include_prev_action_reward:
self._input_dict.update(
{
SampleBatch.PREV_ACTIONS: prev_action_ph,
SampleBatch.PREV_REWARDS: tf1.placeholder(
tf.float32, [None], name="prev_reward"
),
}
)
# Placeholder for (sampling steps) timestep (int).
timestep = tf1.placeholder(tf.int64, (), name="timestep")
# Placeholder for `is_exploring` flag.
explore = tf1.placeholder_with_default(True, (), name="is_exploring")
# Placeholder for RNN time-chunk valid lengths.
self._seq_lens = tf1.placeholder(dtype=tf.int32, shape=[None], name="seq_lens")
# Placeholder for `is_training` flag.
self._input_dict["is_training"] = self._get_is_training_placeholder()
# Create the Exploration object to use for this Policy.
self.exploration = self._create_exploration()
# Fully customized action generation (e.g., custom policy).
if action_sampler_fn:
sampled_action, sampled_action_logp = action_sampler_fn(
self,
self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict["is_training"],
)
else:
# Distribution generation is customized, e.g., DQN, DDPG.
if action_distribution_fn:
dist_inputs, dist_class, self._state_out = action_distribution_fn(
self,
self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict["is_training"],
)
# Default distribution generation behavior:
# Pass through model. E.g., PG, PPO.
else:
dist_inputs, self._state_out = self.model(
self._input_dict, self._state_inputs, self._seq_lens
)
action_dist = dist_class(dist_inputs, self.model)
# Using exploration to get final action (e.g. via sampling).
sampled_action, sampled_action_logp = self.exploration.get_exploration_action(
action_distribution=action_dist, timestep=timestep, explore=explore
)
# Phase 1 init.
sess = tf1.get_default_session() or tf1.Session()
batch_divisibility_req = (
get_batch_divisibility_req(self)
if callable(get_batch_divisibility_req)
else (get_batch_divisibility_req or 1)
)
super().__init__(
observation_space=obs_space,
action_space=action_space,
config=config,
sess=sess,
obs_input=self._input_dict[SampleBatch.OBS],
action_input=self._input_dict[SampleBatch.ACTIONS],
sampled_action=sampled_action,
sampled_action_logp=sampled_action_logp,
dist_inputs=dist_inputs,
dist_class=dist_class,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_inputs,
state_outputs=self._state_out,
prev_action_input=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_input=self._input_dict.get(SampleBatch.PREV_REWARDS),
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req,
explore=explore,
timestep=timestep,
)
# Phase 2 init.
if before_loss_init is not None:
before_loss_init(self, obs_space, action_space, config)
# Loss initialization and model/postprocessing test calls.
if not existing_inputs:
self._initialize_loss_from_dummy_batch(auto_remove_unneeded_view_reqs=True)
|
https://github.com/ray-project/ray/issues/12244
|
WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
Traceback (most recent call last):
File "minimal.py", line 27, in <module>
tf.saved_model.load("exported_model")
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load
return load_internal(export_dir, tags, options)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal
root = load_v1_in_v2.load(export_dir, tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load
return loader.load(tags=tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune
base_graph=self._func_graph)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph
add_sources=add_sources))
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
tensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature's outputs use. An output for signature 'serving_default' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).
Unable to lift tensor <tf.Tensor 'policy_01/cond_2/Merge:0' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation 'policy_01/timestep' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)
|
tensorflow.python.ops.op_selector.UnliftableError
|
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
sess: "tf1.Session",
obs_input: TensorType,
sampled_action: TensorType,
loss: TensorType,
loss_inputs: List[Tuple[str, TensorType]],
model: ModelV2 = None,
sampled_action_logp: Optional[TensorType] = None,
action_input: Optional[TensorType] = None,
log_likelihood: Optional[TensorType] = None,
dist_inputs: Optional[TensorType] = None,
dist_class: Optional[type] = None,
state_inputs: Optional[List[TensorType]] = None,
state_outputs: Optional[List[TensorType]] = None,
prev_action_input: Optional[TensorType] = None,
prev_reward_input: Optional[TensorType] = None,
seq_lens: Optional[TensorType] = None,
max_seq_len: int = 20,
batch_divisibility_req: int = 1,
update_ops: List[TensorType] = None,
explore: Optional[TensorType] = None,
timestep: Optional[TensorType] = None,
):
"""Initializes a Policy object.
Args:
observation_space (gym.spaces.Space): Observation space of the env.
action_space (gym.spaces.Space): Action space of the env.
config (TrainerConfigDict): The Policy config dict.
sess (tf1.Session): The TensorFlow session to use.
obs_input (TensorType): Input placeholder for observations, of
shape [BATCH_SIZE, obs...].
sampled_action (TensorType): Tensor for sampling an action, of
shape [BATCH_SIZE, action...]
loss (TensorType): Scalar policy loss output tensor.
loss_inputs (List[Tuple[str, TensorType]]): A (name, placeholder)
tuple for each loss input argument. Each placeholder name must
correspond to a SampleBatch column key returned by
postprocess_trajectory(), and has shape [BATCH_SIZE, data...].
These keys will be read from postprocessed sample batches and
fed into the specified placeholders during loss computation.
model (ModelV2): used to integrate custom losses and
stats from user-defined RLlib models.
sampled_action_logp (Optional[TensorType]): log probability of the
sampled action.
action_input (Optional[TensorType]): Input placeholder for actions
for logp/log-likelihood calculations.
log_likelihood (Optional[TensorType]): Tensor to calculate the
log_likelihood (given action_input and obs_input).
dist_class (Optional[type]): An optional ActionDistribution class
to use for generating a dist object from distribution inputs.
dist_inputs (Optional[TensorType]): Tensor to calculate the
distribution inputs/parameters.
state_inputs (Optional[List[TensorType]]): List of RNN state input
Tensors.
state_outputs (Optional[List[TensorType]]): List of RNN state
output Tensors.
prev_action_input (Optional[TensorType]): placeholder for previous
actions.
prev_reward_input (Optional[TensorType]): placeholder for previous
rewards.
seq_lens (Optional[TensorType]): Placeholder for RNN sequence
lengths, of shape [NUM_SEQUENCES].
Note that NUM_SEQUENCES << BATCH_SIZE. See
policy/rnn_sequencing.py for more information.
max_seq_len (int): Max sequence length for LSTM training.
batch_divisibility_req (int): pad all agent experiences batches to
multiples of this value. This only has an effect if not using
a LSTM model.
update_ops (List[TensorType]): override the batchnorm update ops to
run when applying gradients. Otherwise we run all update ops
found in the current variable scope.
explore (Optional[TensorType]): Placeholder for `explore` parameter
into call to Exploration.get_exploration_action.
timestep (Optional[TensorType]): Placeholder for the global
sampling timestep.
"""
self.framework = "tf"
super().__init__(observation_space, action_space, config)
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_training = False
assert model is None or isinstance(model, ModelV2), (
"Model classes for TFPolicy other than `ModelV2` not allowed! "
"You passed in {}.".format(model)
)
self.model = model
# Auto-update model's inference view requirements, if recurrent.
if self.model is not None:
self._update_model_inference_view_requirements_from_init_state()
self.exploration = self._create_exploration()
self._sess = sess
self._obs_input = obs_input
self._prev_action_input = prev_action_input
self._prev_reward_input = prev_reward_input
self._sampled_action = sampled_action
self._is_training = self._get_is_training_placeholder()
self._is_exploring = (
explore
if explore is not None
else tf1.placeholder_with_default(True, (), name="is_exploring")
)
self._sampled_action_logp = sampled_action_logp
self._sampled_action_prob = (
tf.math.exp(self._sampled_action_logp)
if self._sampled_action_logp is not None
else None
)
self._action_input = action_input # For logp calculations.
self._dist_inputs = dist_inputs
self.dist_class = dist_class
self._state_inputs = state_inputs or []
self._state_outputs = state_outputs or []
self._seq_lens = seq_lens
self._max_seq_len = max_seq_len
if len(self._state_inputs) != len(self._state_outputs):
raise ValueError(
"Number of state input and output tensors must match, got: {} vs {}".format(
self._state_inputs, self._state_outputs
)
)
if len(self.get_initial_state()) != len(self._state_inputs):
raise ValueError(
"Length of initial state must match number of state inputs, "
"got: {} vs {}".format(self.get_initial_state(), self._state_inputs)
)
if self._state_inputs and self._seq_lens is None:
raise ValueError("seq_lens tensor must be given if state inputs are defined")
self._batch_divisibility_req = batch_divisibility_req
self._update_ops = update_ops
self._apply_op = None
self._stats_fetches = {}
self._timestep = (
timestep
if timestep is not None
else tf1.placeholder_with_default(
tf.zeros((), dtype=tf.int64), (), name="timestep"
)
)
self._optimizer = None
self._grads_and_vars = None
self._grads = None
# Policy tf-variables (weights), whose values to get/set via
# get_weights/set_weights.
self._variables = None
# Local optimizer's tf-variables (e.g. state vars for Adam).
# Will be stored alongside `self._variables` when checkpointing.
self._optimizer_variables = None
# The loss tf-op.
self._loss = None
# A batch dict passed into loss function as input.
self._loss_input_dict = {}
if loss is not None:
self._initialize_loss(loss, loss_inputs)
# The log-likelihood calculator op.
self._log_likelihood = log_likelihood
if (
self._log_likelihood is None
and self._dist_inputs is not None
and self.dist_class is not None
):
self._log_likelihood = self.dist_class(self._dist_inputs, self.model).logp(
self._action_input
)
|
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
sess: "tf1.Session",
obs_input: TensorType,
sampled_action: TensorType,
loss: TensorType,
loss_inputs: List[Tuple[str, TensorType]],
model: ModelV2 = None,
sampled_action_logp: Optional[TensorType] = None,
action_input: Optional[TensorType] = None,
log_likelihood: Optional[TensorType] = None,
dist_inputs: Optional[TensorType] = None,
dist_class: Optional[type] = None,
state_inputs: Optional[List[TensorType]] = None,
state_outputs: Optional[List[TensorType]] = None,
prev_action_input: Optional[TensorType] = None,
prev_reward_input: Optional[TensorType] = None,
seq_lens: Optional[TensorType] = None,
max_seq_len: int = 20,
batch_divisibility_req: int = 1,
update_ops: List[TensorType] = None,
explore: Optional[TensorType] = None,
timestep: Optional[TensorType] = None,
):
"""Initializes a Policy object.
Args:
observation_space (gym.spaces.Space): Observation space of the env.
action_space (gym.spaces.Space): Action space of the env.
config (TrainerConfigDict): The Policy config dict.
sess (tf1.Session): The TensorFlow session to use.
obs_input (TensorType): Input placeholder for observations, of
shape [BATCH_SIZE, obs...].
sampled_action (TensorType): Tensor for sampling an action, of
shape [BATCH_SIZE, action...]
loss (TensorType): Scalar policy loss output tensor.
loss_inputs (List[Tuple[str, TensorType]]): A (name, placeholder)
tuple for each loss input argument. Each placeholder name must
correspond to a SampleBatch column key returned by
postprocess_trajectory(), and has shape [BATCH_SIZE, data...].
These keys will be read from postprocessed sample batches and
fed into the specified placeholders during loss computation.
model (ModelV2): used to integrate custom losses and
stats from user-defined RLlib models.
sampled_action_logp (Optional[TensorType]): log probability of the
sampled action.
action_input (Optional[TensorType]): Input placeholder for actions
for logp/log-likelihood calculations.
log_likelihood (Optional[TensorType]): Tensor to calculate the
log_likelihood (given action_input and obs_input).
dist_class (Optional[type]): An optional ActionDistribution class
to use for generating a dist object from distribution inputs.
dist_inputs (Optional[TensorType]): Tensor to calculate the
distribution inputs/parameters.
state_inputs (Optional[List[TensorType]]): List of RNN state input
Tensors.
state_outputs (Optional[List[TensorType]]): List of RNN state
output Tensors.
prev_action_input (Optional[TensorType]): placeholder for previous
actions.
prev_reward_input (Optional[TensorType]): placeholder for previous
rewards.
seq_lens (Optional[TensorType]): Placeholder for RNN sequence
lengths, of shape [NUM_SEQUENCES].
Note that NUM_SEQUENCES << BATCH_SIZE. See
policy/rnn_sequencing.py for more information.
max_seq_len (int): Max sequence length for LSTM training.
batch_divisibility_req (int): pad all agent experiences batches to
multiples of this value. This only has an effect if not using
a LSTM model.
update_ops (List[TensorType]): override the batchnorm update ops to
run when applying gradients. Otherwise we run all update ops
found in the current variable scope.
explore (Optional[TensorType]): Placeholder for `explore` parameter
into call to Exploration.get_exploration_action.
timestep (Optional[TensorType]): Placeholder for the global
sampling timestep.
"""
self.framework = "tf"
super().__init__(observation_space, action_space, config)
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_training = False
assert model is None or isinstance(model, ModelV2), (
"Model classes for TFPolicy other than `ModelV2` not allowed! "
"You passed in {}.".format(model)
)
self.model = model
# Auto-update model's inference view requirements, if recurrent.
if self.model is not None:
self._update_model_inference_view_requirements_from_init_state()
self.exploration = self._create_exploration()
self._sess = sess
self._obs_input = obs_input
self._prev_action_input = prev_action_input
self._prev_reward_input = prev_reward_input
self._sampled_action = sampled_action
self._is_training = self._get_is_training_placeholder()
self._is_exploring = (
explore
if explore is not None
else tf1.placeholder_with_default(True, (), name="is_exploring")
)
self._sampled_action_logp = sampled_action_logp
self._sampled_action_prob = (
tf.math.exp(self._sampled_action_logp)
if self._sampled_action_logp is not None
else None
)
self._action_input = action_input # For logp calculations.
self._dist_inputs = dist_inputs
self.dist_class = dist_class
self._state_inputs = state_inputs or []
self._state_outputs = state_outputs or []
self._seq_lens = seq_lens
self._max_seq_len = max_seq_len
if len(self._state_inputs) != len(self._state_outputs):
raise ValueError(
"Number of state input and output tensors must match, got: {} vs {}".format(
self._state_inputs, self._state_outputs
)
)
if len(self.get_initial_state()) != len(self._state_inputs):
raise ValueError(
"Length of initial state must match number of state inputs, "
"got: {} vs {}".format(self.get_initial_state(), self._state_inputs)
)
if self._state_inputs and self._seq_lens is None:
raise ValueError("seq_lens tensor must be given if state inputs are defined")
self._batch_divisibility_req = batch_divisibility_req
self._update_ops = update_ops
self._apply_op = None
self._stats_fetches = {}
self._timestep = (
timestep
if timestep is not None
else tf1.placeholder(tf.int64, (), name="timestep")
)
self._optimizer = None
self._grads_and_vars = None
self._grads = None
# Policy tf-variables (weights), whose values to get/set via
# get_weights/set_weights.
self._variables = None
# Local optimizer's tf-variables (e.g. state vars for Adam).
# Will be stored alongside `self._variables` when checkpointing.
self._optimizer_variables = None
# The loss tf-op.
self._loss = None
# A batch dict passed into loss function as input.
self._loss_input_dict = {}
if loss is not None:
self._initialize_loss(loss, loss_inputs)
# The log-likelihood calculator op.
self._log_likelihood = log_likelihood
if (
self._log_likelihood is None
and self._dist_inputs is not None
and self.dist_class is not None
):
self._log_likelihood = self.dist_class(self._dist_inputs, self.model).logp(
self._action_input
)
|
https://github.com/ray-project/ray/issues/12244
|
WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
Traceback (most recent call last):
File "minimal.py", line 27, in <module>
tf.saved_model.load("exported_model")
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load
return load_internal(export_dir, tags, options)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal
root = load_v1_in_v2.load(export_dir, tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load
return loader.load(tags=tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune
base_graph=self._func_graph)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph
add_sources=add_sources))
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
tensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature's outputs use. An output for signature 'serving_default' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).
Unable to lift tensor <tf.Tensor 'policy_01/cond_2/Merge:0' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation 'policy_01/timestep' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)
|
tensorflow.python.ops.op_selector.UnliftableError
|
def try_import_tf(error=False):
"""Tries importing tf and returns the module (or None).
Args:
error (bool): Whether to raise an error if tf cannot be imported.
Returns:
Tuple:
- tf1.x module (either from tf2.x.compat.v1 OR as tf1.x).
- tf module (resulting from `import tensorflow`).
Either tf1.x or 2.x.
- The actually installed tf version as int: 1 or 2.
Raises:
ImportError: If error=True and tf is not installed.
"""
# Make sure, these are reset after each test case
# that uses them: del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
logger.warning("Not importing TensorFlow for test purposes")
return None, None, None
if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Try to reuse already imported tf module. This will avoid going through
# the initial import steps below and thereby switching off v2_behavior
# (switching off v2 behavior twice breaks all-framework tests for eager).
was_imported = False
if "tensorflow" in sys.modules:
tf_module = sys.modules["tensorflow"]
was_imported = True
else:
try:
import tensorflow as tf_module
except ImportError as e:
if error:
raise e
return None, None, None
# Try "reducing" tf to tf.compat.v1.
try:
tf1_module = tf_module.compat.v1
if not was_imported:
tf1_module.disable_v2_behavior()
tf1_module.enable_resource_variables()
# No compat.v1 -> return tf as is.
except AttributeError:
tf1_module = tf_module
if not hasattr(tf_module, "__version__"):
version = 1 # sphinx doc gen
else:
version = 2 if "2." in tf_module.__version__[:2] else 1
return tf1_module, tf_module, version
|
def try_import_tf(error=False):
"""Tries importing tf and returns the module (or None).
Args:
error (bool): Whether to raise an error if tf cannot be imported.
Returns:
Tuple:
- tf1.x module (either from tf2.x.compat.v1 OR as tf1.x).
- tf module (resulting from `import tensorflow`).
Either tf1.x or 2.x.
- The actually installed tf version as int: 1 or 2.
Raises:
ImportError: If error=True and tf is not installed.
"""
# Make sure, these are reset after each test case
# that uses them: del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
logger.warning("Not importing TensorFlow for test purposes")
return None, None, None
if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Try to reuse already imported tf module. This will avoid going through
# the initial import steps below and thereby switching off v2_behavior
# (switching off v2 behavior twice breaks all-framework tests for eager).
was_imported = False
if "tensorflow" in sys.modules:
tf_module = sys.modules["tensorflow"]
was_imported = True
else:
try:
import tensorflow as tf_module
except ImportError as e:
if error:
raise e
return None, None, None
# Try "reducing" tf to tf.compat.v1.
try:
tf1_module = tf_module.compat.v1
if not was_imported:
tf1_module.disable_v2_behavior()
# No compat.v1 -> return tf as is.
except AttributeError:
tf1_module = tf_module
if not hasattr(tf_module, "__version__"):
version = 1 # sphinx doc gen
else:
version = 2 if "2." in tf_module.__version__[:2] else 1
return tf1_module, tf_module, version
|
https://github.com/ray-project/ray/issues/12244
|
WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
Traceback (most recent call last):
File "minimal.py", line 27, in <module>
tf.saved_model.load("exported_model")
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load
return load_internal(export_dir, tags, options)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal
root = load_v1_in_v2.load(export_dir, tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load
return loader.load(tags=tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune
base_graph=self._func_graph)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph
add_sources=add_sources))
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
tensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature's outputs use. An output for signature 'serving_default' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).
Unable to lift tensor <tf.Tensor 'policy_01/cond_2/Merge:0' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation 'policy_01/timestep' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)
|
tensorflow.python.ops.op_selector.UnliftableError
|
def build_trainer(
name: str,
*,
default_config: Optional[TrainerConfigDict] = None,
validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,
default_policy: Optional[Type[Policy]] = None,
get_policy_class: Optional[
Callable[[TrainerConfigDict], Optional[Type[Policy]]]
] = None,
validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,
before_init: Optional[Callable[[Trainer], None]] = None,
after_init: Optional[Callable[[Trainer], None]] = None,
before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,
mixins: Optional[List[type]] = None,
execution_plan: Optional[
Callable[[WorkerSet, TrainerConfigDict], Iterable[ResultDict]]
] = default_execution_plan,
) -> Type[Trainer]:
"""Helper function for defining a custom trainer.
Functions will be run in this order to initialize the trainer:
1. Config setup: validate_config, get_policy
2. Worker setup: before_init, execution_plan
3. Post setup: after_init
Args:
name (str): name of the trainer (e.g., "PPO")
default_config (Optional[TrainerConfigDict]): The default config dict
of the algorithm, otherwise uses the Trainer default config.
validate_config (Optional[Callable[[TrainerConfigDict], None]]):
Optional callable that takes the config to check for correctness.
It may mutate the config as needed.
default_policy (Optional[Type[Policy]]): The default Policy class to
use if `get_policy_class` returns None.
get_policy_class (Optional[Callable[
TrainerConfigDict, Optional[Type[Policy]]]]): Optional callable
that takes a config and returns the policy class or None. If None
is returned, will use `default_policy` (which must be provided
then).
validate_env (Optional[Callable[[EnvType, EnvContext], None]]):
Optional callable to validate the generated environment (only
on worker=0).
before_init (Optional[Callable[[Trainer], None]]): Optional callable to
run before anything is constructed inside Trainer (Workers with
Policies, execution plan, etc..). Takes the Trainer instance as
argument.
after_init (Optional[Callable[[Trainer], None]]): Optional callable to
run at the end of trainer init (after all Workers and the exec.
plan have been constructed). Takes the Trainer instance as
argument.
before_evaluate_fn (Optional[Callable[[Trainer], None]]): Callback to
run before evaluation. This takes the trainer instance as argument.
mixins (list): list of any class mixins for the returned trainer class.
These mixins will be applied in order and will have higher
precedence than the Trainer class.
execution_plan (Optional[Callable[[WorkerSet, TrainerConfigDict],
Iterable[ResultDict]]]): Optional callable that sets up the
distributed execution workflow.
Returns:
Type[Trainer]: A Trainer sub-class configured by the specified args.
"""
original_kwargs = locals().copy()
base = add_mixins(Trainer, mixins)
class trainer_cls(base):
_name = name
_default_config = default_config or COMMON_CONFIG
_policy_class = default_policy
def __init__(self, config=None, env=None, logger_creator=None):
Trainer.__init__(self, config, env, logger_creator)
def _init(
self,
config: TrainerConfigDict,
env_creator: Callable[[EnvConfigDict], EnvType],
):
# Validate config via custom validation function.
if validate_config:
validate_config(config)
# No `get_policy_class` function.
if get_policy_class is None:
# Default_policy must be provided (unless in multi-agent mode,
# where each policy can have its own default policy class.
if not config["multiagent"]["policies"]:
assert default_policy is not None
self._policy_class = default_policy
# Query the function for a class to use.
else:
self._policy_class = get_policy_class(config)
# If None returned, use default policy (must be provided).
if self._policy_class is None:
assert default_policy is not None
self._policy_class = default_policy
if before_init:
before_init(self)
# Creating all workers (excluding evaluation workers).
self.workers = self._make_workers(
env_creator=env_creator,
validate_env=validate_env,
policy_class=self._policy_class,
config=config,
num_workers=self.config["num_workers"],
)
self.execution_plan = execution_plan
self.train_exec_impl = execution_plan(self.workers, config)
if after_init:
after_init(self)
@override(Trainer)
def step(self):
res = next(self.train_exec_impl)
return res
@override(Trainer)
def _before_evaluate(self):
if before_evaluate_fn:
before_evaluate_fn(self)
@override(Trainer)
def __getstate__(self):
state = Trainer.__getstate__(self)
state["train_exec_impl"] = self.train_exec_impl.shared_metrics.get().save()
return state
@override(Trainer)
def __setstate__(self, state):
Trainer.__setstate__(self, state)
self.train_exec_impl.shared_metrics.get().restore(state["train_exec_impl"])
@staticmethod
@override(Trainer)
def with_updates(**overrides) -> Type[Trainer]:
"""Build a copy of this trainer class with the specified overrides.
Keyword Args:
overrides (dict): use this to override any of the arguments
originally passed to build_trainer() for this policy.
Returns:
Type[Trainer]: A the Trainer sub-class using `original_kwargs`
and `overrides`.
Examples:
>>> MyClass = SomeOtherClass.with_updates({"name": "Mine"})
>>> issubclass(MyClass, SomeOtherClass)
... False
>>> issubclass(MyClass, Trainer)
... True
"""
return build_trainer(**dict(original_kwargs, **overrides))
trainer_cls.__name__ = name
trainer_cls.__qualname__ = name
return trainer_cls
|
def build_trainer(
name: str,
*,
default_config: Optional[TrainerConfigDict] = None,
validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,
default_policy: Optional[Type[Policy]] = None,
get_policy_class: Optional[
Callable[[TrainerConfigDict], Optional[Type[Policy]]]
] = None,
validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,
before_init: Optional[Callable[[Trainer], None]] = None,
after_init: Optional[Callable[[Trainer], None]] = None,
before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,
mixins: Optional[List[type]] = None,
execution_plan: Optional[
Callable[[WorkerSet, TrainerConfigDict], Iterable[ResultDict]]
] = default_execution_plan,
) -> Type[Trainer]:
"""Helper function for defining a custom trainer.
Functions will be run in this order to initialize the trainer:
1. Config setup: validate_config, get_policy
2. Worker setup: before_init, execution_plan
3. Post setup: after_init
Args:
name (str): name of the trainer (e.g., "PPO")
default_config (Optional[TrainerConfigDict]): The default config dict
of the algorithm, otherwise uses the Trainer default config.
validate_config (Optional[Callable[[TrainerConfigDict], None]]):
Optional callable that takes the config to check for correctness.
It may mutate the config as needed.
default_policy (Optional[Type[Policy]]): The default Policy class to
use.
get_policy_class (Optional[Callable[
TrainerConfigDict, Optional[Type[Policy]]]]): Optional callable
that takes a config and returns the policy class or None. If None
is returned, will use `default_policy` (which must be provided
then).
validate_env (Optional[Callable[[EnvType, EnvContext], None]]):
Optional callable to validate the generated environment (only
on worker=0).
before_init (Optional[Callable[[Trainer], None]]): Optional callable to
run before anything is constructed inside Trainer (Workers with
Policies, execution plan, etc..). Takes the Trainer instance as
argument.
after_init (Optional[Callable[[Trainer], None]]): Optional callable to
run at the end of trainer init (after all Workers and the exec.
plan have been constructed). Takes the Trainer instance as
argument.
before_evaluate_fn (Optional[Callable[[Trainer], None]]): Callback to
run before evaluation. This takes the trainer instance as argument.
mixins (list): list of any class mixins for the returned trainer class.
These mixins will be applied in order and will have higher
precedence than the Trainer class.
execution_plan (Optional[Callable[[WorkerSet, TrainerConfigDict],
Iterable[ResultDict]]]): Optional callable that sets up the
distributed execution workflow.
Returns:
Type[Trainer]: A Trainer sub-class configured by the specified args.
"""
original_kwargs = locals().copy()
base = add_mixins(Trainer, mixins)
class trainer_cls(base):
_name = name
_default_config = default_config or COMMON_CONFIG
_policy_class = default_policy
def __init__(self, config=None, env=None, logger_creator=None):
Trainer.__init__(self, config, env, logger_creator)
def _init(
self,
config: TrainerConfigDict,
env_creator: Callable[[EnvConfigDict], EnvType],
):
# Validate config via custom validation function.
if validate_config:
validate_config(config)
# No `get_policy_class` function.
if get_policy_class is None:
# Default_policy must be provided (unless in multi-agent mode,
# where each policy can have its own default policy class.
if not config["multiagent"]["policies"]:
assert default_policy is not None
self._policy_class = default_policy
# Query the function for a class to use.
else:
self._policy_class = get_policy_class(config)
# If None returned, use default policy (must be provided).
if self._policy_class is None:
assert default_policy is not None
self._policy_class = default_policy
if before_init:
before_init(self)
# Creating all workers (excluding evaluation workers).
self.workers = self._make_workers(
env_creator=env_creator,
validate_env=validate_env,
policy_class=self._policy_class,
config=config,
num_workers=self.config["num_workers"],
)
self.execution_plan = execution_plan
self.train_exec_impl = execution_plan(self.workers, config)
if after_init:
after_init(self)
@override(Trainer)
def step(self):
res = next(self.train_exec_impl)
return res
@override(Trainer)
def _before_evaluate(self):
if before_evaluate_fn:
before_evaluate_fn(self)
@override(Trainer)
def __getstate__(self):
state = Trainer.__getstate__(self)
state["train_exec_impl"] = self.train_exec_impl.shared_metrics.get().save()
return state
@override(Trainer)
def __setstate__(self, state):
Trainer.__setstate__(self, state)
self.train_exec_impl.shared_metrics.get().restore(state["train_exec_impl"])
@staticmethod
@override(Trainer)
def with_updates(**overrides) -> Type[Trainer]:
"""Build a copy of this trainer class with the specified overrides.
Keyword Args:
overrides (dict): use this to override any of the arguments
originally passed to build_trainer() for this policy.
Returns:
Type[Trainer]: A the Trainer sub-class using `original_kwargs`
and `overrides`.
Examples:
>>> MyClass = SomeOtherClass.with_updates({"name": "Mine"})
>>> issubclass(MyClass, SomeOtherClass)
... False
>>> issubclass(MyClass, Trainer)
... True
"""
return build_trainer(**dict(original_kwargs, **overrides))
trainer_cls.__name__ = name
trainer_cls.__qualname__ = name
return trainer_cls
|
https://github.com/ray-project/ray/issues/12516
|
Traceback (most recent call last):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 60, in check_shape
if not self._obs_space.contains(observation):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/gym/spaces/box.py", line 128, in contains
return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
AttributeError: 'dict' object has no attribute 'shape'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <module>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <listcomp>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 819, in compute_action
preprocessed = self.workers.local_worker().preprocessors[
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 166, in transform
self.check_shape(observation)
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 66, in check_shape
raise ValueError(
ValueError: ('Observation for a Box/MultiBinary/MultiDiscrete space should be an np.array, not a Python list.', {'action_mask': array([0, 1, 1, 1, 1]), 'avail_actions': array([1., 1., 1., 1., 1.]), 'state': array([ 1, 12, 2, 1, 4, 2, 4, 2, 1, 10, 0])})
|
AttributeError
|
def __init__(
self,
*,
env_creator: Optional[Callable[[EnvContext], EnvType]] = None,
validate_env: Optional[Callable[[EnvType], None]] = None,
policy_class: Optional[Type[Policy]] = None,
trainer_config: Optional[TrainerConfigDict] = None,
num_workers: int = 0,
logdir: Optional[str] = None,
_setup: bool = True,
):
"""Create a new WorkerSet and initialize its workers.
Args:
env_creator (Optional[Callable[[EnvContext], EnvType]]): Function
that returns env given env config.
validate_env (Optional[Callable[[EnvType], None]]): Optional
callable to validate the generated environment (only on
worker=0).
policy (Optional[Type[Policy]]): A rllib.policy.Policy class.
trainer_config (Optional[TrainerConfigDict]): Optional dict that
extends the common config of the Trainer class.
num_workers (int): Number of remote rollout workers to create.
logdir (Optional[str]): Optional logging directory for workers.
_setup (bool): Whether to setup workers. This is only for testing.
"""
if not trainer_config:
from ray.rllib.agents.trainer import COMMON_CONFIG
trainer_config = COMMON_CONFIG
self._env_creator = env_creator
self._policy_class = policy_class
self._remote_config = trainer_config
self._logdir = logdir
if _setup:
self._local_config = merge_dicts(
trainer_config, {"tf_session_args": trainer_config["local_tf_session_args"]}
)
# Create a number of remote workers.
self._remote_workers = []
self.add_workers(num_workers)
# If num_workers > 0, get the action_spaces and observation_spaces
# to not be forced to create an Env on the driver.
if self._remote_workers:
remote_spaces = ray.get(
self.remote_workers()[0].foreach_policy.remote(
lambda p, pid: (pid, p.observation_space, p.action_space)
)
)
spaces = {
e[0]: (getattr(e[1], "original_space", e[1]), e[2])
for e in remote_spaces
}
else:
spaces = None
# Always create a local worker.
self._local_worker = self._make_worker(
cls=RolloutWorker,
env_creator=env_creator,
validate_env=validate_env,
policy_cls=self._policy_class,
worker_index=0,
num_workers=num_workers,
config=self._local_config,
spaces=spaces,
)
|
def __init__(
self,
*,
env_creator: Optional[Callable[[EnvContext], EnvType]] = None,
validate_env: Optional[Callable[[EnvType], None]] = None,
policy_class: Optional[Type[Policy]] = None,
trainer_config: Optional[TrainerConfigDict] = None,
num_workers: int = 0,
logdir: Optional[str] = None,
_setup: bool = True,
):
"""Create a new WorkerSet and initialize its workers.
Args:
env_creator (Optional[Callable[[EnvContext], EnvType]]): Function
that returns env given env config.
validate_env (Optional[Callable[[EnvType], None]]): Optional
callable to validate the generated environment (only on
worker=0).
policy (Optional[Type[Policy]]): A rllib.policy.Policy class.
trainer_config (Optional[TrainerConfigDict]): Optional dict that
extends the common config of the Trainer class.
num_workers (int): Number of remote rollout workers to create.
logdir (Optional[str]): Optional logging directory for workers.
_setup (bool): Whether to setup workers. This is only for testing.
"""
if not trainer_config:
from ray.rllib.agents.trainer import COMMON_CONFIG
trainer_config = COMMON_CONFIG
self._env_creator = env_creator
self._policy_class = policy_class
self._remote_config = trainer_config
self._logdir = logdir
if _setup:
self._local_config = merge_dicts(
trainer_config, {"tf_session_args": trainer_config["local_tf_session_args"]}
)
# Create a number of remote workers.
self._remote_workers = []
self.add_workers(num_workers)
# If num_workers > 0, get the action_spaces and observation_spaces
# to not be forced to create an Env on the driver.
if self._remote_workers:
remote_spaces = ray.get(
self.remote_workers()[0].foreach_policy.remote(
lambda p, pid: (pid, p.observation_space, p.action_space)
)
)
spaces = {e[0]: (e[1], e[2]) for e in remote_spaces}
else:
spaces = None
# Always create a local worker.
self._local_worker = self._make_worker(
cls=RolloutWorker,
env_creator=env_creator,
validate_env=validate_env,
policy_cls=self._policy_class,
worker_index=0,
num_workers=num_workers,
config=self._local_config,
spaces=spaces,
)
|
https://github.com/ray-project/ray/issues/12516
|
Traceback (most recent call last):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 60, in check_shape
if not self._obs_space.contains(observation):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/gym/spaces/box.py", line 128, in contains
return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
AttributeError: 'dict' object has no attribute 'shape'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <module>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <listcomp>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 819, in compute_action
preprocessed = self.workers.local_worker().preprocessors[
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 166, in transform
self.check_shape(observation)
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 66, in check_shape
raise ValueError(
ValueError: ('Observation for a Box/MultiBinary/MultiDiscrete space should be an np.array, not a Python list.', {'action_mask': array([0, 1, 1, 1, 1]), 'avail_actions': array([1., 1., 1., 1., 1.]), 'state': array([ 1, 12, 2, 1, 4, 2, 4, 2, 1, 10, 0])})
|
AttributeError
|
def memory_summary():
"""Returns a formatted string describing memory usage in the cluster."""
import grpc
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info.
raylet = ray.nodes()[0]
raylet_address = "{}:{}".format(
raylet["NodeManagerAddress"], ray.nodes()[0]["NodeManagerPort"]
)
channel = grpc.insecure_channel(
raylet_address,
options=[
("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
],
)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
reply = stub.FormatGlobalMemoryInfo(
node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0
)
return reply.memory_summary
|
def memory_summary():
"""Returns a formatted string describing memory usage in the cluster."""
import grpc
from ray.core.generated import node_manager_pb2
from ray.core.generated import node_manager_pb2_grpc
# We can ask any Raylet for the global memory info.
raylet = ray.nodes()[0]
raylet_address = "{}:{}".format(
raylet["NodeManagerAddress"], ray.nodes()[0]["NodeManagerPort"]
)
channel = grpc.insecure_channel(raylet_address)
stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
reply = stub.FormatGlobalMemoryInfo(
node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0
)
return reply.memory_summary
|
https://github.com/ray-project/ray/issues/8502
|
2020-05-19 02:13:32,283 INFO scripts.py:976 -- Connecting to Ray instance at 172.31.6.12:34940.
2020-05-19 02:13:32,284 WARNING worker.py:809 -- When connecting to an existing cluster, _internal_config must match the cluster's _internal_config.
(pid=5906) E0519 02:13:32.383447 5906 plasma_store_provider.cc:108] Failed to put object d47fe8ca624da001ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5906) Waiting 1000ms for space to free up...
(pid=5906) 2020-05-19 02:13:32,594 INFO (unknown file):0 -- gc.collect() freed 10 refs in 0.11551751299975876 seconds
(pid=5771) E0519 02:13:32.686894 5771 plasma_store_provider.cc:118] Failed to put object 72e67d09154b35b1ffffffff010000c801000000 after 6 attempts. Plasma store status:
(pid=5771) num clients with quota: 0
(pid=5771) quota map size: 0
(pid=5771) pinned quota map size: 0
(pid=5771) allocated bytes: 19130609999
(pid=5771) allocation limit: 19130641612
(pid=5771) pinned bytes: 19130609999
(pid=5771) (global lru) capacity: 19130641612
(pid=5771) (global lru) used: 0%
(pid=5771) (global lru) num objects: 0
(pid=5771) (global lru) num evictions: 0
(pid=5771) (global lru) bytes evicted: 0
(pid=5771) ---
(pid=5771) --- Tip: Use the `ray memory` command to list active objects in the cluster.
(pid=5771) ---
(pid=5771) E0519 02:13:32.880080 5771 plasma_store_provider.cc:108] Failed to put object 1f5c36abed661dbeffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5771) Waiting 1000ms for space to free up...
(pid=5769) E0519 02:13:32.882894 5769 plasma_store_provider.cc:108] Failed to put object cb31822e7f0e3c70ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5769) Waiting 2000ms for space to free up...
(pid=5771) 2020-05-19 02:13:33,215 INFO (unknown file):0 -- gc.collect() freed 10 refs in 0.23763301200006026 seconds
(pid=5906) E0519 02:13:33.383901 5906 plasma_store_provider.cc:108] Failed to put object d47fe8ca624da001ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5906) Waiting 2000ms for space to free up...
Traceback (most recent call last):
File "/home/ubuntu/src/seeweed/ml/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/scripts/scripts.py", line 1028, in main
return cli()
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/scripts/scripts.py", line 978, in memory
print(ray.internal.internal_api.memory_summary())
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/internal/internal_api.py", line 28, in memory_summary
node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/grpc/_channel.py", line 826, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/grpc/_channel.py", line 729, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.RESOURCE_EXHAUSTED
details = "Received message larger than max (28892999 vs. 4194304)"
debug_error_string = "{"created":"@1589854413.712252174","description":"Received message larger than max (28892999 vs. 4194304)","file":"src/core/ext/filters/message_size/message_size_filter.cc","file_line":188,"grpc_status":8}"
(pid=5771) E0519 02:13:33.880635 5771 plasma_store_provider.cc:108] Failed to put object 1f5c36abed661dbeffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5771) Waiting 2000ms for space to free up...
|
grpc._channel._InactiveRpcError
|
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE:
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = ray.gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state._check_connected()
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
|
def custom_excepthook(type, value, tb):
# If this is a driver, push the exception to GCS worker table.
if global_worker.mode == SCRIPT_MODE:
error_message = "".join(traceback.format_tb(tb))
worker_id = global_worker.worker_id
worker_type = ray.gcs_utils.DRIVER
worker_info = {"exception": error_message}
ray.state.state.add_worker(worker_id, worker_type, worker_info)
# Call the normal excepthook.
normal_excepthook(type, value, tb)
|
https://github.com/ray-project/ray/issues/12643
|
raise Exception("hello")
Error in sys.excepthook:
Traceback (most recent call last):
File "/Users/eoakes/code/ray/python/ray/worker.py", line 836, in custom_excepthook
ray.state.state.add_worker(worker_id, worker_type, worker_info)
File "/Users/eoakes/code/ray/python/ray/state.py", line 733, in add_worker
return self.global_state_accessor.add_worker_info(
AttributeError: 'NoneType' object has no attribute 'add_worker_info'
Original exception was:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
Exception: hello
|
AttributeError
|
def __init__(self, sync_up_template, sync_down_template, delete_template=noop_template):
"""Syncs between two directories with the given command.
Arguments:
sync_up_template (str): A runnable string template; needs to
include replacement fields '{source}' and '{target}'.
sync_down_template (str): A runnable string template; needs to
include replacement fields '{source}' and '{target}'.
delete_template (Optional[str]): A runnable string template; needs
to include replacement field '{target}'. Noop by default.
"""
self._validate_sync_string(sync_up_template)
self._validate_sync_string(sync_down_template)
self.sync_up_template = sync_up_template
self.sync_down_template = sync_down_template
self.delete_template = delete_template
self.logfile = None
self._closed = False
self.cmd_process = None
|
def __init__(self, sync_up_template, sync_down_template, delete_template=noop_template):
"""Syncs between two directories with the given command.
Arguments:
sync_up_template (str): A runnable string template; needs to
include replacement fields '{source}' and '{target}'.
sync_down_template (str): A runnable string template; needs to
include replacement fields '{source}' and '{target}'.
delete_template (Optional[str]): A runnable string template; needs
to include replacement field '{target}'. Noop by default.
"""
self._validate_sync_string(sync_up_template)
self._validate_sync_string(sync_down_template)
self.sync_up_template = sync_up_template
self.sync_down_template = sync_down_template
self.delete_template = delete_template
self.logfile = None
self.cmd_process = None
|
https://github.com/ray-project/ray/issues/12227
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def set_logdir(self, logdir):
"""Sets the directory to log sync execution output in.
Args:
logdir (str): Log directory.
"""
self.logfile = tempfile.NamedTemporaryFile(
prefix="log_sync_out", dir=logdir, suffix=".log", delete=False
)
self._closed = False
|
def set_logdir(self, logdir):
"""Sets the directory to log sync execution output in.
Args:
logdir (str): Log directory.
"""
self.logfile = tempfile.NamedTemporaryFile(
prefix="log_sync_out", dir=logdir, suffix=".log", delete=False
)
|
https://github.com/ray-project/ray/issues/12227
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def delete(self, target):
if self.is_running:
logger.warning("Last sync client cmd still in progress, skipping.")
return False
final_cmd = self.delete_template.format(target=quote(target))
logger.debug("Running delete: {}".format(final_cmd))
self.cmd_process = subprocess.Popen(
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self._get_logfile()
)
return True
|
def delete(self, target):
if self.is_running:
logger.warning("Last sync client cmd still in progress, skipping.")
return False
final_cmd = self.delete_template.format(target=quote(target))
logger.debug("Running delete: {}".format(final_cmd))
self.cmd_process = subprocess.Popen(
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile
)
return True
|
https://github.com/ray-project/ray/issues/12227
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def _execute(self, sync_template, source, target):
"""Executes sync_template on source and target."""
if self.is_running:
logger.warning("Last sync client cmd still in progress, skipping.")
return False
final_cmd = sync_template.format(source=quote(source), target=quote(target))
logger.debug("Running sync: {}".format(final_cmd))
self.cmd_process = subprocess.Popen(
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self._get_logfile()
)
return True
|
def _execute(self, sync_template, source, target):
"""Executes sync_template on source and target."""
if self.is_running:
logger.warning("Last sync client cmd still in progress, skipping.")
return False
final_cmd = sync_template.format(source=quote(source), target=quote(target))
logger.debug("Running sync: {}".format(final_cmd))
self.cmd_process = subprocess.Popen(
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile
)
return True
|
https://github.com/ray-project/ray/issues/12227
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def on_trial_complete(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
trial_syncer = self._get_trial_syncer(trial)
if NODE_IP in trial.last_result:
trainable_ip = trial.last_result[NODE_IP]
else:
trainable_ip = ray.get(trial.runner.get_current_ip.remote())
trial_syncer.set_worker_ip(trainable_ip)
trial_syncer.sync_down_if_needed()
trial_syncer.close()
|
def on_trial_complete(
self, iteration: int, trials: List["Trial"], trial: "Trial", **info
):
trial_syncer = self._get_trial_syncer(trial)
if NODE_IP in trial.last_result:
trainable_ip = trial.last_result[NODE_IP]
else:
trainable_ip = ray.get(trial.runner.get_current_ip.remote())
trial_syncer.set_worker_ip(trainable_ip)
trial_syncer.sync_down_if_needed()
|
https://github.com/ray-project/ray/issues/12227
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def mock_storage_client():
"""Mocks storage client that treats a local dir as durable storage."""
client = get_sync_client(LOCAL_SYNC_TEMPLATE, LOCAL_DELETE_TEMPLATE)
path = os.path.join(
ray.utils.get_user_temp_dir(), f"mock-client-{uuid.uuid4().hex[:4]}"
)
os.makedirs(path, exist_ok=True)
client.set_logdir(path)
return client
|
def mock_storage_client():
"""Mocks storage client that treats a local dir as durable storage."""
return get_sync_client(LOCAL_SYNC_TEMPLATE, LOCAL_DELETE_TEMPLATE)
|
https://github.com/ray-project/ray/issues/12227
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
async def _do_long_poll(self):
while True:
try:
updates: Dict[str, UpdatedObject] = await self._poll_once()
self._update(updates)
logger.debug(f"LongPollerClient received udpates: {updates}")
for key, updated_object in updates.items():
# NOTE(simon):
# This blocks the loop from doing another poll. Consider
# use loop.create_task here or poll first then call the
# callbacks.
callback = self.key_listeners[key]
await callback(updated_object.object_snapshot)
except ray.exceptions.RayActorError:
# This can happen during shutdown where the controller is
# intentionally killed, the client should just gracefully
# exit.
logger.debug("LongPollerClient failed to connect to host. Shutting down.")
break
|
async def _do_long_poll(self):
while True:
updates: Dict[str, UpdatedObject] = await self._poll_once()
self._update(updates)
logger.debug(f"LongPollerClient received updates: {updates}")
for key, updated_object in updates.items():
# NOTE(simon): This blocks the loop from doing another poll.
# Consider use loop.create_task here or poll first then call
# the callbacks.
callback = self.key_listeners[key]
await callback(updated_object.object_snapshot)
|
https://github.com/ray-project/ray/issues/12384
|
Exception in callback async_set_result.<locals>.set_future()
handle: <Handle async_set_result.<locals>.set_future()>
Traceback (most recent call last):
File "/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "python/ray/_raylet.pyx", line 1530, in ray._raylet.async_set_result.set_future
AttributeError: 'RayActorError' object has no attribute 'as_instanceof_cause'
|
AttributeError
|
async def _get_actor(actor):
actor = dict(actor)
worker_id = actor["address"]["workerId"]
core_worker_stats = DataSource.core_worker_stats.get(worker_id, {})
actor_constructor = core_worker_stats.get("actorTitle", "Unknown actor constructor")
actor["actorConstructor"] = actor_constructor
actor.update(core_worker_stats)
# TODO(fyrestone): remove this, give a link from actor
# info to worker info in front-end.
node_id = actor["address"]["rayletId"]
pid = core_worker_stats.get("pid")
node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
actor_process_stats = None
actor_process_gpu_stats = None
if pid:
for process_stats in node_physical_stats.get("workers", []):
if process_stats["pid"] == pid:
actor_process_stats = process_stats
break
for gpu_stats in node_physical_stats.get("gpus", []):
for process in gpu_stats.get("processes", []):
if process["pid"] == pid:
actor_process_gpu_stats = gpu_stats
break
if actor_process_gpu_stats is not None:
break
actor["gpus"] = actor_process_gpu_stats
actor["processStats"] = actor_process_stats
return actor
|
async def _get_actor(actor):
actor = dict(actor)
worker_id = actor["address"]["workerId"]
core_worker_stats = DataSource.core_worker_stats.get(worker_id, {})
actor_constructor = core_worker_stats.get("actorTitle", "Unknown actor constructor")
actor["actorConstructor"] = actor_constructor
actor.update(core_worker_stats)
# TODO(fyrestone): remove this, give a link from actor
# info to worker info in front-end.
node_id = actor["address"]["rayletId"]
pid = core_worker_stats.get("pid")
node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
actor_process_stats = None
actor_process_gpu_stats = None
if pid:
for process_stats in node_physical_stats.get("workers"):
if process_stats["pid"] == pid:
actor_process_stats = process_stats
break
for gpu_stats in node_physical_stats.get("gpus"):
for process in gpu_stats.get("processes", []):
if process["pid"] == pid:
actor_process_gpu_stats = gpu_stats
break
if actor_process_gpu_stats is not None:
break
actor["gpus"] = actor_process_gpu_stats
actor["processStats"] = actor_process_stats
return actor
|
https://github.com/ray-project/ray/issues/11631
|
Error: Traceback (most recent call last): File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 347, in _update_cache response = task.result() File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 77, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details() File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 182, in get_all_node_details node_details.append(await cls.get_node_info(node_id)) File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 159, in get_node_info node_info["actors"] = await cls.get_node_actors(node_id) File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 74, in get_node_actors for process_stats in node_physical_stats.get("workers"): TypeError: 'NoneType' object is not iterable
|
TypeError
|
def get_address_info_from_redis_helper(
redis_address, node_ip_address, redis_password=None
):
redis_ip_address, redis_port = redis_address.split(":")
# Get node table from global state accessor.
global_state = ray.state.GlobalState()
global_state._initialize_global_state(redis_address, redis_password)
client_table = global_state.node_table()
if len(client_table) == 0:
raise RuntimeError("Redis has started but no raylets have registered yet.")
relevant_client = None
for client_info in client_table:
client_node_ip_address = client_info["NodeManagerAddress"]
if (
client_node_ip_address == node_ip_address
or (
client_node_ip_address == "127.0.0.1"
and redis_ip_address == get_node_ip_address()
)
or client_node_ip_address == redis_ip_address
):
relevant_client = client_info
break
if relevant_client is None:
raise RuntimeError(
f"This node has an IP address of {node_ip_address}, and Ray "
"expects this IP address to be either the Redis address or one of"
f" the Raylet addresses. Connected to Redis at {redis_address} and"
" found raylets at "
f"{', '.join(c['NodeManagerAddress'] for c in client_table)} but "
f"none of these match this node's IP {node_ip_address}. Are any of"
" these actually a different IP address for the same node?"
"You might need to provide --node-ip-address to specify the IP "
"address that the head should use when sending to this node."
)
return {
"object_store_address": relevant_client["ObjectStoreSocketName"],
"raylet_socket_name": relevant_client["RayletSocketName"],
"node_manager_port": relevant_client["NodeManagerPort"],
}
|
def get_address_info_from_redis_helper(
redis_address, node_ip_address, redis_password=None
):
redis_ip_address, redis_port = redis_address.split(":")
# Get node table from global state accessor.
global_state = ray.state.GlobalState()
global_state._initialize_global_state(redis_address, redis_password)
client_table = global_state.node_table()
if len(client_table) == 0:
raise RuntimeError("Redis has started but no raylets have registered yet.")
relevant_client = None
for client_info in client_table:
client_node_ip_address = client_info["NodeManagerAddress"]
if client_node_ip_address == node_ip_address or (
client_node_ip_address == "127.0.0.1"
and redis_ip_address == get_node_ip_address()
):
relevant_client = client_info
break
if relevant_client is None:
raise RuntimeError(
f"This node has an IP address of {node_ip_address}, and Ray "
"expects this IP address to be either the Redis address or one of"
f" the Raylet addresses. Connected to Redis at {redis_address} and"
" found raylets at "
f"{', '.join(c['NodeManagerAddress'] for c in client_table)} but "
f"none of these match this node's IP {node_ip_address}. Are any of"
" these actually a different IP address for the same node?"
"You might need to provide --node-ip-address to specify the IP "
"address that the head should use when sending to this node."
)
return {
"object_store_address": relevant_client["ObjectStoreSocketName"],
"raylet_socket_name": relevant_client["RayletSocketName"],
"node_manager_port": relevant_client["NodeManagerPort"],
}
|
https://github.com/ray-project/ray/issues/11943
|
020-11-11 14:48:28,960 INFO worker.py:672 -- Connecting to existing Ray cluster at address: ***:***
2020-11-11 14:48:28,968 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:29,977 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:30,986 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:31,996 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:33,005 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
Traceback (most recent call last):
File "***", line 39, in <module>
ray.init(address='***:***', _redis_password='***')
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 779, in init
connect_only=True)
File "/usr/local/lib/python3.6/dist-packages/ray/node.py", line 179, in __init__
redis_password=self.redis_password))
File "/usr/local/lib/python3.6/dist-packages/ray/_private/services.py", line 211, in get_address_info_from_redis
redis_address, node_ip_address, redis_password=redis_password)
File "/usr/local/lib/python3.6/dist-packages/ray/_private/services.py", line 194, in get_address_info_from_redis_helper
"Redis has started but no raylets have registered yet.")
RuntimeError: Redis has started but no raylets have registered yet.
|
RuntimeError
|
def __init__(self):
self.indent_level = 0
self._verbosity = 0
self._verbosity_overriden = False
self._color_mode = "auto"
self._log_style = "record"
self.pretty = False
self.interactive = False
# store whatever colorful has detected for future use if
# the color ouput is toggled (colorful detects # of supported colors,
# so it has some non-trivial logic to determine this)
self._autodetected_cf_colormode = cf.colorful.colormode
self.set_format()
|
def __init__(self):
self.indent_level = 0
self._verbosity = 0
self._color_mode = "auto"
self._log_style = "record"
self.pretty = False
self.interactive = False
# store whatever colorful has detected for future use if
# the color ouput is toggled (colorful detects # of supported colors,
# so it has some non-trivial logic to determine this)
self._autodetected_cf_colormode = cf.colorful.colormode
self.set_format()
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def verbosity(self):
if self._verbosity_overriden:
return self._verbosity
elif not self.pretty:
return 999
return self._verbosity
|
def verbosity(self):
if not self.pretty:
return 999
return self._verbosity
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def _set_verbosity(self, x):
self._verbosity = x
self._verbosity_overriden = True
|
def _set_verbosity(self, x):
self._verbosity = x
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def rsync(
config_file: str,
source: Optional[str],
target: Optional[str],
override_cluster_name: Optional[str],
down: bool,
ip_address: Optional[str] = None,
use_internal_ip: bool = False,
no_config_cache: bool = False,
all_nodes: bool = False,
_runner: ModuleType = subprocess,
) -> None:
"""Rsyncs files.
Arguments:
config_file: path to the cluster yaml
source: source dir
target: target dir
override_cluster_name: set the name of the cluster
down: whether we're syncing remote -> local
ip_address (str): Address of node. Raise Exception
if both ip_address and 'all_nodes' are provided.
use_internal_ip (bool): Whether the provided ip_address is
public or private.
all_nodes: whether to sync worker nodes in addition to the head node
"""
if bool(source) != bool(target):
cli_logger.abort("Expected either both a source and a target, or neither.")
assert bool(source) == bool(target), (
"Must either provide both or neither source and target."
)
if ip_address and all_nodes:
cli_logger.abort("Cannot provide both ip_address and 'all_nodes'.")
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config, no_config_cache=no_config_cache)
is_file_mount = False
if source and target:
for remote_mount in config.get("file_mounts", {}).keys():
if (source if down else target).startswith(remote_mount):
is_file_mount = True
break
provider = _get_node_provider(config["provider"], config["cluster_name"])
def rsync_to_node(node_id, is_head_node):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
use_internal_ip=use_internal_ip,
process_runner=_runner,
file_mounts_contents_hash="",
is_head_node=is_head_node,
rsync_options={
"rsync_exclude": config.get("rsync_exclude"),
"rsync_filter": config.get("rsync_filter"),
},
docker_config=config.get("docker"),
)
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
# print rsync progress for single file rsync
if cli_logger.verbosity > 0:
cmd_output_util.set_output_redirected(False)
set_rsync_silent(False)
rsync(source, target, is_file_mount)
else:
updater.sync_file_mounts(rsync)
nodes = []
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=False
)
if ip_address:
nodes = [provider.get_node_id(ip_address, use_internal_ip=use_internal_ip)]
else:
nodes = [head_node]
if all_nodes:
nodes.extend(_get_worker_nodes(config, override_cluster_name))
for node_id in nodes:
rsync_to_node(node_id, is_head_node=(node_id == head_node))
|
def rsync(
config_file: str,
source: Optional[str],
target: Optional[str],
override_cluster_name: Optional[str],
down: bool,
ip_address: Optional[str] = None,
use_internal_ip: bool = False,
no_config_cache: bool = False,
all_nodes: bool = False,
_runner: ModuleType = subprocess,
) -> None:
"""Rsyncs files.
Arguments:
config_file: path to the cluster yaml
source: source dir
target: target dir
override_cluster_name: set the name of the cluster
down: whether we're syncing remote -> local
ip_address (str): Address of node. Raise Exception
if both ip_address and 'all_nodes' are provided.
use_internal_ip (bool): Whether the provided ip_address is
public or private.
all_nodes: whether to sync worker nodes in addition to the head node
"""
if bool(source) != bool(target):
cli_logger.abort("Expected either both a source and a target, or neither.")
assert bool(source) == bool(target), (
"Must either provide both or neither source and target."
)
if ip_address and all_nodes:
cli_logger.abort("Cannot provide both ip_address and 'all_nodes'.")
config = yaml.safe_load(open(config_file).read())
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
config = _bootstrap_config(config, no_config_cache=no_config_cache)
is_file_mount = False
if source and target:
for remote_mount in config.get("file_mounts", {}).keys():
if (source if down else target).startswith(remote_mount):
is_file_mount = True
break
provider = _get_node_provider(config["provider"], config["cluster_name"])
def rsync_to_node(node_id, is_head_node):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
use_internal_ip=use_internal_ip,
process_runner=_runner,
file_mounts_contents_hash="",
is_head_node=is_head_node,
rsync_options={
"rsync_exclude": config.get("rsync_exclude"),
"rsync_filter": config.get("rsync_filter"),
},
docker_config=config.get("docker"),
)
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
# print rsync progress for single file rsync
cmd_output_util.set_output_redirected(False)
set_rsync_silent(False)
rsync(source, target, is_file_mount)
else:
updater.sync_file_mounts(rsync)
nodes = []
head_node = _get_head_node(
config, config_file, override_cluster_name, create_if_needed=False
)
if ip_address:
nodes = [provider.get_node_id(ip_address, use_internal_ip=use_internal_ip)]
else:
nodes = [head_node]
if all_nodes:
nodes.extend(_get_worker_nodes(config, override_cluster_name))
for node_id in nodes:
rsync_to_node(node_id, is_head_node=(node_id == head_node))
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def rsync_to_node(node_id, is_head_node):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
use_internal_ip=use_internal_ip,
process_runner=_runner,
file_mounts_contents_hash="",
is_head_node=is_head_node,
rsync_options={
"rsync_exclude": config.get("rsync_exclude"),
"rsync_filter": config.get("rsync_filter"),
},
docker_config=config.get("docker"),
)
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
# print rsync progress for single file rsync
if cli_logger.verbosity > 0:
cmd_output_util.set_output_redirected(False)
set_rsync_silent(False)
rsync(source, target, is_file_mount)
else:
updater.sync_file_mounts(rsync)
|
def rsync_to_node(node_id, is_head_node):
updater = NodeUpdaterThread(
node_id=node_id,
provider_config=config["provider"],
provider=provider,
auth_config=config["auth"],
cluster_name=config["cluster_name"],
file_mounts=config["file_mounts"],
initialization_commands=[],
setup_commands=[],
ray_start_commands=[],
runtime_hash="",
use_internal_ip=use_internal_ip,
process_runner=_runner,
file_mounts_contents_hash="",
is_head_node=is_head_node,
rsync_options={
"rsync_exclude": config.get("rsync_exclude"),
"rsync_filter": config.get("rsync_filter"),
},
docker_config=config.get("docker"),
)
if down:
rsync = updater.rsync_down
else:
rsync = updater.rsync_up
if source and target:
# print rsync progress for single file rsync
cmd_output_util.set_output_redirected(False)
set_rsync_silent(False)
rsync(source, target, is_file_mount)
else:
updater.sync_file_mounts(rsync)
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def __init__(
self, local_dir: str, remote_dir: str, sync_client: Optional[SyncClient] = None
):
configure_logging(
log_style="record", verbosity=env_integer("TUNE_SYNCER_VERBOSITY", 0)
)
self.local_ip = services.get_node_ip_address()
self.worker_ip = None
sync_client = sync_client or DockerSyncClient()
sync_client.configure(self._cluster_config_file)
super(NodeSyncer, self).__init__(local_dir, remote_dir, sync_client)
|
def __init__(
self, local_dir: str, remote_dir: str, sync_client: Optional[SyncClient] = None
):
self.local_ip = services.get_node_ip_address()
self.worker_ip = None
sync_client = sync_client or DockerSyncClient()
sync_client.configure(self._cluster_config_file)
super(NodeSyncer, self).__init__(local_dir, remote_dir, sync_client)
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def reset_trial(self, trial, new_config, new_experiment_tag, logger_creator=None):
"""Tries to invoke `Trainable.reset()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial trainable.
new_experiment_tag (str): New experiment name for trial.
logger_creator (Optional[Callable[[Dict], Logger]]): Function
that instantiates a logger on the actor process.
Returns:
True if `reset_config` is successful else False.
"""
trial.set_experiment_tag(new_experiment_tag)
trial.set_config(new_config)
trainable = trial.runner
with self._change_working_directory(trial):
with warn_if_slow("reset"):
try:
reset_val = ray.get(
trainable.reset.remote(new_config, logger_creator),
timeout=DEFAULT_GET_TIMEOUT,
)
except GetTimeoutError:
logger.exception("Trial %s: reset timed out.", trial)
return False
return reset_val
|
def reset_trial(self, trial, new_config, new_experiment_tag, logger_creator=None):
"""Tries to invoke `Trainable.reset()` to reset trial.
Args:
trial (Trial): Trial to be reset.
new_config (dict): New configuration for Trial trainable.
new_experiment_tag (str): New experiment name for trial.
logger_creator (Callable[[Dict], Logger]): A function that
instantiates a logger on the actor process.
Returns:
True if `reset_config` is successful else False.
"""
trial.set_experiment_tag(new_experiment_tag)
trial.set_config(new_config)
trainable = trial.runner
with self._change_working_directory(trial):
with warn_if_slow("reset"):
try:
reset_val = ray.get(
trainable.reset.remote(new_config, logger_creator),
timeout=DEFAULT_GET_TIMEOUT,
)
except GetTimeoutError:
logger.exception("Trial %s: reset timed out.", trial)
return False
return reset_val
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def reset(self, new_config, logger_creator=None):
"""Resets trial for use with new config.
Subclasses should override reset_config() to actually
reset actor behavior for the new config."""
self.config = new_config
self._result_logger.flush()
self._result_logger.close()
if logger_creator:
logger.debug("Logger reset.")
self._create_logger(new_config.copy(), logger_creator)
else:
logger.debug(
"Did not reset logger. Got: "
f"trainable.reset(logger_creator={logger_creator})."
)
stdout_file = new_config.pop(STDOUT_FILE, None)
stderr_file = new_config.pop(STDERR_FILE, None)
self._close_logfiles()
self._open_logfiles(stdout_file, stderr_file)
success = self.reset_config(new_config)
if not success:
return False
# Reset attributes. Will be overwritten by `restore` if a checkpoint
# is provided.
self._iteration = 0
self._time_total = 0.0
self._timesteps_total = None
self._episodes_total = None
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = False
return True
|
def reset(self, new_config, logger_creator=None):
"""Resets trial for use with new config.
Subclasses should override reset_config() to actually
reset actor behavior for the new config."""
self.config = new_config
self._result_logger.flush()
self._result_logger.close()
self._create_logger(new_config.copy(), logger_creator)
stdout_file = new_config.pop(STDOUT_FILE, None)
stderr_file = new_config.pop(STDERR_FILE, None)
self._close_logfiles()
self._open_logfiles(stdout_file, stderr_file)
success = self.reset_config(new_config)
if not success:
return False
# Reset attributes. Will be overwritten by `restore` if a checkpoint
# is provided.
self._iteration = 0
self._time_total = 0.0
self._timesteps_total = None
self._episodes_total = None
self._time_since_restore = 0.0
self._timesteps_since_restore = 0
self._iterations_since_restore = 0
self._restored = False
return True
|
https://github.com/ray-project/ray/issues/12172
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def _bootstrap_config(
config: Dict[str, Any], no_config_cache: bool = False
) -> Dict[str, Any]:
config = prepare_config(config)
hasher = hashlib.sha1()
hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
cache_key = os.path.join(
tempfile.gettempdir(), "ray-config-{}".format(hasher.hexdigest())
)
if os.path.exists(cache_key) and not no_config_cache:
config_cache = json.loads(open(cache_key).read())
if config_cache.get("_version", -1) == CONFIG_CACHE_VERSION:
# todo: is it fine to re-resolve? afaik it should be.
# we can have migrations otherwise or something
# but this seems overcomplicated given that resolving is
# relatively cheap
try_reload_log_state(
config_cache["config"]["provider"],
config_cache.get("provider_log_info"),
)
if log_once("_printed_cached_config_warning"):
cli_logger.verbose_warning(
"Loaded cached provider configuration from " + cf.bold("{}"),
cache_key,
)
if cli_logger.verbosity == 0:
cli_logger.warning("Loaded cached provider configuration")
cli_logger.warning(
"If you experience issues with "
"the cloud provider, try re-running "
"the command with {}.",
cf.bold("--no-config-cache"),
)
return config_cache["config"]
else:
cli_logger.warning(
"Found cached cluster config "
"but the version " + cf.bold("{}") + " "
"(expected " + cf.bold("{}") + ") does not match.\n"
"This is normal if cluster launcher was updated.\n"
"Config will be re-resolved.",
config_cache.get("_version", "none"),
CONFIG_CACHE_VERSION,
)
importer = _NODE_PROVIDERS.get(config["provider"]["type"])
if not importer:
raise NotImplementedError("Unsupported provider {}".format(config["provider"]))
provider_cls = importer(config["provider"])
cli_logger.print(
"Checking {} environment settings",
_PROVIDER_PRETTY_NAMES.get(config["provider"]["type"]),
)
try:
config = provider_cls.fillout_available_node_types_resources(config)
except Exception as exc:
if cli_logger.verbosity > 2:
logger.exception("Failed to autodetect node resources.")
else:
cli_logger.warning(
f"Failed to autodetect node resources: {str(exc)}. "
"You can see full stack trace with higher verbosity."
)
# NOTE: if `resources` field is missing, validate_config for non-AWS will
# fail (the schema error will ask the user to manually fill the resources)
# as we currently support autofilling resources for AWS instances only.
validate_config(config)
resolved_config = provider_cls.bootstrap_config(config)
if not no_config_cache:
with open(cache_key, "w") as f:
config_cache = {
"_version": CONFIG_CACHE_VERSION,
"provider_log_info": try_get_log_state(config["provider"]),
"config": resolved_config,
}
f.write(json.dumps(config_cache))
return resolved_config
|
def _bootstrap_config(
config: Dict[str, Any], no_config_cache: bool = False
) -> Dict[str, Any]:
config = prepare_config(config)
hasher = hashlib.sha1()
hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
cache_key = os.path.join(
tempfile.gettempdir(), "ray-config-{}".format(hasher.hexdigest())
)
if os.path.exists(cache_key) and not no_config_cache:
config_cache = json.loads(open(cache_key).read())
if config_cache.get("_version", -1) == CONFIG_CACHE_VERSION:
# todo: is it fine to re-resolve? afaik it should be.
# we can have migrations otherwise or something
# but this seems overcomplicated given that resolving is
# relatively cheap
try_reload_log_state(
config_cache["config"]["provider"],
config_cache.get("provider_log_info"),
)
if log_once("_printed_cached_config_warning"):
cli_logger.verbose_warning(
"Loaded cached provider configuration from " + cf.bold("{}"),
cache_key,
)
if cli_logger.verbosity == 0:
cli_logger.warning("Loaded cached provider configuration")
cli_logger.warning(
"If you experience issues with "
"the cloud provider, try re-running "
"the command with {}.",
cf.bold("--no-config-cache"),
)
return config_cache["config"]
else:
cli_logger.warning(
"Found cached cluster config "
"but the version " + cf.bold("{}") + " "
"(expected " + cf.bold("{}") + ") does not match.\n"
"This is normal if cluster launcher was updated.\n"
"Config will be re-resolved.",
config_cache.get("_version", "none"),
CONFIG_CACHE_VERSION,
)
importer = _NODE_PROVIDERS.get(config["provider"]["type"])
if not importer:
raise NotImplementedError("Unsupported provider {}".format(config["provider"]))
provider_cls = importer(config["provider"])
cli_logger.print(
"Checking {} environment settings",
_PROVIDER_PRETTY_NAMES.get(config["provider"]["type"]),
)
config = provider_cls.fillout_available_node_types_resources(config)
# NOTE: if `resources` field is missing, validate_config for non-AWS will
# fail (the schema error will ask the user to manually fill the resources)
# as we currently support autofilling resources for AWS instances only.
validate_config(config)
resolved_config = provider_cls.bootstrap_config(config)
if not no_config_cache:
with open(cache_key, "w") as f:
config_cache = {
"_version": CONFIG_CACHE_VERSION,
"provider_log_info": try_get_log_state(config["provider"]),
"config": resolved_config,
}
f.write(json.dumps(config_cache))
return resolved_config
|
https://github.com/ray-project/ray/issues/12195
|
(base) ➜ tune git:(fix-kubernetes-dep) ✗ ray up $CFG -y
Cluster: basic
Checking AWS environment settings
Traceback (most recent call last):
File "/Users/rliaw/miniconda3/bin/ray", line 8, in <module>
sys.exit(main())
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/scripts/scripts.py", line 1471, in main
return cli()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/scripts/scripts.py", line 860, in up
use_login_shells=use_login_shells)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 213, in create_or_update_cluster
config = _bootstrap_config(config, no_config_cache=no_config_cache)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 274, in _bootstrap_config
config = provider_cls.fillout_available_node_types_resources(config)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py", line 487, in fillout_available_node_types_resources
cluster_config["provider"].get("aws_credentials"))
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py", line 76, in list_ec2_instances
instance_types = ec2.describe_instance_types()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/botocore/client.py", line 565, in __getattr__
self.__class__.__name__, item)
AttributeError: 'EC2' object has no attribute 'describe_instance_types'
|
AttributeError
|
async def get_node_workers(cls, node_id):
workers = []
node_ip = DataSource.node_id_to_ip[node_id]
node_logs = DataSource.ip_and_pid_to_logs.get(node_ip, {})
node_errs = DataSource.ip_and_pid_to_errors.get(node_ip, {})
node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
node_stats = DataSource.node_stats.get(node_id, {})
# Merge coreWorkerStats (node stats) to workers (node physical stats)
pid_to_worker_stats = {}
pid_to_language = {}
pid_to_job_id = {}
for core_worker_stats in node_stats.get("coreWorkersStats", []):
pid = core_worker_stats["pid"]
pid_to_worker_stats.setdefault(pid, []).append(core_worker_stats)
pid_to_language[pid] = core_worker_stats["language"]
pid_to_job_id[pid] = core_worker_stats["jobId"]
for worker in node_physical_stats.get("workers", []):
worker = dict(worker)
pid = worker["pid"]
worker["logCount"] = len(node_logs.get(str(pid), []))
worker["errorCount"] = len(node_errs.get(str(pid), []))
worker["coreWorkerStats"] = pid_to_worker_stats.get(pid, [])
worker["language"] = pid_to_language.get(pid, dashboard_consts.DEFAULT_LANGUAGE)
worker["jobId"] = pid_to_job_id.get(pid, dashboard_consts.DEFAULT_JOB_ID)
await GlobalSignals.worker_info_fetched.send(node_id, worker)
workers.append(worker)
return workers
|
async def get_node_workers(cls, node_id):
workers = []
node_ip = DataSource.node_id_to_ip[node_id]
node_logs = DataSource.ip_and_pid_to_logs.get(node_ip, {})
logger.error(node_logs)
node_errs = DataSource.ip_and_pid_to_errors.get(node_ip, {})
logger.error(node_errs)
node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
node_stats = DataSource.node_stats.get(node_id, {})
# Merge coreWorkerStats (node stats) to workers (node physical stats)
pid_to_worker_stats = {}
pid_to_language = {}
pid_to_job_id = {}
for core_worker_stats in node_stats.get("coreWorkersStats", []):
pid = core_worker_stats["pid"]
pid_to_worker_stats.setdefault(pid, []).append(core_worker_stats)
pid_to_language[pid] = core_worker_stats["language"]
pid_to_job_id[pid] = core_worker_stats["jobId"]
for worker in node_physical_stats.get("workers", []):
worker = dict(worker)
pid = worker["pid"]
logger.error(f"pid={pid}")
worker["logCount"] = len(node_logs.get(str(pid), []))
worker["errorCount"] = len(node_errs.get(str(pid), []))
worker["coreWorkerStats"] = pid_to_worker_stats.get(pid, [])
worker["language"] = pid_to_language.get(pid, dashboard_consts.DEFAULT_LANGUAGE)
worker["jobId"] = pid_to_job_id.get(pid, dashboard_consts.DEFAULT_JOB_ID)
await GlobalSignals.worker_info_fetched.send(node_id, worker)
workers.append(worker)
return workers
|
https://github.com/ray-project/ray/issues/12126
|
Error: Traceback (most recent call last): File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 351, in _update_cache response = task.result()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 89, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in get_all_node_details for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in <listcomp> for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 149, in get_node_info node_stats["viewData"], KeyError: 'viewData'
|
KeyError
|
async def get_node_info(cls, node_id):
node_physical_stats = dict(DataSource.node_physical_stats.get(node_id, {}))
node_stats = dict(DataSource.node_stats.get(node_id, {}))
node = DataSource.nodes.get(node_id, {})
node_ip = DataSource.node_id_to_ip.get(node_id)
# Merge node log count information into the payload
log_info = DataSource.ip_and_pid_to_logs.get(node_ip, {})
node_log_count = 0
for entries in log_info.values():
node_log_count += len(entries)
error_info = DataSource.ip_and_pid_to_errors.get(node_ip, {})
node_err_count = 0
for entries in error_info.values():
node_err_count += len(entries)
node_stats.pop("coreWorkersStats", None)
view_data = node_stats.get("viewData", [])
ray_stats = cls._extract_view_data(
view_data, {"object_store_used_memory", "object_store_available_memory"}
)
node_info = node_physical_stats
# Merge node stats to node physical stats under raylet
node_info["raylet"] = node_stats
node_info["raylet"].update(ray_stats)
# Merge GcsNodeInfo to node physical stats
node_info["raylet"].update(node)
# Merge actors to node physical stats
node_info["actors"] = await cls.get_node_actors(node_id)
# Update workers to node physical stats
node_info["workers"] = DataSource.node_workers.get(node_id, [])
node_info["logCount"] = node_log_count
node_info["errorCount"] = node_err_count
await GlobalSignals.node_info_fetched.send(node_info)
return node_info
|
async def get_node_info(cls, node_id):
node_physical_stats = dict(DataSource.node_physical_stats.get(node_id, {}))
node_stats = dict(DataSource.node_stats.get(node_id, {}))
node = DataSource.nodes.get(node_id, {})
node_ip = DataSource.node_id_to_ip.get(node_id)
# Merge node log count information into the payload
log_info = DataSource.ip_and_pid_to_logs.get(node_ip, {})
node_log_count = 0
for entries in log_info.values():
node_log_count += len(entries)
error_info = DataSource.ip_and_pid_to_errors.get(node_ip, {})
node_err_count = 0
for entries in error_info.values():
node_err_count += len(entries)
node_stats.pop("coreWorkersStats", None)
ray_stats = cls._extract_view_data(
node_stats["viewData"],
{"object_store_used_memory", "object_store_available_memory"},
)
node_info = node_physical_stats
# Merge node stats to node physical stats under raylet
node_info["raylet"] = node_stats
node_info["raylet"].update(ray_stats)
# Merge GcsNodeInfo to node physical stats
node_info["raylet"].update(node)
# Merge actors to node physical stats
node_info["actors"] = await cls.get_node_actors(node_id)
# Update workers to node physical stats
node_info["workers"] = DataSource.node_workers.get(node_id, [])
node_info["logCount"] = node_log_count
node_info["errorCount"] = node_err_count
await GlobalSignals.node_info_fetched.send(node_info)
return node_info
|
https://github.com/ray-project/ray/issues/12126
|
Error: Traceback (most recent call last): File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 351, in _update_cache response = task.result()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 89, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in get_all_node_details for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in <listcomp> for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 149, in get_node_info node_stats["viewData"], KeyError: 'viewData'
|
KeyError
|
def unflatten_dict(dt, delimiter="/"):
"""Unflatten dict. Does not support unflattening lists."""
dict_type = type(dt)
out = dict_type()
for key, val in dt.items():
path = key.split(delimiter)
item = out
for k in path[:-1]:
item = item.setdefault(k, dict_type())
item[path[-1]] = val
return out
|
def unflatten_dict(dt, delimiter="/"):
"""Unflatten dict. Does not support unflattening lists."""
out = defaultdict(dict)
for key, val in dt.items():
path = key.split(delimiter)
item = out
for k in path[:-1]:
item = item[k]
item[path[-1]] = val
return dict(out)
|
https://github.com/ray-project/ray/issues/11947
|
$ python ./python/ray/tune/examples/bohb_example.py
File descriptor limit 256 is too low for production servers and may result in connection errors. At least 8192 is recommended. --- Fix with 'ulimit -n 8192'
2020-11-11 20:38:44,944 INFO services.py:1110 -- View the Ray dashboard at http://127.0.0.1:8265
Traceback (most recent call last):
File "./python/ray/tune/examples/bohb_example.py", line 87, in <module>
stop={"training_iteration": 100})
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/tune.py", line 416, in run
runner.step()
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 380, in step
next_trial = self._get_next_trial() # blocking
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 479, in _get_next_trial
self._update_trial_queue(blocking=wait_for_trial)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 855, in _update_trial_queue
trial = self._search_alg.next_trial()
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/search_generator.py", line 114, in next_trial
self._experiment.dir_name)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/search_generator.py", line 121, in create_trial_if_possible
suggested_config = self.searcher.suggest(trial_id)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/bohb.py", line 175, in suggest
return unflatten_dict(config)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/utils/util.py", line 276, in unflatten_dict
item = item[k]
KeyError: 'b'
|
KeyError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.