after_merge
stringlengths 64
17k
| before_merge
stringlengths 60
17k
| source code and errors
stringlengths 236
32.3k
| full_traceback
stringlengths 170
17.7k
| traceback_type
stringclasses 60
values |
|---|---|---|---|---|
def _blocking_send(self, req: ray_client_pb2.DataRequest
) -> ray_client_pb2.DataResponse:
req_id = self._next_id()
req.req_id = req_id
self.request_queue.put(req)
data = None
with self.cv:
self.cv.wait_for(
lambda: req_id in self.ready_data or self._in_shutdown)
if self._in_shutdown:
raise ConnectionError(
f"cannot send request {req}: data channel shutting down")
data = self.ready_data[req_id]
del self.ready_data[req_id]
return data
|
def _blocking_send(self, req: ray_client_pb2.DataRequest
) -> ray_client_pb2.DataResponse:
req_id = self._next_id()
req.req_id = req_id
self.request_queue.put(req)
data = None
with self.cv:
self.cv.wait_for(lambda: req_id in self.ready_data)
data = self.ready_data[req_id]
del self.ready_data[req_id]
return data
|
[{'piece_type': 'error message', 'piece_content': 'Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nGot Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nException in thread Thread-1:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/usr/lib/python3.6/threading.py", line 864, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main\\nraise e\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main\\nfor response in resp_stream:\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = 
"{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\n\\nException in thread Thread-2:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/usr/lib/python3.6/threading.py", line 864, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main\\nraise e\\nFile "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main\\nfor record in log_stream:\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n'}]
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def _log_main(self) -> None:
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.channel)
log_stream = stub.Logstream(
iter(self.request_queue.get, None), metadata=self._metadata)
try:
for record in log_stream:
if record.level < 0:
self.stdstream(level=record.level, msg=record.msg)
self.log(level=record.level, msg=record.msg)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.CANCELLED:
# Graceful shutdown. We've cancelled our own connection.
logger.info("Cancelling logs channel")
elif e.code() == grpc.StatusCode.UNAVAILABLE:
# TODO(barakmich): The server may have
# dropped. In theory, we can retry, as per
# https://grpc.github.io/grpc/core/md_doc_statuscodes.html but
# in practice we may need to think about the correct semantics
# here.
logger.info("Server disconnected from logs channel")
else:
# Some other, unhandled, gRPC error
logger.error(
f"Got Error from logger channel -- shutting down: {e}")
raise e
|
def _log_main(self) -> None:
stub = ray_client_pb2_grpc.RayletLogStreamerStub(self.channel)
log_stream = stub.Logstream(
iter(self.request_queue.get, None), metadata=self._metadata)
try:
for record in log_stream:
if record.level < 0:
self.stdstream(level=record.level, msg=record.msg)
self.log(level=record.level, msg=record.msg)
except grpc.RpcError as e:
if grpc.StatusCode.CANCELLED != e.code():
# Not just shutting down normally
logger.error(
f"Got Error from logger channel -- shutting down: {e}")
raise e
|
[{'piece_type': 'error message', 'piece_content': 'Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nGot Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nException in thread Thread-1:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/usr/lib/python3.6/threading.py", line 864, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main\\nraise e\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main\\nfor response in resp_stream:\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = 
"{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\n\\nException in thread Thread-2:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/usr/lib/python3.6/threading.py", line 864, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main\\nraise e\\nFile "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main\\nfor record in log_stream:\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n'}]
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def __init__(self,
conn_str: str = "",
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3):
"""Initializes the worker side grpc client.
Args:
conn_str: The host:port connection string for the ray server.
secure: whether to use SSL secure channel or not.
metadata: additional metadata passed in the grpc request headers.
connection_retries: Number of times to attempt to reconnect to the
ray server if it doesn't respond immediately. Setting to 0 tries
at least once. For infinite retries, catch the ConnectionError
exception.
"""
self.metadata = metadata if metadata else []
self.channel = None
self._conn_state = grpc.ChannelConnectivity.IDLE
self._client_id = make_client_id()
if secure:
credentials = grpc.ssl_channel_credentials()
self.channel = grpc.secure_channel(conn_str, credentials)
else:
self.channel = grpc.insecure_channel(conn_str)
self.channel.subscribe(self._on_channel_state_change)
# Retry the connection until the channel responds to something
# looking like a gRPC connection, though it may be a proxy.
conn_attempts = 0
timeout = INITIAL_TIMEOUT_SEC
ray_ready = False
while conn_attempts < max(connection_retries, 1):
conn_attempts += 1
try:
# Let gRPC wait for us to see if the channel becomes ready.
# If it throws, we couldn't connect.
grpc.channel_ready_future(self.channel).result(timeout=timeout)
# The HTTP2 channel is ready. Wrap the channel with the
# RayletDriverStub, allowing for unary requests.
self.server = ray_client_pb2_grpc.RayletDriverStub(
self.channel)
# Now the HTTP2 channel is ready, or proxied, but the
# servicer may not be ready. Call is_initialized() and if
# it throws, the servicer is not ready. On success, the
# `ray_ready` result is checked.
ray_ready = self.is_initialized()
if ray_ready:
# Ray is ready! Break out of the retry loop
break
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
except grpc.FutureTimeoutError:
logger.info(
f"Couldn't connect channel in {timeout} seconds, retrying")
# Note that channel_ready_future constitutes its own timeout,
# which is why we do not sleep here.
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
# UNAVAILABLE is gRPC's retryable error,
# so we do that here.
logger.info("Ray client server unavailable, "
f"retrying in {timeout}s...")
logger.debug(f"Received when checking init: {e.details()}")
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
else:
# Any other gRPC error gets a reraise
raise e
# Fallthrough, backoff, and retry at the top of the loop
logger.info("Waiting for Ray to become ready on the server, "
f"retry in {timeout}s...")
timeout = backoff(timeout)
# If we made it through the loop without ray_ready it means we've used
# up our retries and should error back to the user.
if not ray_ready:
raise ConnectionError("ray client connection timeout")
# Initialize the streams to finish protocol negotiation.
self.data_client = DataClient(self.channel, self._client_id,
self.metadata)
self.reference_count: Dict[bytes, int] = defaultdict(int)
self.log_client = LogstreamClient(self.channel, self.metadata)
self.log_client.set_logstream_level(logging.INFO)
self.closed = False
|
def __init__(self,
conn_str: str = "",
secure: bool = False,
metadata: List[Tuple[str, str]] = None,
connection_retries: int = 3):
"""Initializes the worker side grpc client.
Args:
conn_str: The host:port connection string for the ray server.
secure: whether to use SSL secure channel or not.
metadata: additional metadata passed in the grpc request headers.
connection_retries: Number of times to attempt to reconnect to the
ray server if it doesn't respond immediately. Setting to 0 tries
at least once. For infinite retries, catch the ConnectionError
exception.
"""
self.metadata = metadata if metadata else []
self.channel = None
self._client_id = make_client_id()
if secure:
credentials = grpc.ssl_channel_credentials()
self.channel = grpc.secure_channel(conn_str, credentials)
else:
self.channel = grpc.insecure_channel(conn_str)
# Retry the connection until the channel responds to something
# looking like a gRPC connection, though it may be a proxy.
conn_attempts = 0
timeout = INITIAL_TIMEOUT_SEC
ray_ready = False
while conn_attempts < max(connection_retries, 1):
conn_attempts += 1
try:
# Let gRPC wait for us to see if the channel becomes ready.
# If it throws, we couldn't connect.
grpc.channel_ready_future(self.channel).result(timeout=timeout)
# The HTTP2 channel is ready. Wrap the channel with the
# RayletDriverStub, allowing for unary requests.
self.server = ray_client_pb2_grpc.RayletDriverStub(
self.channel)
# Now the HTTP2 channel is ready, or proxied, but the
# servicer may not be ready. Call is_initialized() and if
# it throws, the servicer is not ready. On success, the
# `ray_ready` result is checked.
ray_ready = self.is_initialized()
if ray_ready:
# Ray is ready! Break out of the retry loop
break
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
except grpc.FutureTimeoutError:
logger.info(
f"Couldn't connect channel in {timeout} seconds, retrying")
# Note that channel_ready_future constitutes its own timeout,
# which is why we do not sleep here.
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
# UNAVAILABLE is gRPC's retryable error,
# so we do that here.
logger.info("Ray client server unavailable, "
f"retrying in {timeout}s...")
logger.debug(f"Received when checking init: {e.details()}")
# Ray is not ready yet, wait a timeout
time.sleep(timeout)
else:
# Any other gRPC error gets a reraise
raise e
# Fallthrough, backoff, and retry at the top of the loop
logger.info("Waiting for Ray to become ready on the server, "
f"retry in {timeout}s...")
timeout = backoff(timeout)
# If we made it through the loop without ray_ready it means we've used
# up our retries and should error back to the user.
if not ray_ready:
raise ConnectionError("ray client connection timeout")
# Initialize the streams to finish protocol negotiation.
self.data_client = DataClient(self.channel, self._client_id,
self.metadata)
self.reference_count: Dict[bytes, int] = defaultdict(int)
self.log_client = LogstreamClient(self.channel, self.metadata)
self.log_client.set_logstream_level(logging.INFO)
self.closed = False
|
[{'piece_type': 'error message', 'piece_content': 'Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nGot Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nException in thread Thread-1:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/usr/lib/python3.6/threading.py", line 864, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main\\nraise e\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main\\nfor response in resp_stream:\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = 
"{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\n\\nException in thread Thread-2:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/usr/lib/python3.6/threading.py", line 864, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main\\nraise e\\nFile "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main\\nfor record in log_stream:\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n'}]
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184399675","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 51, in _log_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/logsclient.py", line 42, in _log_main
for record in log_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610421939.184408826","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def translate(configuration: Dict[str, Any],
dictionary: Dict[str, str]) -> Dict[str, Any]:
return {
dictionary[field]: configuration[field]
for field in dictionary if field in configuration
}
|
def translate(configuration: Dict[str, Any],
dictionary: Dict[str, str]) -> Dict[str, Any]:
return {dictionary[field]: configuration[field] for field in dictionary}
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/home/ray/anaconda3/bin/ray-operator", line 8, in <module>\\nsys.exit(main())\\nFile "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator.py", line 123, in main\\ncluster_config = operator_utils.cr_to_config(cluster_cr)\\nFile "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 62, in cr_to_config\\nconfig["available_node_types" = get_node_types(cluster_resource)\\nFile "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 76, in get_node_types\\npod_type_copy, dictionary=NODE_TYPE_FIELDS)\\nFile "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 98, in translate\\nreturn {dictionary[field: configuration[field for field in dictionary}\\nFile "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 98, in <dictcomp>\\nreturn {dictionary[field: configuration[field for field in dictionary}\\nKeyError: \\'minWorkers\\'\\nstream closed'}]
|
Traceback (most recent call last):
File "/home/ray/anaconda3/bin/ray-operator", line 8, in <module>
sys.exit(main())
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator.py", line 123, in main
cluster_config = operator_utils.cr_to_config(cluster_cr)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 62, in cr_to_config
config["available_node_types" = get_node_types(cluster_resource)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 76, in get_node_types
pod_type_copy, dictionary=NODE_TYPE_FIELDS)
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 98, in translate
return {dictionary[field: configuration[field for field in dictionary}
File "/home/ray/anaconda3/lib/python3.7/site-packages/ray/operator/operator_utils.py", line 98, in <dictcomp>
return {dictionary[field: configuration[field for field in dictionary}
KeyError: 'minWorkers'
stream closed
|
KeyError
|
def __init__(self,
             conn_str: str = "",
             secure: bool = False,
             metadata: List[Tuple[str, str]] = None,
             connection_retries: int = 3):
    """Initializes the worker side grpc client.

    Args:
        conn_str: The host:port connection string for the ray server.
        secure: whether to use SSL secure channel or not.
        metadata: additional metadata passed in the grpc request headers.
        connection_retries: Number of times to attempt to reconnect to the
            ray server if it doesn't respond immediately. Setting to 0 tries
            at least once. For infinite retries, catch the ConnectionError
            exception.

    Raises:
        ConnectionError: if Ray never becomes ready within the allotted
            retries. This covers the proxy case where the HTTP2 channel
            comes up but the servicer behind it is not serving yet.
    """
    self.metadata = metadata if metadata else []
    self.channel = None
    self._client_id = make_client_id()
    if secure:
        credentials = grpc.ssl_channel_credentials()
        self.channel = grpc.secure_channel(conn_str, credentials)
    else:
        self.channel = grpc.insecure_channel(conn_str)
    # Retry the connection until the channel responds to something
    # looking like a gRPC connection, though it may be a proxy.
    conn_attempts = 0
    timeout = INITIAL_TIMEOUT_SEC
    ray_ready = False
    # max(..., 1) guarantees at least one attempt even for retries == 0.
    while conn_attempts < max(connection_retries, 1):
        conn_attempts += 1
        try:
            # Let gRPC wait for us to see if the channel becomes ready.
            # If it throws, we couldn't connect.
            grpc.channel_ready_future(self.channel).result(timeout=timeout)
            # The HTTP2 channel is ready. Wrap the channel with the
            # RayletDriverStub, allowing for unary requests.
            self.server = ray_client_pb2_grpc.RayletDriverStub(
                self.channel)
            # Now the HTTP2 channel is ready, or proxied, but the
            # servicer may not be ready. Call is_initialized() and if
            # it throws, the servicer is not ready. On success, the
            # `ray_ready` result is checked.
            ray_ready = self.is_initialized()
            if ray_ready:
                # Ray is ready! Break out of the retry loop
                break
            # Ray is not ready yet, wait a timeout
            time.sleep(timeout)
        except grpc.FutureTimeoutError:
            logger.info(
                f"Couldn't connect channel in {timeout} seconds, retrying")
            # Note that channel_ready_future constitutes its own timeout,
            # which is why we do not sleep here.
        except grpc.RpcError as e:
            if e.code() == grpc.StatusCode.UNAVAILABLE:
                # UNAVAILABLE is gRPC's retryable error,
                # so we do that here.
                logger.info("Ray client server unavailable, "
                            f"retrying in {timeout}s...")
                logger.debug(f"Received when checking init: {e.details()}")
                # Ray is not ready yet, wait a timeout
                time.sleep(timeout)
            else:
                # Any other gRPC error gets a reraise
                raise e
        # Fallthrough, backoff, and retry at the top of the loop
        logger.info("Waiting for Ray to become ready on the server, "
                    f"retry in {timeout}s...")
        timeout = backoff(timeout)
    # If we made it through the loop without ray_ready it means we've used
    # up our retries and should error back to the user.
    if not ray_ready:
        raise ConnectionError("ray client connection timeout")
    # Initialize the streams to finish protocol negotiation.
    self.data_client = DataClient(self.channel, self._client_id,
                                  self.metadata)
    self.reference_count: Dict[bytes, int] = defaultdict(int)
    self.log_client = LogstreamClient(self.channel, self.metadata)
    self.log_client.set_logstream_level(logging.INFO)
    self.closed = False
|
def __init__(self,
             conn_str: str = "",
             secure: bool = False,
             metadata: List[Tuple[str, str]] = None,
             connection_retries: int = 3):
    """Initializes the worker side grpc client.

    Args:
        conn_str: The host:port connection string for the ray server.
        secure: whether to use SSL secure channel or not.
        metadata: additional metadata passed in the grpc request headers.
        connection_retries: Number of times to attempt to reconnect to the
            ray server if it doesn't respond immediately. Setting to 0 tries
            at least once. For infinite retries, catch the ConnectionError
            exception.

    Raises:
        ConnectionError: if the Ray servicer never becomes ready within
            the allotted retries.
    """
    self.metadata = metadata if metadata else []
    self.channel = None
    self._client_id = make_client_id()
    if secure:
        credentials = grpc.ssl_channel_credentials()
        self.channel = grpc.secure_channel(conn_str, credentials)
    else:
        self.channel = grpc.insecure_channel(conn_str)
    # Retry until the channel responds AND the Ray servicer is serving.
    # A reverse proxy can accept the HTTP2 connection (so
    # channel_ready_future succeeds) while the backend still answers 502,
    # therefore channel readiness alone is not a sufficient check.
    conn_attempts = 0
    timeout = INITIAL_TIMEOUT_SEC
    ray_ready = False
    # max(..., 1) guarantees at least one attempt even for retries == 0.
    while conn_attempts < max(connection_retries, 1):
        conn_attempts += 1
        try:
            # Wait for the HTTP2 channel; raises FutureTimeoutError if
            # nothing is listening at all.
            grpc.channel_ready_future(self.channel).result(timeout=timeout)
            # Channel (or proxy) is up; create the unary-request stub.
            self.server = ray_client_pb2_grpc.RayletDriverStub(
                self.channel)
            # Probe the servicer itself; raises grpc.RpcError if the
            # backend behind the channel is not actually reachable yet.
            ray_ready = self.is_initialized()
            if ray_ready:
                # Ray is ready! Break out of the retry loop.
                break
            # Servicer reachable but Ray not ready yet; wait a timeout.
            time.sleep(timeout)
        except grpc.FutureTimeoutError:
            logger.info(f"Couldn't connect in {timeout} seconds, retrying")
            # channel_ready_future already waited `timeout`; no sleep.
        except grpc.RpcError as e:
            if e.code() == grpc.StatusCode.UNAVAILABLE:
                # UNAVAILABLE is gRPC's retryable error (e.g. a proxy
                # returning 502 while the backend starts up).
                logger.info("Ray client server unavailable, "
                            f"retrying in {timeout}s...")
                logger.debug(f"Received when checking init: {e.details()}")
                time.sleep(timeout)
            else:
                # Any other gRPC error gets a reraise.
                raise e
        # Fallthrough: back off and retry at the top of the loop.
        timeout = timeout + 5
        if timeout > MAX_TIMEOUT_SEC:
            timeout = MAX_TIMEOUT_SEC
    # Retries exhausted without the servicer becoming ready.
    if not ray_ready:
        raise ConnectionError("ray client connection timeout")
    self.data_client = DataClient(self.channel, self._client_id,
                                  self.metadata)
    self.reference_count: Dict[bytes, int] = defaultdict(int)
    self.log_client = LogstreamClient(self.channel, self.metadata)
    self.log_client.set_logstream_level(logging.INFO)
    self.closed = False
|
[{'piece_type': 'error message', 'piece_content': 'Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610606646.739755640","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nGot Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = "{"created":"@1610606646.741104491","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"\\n\\nException in thread Thread-5:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/usr/lib/python3.6/threading.py", line 864, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main\\nraise e\\nFile "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main\\nfor response in resp_stream:\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNAVAILABLE\\ndetails = "Received http2 header with status: 502"\\ndebug_error_string = 
"{"created":"@1610606646.739755640","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"'}]
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610606646.739755640","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Got Error from logger channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610606646.741104491","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
Exception in thread Thread-5:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/home/eric/Desktop/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/home/eric/.local/lib/python3.6/site-packages/grpc/_channel.py", line 706, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "Received http2 header with status: 502"
debug_error_string = "{"created":"@1610606646.739755640","description":"Received http2 :status header with non-200 OK status","file":"src/core/ext/filters/http/client/http_client_filter.cc","file_line":129,"grpc_message":"Received http2 header with status: 502","grpc_status":14,"value":"502"}"
|
debug_error
|
def __init__(self,
             name: str,
             description: str = "",
             tag_keys: Optional[Tuple[str]] = None):
    """Validate the metric name / tag keys and set up shared state.

    Raises:
        ValueError: if ``name`` is empty.
        TypeError: if ``tag_keys`` is not a tuple, or contains a
            non-string key.
    """
    if not name:
        raise ValueError("Empty name is not allowed. "
                         "Please provide a metric name.")
    self._name = name
    self._description = description
    # Unit is intentionally blank; it would not be
    # exported to Prometheus anyway.
    self._unit = ""
    # Tags applied to every record() call unless overridden.
    self._default_tags = {}
    # Declared tag keys.
    self._tag_keys = tag_keys or tuple()
    # Concrete Cython metric; subclasses are responsible for setting it.
    self._metric = None
    if not isinstance(self._tag_keys, tuple):
        raise TypeError("tag_keys should be a tuple type, got: "
                        f"{type(self._tag_keys)}")
    bad_keys = [k for k in self._tag_keys if not isinstance(k, str)]
    if bad_keys:
        raise TypeError(f"Tag keys must be str, got {type(bad_keys[0])}.")
|
def __init__(self,
             name: str,
             description: str = "",
             tag_keys: Optional[Tuple[str]] = None):
    """Validate the metric name / tag keys and set up shared state.

    Raises:
        ValueError: if ``name`` is empty.
        TypeError: if ``tag_keys`` is not a tuple, or contains a
            non-string key.
    """
    if len(name) == 0:
        raise ValueError("Empty name is not allowed. "
                         "Please provide a metric name.")
    self._name = name
    self._description = description
    # We don't specify unit because it won't be
    # exported to Prometheus anyway.
    self._unit = ""
    # The default tags key-value pair.
    self._default_tags = {}
    # Keys of tags.
    self._tag_keys = tag_keys or tuple()
    # The Cython metric class. This should be set in the child class.
    self._metric = None
    # A wrong container type is a programming error, not a bad value:
    # raise TypeError (was ValueError) for consistency with other checks.
    if not isinstance(self._tag_keys, tuple):
        raise TypeError("tag_keys should be a tuple type, got: "
                        f"{type(self._tag_keys)}")
    # Reject non-string keys up front; the Cython layer .encode()s tag
    # keys and would otherwise fail later with an opaque AttributeError.
    for key in self._tag_keys:
        if not isinstance(key, str):
            raise TypeError(f"Tag keys must be str, got {type(key)}.")
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nfrom ray.util.metrics import Count\\n\\nray.init()\\n\\ncount = Count("abc", tag_keys=("a", "b"))\\ncount.set_default_tags({"a": 1})\\ncount.record(1.0, {"b": 2})'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "test.py", line 8, in <module>\\ncount.record(1.0, {"b": 2})\\nFile "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record\\nself._metric.record(value, tags=final_tags)\\nFile "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record\\nc_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")\\nAttributeError: \\'int\\' object has no attribute \\'encode\\''}]
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
count.record(1.0, {"b": 2})
File "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record
self._metric.record(value, tags=final_tags)
File "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record
c_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")
AttributeError: 'int' object has no attribute 'encode'
|
AttributeError
|
def set_default_tags(self, default_tags: Dict[str, str]):
    """Set default tags of metrics.

    Every key must be one of the declared tag keys and every value must
    be a string; the validated mapping is stored for use by ``record``.

    Example:
        >>> # Note that set_default_tags returns the instance itself.
        >>> counter = Counter("name")
        >>> counter2 = counter.set_default_tags({"a": "b"})
        >>> assert counter is counter2
        >>> # this means you can instantiate it in this way.
        >>> counter = Counter("name").set_default_tags({"a": "b"})

    Args:
        default_tags(dict): Default tags that are
            used for every record method.

    Returns:
        Metric: it returns the instance itself.

    Raises:
        ValueError: if a key was not declared in ``tag_keys``.
        TypeError: if a tag value is not a string.
    """
    for key in default_tags:
        if key not in self._tag_keys:
            raise ValueError(f"Unrecognized tag key {key}.")
        value = default_tags[key]
        if not isinstance(value, str):
            raise TypeError(f"Tag values must be str, got {type(value)}.")
    self._default_tags = default_tags
    return self
|
def set_default_tags(self, default_tags: Dict[str, str]):
    """Set default tags of metrics.

    Example:
        >>> # Note that set_default_tags returns the instance itself.
        >>> counter = Counter("name")
        >>> counter2 = counter.set_default_tags({"a": "b"})
        >>> assert counter is counter2
        >>> # this means you can instantiate it in this way.
        >>> counter = Counter("name").set_default_tags({"a": "b"})

    Args:
        default_tags(dict): Default tags that are
            used for every record method.

    Returns:
        Metric: it returns the instance itself.

    Raises:
        ValueError: if a key was not declared in ``tag_keys``.
        TypeError: if a tag value is not a string.
    """
    # Validate eagerly: the Cython layer .encode()s tag values, so a
    # non-str value would otherwise surface later as a confusing
    # AttributeError ("'int' object has no attribute 'encode'").
    for key, val in default_tags.items():
        if key not in self._tag_keys:
            raise ValueError(f"Unrecognized tag key {key}.")
        if not isinstance(val, str):
            raise TypeError(f"Tag values must be str, got {type(val)}.")
    self._default_tags = default_tags
    return self
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nfrom ray.util.metrics import Count\\n\\nray.init()\\n\\ncount = Count("abc", tag_keys=("a", "b"))\\ncount.set_default_tags({"a": 1})\\ncount.record(1.0, {"b": 2})'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "test.py", line 8, in <module>\\ncount.record(1.0, {"b": 2})\\nFile "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record\\nself._metric.record(value, tags=final_tags)\\nFile "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record\\nc_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")\\nAttributeError: \\'int\\' object has no attribute \\'encode\\''}]
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
count.record(1.0, {"b": 2})
File "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record
self._metric.record(value, tags=final_tags)
File "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record
c_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")
AttributeError: 'int' object has no attribute 'encode'
|
AttributeError
|
def record(self, value: float, tags: dict = None) -> None:
    """Record the metric point of the metric.

    Args:
        value(float): The value to be recorded as a metric point.
        tags(dict): Optional per-call tags, merged over the defaults
            (per-call values win). All tag values must be strings.

    Raises:
        TypeError: if any tag value is not a string.
    """
    assert self._metric is not None
    merged = dict(self._default_tags)
    if tags:
        # Validate before merging so a bad value never reaches the
        # underlying Cython metric.
        for tag_value in tags.values():
            if not isinstance(tag_value, str):
                raise TypeError(
                    f"Tag values must be str, got {type(tag_value)}.")
        merged.update(tags)
    self._metric.record(value, tags=merged)
|
def record(self, value: float, tags: dict = None) -> None:
    """Record the metric point of the metric.

    Args:
        value(float): The value to be recorded as a metric point.
        tags(dict): Optional per-call tags, merged over the defaults
            (per-call values win). All tag values must be strings.

    Raises:
        TypeError: if any tag value is not a string.
    """
    assert self._metric is not None
    # Validate eagerly: the Cython layer .encode()s tag values, so a
    # non-str value would otherwise surface later as a confusing
    # AttributeError ("'int' object has no attribute 'encode'").
    if tags is not None:
        for val in tags.values():
            if not isinstance(val, str):
                raise TypeError(
                    f"Tag values must be str, got {type(val)}.")
    default_tag_copy = self._default_tags.copy()
    default_tag_copy.update(tags or {})
    self._metric.record(value, tags=default_tag_copy)
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nfrom ray.util.metrics import Count\\n\\nray.init()\\n\\ncount = Count("abc", tag_keys=("a", "b"))\\ncount.set_default_tags({"a": 1})\\ncount.record(1.0, {"b": 2})'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "test.py", line 8, in <module>\\ncount.record(1.0, {"b": 2})\\nFile "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record\\nself._metric.record(value, tags=final_tags)\\nFile "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record\\nc_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")\\nAttributeError: \\'int\\' object has no attribute \\'encode\\''}]
|
Traceback (most recent call last):
File "test.py", line 8, in <module>
count.record(1.0, {"b": 2})
File "/Users/eoakes/code/ray/python/ray/util/metrics.py", line 84, in record
self._metric.record(value, tags=final_tags)
File "python/ray/includes/metric.pxi", line 54, in ray._raylet.Metric.record
c_tags[tag_k.encode("ascii")] = tag_v.encode("ascii")
AttributeError: 'int' object has no attribute 'encode'
|
AttributeError
|
def postprocess_advantages(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Deprecated alias kept for backward compatibility.

    Emits a (non-fatal) deprecation warning, then delegates to the
    replacement helper with identical arguments.
    """
    deprecation_warning(
        old="rllib.agents.a3c.a3c_tf_policy.postprocess_advantages",
        new="rllib.evaluation.postprocessing.compute_gae_for_sample_batch",
        error=False)
    result = compute_gae_for_sample_batch(policy, sample_batch,
                                          other_agent_batches, episode)
    return result
|
def postprocess_advantages(policy,
                           sample_batch,
                           other_agent_batches=None,
                           episode=None):
    """Attach GAE advantages to a trajectory batch.

    If the episode ended, the bootstrap value is 0.0; otherwise the
    policy's value function is evaluated on the final transition (plus
    any recurrent state outputs) to bootstrap the return.
    """
    if sample_batch[SampleBatch.DONES][-1]:
        # Terminal step: nothing beyond the episode to bootstrap from.
        last_r = 0.0
    else:
        # Collect the final recurrent state outputs for the value call.
        final_state = [
            sample_batch["state_out_{}".format(idx)][-1]
            for idx in range(policy.num_state_tensors())
        ]
        last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
                               sample_batch[SampleBatch.ACTIONS][-1],
                               sample_batch[SampleBatch.REWARDS][-1],
                               *final_state)
    return compute_advantages(sample_batch, last_r, policy.config["gamma"],
                              policy.config["lambda"],
                              policy.config["use_gae"],
                              policy.config["use_critic"])
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def setup_mixins(policy, obs_space, action_space, config):
    """Initialize this policy's mixin base classes.

    Called during policy construction so the mixins' state (value-function
    helper, LR schedule) exists before the loss is first evaluated.

    Args:
        policy: The policy instance the mixins attach to.
        obs_space: Observation space of the policy.
        action_space: Action space of the policy.
        config: Trainer config dict; ``lr`` and ``lr_schedule`` feed the
            learning-rate schedule mixin.
    """
    ValueNetworkMixin.__init__(policy, obs_space, action_space, config)
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
|
def setup_mixins(policy, obs_space, action_space, config):
ValueNetworkMixin.__init__(policy)
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def __init__(self,
action_dist,
actions,
advantages,
v_target,
vf,
vf_loss_coeff=0.5,
entropy_coeff=0.01):
log_prob = action_dist.logp(actions)
# The "policy gradients" loss
self.pi_loss = -tf.reduce_sum(log_prob * advantages)
delta = vf - v_target
self.vf_loss = 0.5 * tf.reduce_sum(tf.math.square(delta))
self.entropy = tf.reduce_sum(action_dist.entropy())
self.total_loss = (self.pi_loss + self.vf_loss * vf_loss_coeff -
self.entropy * entropy_coeff)
|
def __init__(self):
@make_tf_callable(self.get_session())
def value(ob, prev_action, prev_reward, *state):
model_out, _ = self.model({
SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
SampleBatch.PREV_ACTIONS: tf.convert_to_tensor([prev_action]),
SampleBatch.PREV_REWARDS: tf.convert_to_tensor([prev_reward]),
"is_training": tf.convert_to_tensor(False),
}, [tf.convert_to_tensor([s]) for s in state],
tf.convert_to_tensor([1]))
return self.model.value_function()[0]
self._value = value
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def add_advantages(policy,
sample_batch,
other_agent_batches=None,
episode=None):
# Stub serving backward compatibility.
deprecation_warning(
old="rllib.agents.a3c.a3c_torch_policy.add_advantages",
new="rllib.evaluation.postprocessing.compute_gae_for_sample_batch",
error=False)
return compute_gae_for_sample_batch(policy, sample_batch,
other_agent_batches, episode)
|
def add_advantages(policy,
                   sample_batch,
                   other_agent_batches=None,
                   episode=None):
    """Attach GAE advantages to a finished trajectory batch.

    Chooses the bootstrap value for the final step: 0.0 when the episode
    terminated on the last timestep, otherwise the policy's value estimate
    for the observation following the last step. Then computes advantages
    with the policy's configured gamma/lambda/GAE settings.

    Args:
        policy: Policy providing ``_value`` and the algorithm config.
        sample_batch: Trajectory data for one (sub-)episode.
        other_agent_batches: Unused; present for API compatibility.
        episode: Unused; present for API compatibility.

    Returns:
        The sample batch augmented with advantage (and value-target) columns.
    """
    terminal = sample_batch[SampleBatch.DONES][-1]
    # Bootstrap with V(s_{T+1}) only if the trajectory was truncated
    # (not terminated).
    last_r = 0.0 if terminal else policy._value(
        sample_batch[SampleBatch.NEXT_OBS][-1])
    cfg = policy.config
    return compute_advantages(sample_batch, last_r, cfg["gamma"],
                              cfg["lambda"], cfg["use_gae"],
                              cfg["use_critic"])
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def clip_gradients(policy: Policy, optimizer: "tf.keras.optimizers.Optimizer",
                   loss: TensorType) -> ModelGradients:
    """Compute gradients of `loss` w.r.t. the policy's Q-function vars.

    Delegates to ``minimize_and_clip``, clipping each gradient to the
    ``grad_clip`` value found in the policy's config.

    Args:
        policy: The Policy whose ``q_func_vars`` are differentiated.
        optimizer: The optimizer used to compute the gradients.
        loss: The scalar loss tensor to minimize.

    Returns:
        The clipped gradients-and-variables list.
    """
    clip_val = policy.config["grad_clip"]
    grads_and_vars = minimize_and_clip(
        optimizer,
        loss,
        clip_val=clip_val,
        var_list=policy.q_func_vars)
    return grads_and_vars
|
def clip_gradients(policy: Policy, optimizer: "tf.keras.optimizers.Optimizer",
                   loss: TensorType) -> ModelGradients:
    """Return (grad, var) pairs of `loss` w.r.t. the policy's Q-net vars.

    Applies norm clipping through `minimize_and_clip` when `grad_clip` is
    configured; otherwise computes unclipped gradients. Entries with a
    None gradient are discarded.
    """
    clip_val = policy.config["grad_clip"]
    if clip_val is None:
        pairs = optimizer.compute_gradients(
            loss, var_list=policy.q_func_vars)
    else:
        pairs = minimize_and_clip(
            optimizer,
            loss,
            var_list=policy.q_func_vars,
            clip_val=clip_val)
    return [(grad, var) for grad, var in pairs if grad is not None]
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def get_policy_class(config):
    """Pick the policy class matching the `framework`/`vtrace` settings."""
    use_vtrace = config["vtrace"]
    if config["framework"] == "torch":
        # Torch policies are imported lazily so torch is only pulled in
        # when the torch framework is actually selected.
        if use_vtrace:
            from ray.rllib.agents.impala.vtrace_torch_policy import \
                VTraceTorchPolicy
            return VTraceTorchPolicy
        from ray.rllib.agents.a3c.a3c_torch_policy import A3CTorchPolicy
        return A3CTorchPolicy
    if use_vtrace:
        return VTraceTFPolicy
    # Lazy import for the non-vtrace TF case as well.
    from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
    return A3CTFPolicy
|
def get_policy_class(config):
    """Return the policy class for the given `framework`/`vtrace` config.

    Framework-specific policies are imported lazily so that e.g. torch is
    only imported when the torch framework is actually requested. The
    non-vtrace TF branch now also imports `A3CTFPolicy` lazily instead of
    referencing a name that is not in scope at module level.
    """
    if config["framework"] == "torch":
        if config["vtrace"]:
            from ray.rllib.agents.impala.vtrace_torch_policy import \
                VTraceTorchPolicy
            return VTraceTorchPolicy
        else:
            from ray.rllib.agents.a3c.a3c_torch_policy import \
                A3CTorchPolicy
            return A3CTorchPolicy
    else:
        if config["vtrace"]:
            return VTraceTFPolicy
        else:
            # Lazy import: A3CTFPolicy is not available at module scope.
            from ray.rllib.agents.a3c.a3c_tf_policy import A3CTFPolicy
            return A3CTFPolicy
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def postprocess_trajectory(
        policy: Policy,
        sample_batch: SampleBatch,
        other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
        episode: Optional[MultiAgentEpisode] = None) -> SampleBatch:
    """Postprocess one agent's single-episode trajectory and return it.

    The batch holds data from exactly one episode and one agent:
    - With `config.batch_mode=truncate_episodes` (default) the episode may
      be truncated at the end once `config.rollout_fragment_length` is hit.
    - With `config.batch_mode=complete_episodes` the batch always spans one
      full episode, regardless of its length.
    Columns may be added to or altered in `sample_batch`.

    Args:
        policy (Policy): The Policy that produced `sample_batch`.
        sample_batch (SampleBatch): The trajectory to postprocess.
        other_agent_batches (Optional[Dict[PolicyID, SampleBatch]]): Optional
            mapping of AgentIDs to the other agents' trajectories from the
            same episode. NOTE: The other agents use the same policy.
        episode (Optional[MultiAgentEpisode]): Optional multi-agent episode
            object in which the agents operated.

    Returns:
        SampleBatch: The postprocessed (possibly new) SampleBatch.
    """
    # GAE advantages are only needed when vtrace is disabled.
    if not policy.config["vtrace"]:
        sample_batch = compute_gae_for_sample_batch(
            policy, sample_batch, other_agent_batches, episode)
    # TODO: (sven) remove this once the trajectory view API is fully in
    # place. "new_obs" is unused downstream, so drop it to save bandwidth.
    sample_batch.data.pop("new_obs")
    return sample_batch
|
def postprocess_trajectory(
        policy: Policy,
        sample_batch: SampleBatch,
        other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
        episode: Optional[MultiAgentEpisode] = None) -> SampleBatch:
    """Postprocess one agent's single-episode trajectory batch.

    Depending on `config.batch_mode`, `sample_batch` holds either a possibly
    truncated episode (`truncate_episodes`, the default, cut at
    `config.rollout_fragment_length`) or exactly one complete episode
    (`complete_episodes`). Columns may be added to or altered in the batch.

    Args:
        policy (Policy): The Policy that produced `sample_batch`.
        sample_batch (SampleBatch): The batch to postprocess.
        other_agent_batches (Optional[Dict[PolicyID, SampleBatch]]): Optional
            mapping of AgentIDs to the other agents' trajectory data from the
            same episode. NOTE: The other agents use the same policy.
        episode (Optional[MultiAgentEpisode]): Optional multi-agent episode
            object the agents acted in.

    Returns:
        SampleBatch: The postprocessed (possibly new) SampleBatch.
    """
    # GAE postprocessing applies only when vtrace is disabled.
    if not policy.config["vtrace"]:
        sample_batch = postprocess_ppo_gae(policy, sample_batch,
                                           other_agent_batches, episode)
    # TODO: (sven) remove this del once we have trajectory view API fully in
    # place.
    # "new_obs" is unused downstream, so drop it to save bandwidth.
    del sample_batch.data["new_obs"]
    return sample_batch
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def postprocess_ppo_gae(
        policy: Policy,
        sample_batch: SampleBatch,
        other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
        episode: Optional[MultiAgentEpisode] = None) -> SampleBatch:
    """Deprecated stub kept for backward compatibility.

    Emits a (non-erroring) deprecation warning, then forwards all arguments
    to the replacement helper.
    """
    deprecation_warning(
        old="rllib.agents.ppo.ppo_tf_policy.postprocess_ppo_gae",
        new="rllib.evaluation.postprocessing.compute_gae_for_sample_batch",
        error=False)
    # Delegate unchanged to the new implementation.
    return compute_gae_for_sample_batch(policy, sample_batch,
                                        other_agent_batches, episode)
|
def postprocess_ppo_gae(
        policy: Policy,
        sample_batch: SampleBatch,
        other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
        episode: Optional[MultiAgentEpisode] = None) -> SampleBatch:
    """Postprocess one agent's single-episode trajectory batch.

    Depending on `config.batch_mode`, `sample_batch` holds either a possibly
    truncated episode (`truncate_episodes`, the default, cut at
    `config.rollout_fragment_length`) or exactly one complete episode
    (`complete_episodes`). Columns may be added to or altered in the batch.

    Args:
        policy (Policy): The Policy that produced `sample_batch`.
        sample_batch (SampleBatch): The batch to postprocess.
        other_agent_batches (Optional[Dict[PolicyID, SampleBatch]]): Optional
            mapping of AgentIDs to the other agents' trajectory data from the
            same episode. NOTE: The other agents use the same policy.
        episode (Optional[MultiAgentEpisode]): Optional multi-agent episode
            object the agents acted in.

    Returns:
        SampleBatch: The postprocessed (possibly new) SampleBatch.
    """
    if sample_batch[SampleBatch.DONES][-1]:
        # Episode really finished -> bootstrap value is 0.0.
        last_r = 0.0
    elif policy.config["_use_trajectory_view_api"]:
        # Trajectory truncated -> bootstrap with the VF estimate of the last
        # observation. The Model's requirements define the single-timestep
        # (last one in the trajectory) input dict automatically.
        input_dict = policy.model.get_input_dict(sample_batch, index="last")
        last_r = policy._value(**input_dict)
    else:
        # TODO: (sven) Remove once trajectory view API is all-algo default.
        next_state = [
            sample_batch["state_out_{}".format(i)][-1]
            for i in range(policy.num_state_tensors())
        ]
        last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
                               sample_batch[SampleBatch.ACTIONS][-1],
                               sample_batch[SampleBatch.REWARDS][-1],
                               *next_state)
    # Add policy logits, VF preds, and advantages to the batch, using GAE
    # ("generalized advantage estimation") or plain returns.
    return compute_advantages(
        sample_batch,
        last_r,
        policy.config["gamma"],
        policy.config["lambda"],
        use_gae=policy.config["use_gae"],
        use_critic=policy.config.get("use_critic", True))
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def gradients(self, optimizer, loss):
    """Build per-sub-optimizer gradient lists and return the combined set.

    For every entry in ``self.optimizers`` (e.g. "critic" and "actor"),
    minimize the matching loss over the matching variable set with gradient
    clipping, caching the grad-var pairs in ``self.gvs``.

    Args:
        optimizer: Unused here; each key in ``self.optimizers`` supplies
            its own optimizer instead.
        loss: Unused here; per-key losses come from ``self.losses``.

    Returns:
        The concatenated "critic" + "actor" gradient-variable pairs.
    """
    # Hoist the clip threshold; it is constant across the loop.
    clip_val = self.config["grad_norm_clipping"]
    self.gvs = {}
    for key, opt in self.optimizers.items():
        self.gvs[key] = minimize_and_clip(opt, self.losses[key],
                                          self.vars[key], clip_val)
    return self.gvs["critic"] + self.gvs["actor"]
|
def gradients(self, optimizer, loss):
    """Build per-group (gradient, variable) lists for each sub-optimizer.

    Ignores the passed-in `optimizer`/`loss`; instead iterates the
    per-component optimizers (e.g. critic/actor) stored on `self`.
    When `grad_norm_clipping` is configured, gradients are norm-clipped
    via `minimize_and_clip`; otherwise raw `compute_gradients` output is
    used. Stores the full mapping on `self.gvs` and returns the
    concatenated critic + actor gradient lists.
    """
    clip_val = self.config["grad_norm_clipping"]
    grouped_gvs = {}
    for group_name, group_opt in self.optimizers.items():
        if clip_val is not None:
            grouped_gvs[group_name] = minimize_and_clip(
                group_opt, self.losses[group_name],
                self.vars[group_name], clip_val)
        else:
            # No clipping requested: take the optimizer's raw grads.
            grouped_gvs[group_name] = group_opt.compute_gradients(
                self.losses[group_name], self.vars[group_name])
    self.gvs = grouped_gvs
    return self.gvs["critic"] + self.gvs["actor"]
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def minimize_and_clip(optimizer, objective, var_list, clip_val=10.0):
    """Compute gradients of `objective` w.r.t. `var_list`, norm-clipped.

    Each gradient is clipped to norm `clip_val`; pass `clip_val=None` to
    skip clipping entirely. Variables with no gradient are dropped from
    the returned list.
    """
    # Accidentally passing values < 0.0 will break all gradients.
    assert clip_val is None or clip_val > 0.0, clip_val
    if tf.executing_eagerly():
        # Eager mode: pull gradients off the tape attached to the optimizer.
        raw_grads = optimizer.tape.gradient(objective, var_list)
        grads_and_vars = list(zip(list(raw_grads), var_list))
    else:
        # Graph mode: let the optimizer build the gradient ops.
        grads_and_vars = optimizer.compute_gradients(
            objective, var_list=var_list)
    clipped = []
    for grad, var in grads_and_vars:
        if grad is None:
            continue
        if clip_val is not None:
            grad = tf.clip_by_norm(grad, clip_val)
        clipped.append((grad, var))
    return clipped
|
def minimize_and_clip(optimizer, objective, var_list, clip_val=10.0):
    """Minimize `objective` using `optimizer` w.r.t. variables in
    `var_list` while ensuring the norm of the gradients for each
    variable is clipped to `clip_val`.

    Args:
        optimizer: TF optimizer; in eager mode it must expose `.tape`.
        objective: Loss tensor to differentiate.
        var_list: Variables to compute gradients for.
        clip_val: Max per-gradient norm, or None to disable clipping.

    Returns:
        List of (gradient, variable) pairs with None gradients removed.
    """
    # Accidentally passing values < 0.0 will break all gradients.
    # Allow None to explicitly disable clipping (the old bare
    # `clip_val > 0.0` assertion raised TypeError on None).
    assert clip_val is None or clip_val > 0.0, clip_val
    if tf.executing_eagerly():
        tape = optimizer.tape
        grads_and_vars = list(
            zip(list(tape.gradient(objective, var_list)), var_list))
    else:
        grads_and_vars = optimizer.compute_gradients(
            objective, var_list=var_list)
    # Drop variables with no gradient: returning (None, var) pairs breaks
    # downstream apply_gradients. Clip only when a clip value was given.
    return [(tf.clip_by_norm(g, clip_val) if clip_val is not None else g, v)
            for (g, v) in grads_and_vars if g is not None]
|
[{'piece_type': 'error message', 'piece_content': '2020-06-21 13:09:32,571\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn\\nreturn self._call_tf_sessionrun(options, feed_dict, fetch_list,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun\\nreturn tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nray::PPO.train() (pid=20426, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in 
it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__\\nbatch_fetches = optimizer.optimize(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize\\nreturn sess.run(fetches, feed_dict=feed_dict)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run\\nresult = self._run(None, fetches, feed_dict, options_ptr,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run\\nresults = self._do_run(handle, final_targets, final_fetches,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run\\nreturn self._do_call(_run_fn, feeds, fetches, targets, options,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call\\nraise type(e)(node_def, op, message)\\ntensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. 
[6,64]\\n[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]'}, {'piece_type': 'error message', 'piece_content': '2020-06-21 13:14:26,130\\tERROR trial_runner.py:524 -- Trial PPO_TestingGym_a4dcc_00000: Error processing event.\\nTraceback (most recent call last):\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(RuntimeError): ray::PPO.train() (pid=21085, ip=192.168.2.105)\\nFile "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train\\nraise e\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train\\nresult = Trainable.train(self)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train\\nresult = self._train()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train\\nreturn self._train_exec_impl()\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl\\nres = next(self.train_exec_impl)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 
731, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach\\nfor item in it:\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach\\nresult = fn(item)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 62, in __call__\\ninfo = do_minibatch_sgd(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/utils/sgd.py", line 114, in do_minibatch_sgd\\nbatch_fetches = (local_worker.learn_on_batch(\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/evaluation/rollout_worker.py", line 737, in learn_on_batch\\ninfo_out[pid] = policy.learn_on_batch(batch)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/policy/torch_policy.py", line 242, in learn_on_batch\\nself._loss(self, self.model, self.dist_class, train_batch))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/ppo/ppo_torch_policy.py", line 113, in ppo_surrogate_loss\\nlogits, state = model.from_batch(train_batch)\\nFile 
"/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 224, in from_batch\\nreturn self.__call__(input_dict, states, train_batch.get("seq_lens"))\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/models/modelv2.py", line 181, in __call__\\nres = self.forward(restored, state or [], seq_lens)\\nFile "/mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py", line 166, in forward\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/module.py", line 550, in __call__\\nresult = self.forward(*input, **kwargs)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 567, in forward\\nself.check_forward_args(input, hx, batch_sizes)\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 522, in check_forward_args\\nself.check_hidden_size(hidden[0], expected_hidden_size,\\nFile "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/torch/nn/modules/rnn.py", line 187, in check_hidden_size\\nraise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))\\nRuntimeError: Expected hidden[0] size (1, 140, 256), got (1, 7, 256)'}, {'piece_type': 'source code', 'piece_content': 'from testing_gym import TestingGym\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray.tune.registry import register_env\\nfrom rnn_model import TorchRNNModel, RNNModel\\nfrom ray import tune\\n\\ntimesteps = 5\\n\\n\\ndef env_creator(env_config):\\nenv = TestingGym()\\nreturn env # return an env instance\\n\\n\\nif __name__ == "__main__":\\nregister_env("TestingGym", env_creator)\\n# also have had issues with TF models\\nModelCatalog.register_custom_model("torch_model", TorchRNNModel)\\nModelCatalog.register_custom_model("keras_model", RNNModel)\\n\\ntune.run(\\n"A2C",\\nstop={"episodes_total": 
500},\\ncheckpoint_at_end=True,\\ncheckpoint_freq=100,\\nconfig={\\n"env": "TestingGym",\\n"num_workers": 14,\\n"env_config": {},\\n"lr": 0.000001,\\n"framework": "torch",\\n"model": {\\n"custom_model_config":\\n{\\n"timesteps": timesteps\\n},\\n"fcnet_hiddens": [256, 256, 256, 256],\\n"custom_model": "torch_model",\\n}\\n},\\nlocal_dir="./results", )'}, {'piece_type': 'source code', 'piece_content': 'import numpy as np\\n\\nfrom ray.rllib.models.modelv2 import ModelV2\\nfrom ray.rllib.models.preprocessors import get_preprocessor\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.utils.annotations import override\\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch\\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\\n\\ntf = try_import_tf()\\ntorch, nn = try_import_torch()\\n\\n\\nclass RNNModel(TFModelV2):\\n"""Example of using the Keras functional API to define a RNN model."""\\n\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nhiddens_size=256,\\ncell_size=64,\\ntimesteps=5):\\nsuper(RNNModel, self).__init__(obs_space, action_space, num_outputs,\\nmodel_config, name)\\nself.obs_space = obs_space\\nself.cell_size = cell_size\\nself.timesteps = timesteps\\n\\nprint(f"OBS SPACE: {obs_space.shape}")\\n# Define input layers\\ninput_layer = tf.keras.layers.Input(\\nshape=(timesteps, int(obs_space.shape[0]/self.timesteps)), name="inputs")\\n\\nstate_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h")\\nstate_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c")\\n#seq_in = tf.keras.layers.Input(shape=(), name="seq_in", dtype=tf.int32)\\n\\n# Preprocess observation with a hidden layer and send to LSTM cell\\ndense1 = tf.keras.layers.Dense(\\nhiddens_size, activation=tf.nn.sigmoid, name="dense1")(input_layer)\\nlstm_out, state_h, state_c = tf.keras.layers.LSTM(\\ncell_size,\\n#return_sequences=True,\\nreturn_state=True, 
name="lstm")(\\ninputs=dense1,\\n#mask=tf.sequence_mask(seq_in),\\ninitial_state=[state_in_h, state_in_c])\\n#flats = tf.keras.layers.Flatten()(lstm_out)\\n# Postprocess LSTM output with another hidden layer and compute values\\n\\n_ = lstm_out\\nfor units in model_config["fcnet_hiddens"]:\\n_ = tf.keras.layers.Dense(\\nunits,\\nactivation=tf.keras.activations.sigmoid)(_)\\n\\nlogits = tf.keras.layers.Dense(\\nself.num_outputs,\\nactivation=tf.keras.activations.linear,\\nname="logits")(_)\\nvalues = tf.keras.layers.Dense(\\n1, activation=None, name="values")(_)\\n\\n# Create the RNN model\\nself.rnn_model = tf.keras.Model(\\ninputs=[input_layer, state_in_h, state_in_c],\\noutputs=[logits, values, state_h, state_c])\\nself.register_variables(self.rnn_model.variables)\\nself.rnn_model.summary()\\n\\n@override(TFModelV2)\\ndef forward(self, inputs, state, seq_lens):\\nprint("forward")\\nprint(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\ninputs = tf.reshape(tensor=inputs, shape=[-1, self.timesteps, int(self.obs_space.shape[0]/self.timesteps)])\\n\\nmodel_out, self._value_out, h, c = self.rnn_model([inputs,] + state)\\nreturn model_out, [h, c]\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\nreturn [\\nnp.zeros(self.cell_size, np.float32),\\nnp.zeros(self.cell_size, np.float32),\\n]\\n\\n@override(ModelV2)\\ndef value_function(self):\\nreturn tf.reshape(self._value_out, [-1])\\n\\n\\nclass TorchRNNModel(TorchModelV2, nn.Module):\\ndef __init__(self,\\nobs_space,\\naction_space,\\nnum_outputs,\\nmodel_config,\\nname,\\nfc_size=64,\\nlstm_state_size=256,\\nnum_symbols=5,\\ntimesteps=5):\\nsuper().__init__(obs_space, action_space, num_outputs, model_config,\\nname)\\nnn.Module.__init__(self)\\nself.timesteps = timesteps\\nself.num_symbols = num_symbols\\n\\nself.obs_size = get_preprocessor(obs_space)(obs_space).size\\nprint(f"RNN Obs Size: {self.obs_size}")\\nself.obs_size = int(self.obs_size/self.timesteps)\\nself.fc_size = fc_size\\nself.lstm_state_size = 
lstm_state_size\\n\\n# Build the Module from fc + LSTM + 2xfc (action + value outs).\\nself.fc1 = nn.Linear(self.obs_size, self.fc_size)\\nself.lstm = nn.LSTM(self.fc_size, self.lstm_state_size, batch_first=True)\\nself.action_branch = nn.Linear(self.lstm_state_size, num_outputs)\\nself.value_branch = nn.Linear(self.lstm_state_size, 1)\\n# Holds the current "base" output (before logits layer).\\nself._features = None\\n\\n@override(ModelV2)\\ndef get_initial_state(self):\\n# Place hidden states on same device as model.\\nh = [\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0),\\nself.fc1.weight.new(1, self.lstm_state_size).zero_().squeeze(0)\\n]\\nprint(f"Inital State: {h[0].shape}, {h[1].shape}")\\nreturn h\\n\\n@override(ModelV2)\\ndef value_function(self):\\nassert self._features is not None, "must call forward() first"\\nreturn torch.reshape(self.value_branch(self._features), [-1])\\n\\n@override(ModelV2)\\ndef forward(self, inputs, state, seq_lens):\\n"""\\nFeeds `inputs` (B x T x ..) through the Gru Unit.\\n\\nReturns the resulting outputs as a sequence (B x T x ...).\\nValues are stored in self._cur_value in simple (B) shape (where B\\ncontains both the B and T dims!).\\n\\nReturns:\\nNN Outputs (B x T x ...) 
as sequence.\\nThe state batches as a List of two items (c- and h-states).\\n"""\\nprint("forward")\\n#print(f"INPUTS: {state}")\\ninputs = inputs[\\'obs\\']\\n# if not isinstance(inputs, tuple):\\ninputs = torch.reshape(input=inputs, shape=(-1, self.timesteps, int(self.obs_size)))\\nprint(f"inputs shape: {inputs.shape}")\\nprint(f"state sizes: h {torch.unsqueeze(state[0], 0).shape}, c {torch.unsqueeze(state[1], 0).shape}")\\n# embedding_input = inputs[:, :, :self.num_symbols]\\n# inputs = inputs[:, :, self.num_symbols:]\\n\\nx = nn.functional.relu(self.fc1(inputs))\\nself._features, [h, c] = self.lstm(x, [torch.unsqueeze(state[0], 0),\\ntorch.unsqueeze(state[1], 0)])\\nprint(f"state size after: h {h.shape}, c {c.shape}")\\nprint(f"LSTM shape: {self._features.shape}")\\nself._features = self._features[:, -1, :]\\nprint(f"LSTM shape After: {self._features.shape}")\\naction_out = self.action_branch(self._features)\\nprint(f"action shape: {action_out.shape}")\\n\\nreturn action_out, [torch.squeeze(h, 0), torch.squeeze(c, 0)]'}, {'piece_type': 'source code', 'piece_content': 'import gym\\nfrom gym import error, spaces, utils\\nfrom gym.utils import seeding\\nimport numpy as np\\nimport sys\\n\\n\\nclass TestingGym(gym.Env):\\nmetadata = {\\'render.modes\\': [\\'human\\']}\\n\\ndef __init__(self, timesteps=5):\\nself.timesteps = timesteps\\n\\nsuper(TestingGym, self).__init__()\\n\\nself.reward_range = (-sys.float_info.max-1, sys.float_info.max)\\n\\nself.action_space = spaces.Box(low=np.array([0, 0]), high=np.array([4, 1]), dtype=np.float16)\\n\\nself.done_counter = 0\\nself.obs_length = 15\\nself.observation_space = spaces.Box(low=-sys.float_info.max-1, high=sys.float_info.max, shape=(self.timesteps * self.obs_length,), dtype=np.float32)\\n\\ndef _initial_observation(self):\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn 
curr_obs\\n\\ndef step(self, action):\\nself.done_counter += 1\\n\\ncurr_obs = np.random.random((self.timesteps, self.obs_length))\\ncurr_obs = curr_obs.reshape((self.timesteps * self.obs_length,))\\n\\nif self.done_counter > 1000:\\ndone = True\\nelse:\\ndone = False\\n\\nprint(f"Obs Length: {curr_obs.shape}")\\nreturn curr_obs, 1, done, {}\\n\\ndef reset(self):\\nself.done_counter = 0\\n\\nreturn self._initial_observation()'}]
|
2020-06-21 13:09:32,571 ERROR trial_runner.py:524 -- Trial PPO_TestingGym_f28cf_00000: Error processing event.
Traceback (most recent call last):
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trial_runner.py", line 472, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/ray_trial_executor.py", line 430, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/worker.py", line 1478, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(InvalidArgumentError): ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1349, in _run_fn
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1441, in _call_tf_sessionrun
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[{{node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6}}]]
During handling of the above exception, another exception occurred:
ray::PPO.train() (pid=20426, ip=192.168.2.105)
File "python/ray/_raylet.pyx", line 443, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 446, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 447, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 401, in ray._raylet.execute_task.function_executor
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 520, in train
raise e
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 506, in train
result = Trainable.train(self)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/tune/trainable.py", line 317, in train
result = self._train()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 137, in _train
return self._train_exec_impl()
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/agents/trainer_template.py", line 175, in _train_exec_impl
res = next(self.train_exec_impl)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 731, in __next__
return next(self.built_iterator)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 814, in apply_filter
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 744, in apply_foreach
for item in it:
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/util/iter.py", line 752, in apply_foreach
result = fn(item)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/train_ops.py", line 204, in __call__
batch_fetches = optimizer.optimize(
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/ray/rllib/execution/multi_gpu_impl.py", line 257, in optimize
return sess.run(fetches, feed_dict=feed_dict)
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 957, in run
result = self._run(None, fetches, feed_dict, options_ptr,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1180, in _run
results = self._do_run(handle, final_targets, final_fetches,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1358, in _do_run
return self._do_call(_run_fn, feeds, fetches, targets, options,
File "/home/user/anaconda3/envs/RLlibTesting/lib/python3.8/site-packages/tensorflow/python/client/session.py", line 1384, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [120,64] vs. [6,64]
[[node default_policy_1/tower_1/model_1/lstm/while/lstm_cell/add_6 (defined at mnt/c/Users/user/Desktop/RLlib_Issue/rnn_model.py:81) ]]
|
tensorflow.python.framework.errors_impl.InvalidArgumentError
|
def persistent_id(self, obj):
    """Pickle hook: replace Ray ObjectRefs / ActorHandles with serializable
    PickleStubs, registering each with the server so the underlying
    reference stays alive on behalf of this client.

    Returns None for all other objects (pickle them normally).
    """
    if isinstance(obj, ray.ObjectRef):
        ref_bytes = obj.binary()
        client_refs = self.server.object_refs[self.client_id]
        if ref_bytes not in client_refs:
            # We're passing back a reference, probably inside a reference.
            # Let's hold onto it.
            client_refs[ref_bytes] = obj
        return PickleStub(
            type="Object",
            client_id=self.client_id,
            ref_id=ref_bytes,
            name=None,
            baseline_options=None,
        )
    if isinstance(obj, ray.actor.ActorHandle):
        handle_bytes = obj._actor_id.binary()
        if handle_bytes not in self.server.actor_refs:
            # We're passing back a handle, probably inside a reference.
            self.server.actor_refs[handle_bytes] = obj
        owners = self.server.actor_owners[self.client_id]
        if handle_bytes not in owners:
            owners.add(handle_bytes)
        return PickleStub(
            type="Actor",
            client_id=self.client_id,
            ref_id=obj._actor_id.binary(),
            name=None,
            baseline_options=None,
        )
    return None
|
def persistent_id(self, obj):
    """Pickle hook: replace Ray ObjectRefs / ActorHandles with serializable
    PickleStubs, registering each with the server so the underlying
    reference stays alive on behalf of this client.

    Returns None for all other objects (pickle them normally).
    """
    if isinstance(obj, ray.ObjectRef):
        obj_id = obj.binary()
        if obj_id not in self.server.object_refs[self.client_id]:
            # We're passing back a reference, probably inside a reference.
            # Let's hold onto it.
            self.server.object_refs[self.client_id][obj_id] = obj
        return PickleStub(
            type="Object",
            client_id=self.client_id,
            ref_id=obj_id,
            name=None,
            baseline_options=None,
        )
    elif isinstance(obj, ray.actor.ActorHandle):
        actor_id = obj._actor_id.binary()
        if actor_id not in self.server.actor_refs:
            # We're passing back a handle, probably inside a reference.
            # BUG FIX: the registries live on the server, not on this
            # pickler — `self.actor_refs` raised AttributeError
            # ("'ServerPickler' object has no attribute 'actor_refs'").
            self.server.actor_refs[actor_id] = obj
        if actor_id not in self.server.actor_owners[self.client_id]:
            self.server.actor_owners[self.client_id].add(actor_id)
        return PickleStub(
            type="Actor",
            client_id=self.client_id,
            ref_id=obj._actor_id.binary(),
            name=None,
            baseline_options=None,
        )
    return None
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nray.util.connect("localhost:61234")\\n\\n@ray.remote\\nclass B:\\npass\\n\\n@ray.remote\\nclass A:\\ndef __init__(self):\\nself.b = B.remote()\\n\\ndef get_actor_ref(self):\\nreturn [self.b]\\n\\na = A.remote()\\nray.get(a.get_actor_ref.remote())'}, {'piece_type': 'error message', 'piece_content': 'Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNKNOWN\\ndetails = "Exception iterating responses: \\'ServerPickler\\' object has no attribute \\'actor_refs\\'"\\ndebug_error_string = "{"created":"@1610645797.119742000","description":"Error received from peer ipv6:[\\n::1]:61234","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Exception iterating response\\ns: \\'ServerPickler\\' object has no attribute \\'actor_refs\\'","grpc_status":2}"\\n\\nException in thread Thread-5:\\nTraceback (most recent call last):\\nFile "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 932, in _bootstrap_inner\\nself.run()\\nFile "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 870, in run\\nself._target(*self._args, **self._kwargs)\\nFile "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 76, in _data_main\\nraise e\\nFile "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 61, in _data_main\\nfor response in resp_stream:\\nFile "/Users/eoakes/anaconda3/lib/python3.8/site-packages/grpc/_channel.py", line 416, in __next__\\nreturn self._next()\\nFile "/Users/eoakes/anaconda3/lib/python3.8/site-packages/grpc/_channel.py", line 803, in _next\\nraise self\\ngrpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:\\nstatus = StatusCode.UNKNOWN\\ndetails = "Exception iterating responses: \\'ServerPickler\\' object has no attribute \\'actor_refs\\'"\\ndebug_error_string = "{"created":"@1610645797.119742000","description":"Error received from peer 
ipv6:[::1]:61234","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Exception iterating responses: \\'ServerPickler\\' object has no attribute \\'actor_refs\\'","grpc_status":2}"\\n\\n^CTraceback (most recent call last):\\nFile "test.py", line 17, in <module>\\nray.get(a.get_actor_ref.remote())\\nFile "/Users/eoakes/code/ray/python/ray/_private/client_mode_hook.py", line 46, in wrapper\\nreturn getattr(ray, func.__name__)(*args, **kwargs)\\nFile "/Users/eoakes/code/ray/python/ray/util/client/api.py", line 32, in get\\nreturn self.worker.get(vals, timeout=timeout)\\nFile "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 107, in get\\nout = [self._get(x, timeout) for x in to_get]\\nFile "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 107, in <listcomp>\\nout = [self._get(x, timeout) for x in to_get]\\nFile "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 115, in _get\\ndata = self.data_client.GetObject(req)\\nFile "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 106, in GetObject\\nresp = self._blocking_send(datareq)\\nFile "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 91, in _blocking_send\\nself.cv.wait_for(lambda: req_id in self.ready_data)\\nFile "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 337, in wait_for\\nself.wait(waittime)\\nFile "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 302, in wait\\nwaiter.acquire()\\nKeyboardInterrupt'}]
|
Got Error from data channel -- shutting down: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNKNOWN
details = "Exception iterating responses: 'ServerPickler' object has no attribute 'actor_refs'"
debug_error_string = "{"created":"@1610645797.119742000","description":"Error received from peer ipv6:[
::1]:61234","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Exception iterating response
s: 'ServerPickler' object has no attribute 'actor_refs'","grpc_status":2}"
Exception in thread Thread-5:
Traceback (most recent call last):
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 76, in _data_main
raise e
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 61, in _data_main
for response in resp_stream:
File "/Users/eoakes/anaconda3/lib/python3.8/site-packages/grpc/_channel.py", line 416, in __next__
return self._next()
File "/Users/eoakes/anaconda3/lib/python3.8/site-packages/grpc/_channel.py", line 803, in _next
raise self
grpc._channel._MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.UNKNOWN
details = "Exception iterating responses: 'ServerPickler' object has no attribute 'actor_refs'"
debug_error_string = "{"created":"@1610645797.119742000","description":"Error received from peer ipv6:[::1]:61234","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Exception iterating responses: 'ServerPickler' object has no attribute 'actor_refs'","grpc_status":2}"
^CTraceback (most recent call last):
File "test.py", line 17, in <module>
ray.get(a.get_actor_ref.remote())
File "/Users/eoakes/code/ray/python/ray/_private/client_mode_hook.py", line 46, in wrapper
return getattr(ray, func.__name__)(*args, **kwargs)
File "/Users/eoakes/code/ray/python/ray/util/client/api.py", line 32, in get
return self.worker.get(vals, timeout=timeout)
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 107, in get
out = [self._get(x, timeout) for x in to_get]
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 107, in <listcomp>
out = [self._get(x, timeout) for x in to_get]
File "/Users/eoakes/code/ray/python/ray/util/client/worker.py", line 115, in _get
data = self.data_client.GetObject(req)
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 106, in GetObject
resp = self._blocking_send(datareq)
File "/Users/eoakes/code/ray/python/ray/util/client/dataclient.py", line 91, in _blocking_send
self.cv.wait_for(lambda: req_id in self.ready_data)
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 337, in wait_for
self.wait(waittime)
File "/Users/eoakes/anaconda3/lib/python3.8/threading.py", line 302, in wait
waiter.acquire()
KeyboardInterrupt
|
debug_error
|
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
resp = ray_client_pb2.ClusterInfoResponse()
resp.type = request.type
if request.type == ray_client_pb2.ClusterInfoType.CLUSTER_RESOURCES:
with disable_client_hook():
resources = ray.cluster_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(
table=float_resources))
elif request.type == \\
ray_client_pb2.ClusterInfoType.AVAILABLE_RESOURCES:
with disable_client_hook():
resources = ray.available_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(
table=float_resources))
elif request.type == ray_client_pb2.ClusterInfoType.RUNTIME_CONTEXT:
ctx = ray_client_pb2.ClusterInfoResponse.RuntimeContext()
with disable_client_hook():
rtc = ray.get_runtime_context()
ctx.job_id = rtc.job_id.binary()
ctx.node_id = rtc.node_id.binary()
ctx.capture_client_tasks = \\
rtc.should_capture_child_tasks_in_placement_group
resp.runtime_context.CopyFrom(ctx)
else:
with disable_client_hook():
resp.json = self._return_debug_cluster_info(request, context)
return resp
|
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
resp = ray_client_pb2.ClusterInfoResponse()
resp.type = request.type
if request.type == ray_client_pb2.ClusterInfoType.CLUSTER_RESOURCES:
with disable_client_hook():
resources = ray.cluster_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(
table=float_resources))
elif request.type == \\
ray_client_pb2.ClusterInfoType.AVAILABLE_RESOURCES:
with disable_client_hook():
resources = ray.available_resources()
# Normalize resources into floats
# (the function may return values that are ints)
float_resources = {k: float(v) for k, v in resources.items()}
resp.resource_table.CopyFrom(
ray_client_pb2.ClusterInfoResponse.ResourceTable(
table=float_resources))
else:
with disable_client_hook():
resp.json = self._return_debug_cluster_info(request, context)
return resp
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nray.util.connect("localhost:61234")\\n\\nprint(ray.get_runtime_context().node_id.hex())'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "test.py", line 4, in <module>\\nprint(ray.get_runtime_context().node_id.hex())\\nFile "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 58, in node_id\\nnode_id = self.worker.current_node_id\\nFile "/Users/eoakes/code/ray/python/ray/worker.py", line 148, in current_node_id\\nreturn self.core_worker.get_current_node_id()\\nAttributeError: \\'Worker\\' object has no attribute \\'core_worker\\''}]
|
Traceback (most recent call last):
File "test.py", line 4, in <module>
print(ray.get_runtime_context().node_id.hex())
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 58, in node_id
node_id = self.worker.current_node_id
File "/Users/eoakes/code/ray/python/ray/worker.py", line 148, in current_node_id
return self.core_worker.get_current_node_id()
AttributeError: 'Worker' object has no attribute 'core_worker'
|
AttributeError
|
def get_cluster_info(self, type: ray_client_pb2.ClusterInfoType.TypeEnum):
req = ray_client_pb2.ClusterInfoRequest()
req.type = type
resp = self.server.ClusterInfo(req, metadata=self.metadata)
if resp.WhichOneof("response_type") == "resource_table":
# translate from a proto map to a python dict
output_dict = {k: v for k, v in resp.resource_table.table.items()}
return output_dict
elif resp.WhichOneof("response_type") == "runtime_context":
return resp.runtime_context
return json.loads(resp.json)
|
def get_cluster_info(self, type: ray_client_pb2.ClusterInfoType.TypeEnum):
req = ray_client_pb2.ClusterInfoRequest()
req.type = type
resp = self.server.ClusterInfo(req, metadata=self.metadata)
if resp.WhichOneof("response_type") == "resource_table":
# translate from a proto map to a python dict
output_dict = {k: v for k, v in resp.resource_table.table.items()}
return output_dict
return json.loads(resp.json)
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nray.util.connect("localhost:61234")\\n\\nprint(ray.get_runtime_context().node_id.hex())'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "test.py", line 4, in <module>\\nprint(ray.get_runtime_context().node_id.hex())\\nFile "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 58, in node_id\\nnode_id = self.worker.current_node_id\\nFile "/Users/eoakes/code/ray/python/ray/worker.py", line 148, in current_node_id\\nreturn self.core_worker.get_current_node_id()\\nAttributeError: \\'Worker\\' object has no attribute \\'core_worker\\''}]
|
Traceback (most recent call last):
File "test.py", line 4, in <module>
print(ray.get_runtime_context().node_id.hex())
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 58, in node_id
node_id = self.worker.current_node_id
File "/Users/eoakes/code/ray/python/ray/worker.py", line 148, in current_node_id
return self.core_worker.get_current_node_id()
AttributeError: 'Worker' object has no attribute 'core_worker'
|
AttributeError
|
def get(self):
"""Get a dictionary of the current context.
Fields that are not available (e.g., actor ID inside a task) won't be
included in the field.
Returns:
dict: Dictionary of the current context.
"""
context = {
"job_id": self.job_id,
"node_id": self.node_id,
}
if self.worker.mode == ray.worker.WORKER_MODE:
if self.task_id is not None:
context["task_id"] = self.task_id
if self.actor_id is not None:
context["actor_id"] = self.actor_id
return context
|
def get(self):
"""Get a dictionary of the current_context.
For fields that are not available (for example actor id inside a task)
won't be included in the field.
Returns:
dict: Dictionary of the current context.
"""
context = {
"job_id": self.job_id,
"node_id": self.node_id,
"task_id": self.task_id,
"actor_id": self.actor_id
}
# Remove fields that are None.
return {
key: value
for key, value in context.items() if value is not None
}
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nray.init()\\n\\nprint(ray.get_runtime_context().get())'}, {'piece_type': 'error message', 'piece_content': '2021-01-13 14:15:29,011 INFO services.py:1169 -- View the Ray dashboard at http://127.0.0.1:8266\\nTraceback (most recent call last):\\nFile "test.py", line 4, in <module>\\nprint(ray.get_runtime_context().get())\\nFile "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 26, in get\\n"task_id": self.task_id,\\nFile "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 92, in task_id\\nassert self.worker.mode == ray.worker.WORKER_MODE, (\\nAssertionError: This method is only available when the process is a worker. Current mode: 0'}]
|
2021-01-13 14:15:29,011 INFO services.py:1169 -- View the Ray dashboard at http://127.0.0.1:8266
Traceback (most recent call last):
File "test.py", line 4, in <module>
print(ray.get_runtime_context().get())
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 26, in get
"task_id": self.task_id,
File "/Users/eoakes/code/ray/python/ray/runtime_context.py", line 92, in task_id
assert self.worker.mode == ray.worker.WORKER_MODE, (
AssertionError: This method is only available when the process is a worker. Current mode: 0
|
AssertionError
|
def __init__(self,
node_ip_address,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.ip = node_ip_address
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
# TODO(edoakes): RAY_RAYLET_PID isn't properly set on Windows. This is
# only used for fate-sharing with the raylet and we need a different
# fate-sharing mechanism for Windows anyways.
if sys.platform not in ["win32", "cygwin"]:
self.ppid = int(os.environ["RAY_RAYLET_PID"])
assert self.ppid > 0
logger.info("Parent pid is %s", self.ppid)
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0), ))
self.grpc_port = self.server.add_insecure_port(
f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip,
self.grpc_port)
self.aioredis_client = None
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
f"{self.ip}:{self.node_manager_port}")
self.http_session = None
|
def __init__(self,
node_ip_address,
redis_address,
dashboard_agent_port,
redis_password=None,
temp_dir=None,
log_dir=None,
metrics_export_port=None,
node_manager_port=None,
object_store_name=None,
raylet_name=None):
"""Initialize the DashboardAgent object."""
# Public attributes are accessible for all agent modules.
self.ip = node_ip_address
self.redis_address = dashboard_utils.address_tuple(redis_address)
self.redis_password = redis_password
self.temp_dir = temp_dir
self.log_dir = log_dir
self.dashboard_agent_port = dashboard_agent_port
self.metrics_export_port = metrics_export_port
self.node_manager_port = node_manager_port
self.object_store_name = object_store_name
self.raylet_name = raylet_name
self.node_id = os.environ["RAY_NODE_ID"]
self.ppid = int(os.environ["RAY_RAYLET_PID"])
assert self.ppid > 0
logger.info("Parent pid is %s", self.ppid)
self.server = aiogrpc.server(options=(("grpc.so_reuseport", 0), ))
self.grpc_port = self.server.add_insecure_port(
f"[::]:{self.dashboard_agent_port}")
logger.info("Dashboard agent grpc address: %s:%s", self.ip,
self.grpc_port)
self.aioredis_client = None
self.aiogrpc_raylet_channel = aiogrpc.insecure_channel(
f"{self.ip}:{self.node_manager_port}")
self.http_session = None
|
[{'piece_type': 'error message', 'piece_content': '(pid=None) Traceback (most recent call last):\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\new_dashboard/agent.py", line 317, in <module>\\n(pid=None) raise e\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\new_dashboard/agent.py", line 293, in <module>\\n(pid=None) agent = DashboardAgent(\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\new_dashboard/agent.py", line 65, in __init__\\n(pid=None) self.ppid = int(os.environ["RAY_RAYLET_PID"])\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\os.py", line 675, in __getitem__\\n(pid=None) raise KeyError(key) from None\\n(pid=None) KeyError: \\'RAY_RAYLET_PID\\''}]
|
(pid=None) Traceback (most recent call last):
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\new_dashboard/agent.py", line 317, in <module>
(pid=None) raise e
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\new_dashboard/agent.py", line 293, in <module>
(pid=None) agent = DashboardAgent(
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\new_dashboard/agent.py", line 65, in __init__
(pid=None) self.ppid = int(os.environ["RAY_RAYLET_PID"])
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\os.py", line 675, in __getitem__
(pid=None) raise KeyError(key) from None
(pid=None) KeyError: 'RAY_RAYLET_PID'
|
KeyError
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if (parent is None or parent.pid == 1
or self.ppid != parent.pid):
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.
DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
if sys.platform not in ["win32", "cygwin"]:
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address, self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: "
"Failed to connect to redis at %s", self.redis_address)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(
loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
})
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host,
http_port)
# Dump registered http routes.
dump_routes = [
r for r in app.router.routes() if r.method != hdrs.METH_HEAD
]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]))
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(),
agent_port=self.grpc_port,
agent_ip_address=self.ip))
await asyncio.gather(check_parent_task,
*(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
async def run(self):
async def _check_parent():
"""Check if raylet is dead and fate-share if it is."""
try:
curr_proc = psutil.Process()
while True:
parent = curr_proc.parent()
if (parent is None or parent.pid == 1
or self.ppid != parent.pid):
logger.error("Raylet is dead, exiting.")
sys.exit(0)
await asyncio.sleep(
dashboard_consts.
DASHBOARD_AGENT_CHECK_PARENT_INTERVAL_SECONDS)
except Exception:
logger.error("Failed to check parent PID, exiting.")
sys.exit(1)
check_parent_task = create_task(_check_parent())
# Create an aioredis client for all modules.
try:
self.aioredis_client = await dashboard_utils.get_aioredis_client(
self.redis_address, self.redis_password,
dashboard_consts.CONNECT_REDIS_INTERNAL_SECONDS,
dashboard_consts.RETRY_REDIS_CONNECTION_TIMES)
except (socket.gaierror, ConnectionRefusedError):
logger.error(
"Dashboard agent exiting: "
"Failed to connect to redis at %s", self.redis_address)
sys.exit(-1)
# Create a http session for all modules.
self.http_session = aiohttp.ClientSession(
loop=asyncio.get_event_loop())
# Start a grpc asyncio server.
await self.server.start()
modules = self._load_modules()
# Http server should be initialized after all modules loaded.
app = aiohttp.web.Application()
app.add_routes(routes=routes.bound_routes())
# Enable CORS on all routes.
cors = aiohttp_cors.setup(
app,
defaults={
"*": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers="*",
allow_methods="*",
allow_headers=("Content-Type", "X-Header"),
)
})
for route in list(app.router.routes()):
cors.add(route)
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, self.ip, 0)
await site.start()
http_host, http_port = site._server.sockets[0].getsockname()
logger.info("Dashboard agent http address: %s:%s", http_host,
http_port)
# Dump registered http routes.
dump_routes = [
r for r in app.router.routes() if r.method != hdrs.METH_HEAD
]
for r in dump_routes:
logger.info(r)
logger.info("Registered %s routes.", len(dump_routes))
# Write the dashboard agent port to redis.
await self.aioredis_client.set(
f"{dashboard_consts.DASHBOARD_AGENT_PORT_PREFIX}{self.node_id}",
json.dumps([http_port, self.grpc_port]))
# Register agent to agent manager.
raylet_stub = agent_manager_pb2_grpc.AgentManagerServiceStub(
self.aiogrpc_raylet_channel)
await raylet_stub.RegisterAgent(
agent_manager_pb2.RegisterAgentRequest(
agent_pid=os.getpid(),
agent_port=self.grpc_port,
agent_ip_address=self.ip))
await asyncio.gather(check_parent_task,
*(m.run(self.server) for m in modules))
await self.server.wait_for_termination()
# Wait for finish signal.
await runner.cleanup()
|
[{'piece_type': 'error message', 'piece_content': '(pid=None) Traceback (most recent call last):\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\new_dashboard/agent.py", line 317, in <module>\\n(pid=None) raise e\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\new_dashboard/agent.py", line 293, in <module>\\n(pid=None) agent = DashboardAgent(\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\new_dashboard/agent.py", line 65, in __init__\\n(pid=None) self.ppid = int(os.environ["RAY_RAYLET_PID"])\\n(pid=None) File "D:\\\\Programs\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\os.py", line 675, in __getitem__\\n(pid=None) raise KeyError(key) from None\\n(pid=None) KeyError: \\'RAY_RAYLET_PID\\''}]
|
(pid=None) Traceback (most recent call last):
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\new_dashboard/agent.py", line 317, in <module>
(pid=None) raise e
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\new_dashboard/agent.py", line 293, in <module>
(pid=None) agent = DashboardAgent(
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\new_dashboard/agent.py", line 65, in __init__
(pid=None) self.ppid = int(os.environ["RAY_RAYLET_PID"])
(pid=None) File "D:\\Programs\\Anaconda3\\envs\\ray\\lib\\os.py", line 675, in __getitem__
(pid=None) raise KeyError(key) from None
(pid=None) KeyError: 'RAY_RAYLET_PID'
|
KeyError
|
def __str__(self):
return ("The worker died unexpectedly while executing this task. "
"Check python-core-worker-*.log files for more information.")
|
def __str__(self):
return "The worker died unexpectedly while executing this task."
|
[{'piece_type': 'error message', 'piece_content': '2020-10-05 01:55:09,393\\\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: \\\\u001b[36mray::PPO.train()\\\\u001b[39m (pid=4251, ip=172.30.96.106)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach\\nresult = fn(item)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__\\ntimeout_seconds=self.timeout_seconds)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in 
collect_episodes\\nmetric_lists = ray.get(collected)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'error message', 'piece_content': 'Failure # 1 (occurred at 2020-10-03_02-10-38)\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: ray::PPO.train() (pid=524, ip=172.30.58.198)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item 
in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 876, in apply_flatten\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 828, in add_wait_hooks\\nitem = next(it)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 471, in base_iterator\\nyield ray.get(futures, timeout=timeout)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer\\n\\nconfig["env"] = 
"QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'reproducing source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\nfrom ray.rllib.agents.ppo.ppo import UpdateKL, warn_about_bad_reward_scales\\nfrom ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, _get_shared_metrics\\nfrom ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches, \\\\\\nStandardizeFields, SelectExperiences\\nfrom ray.rllib.execution.train_ops import TrainOneStep\\nfrom ray.rllib.execution.metric_ops import StandardMetricsReporting\\nfrom ray.rllib.policy.policy import Policy\\nfrom ray.rllib.policy.sample_batch import SampleBatch\\nfrom ray.util.iter import from_actors\\n\\n\\ndef custom_ppo_execution_plan(workers, config):\\n"""Copy of PPO\\'s execution plan, except we store all ops in a list and return them."""\\n# Modified from ParallelRollout\\'s bulk_sync mode.\\nworkers.sync_weights()\\ndef report_timesteps(batch):\\nmetrics = _get_shared_metrics()\\nmetrics.counters[STEPS_SAMPLED_COUNTER] += batch.count\\nreturn batch\\nops = [from_actors(workers.remote_workers())]\\nops.append(ops[-1].batch_across_shards())\\nops.append(ops[-1].for_each(lambda batches: SampleBatch.concat_samples(batches)))\\nops.append(ops[-1].for_each(report_timesteps))\\n\\n# Collect batches for the trainable policies.\\nops.append(ops[-1].for_each(\\nSelectExperiences(workers.trainable_policies())))\\n# Concatenate the SampleBatches into one.\\nops.append(ops[-1].combine(\\nConcatBatches(min_batch_size=config["train_batch_size"])))\\n# Standardize 
advantages.\\nops.append(ops[-1].for_each(StandardizeFields(["advantages"])))\\n\\n# Perform one training step on the combined + standardized batch.\\nops.append(ops[-1].for_each(\\nTrainOneStep(\\nworkers,\\nnum_sgd_iter=config["num_sgd_iter"],\\nsgd_minibatch_size=config["sgd_minibatch_size"])))\\n\\n# Update KL after each round of training.\\nops.append(ops[-1].for_each(lambda t: t[1]).for_each(UpdateKL(workers)))\\n\\n# Warn about bad reward scales and return training metrics.\\nreturn (StandardMetricsReporting(ops[-1], workers, config) \\\\\\n.for_each(lambda result: warn_about_bad_reward_scales(config, result)),\\nops)\\n\\nclass ExecutionPlanWrapper:\\n"""A wrapper for custom_ppo_execution_plan that stores all ops in the object."""\\n\\ndef __init__(self, workers, config):\\nself.execution_plan, self.ops = custom_ppo_execution_plan(workers, config)\\n\\ndef __next__(self):\\nreturn next(self.execution_plan)\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer.with_updates(\\nname="CustomPPO",\\nexecution_plan=ExecutionPlanWrapper)\\n\\nconfig["env"] = "QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'other', 'piece_content': '2020-10-04 00:19:40,710\\\\u0009WARNING 
worker.py:1072 -- The node with node id f7c78d2999929f603ebdf4d2c4508f949f6dafb0 has been marked dead because the detector has missed too many heartbeats from it.'}]
|
2020-10-05 01:55:09,393\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \\u001b[36mray::PPO.train()\\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def __str__(self):
return ("The actor died unexpectedly before finishing this task. "
"Check python-core-worker-*.log files for more information.")
|
def __str__(self):
return "The actor died unexpectedly before finishing this task."
|
[{'piece_type': 'error message', 'piece_content': '2020-10-05 01:55:09,393\\\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: \\\\u001b[36mray::PPO.train()\\\\u001b[39m (pid=4251, ip=172.30.96.106)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach\\nresult = fn(item)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__\\ntimeout_seconds=self.timeout_seconds)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in 
collect_episodes\\nmetric_lists = ray.get(collected)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'error message', 'piece_content': 'Failure # 1 (occurred at 2020-10-03_02-10-38)\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: ray::PPO.train() (pid=524, ip=172.30.58.198)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item 
in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 876, in apply_flatten\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 828, in add_wait_hooks\\nitem = next(it)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 471, in base_iterator\\nyield ray.get(futures, timeout=timeout)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer\\n\\nconfig["env"] = 
"QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'reproducing source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\nfrom ray.rllib.agents.ppo.ppo import UpdateKL, warn_about_bad_reward_scales\\nfrom ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, _get_shared_metrics\\nfrom ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches, \\\\\\nStandardizeFields, SelectExperiences\\nfrom ray.rllib.execution.train_ops import TrainOneStep\\nfrom ray.rllib.execution.metric_ops import StandardMetricsReporting\\nfrom ray.rllib.policy.policy import Policy\\nfrom ray.rllib.policy.sample_batch import SampleBatch\\nfrom ray.util.iter import from_actors\\n\\n\\ndef custom_ppo_execution_plan(workers, config):\\n"""Copy of PPO\\'s execution plan, except we store all ops in a list and return them."""\\n# Modified from ParallelRollout\\'s bulk_sync mode.\\nworkers.sync_weights()\\ndef report_timesteps(batch):\\nmetrics = _get_shared_metrics()\\nmetrics.counters[STEPS_SAMPLED_COUNTER] += batch.count\\nreturn batch\\nops = [from_actors(workers.remote_workers())]\\nops.append(ops[-1].batch_across_shards())\\nops.append(ops[-1].for_each(lambda batches: SampleBatch.concat_samples(batches)))\\nops.append(ops[-1].for_each(report_timesteps))\\n\\n# Collect batches for the trainable policies.\\nops.append(ops[-1].for_each(\\nSelectExperiences(workers.trainable_policies())))\\n# Concatenate the SampleBatches into one.\\nops.append(ops[-1].combine(\\nConcatBatches(min_batch_size=config["train_batch_size"])))\\n# Standardize 
advantages.\\nops.append(ops[-1].for_each(StandardizeFields(["advantages"])))\\n\\n# Perform one training step on the combined + standardized batch.\\nops.append(ops[-1].for_each(\\nTrainOneStep(\\nworkers,\\nnum_sgd_iter=config["num_sgd_iter"],\\nsgd_minibatch_size=config["sgd_minibatch_size"])))\\n\\n# Update KL after each round of training.\\nops.append(ops[-1].for_each(lambda t: t[1]).for_each(UpdateKL(workers)))\\n\\n# Warn about bad reward scales and return training metrics.\\nreturn (StandardMetricsReporting(ops[-1], workers, config) \\\\\\n.for_each(lambda result: warn_about_bad_reward_scales(config, result)),\\nops)\\n\\nclass ExecutionPlanWrapper:\\n"""A wrapper for custom_ppo_execution_plan that stores all ops in the object."""\\n\\ndef __init__(self, workers, config):\\nself.execution_plan, self.ops = custom_ppo_execution_plan(workers, config)\\n\\ndef __next__(self):\\nreturn next(self.execution_plan)\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer.with_updates(\\nname="CustomPPO",\\nexecution_plan=ExecutionPlanWrapper)\\n\\nconfig["env"] = "QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'other', 'piece_content': '2020-10-04 00:19:40,710\\\\u0009WARNING 
worker.py:1072 -- The node with node id f7c78d2999929f603ebdf4d2c4508f949f6dafb0 has been marked dead because the detector has missed too many heartbeats from it.'}]
|
2020-10-05 01:55:09,393\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \\u001b[36mray::PPO.train()\\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def _try_to_compute_deterministic_class_id(cls, depth=5):
    """Attempt to produce a deterministic class ID for a given class.

    The goal here is for the class ID to be the same when this is run on
    different worker processes. Pickling, loading, and pickling again seems to
    produce more consistent results than simply pickling. This is a bit crazy
    and could cause problems, in which case we should revert it and figure out
    something better.

    Args:
        cls: The class to produce an ID for.
        depth: The number of times to repeatedly try to load and dump the
            string while trying to reach a fixed point.

    Returns:
        A class ID for this class. We attempt to make the class ID the same
            when this function is run on different workers, but that is not
            guaranteed.

    Raises:
        Exception: This could raise an exception if cloudpickle raises an
            exception.
    """
    # Pickling, loading, and pickling again seems to produce more consistent
    # results than simply pickling. This is a bit
    class_id = pickle.dumps(cls)
    for _ in range(depth):
        new_class_id = pickle.dumps(pickle.loads(class_id))
        if new_class_id == class_id:
            # We appear to have reached a fix point, so use this as the ID.
            return hashlib.shake_128(new_class_id).digest(
                ray_constants.ID_SIZE)
        class_id = new_class_id
    # We have not reached a fixed point, so we may end up with a different
    # class ID for this custom class on each worker, which could lead to the
    # same class definition being exported many many times.
    logger.warning(
        f"WARNING: Could not produce a deterministic class ID for class {cls}")
    # Bug fix: hash `class_id`, not `new_class_id`. When depth <= 0 the loop
    # body never runs and `new_class_id` is unbound, which raised a NameError
    # here. After at least one iteration the loop sets
    # `class_id = new_class_id`, so the two are equal and behavior for
    # depth >= 1 is unchanged.
    return hashlib.shake_128(class_id).digest(ray_constants.ID_SIZE)
|
def _try_to_compute_deterministic_class_id(cls, depth=5):
    """Attempt to produce a deterministic class ID for a given class.

    The goal here is for the class ID to be the same when this is run on
    different worker processes. Pickling, loading, and pickling again seems to
    produce more consistent results than simply pickling. This is a bit crazy
    and could cause problems, in which case we should revert it and figure out
    something better.

    Args:
        cls: The class to produce an ID for.
        depth: The number of times to repeatedly try to load and dump the
            string while trying to reach a fixed point.

    Returns:
        A class ID for this class. We attempt to make the class ID the same
            when this function is run on different workers, but that is not
            guaranteed.

    Raises:
        Exception: This could raise an exception if cloudpickle raises an
            exception.
    """
    # Pickling, loading, and pickling again seems to produce more consistent
    # results than simply pickling. This is a bit
    class_id = pickle.dumps(cls)
    for _ in range(depth):
        new_class_id = pickle.dumps(pickle.loads(class_id))
        if new_class_id == class_id:
            # We appear to have reached a fix point, so use this as the ID.
            return hashlib.sha1(new_class_id).digest()
        class_id = new_class_id
    # We have not reached a fixed point, so we may end up with a different
    # class ID for this custom class on each worker, which could lead to the
    # same class definition being exported many many times.
    logger.warning(
        f"WARNING: Could not produce a deterministic class ID for class {cls}")
    # Bug fix: hash `class_id`, not `new_class_id`. When depth <= 0 the loop
    # body never runs and `new_class_id` is unbound, which raised a NameError
    # here. After at least one iteration the loop sets
    # `class_id = new_class_id`, so the two are equal and behavior for
    # depth >= 1 is unchanged.
    return hashlib.sha1(class_id).digest()
|
[{'piece_type': 'error message', 'piece_content': '2020-10-05 01:55:09,393\\\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: \\\\u001b[36mray::PPO.train()\\\\u001b[39m (pid=4251, ip=172.30.96.106)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach\\nresult = fn(item)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__\\ntimeout_seconds=self.timeout_seconds)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in 
collect_episodes\\nmetric_lists = ray.get(collected)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'error message', 'piece_content': 'Failure # 1 (occurred at 2020-10-03_02-10-38)\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: ray::PPO.train() (pid=524, ip=172.30.58.198)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item 
in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 876, in apply_flatten\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 828, in add_wait_hooks\\nitem = next(it)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 471, in base_iterator\\nyield ray.get(futures, timeout=timeout)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer\\n\\nconfig["env"] = 
"QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'reproducing source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\nfrom ray.rllib.agents.ppo.ppo import UpdateKL, warn_about_bad_reward_scales\\nfrom ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, _get_shared_metrics\\nfrom ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches, \\\\\\nStandardizeFields, SelectExperiences\\nfrom ray.rllib.execution.train_ops import TrainOneStep\\nfrom ray.rllib.execution.metric_ops import StandardMetricsReporting\\nfrom ray.rllib.policy.policy import Policy\\nfrom ray.rllib.policy.sample_batch import SampleBatch\\nfrom ray.util.iter import from_actors\\n\\n\\ndef custom_ppo_execution_plan(workers, config):\\n"""Copy of PPO\\'s execution plan, except we store all ops in a list and return them."""\\n# Modified from ParallelRollout\\'s bulk_sync mode.\\nworkers.sync_weights()\\ndef report_timesteps(batch):\\nmetrics = _get_shared_metrics()\\nmetrics.counters[STEPS_SAMPLED_COUNTER] += batch.count\\nreturn batch\\nops = [from_actors(workers.remote_workers())]\\nops.append(ops[-1].batch_across_shards())\\nops.append(ops[-1].for_each(lambda batches: SampleBatch.concat_samples(batches)))\\nops.append(ops[-1].for_each(report_timesteps))\\n\\n# Collect batches for the trainable policies.\\nops.append(ops[-1].for_each(\\nSelectExperiences(workers.trainable_policies())))\\n# Concatenate the SampleBatches into one.\\nops.append(ops[-1].combine(\\nConcatBatches(min_batch_size=config["train_batch_size"])))\\n# Standardize 
advantages.\\nops.append(ops[-1].for_each(StandardizeFields(["advantages"])))\\n\\n# Perform one training step on the combined + standardized batch.\\nops.append(ops[-1].for_each(\\nTrainOneStep(\\nworkers,\\nnum_sgd_iter=config["num_sgd_iter"],\\nsgd_minibatch_size=config["sgd_minibatch_size"])))\\n\\n# Update KL after each round of training.\\nops.append(ops[-1].for_each(lambda t: t[1]).for_each(UpdateKL(workers)))\\n\\n# Warn about bad reward scales and return training metrics.\\nreturn (StandardMetricsReporting(ops[-1], workers, config) \\\\\\n.for_each(lambda result: warn_about_bad_reward_scales(config, result)),\\nops)\\n\\nclass ExecutionPlanWrapper:\\n"""A wrapper for custom_ppo_execution_plan that stores all ops in the object."""\\n\\ndef __init__(self, workers, config):\\nself.execution_plan, self.ops = custom_ppo_execution_plan(workers, config)\\n\\ndef __next__(self):\\nreturn next(self.execution_plan)\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer.with_updates(\\nname="CustomPPO",\\nexecution_plan=ExecutionPlanWrapper)\\n\\nconfig["env"] = "QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'other', 'piece_content': '2020-10-04 00:19:40,710\\\\u0009WARNING 
worker.py:1072 -- The node with node id f7c78d2999929f603ebdf4d2c4508f949f6dafb0 has been marked dead because the detector has missed too many heartbeats from it.'}]
|
2020-10-05 01:55:09,393\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \\u001b[36mray::PPO.train()\\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def _random_string():
    """Produce ``ray_constants.ID_SIZE`` random bytes.

    A fresh UUID4 supplies the randomness; SHAKE-128 stretches it to the
    configured ID size.
    """
    digest = hashlib.shake_128(uuid.uuid4().bytes).digest(
        ray_constants.ID_SIZE)
    assert len(digest) == ray_constants.ID_SIZE
    return digest
|
def _random_string():
    """Return a random byte string derived from a fresh UUID4.

    The bytes of a new UUID4 are hashed with SHA-1; the resulting
    digest is asserted to match ``ray_constants.ID_SIZE``.
    """
    digest = hashlib.sha1(uuid.uuid4().bytes).digest()
    assert len(digest) == ray_constants.ID_SIZE
    return digest
|
[{'piece_type': 'error message', 'piece_content': '2020-10-05 01:55:09,393\\\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: \\\\u001b[36mray::PPO.train()\\\\u001b[39m (pid=4251, ip=172.30.96.106)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach\\nresult = fn(item)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__\\ntimeout_seconds=self.timeout_seconds)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in 
collect_episodes\\nmetric_lists = ray.get(collected)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'error message', 'piece_content': 'Failure # 1 (occurred at 2020-10-03_02-10-38)\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: ray::PPO.train() (pid=524, ip=172.30.58.198)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item 
in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 876, in apply_flatten\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 828, in add_wait_hooks\\nitem = next(it)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 471, in base_iterator\\nyield ray.get(futures, timeout=timeout)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer\\n\\nconfig["env"] = 
"QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'reproducing source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\nfrom ray.rllib.agents.ppo.ppo import UpdateKL, warn_about_bad_reward_scales\\nfrom ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, _get_shared_metrics\\nfrom ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches, \\\\\\nStandardizeFields, SelectExperiences\\nfrom ray.rllib.execution.train_ops import TrainOneStep\\nfrom ray.rllib.execution.metric_ops import StandardMetricsReporting\\nfrom ray.rllib.policy.policy import Policy\\nfrom ray.rllib.policy.sample_batch import SampleBatch\\nfrom ray.util.iter import from_actors\\n\\n\\ndef custom_ppo_execution_plan(workers, config):\\n"""Copy of PPO\\'s execution plan, except we store all ops in a list and return them."""\\n# Modified from ParallelRollout\\'s bulk_sync mode.\\nworkers.sync_weights()\\ndef report_timesteps(batch):\\nmetrics = _get_shared_metrics()\\nmetrics.counters[STEPS_SAMPLED_COUNTER] += batch.count\\nreturn batch\\nops = [from_actors(workers.remote_workers())]\\nops.append(ops[-1].batch_across_shards())\\nops.append(ops[-1].for_each(lambda batches: SampleBatch.concat_samples(batches)))\\nops.append(ops[-1].for_each(report_timesteps))\\n\\n# Collect batches for the trainable policies.\\nops.append(ops[-1].for_each(\\nSelectExperiences(workers.trainable_policies())))\\n# Concatenate the SampleBatches into one.\\nops.append(ops[-1].combine(\\nConcatBatches(min_batch_size=config["train_batch_size"])))\\n# Standardize 
advantages.\\nops.append(ops[-1].for_each(StandardizeFields(["advantages"])))\\n\\n# Perform one training step on the combined + standardized batch.\\nops.append(ops[-1].for_each(\\nTrainOneStep(\\nworkers,\\nnum_sgd_iter=config["num_sgd_iter"],\\nsgd_minibatch_size=config["sgd_minibatch_size"])))\\n\\n# Update KL after each round of training.\\nops.append(ops[-1].for_each(lambda t: t[1]).for_each(UpdateKL(workers)))\\n\\n# Warn about bad reward scales and return training metrics.\\nreturn (StandardMetricsReporting(ops[-1], workers, config) \\\\\\n.for_each(lambda result: warn_about_bad_reward_scales(config, result)),\\nops)\\n\\nclass ExecutionPlanWrapper:\\n"""A wrapper for custom_ppo_execution_plan that stores all ops in the object."""\\n\\ndef __init__(self, workers, config):\\nself.execution_plan, self.ops = custom_ppo_execution_plan(workers, config)\\n\\ndef __next__(self):\\nreturn next(self.execution_plan)\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer.with_updates(\\nname="CustomPPO",\\nexecution_plan=ExecutionPlanWrapper)\\n\\nconfig["env"] = "QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'other', 'piece_content': '2020-10-04 00:19:40,710\\\\u0009WARNING 
worker.py:1072 -- The node with node id f7c78d2999929f603ebdf4d2c4508f949f6dafb0 has been marked dead because the detector has missed too many heartbeats from it.'}]
|
2020-10-05 01:55:09,393\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \\u001b[36mray::PPO.train()\\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def run_function_on_all_workers(self, function,
                                run_on_other_drivers=False):
    """Run arbitrary code on all of the workers.
    This function will first be run on the driver, and then it will be
    exported to all of the workers to be run. It will also be run on any
    new workers that register later. If ray.init has not been called yet,
    then cache the function and export it later.
    Args:
        function (Callable): The function to run on all of the workers. It
            takes only one argument, a worker info dict. If it returns
            anything, its return values will not be used.
        run_on_other_drivers: The boolean that indicates whether we want to
            run this function on other drivers. One case is we may need to
            share objects across drivers.
    """
    # If ray.init has not been called yet, then cache the function and
    # export it when connect is called. Otherwise, run the function on all
    # workers.
    if self.mode is None:
        self.cached_functions_to_run.append(function)
    else:
        # Attempt to pickle the function before we need it. This could
        # fail, and it is more convenient if the failure happens before we
        # actually run the function locally.
        pickled_function = pickle.dumps(function)
        # shake_128's variable-length digest is sized exactly to
        # ray_constants.ID_SIZE bytes so the ID matches other Ray IDs.
        function_to_run_id = hashlib.shake_128(pickled_function).digest(
            ray_constants.ID_SIZE)
        key = b"FunctionsToRun:" + function_to_run_id
        # First run the function on the driver.
        # We always run the task locally.
        function({"worker": self})
        # Check if the function has already been put into redis.
        # SETNX acts as a lock: only the first caller to set the lock key
        # proceeds to export the function.
        function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
        if not function_exported:
            # In this case, the function has already been exported, so
            # we don't need to export it again.
            return
        check_oversized_pickle(pickled_function, function.__name__,
                               "function", self)
        # Run the function on all workers.
        # The hash fields carry everything a worker needs to execute the
        # function; pushing the key onto "Exports" notifies workers.
        self.redis_client.hset(
            key,
            mapping={
                "job_id": self.current_job_id.binary(),
                "function_id": function_to_run_id,
                "function": pickled_function,
                "run_on_other_drivers": str(run_on_other_drivers),
            })
        self.redis_client.rpush("Exports", key)
|
def run_function_on_all_workers(self, function,
                                run_on_other_drivers=False):
    """Run arbitrary code on all of the workers.
    This function will first be run on the driver, and then it will be
    exported to all of the workers to be run. It will also be run on any
    new workers that register later. If ray.init has not been called yet,
    then cache the function and export it later.
    Args:
        function (Callable): The function to run on all of the workers. It
            takes only one argument, a worker info dict. If it returns
            anything, its return values will not be used.
        run_on_other_drivers: The boolean that indicates whether we want to
            run this function on other drivers. One case is we may need to
            share objects across drivers.
    """
    # If ray.init has not been called yet, then cache the function and
    # export it when connect is called. Otherwise, run the function on all
    # workers.
    if self.mode is None:
        self.cached_functions_to_run.append(function)
    else:
        # Attempt to pickle the function before we need it. This could
        # fail, and it is more convenient if the failure happens before we
        # actually run the function locally.
        pickled_function = pickle.dumps(function)
        # The SHA-1 digest of the pickled bytes serves as a
        # content-addressed ID, so identical functions dedupe.
        function_to_run_id = hashlib.sha1(pickled_function).digest()
        key = b"FunctionsToRun:" + function_to_run_id
        # First run the function on the driver.
        # We always run the task locally.
        function({"worker": self})
        # Check if the function has already been put into redis.
        # SETNX acts as a lock: only the first caller to set the lock key
        # proceeds to export the function.
        function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
        if not function_exported:
            # In this case, the function has already been exported, so
            # we don't need to export it again.
            return
        check_oversized_pickle(pickled_function, function.__name__,
                               "function", self)
        # Run the function on all workers.
        # The hash fields carry everything a worker needs to execute the
        # function; pushing the key onto "Exports" notifies workers.
        self.redis_client.hset(
            key,
            mapping={
                "job_id": self.current_job_id.binary(),
                "function_id": function_to_run_id,
                "function": pickled_function,
                "run_on_other_drivers": str(run_on_other_drivers),
            })
        self.redis_client.rpush("Exports", key)
|
[{'piece_type': 'error message', 'piece_content': '2020-10-05 01:55:09,393\\\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: \\\\u001b[36mray::PPO.train()\\\\u001b[39m (pid=4251, ip=172.30.96.106)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach\\nresult = fn(item)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__\\ntimeout_seconds=self.timeout_seconds)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in 
collect_episodes\\nmetric_lists = ray.get(collected)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'error message', 'piece_content': 'Failure # 1 (occurred at 2020-10-03_02-10-38)\\nTraceback (most recent call last):\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError: ray::PPO.train() (pid=524, ip=172.30.58.198)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train\\nraise e\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train\\nresult = Trainable.train(self)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step\\nres = next(self.train_exec_impl)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__\\nreturn next(self.built_iterator)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 843, in apply_filter\\nfor item 
in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 876, in apply_flatten\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 828, in add_wait_hooks\\nitem = next(it)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach\\nfor item in it:\\n[Previous line repeated 1 more time]\\nFile "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 471, in base_iterator\\nyield ray.get(futures, timeout=timeout)\\nray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.'}, {'piece_type': 'source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer\\n\\nconfig["env"] = 
"QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'reproducing source code', 'piece_content': 'import copy\\n\\nimport gym\\nimport numpy as np\\nimport ray\\nimport ray.rllib.agents.ppo as ppo\\nfrom ray.rllib.agents.ppo.ppo import UpdateKL, warn_about_bad_reward_scales\\nfrom ray.rllib.execution.common import STEPS_SAMPLED_COUNTER, _get_shared_metrics\\nfrom ray.rllib.execution.rollout_ops import ParallelRollouts, ConcatBatches, \\\\\\nStandardizeFields, SelectExperiences\\nfrom ray.rllib.execution.train_ops import TrainOneStep\\nfrom ray.rllib.execution.metric_ops import StandardMetricsReporting\\nfrom ray.rllib.policy.policy import Policy\\nfrom ray.rllib.policy.sample_batch import SampleBatch\\nfrom ray.util.iter import from_actors\\n\\n\\ndef custom_ppo_execution_plan(workers, config):\\n"""Copy of PPO\\'s execution plan, except we store all ops in a list and return them."""\\n# Modified from ParallelRollout\\'s bulk_sync mode.\\nworkers.sync_weights()\\ndef report_timesteps(batch):\\nmetrics = _get_shared_metrics()\\nmetrics.counters[STEPS_SAMPLED_COUNTER] += batch.count\\nreturn batch\\nops = [from_actors(workers.remote_workers())]\\nops.append(ops[-1].batch_across_shards())\\nops.append(ops[-1].for_each(lambda batches: SampleBatch.concat_samples(batches)))\\nops.append(ops[-1].for_each(report_timesteps))\\n\\n# Collect batches for the trainable policies.\\nops.append(ops[-1].for_each(\\nSelectExperiences(workers.trainable_policies())))\\n# Concatenate the SampleBatches into one.\\nops.append(ops[-1].combine(\\nConcatBatches(min_batch_size=config["train_batch_size"])))\\n# Standardize 
advantages.\\nops.append(ops[-1].for_each(StandardizeFields(["advantages"])))\\n\\n# Perform one training step on the combined + standardized batch.\\nops.append(ops[-1].for_each(\\nTrainOneStep(\\nworkers,\\nnum_sgd_iter=config["num_sgd_iter"],\\nsgd_minibatch_size=config["sgd_minibatch_size"])))\\n\\n# Update KL after each round of training.\\nops.append(ops[-1].for_each(lambda t: t[1]).for_each(UpdateKL(workers)))\\n\\n# Warn about bad reward scales and return training metrics.\\nreturn (StandardMetricsReporting(ops[-1], workers, config) \\\\\\n.for_each(lambda result: warn_about_bad_reward_scales(config, result)),\\nops)\\n\\nclass ExecutionPlanWrapper:\\n"""A wrapper for custom_ppo_execution_plan that stores all ops in the object."""\\n\\ndef __init__(self, workers, config):\\nself.execution_plan, self.ops = custom_ppo_execution_plan(workers, config)\\n\\ndef __next__(self):\\nreturn next(self.execution_plan)\\n\\n\\nif __name__ == \\'__main__\\':\\nray.init(address="auto")\\n\\nconfig = copy.deepcopy(ppo.DEFAULT_CONFIG)\\nconfig.update({\\n"rollout_fragment_length": 32,\\n"train_batch_size": 8192,\\n"sgd_minibatch_size": 512,\\n"num_sgd_iter": 1,\\n"num_workers": 256,\\n"num_gpus": 1,\\n"num_sgd_iter": 1,\\n"num_cpus_per_worker": 0.25,\\n"num_cpus_for_driver": 1,\\n"model": {"fcnet_hiddens": [1024, 1024]},\\n"framework": "torch",\\n"lr": ray.tune.sample_from(lambda s: np.random.random()),\\n})\\n\\ntrainer_cls = ppo.PPOTrainer.with_updates(\\nname="CustomPPO",\\nexecution_plan=ExecutionPlanWrapper)\\n\\nconfig["env"] = "QbertNoFrameskip-v4"\\nray.tune.run(trainer_cls,\\nconfig=config,\\nfail_fast=True,\\nreuse_actors=False,\\nqueue_trials=True,\\nnum_samples=100,\\nscheduler=ray.tune.schedulers.ASHAScheduler(\\ntime_attr=\\'training_iteration\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\ngrace_period=100,\\nreduction_factor=3,\\nbrackets=3),\\n)'}, {'piece_type': 'other', 'piece_content': '2020-10-04 00:19:40,710\\\\u0009WARNING 
worker.py:1072 -- The node with node id f7c78d2999929f603ebdf4d2c4508f949f6dafb0 has been marked dead because the detector has missed too many heartbeats from it.'}]
|
2020-10-05 01:55:09,393\\u0009ERROR trial_runner.py:567 -- Trial PPO_QbertNoFrameskip-v4_b43b9_00027: Error processing event.
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trial_runner.py", line 515, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 1428, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError: \\u001b[36mray::PPO.train()\\u001b[39m (pid=4251, ip=172.30.96.106)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 516, in train
raise e
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer.py", line 505, in train
result = Trainable.train(self)
File "/usr/local/lib/python3.6/dist-packages/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/agents/trainer_template.py", line 134, in step
res = next(self.train_exec_impl)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 756, in __next__
return next(self.built_iterator)
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 783, in apply_foreach
for item in it:
File "/usr/local/lib/python3.6/dist-packages/ray/util/iter.py", line 791, in apply_foreach
result = fn(item)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/execution/metric_ops.py", line 79, in __call__
timeout_seconds=self.timeout_seconds)
File "/usr/local/lib/python3.6/dist-packages/ray/rllib/evaluation/metrics.py", line 75, in collect_episodes
metric_lists = ray.get(collected)
ray.exceptions.RayActorError: The actor died unexpectedly before finishing this task.
|
ray.exceptions.RayTaskError
|
def _home(self):
    """Return the container's home directory, caching the result.

    Makes up to MAX_HOME_RETRIES attempts, sleeping HOME_RETRY_DELAY_S
    seconds between failed ones; the last attempt lets any exception
    propagate to the caller.
    """
    if self._home_cached is not None:
        return self._home_cached
    attempts_left = MAX_HOME_RETRIES
    while attempts_left > 1:
        attempts_left -= 1
        try:
            self._home_cached = self._try_to_get_home()
            return self._home_cached
        except Exception:
            # TODO (Dmitri): Identify the exception we're trying to avoid.
            logger.info("Error reading container's home directory. "
                        f"Retrying in {HOME_RETRY_DELAY_S} seconds.")
            time.sleep(HOME_RETRY_DELAY_S)
    # Final attempt: do not swallow the error this time.
    self._home_cached = self._try_to_get_home()
    return self._home_cached
|
def _home(self):
# TODO (Dmitri): Think about how to use the node's HOME variable
# without making an extra kubectl exec call.
if self._home_cached is None:
cmd = self.kubectl + [
"exec", "-it", self.node_id, "--", "printenv", "HOME"
]
joined_cmd = " ".join(cmd)
raw_out = self.process_runner.check_output(joined_cmd, shell=True)
self._home_cached = raw_out.decode().strip("\\n\\r")
return self._home_cached
|
[{'piece_type': 'error message', 'piece_content': 'Updating cluster configuration. [hash=0194a452ebd82e0ab6eade7b4dd3a4f4f775d5de]\\nNew status: syncing-files\\n[2/7] Processing file mounts\\n2020-12-15 09:03:24,116\\tINFO command_runner.py:169 -- NodeUpdater: ray-head-m5nvd: Running kubectl -n ray exec -it ray-head-m5nvd -- bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (mkdir -p ~)\\'\\nError from server: error dialing backend: EOF\\nNew status: update-failed\\n!!!\\nSetup command `kubectl -n ray exec -it ray-head-m5nvd -- printenv HOME` failed with exit code 1. stderr:\\n!!!\\n\\nException in thread Thread-1:\\nTraceback (most recent call last):\\nFile "/Users/rliaw/miniconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner\\nself.run()\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 124, in run\\nself.do_update()\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 312, in do_update\\nself.rsync_up, step_numbers=(1, NUM_SETUP_STEPS))\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 210, in sync_file_mounts\\ndo_sync(remote_path, local_path)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 198, in do_sync\\nlocal_path, remote_path, docker_mount_if_possible=True)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 446, in rsync_up\\nself.cmd_runner.run_rsync_up(source, target, options=options)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 197, in run_rsync_up\\ntarget = self._home + target[1:]\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 258, in _home\\nraw_out = self.process_runner.check_output(joined_cmd, 
shell=True)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/subprocess.py", line 395, in check_output\\n**kwargs).stdout\\nFile "/Users/rliaw/miniconda3/lib/python3.7/subprocess.py", line 487, in run\\noutput=stdout, stderr=stderr)\\nsubprocess.CalledProcessError: Command \\'kubectl -n ray exec -it ray-head-m5nvd -- printenv HOME\\' returned non-zero exit status 1.\\n\\nFailed to setup head node.'}]
|
Updating cluster configuration. [hash=0194a452ebd82e0ab6eade7b4dd3a4f4f775d5de]
New status: syncing-files
[2/7] Processing file mounts
2020-12-15 09:03:24,116 INFO command_runner.py:169 -- NodeUpdater: ray-head-m5nvd: Running kubectl -n ray exec -it ray-head-m5nvd -- bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (mkdir -p ~)'
Error from server: error dialing backend: EOF
New status: update-failed
!!!
Setup command `kubectl -n ray exec -it ray-head-m5nvd -- printenv HOME` failed with exit code 1. stderr:
!!!
Exception in thread Thread-1:
Traceback (most recent call last):
File "/Users/rliaw/miniconda3/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 124, in run
self.do_update()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 312, in do_update
self.rsync_up, step_numbers=(1, NUM_SETUP_STEPS))
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 210, in sync_file_mounts
do_sync(remote_path, local_path)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 198, in do_sync
local_path, remote_path, docker_mount_if_possible=True)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/updater.py", line 446, in rsync_up
self.cmd_runner.run_rsync_up(source, target, options=options)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 197, in run_rsync_up
target = self._home + target[1:]
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/command_runner.py", line 258, in _home
raw_out = self.process_runner.check_output(joined_cmd, shell=True)
File "/Users/rliaw/miniconda3/lib/python3.7/subprocess.py", line 395, in check_output
**kwargs).stdout
File "/Users/rliaw/miniconda3/lib/python3.7/subprocess.py", line 487, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command 'kubectl -n ray exec -it ray-head-m5nvd -- printenv HOME' returned non-zero exit status 1.
Failed to setup head node.
|
subprocess.CalledProcessError
|
def init(
        address=None,
        *,
        num_cpus=None,
        num_gpus=None,
        resources=None,
        object_store_memory=None,
        local_mode=False,
        ignore_reinit_error=False,
        include_dashboard=None,
        dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
        job_config=None,
        configure_logging=True,
        logging_level=logging.INFO,
        logging_format=ray_constants.LOGGER_FORMAT,
        log_to_driver=True,
        # The following are unstable parameters and their use is discouraged.
        _enable_object_reconstruction=False,
        _redis_max_memory=None,
        _plasma_directory=None,
        _node_ip_address=ray_constants.NODE_DEFAULT_IP,
        _driver_object_store_memory=None,
        _memory=None,
        _redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
        _java_worker_options=None,
        _temp_dir=None,
        _lru_evict=False,
        _metrics_export_port=None,
        _system_config=None):
    """
    Connect to an existing Ray cluster or start one and connect to it.

    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    To start Ray and all of the relevant processes, use this as follows:

    .. code-block:: python

        ray.init()

    To connect to an existing Ray cluster, use this as follows (substituting
    in the appropriate address):

    .. code-block:: python

        ray.init(address="123.45.67.89:6379")

    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init().

    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis,
            a raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits. If the driver
            is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the the cluster, removing the need to
            specify a specific node address.
        num_cpus (int): Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus (int): Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is automatically set based on
            available system memory.
        local_mode (bool): If true, the code will be executed serially. This
            is useful for debugging.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port: The port to bind the dashboard server to. Defaults to
            8265.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level, defaults to logging.INFO. Ignored unless
            "configure_logging" is true.
        logging_format: Logging format, defaults to string containing a
            timestamp, filename, line number, and message. See the source file
            ray_constants.py for details. Ignored unless "configure_logging"
            is true.
        log_to_driver (bool): If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        _enable_object_reconstruction (bool): If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _redis_max_memory: Redis max memory.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address (str): The IP address of the node that we are on.
        _driver_object_store_memory (int): Limit the amount of memory the
            driver can use in the object store for creating objects.
        _memory: Amount of reservable memory resource to create.
        _redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir (str): If provided, specifies the root temporary
            directory for the Ray process. Defaults to an OS-specific
            conventional location, e.g., "/tmp/ray".
        _java_worker_options: Overwrite the options to start Java workers.
        _lru_evict (bool): If True, when an object store is full, it will evict
            objects in LRU order to make more space and when under memory
            pressure, ray.ObjectLostError may be thrown. If False, then
            reference counting will be used to decide which objects are safe
            to evict and when under memory pressure, ray.ObjectStoreFullError
            may be thrown.
        _metrics_export_port(int): Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _system_config (dict): Configuration for overriding
            RayConfig defaults. For testing purposes ONLY.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # Try to increase the file descriptor limit, which is too low by
    # default for Ray: https://github.com/ray-project/ray/issues/11239
    try:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            # Cap the new soft limit at 65536: raising it all the way to
            # `hard` can exceed what the OS will actually serve (seen on
            # macOS, where Redis then rejects its maxclients setting).
            # https://github.com/ray-project/ray/issues/12059
            soft = max(soft, min(hard, 65536))
            logger.debug("Automatically increasing RLIMIT_NOFILE to max "
                         "value of {}".format(soft))
            try:
                # Apply the capped soft limit computed above (previously
                # this passed (hard, hard), silently ignoring the cap).
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
            except ValueError:
                logger.debug("Failed to raise limit.")
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            logger.warning(
                "File descriptor limit {} is too low for production "
                "servers and may result in connection errors. "
                "At least 8192 is recommended. --- "
                "Fix with 'ulimit -n 8192'".format(soft))
    except ImportError:
        logger.debug("Could not import resource module (on Windows)")
        pass

    # RAY_ADDRESS only takes effect when no explicit address (other than
    # "auto") was passed in; an explicit conflicting address is an error.
    if "RAY_ADDRESS" in os.environ:
        if address is None or address == "auto":
            address = os.environ["RAY_ADDRESS"]
        else:
            raise RuntimeError(
                "Cannot use both the RAY_ADDRESS environment variable and "
                "the address argument of ray.init simultaneously. If you "
                "use RAY_ADDRESS to connect to a specific Ray cluster, "
                "please call ray.init() or ray.init(address=\"auto\") on the "
                "driver.")

    # Convert hostnames to numerical IP address.
    if _node_ip_address is not None:
        node_ip_address = services.address_to_ip(_node_ip_address)
    raylet_ip_address = node_ip_address

    if address:
        redis_address, _, _ = services.validate_redis_address(address)
    else:
        redis_address = None

    if configure_logging:
        setup_logger(logging_level, logging_format)

    if redis_address is not None:
        logger.info(
            f"Connecting to existing Ray cluster at address: {redis_address}")

    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE

    if global_worker.connected:
        if ignore_reinit_error:
            logger.info(
                "Calling ray.init() again after it has already been called.")
            return
        else:
            raise RuntimeError("Maybe you called ray.init twice by accident? "
                               "This error can be suppressed by passing in "
                               "'ignore_reinit_error=True' or by calling "
                               "'ray.shutdown()' prior to 'ray.init()'.")

    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")

    global _global_node
    if redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            object_ref_seed=None,
            driver_mode=driver_mode,
            redirect_worker_output=None,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            redis_max_memory=_redis_max_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            java_worker_options=_java_worker_options,
            start_initial_python_workers_for_first_job=True,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided.")
        if resources is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "resources must not be provided.")
        if object_store_memory is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "object_store_memory must not be provided.")
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError("When connecting to an existing cluster, "
                             "_system_config must not be provided.")
        if _lru_evict:
            raise ValueError("When connecting to an existing cluster, "
                             "_lru_evict must not be provided.")
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided.")
        # In this case, we only need to connect the node.
        ray_params = ray.parameter.RayParams(
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            redis_address=redis_address,
            redis_password=_redis_password,
            object_ref_seed=None,
            temp_dir=_temp_dir,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)

    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        job_config=job_config)

    for hook in _post_init_hooks:
        hook()

    node_id = global_worker.core_worker.get_current_node_id()
    return dict(_global_node.address_info, node_id=node_id.hex())
|
def init(
        address=None,
        *,
        num_cpus=None,
        num_gpus=None,
        resources=None,
        object_store_memory=None,
        local_mode=False,
        ignore_reinit_error=False,
        include_dashboard=None,
        dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
        job_config=None,
        configure_logging=True,
        logging_level=logging.INFO,
        logging_format=ray_constants.LOGGER_FORMAT,
        log_to_driver=True,
        # The following are unstable parameters and their use is discouraged.
        _enable_object_reconstruction=False,
        _redis_max_memory=None,
        _plasma_directory=None,
        _node_ip_address=ray_constants.NODE_DEFAULT_IP,
        _driver_object_store_memory=None,
        _memory=None,
        _redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
        _java_worker_options=None,
        _temp_dir=None,
        _lru_evict=False,
        _metrics_export_port=None,
        _system_config=None):
    """
    Connect to an existing Ray cluster or start one and connect to it.

    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    To start Ray and all of the relevant processes, use this as follows:

    .. code-block:: python

        ray.init()

    To connect to an existing Ray cluster, use this as follows (substituting
    in the appropriate address):

    .. code-block:: python

        ray.init(address="123.45.67.89:6379")

    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init().

    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis,
            a raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits. If the driver
            is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the the cluster, removing the need to
            specify a specific node address.
        num_cpus (int): Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus (int): Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is automatically set based on
            available system memory.
        local_mode (bool): If true, the code will be executed serially. This
            is useful for debugging.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port: The port to bind the dashboard server to. Defaults to
            8265.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level, defaults to logging.INFO. Ignored unless
            "configure_logging" is true.
        logging_format: Logging format, defaults to string containing a
            timestamp, filename, line number, and message. See the source file
            ray_constants.py for details. Ignored unless "configure_logging"
            is true.
        log_to_driver (bool): If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        _enable_object_reconstruction (bool): If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _redis_max_memory: Redis max memory.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address (str): The IP address of the node that we are on.
        _driver_object_store_memory (int): Limit the amount of memory the
            driver can use in the object store for creating objects.
        _memory: Amount of reservable memory resource to create.
        _redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir (str): If provided, specifies the root temporary
            directory for the Ray process. Defaults to an OS-specific
            conventional location, e.g., "/tmp/ray".
        _java_worker_options: Overwrite the options to start Java workers.
        _lru_evict (bool): If True, when an object store is full, it will evict
            objects in LRU order to make more space and when under memory
            pressure, ray.ObjectLostError may be thrown. If False, then
            reference counting will be used to decide which objects are safe
            to evict and when under memory pressure, ray.ObjectStoreFullError
            may be thrown.
        _metrics_export_port(int): Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _system_config (dict): Configuration for overriding
            RayConfig defaults. For testing purposes ONLY.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # Try to increase the file descriptor limit, which is too low by
    # default for Ray: https://github.com/ray-project/ray/issues/11239
    try:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            # Cap the new soft limit at 65536 instead of jumping straight to
            # `hard`: on some platforms (notably macOS) `hard` exceeds what
            # the OS will actually serve, and Redis then fails to apply its
            # maxclients setting at startup.
            # https://github.com/ray-project/ray/issues/12059
            soft = max(soft, min(hard, 65536))
            logger.debug("Automatically increasing RLIMIT_NOFILE to max "
                         "value of {}".format(soft))
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
            except ValueError:
                logger.debug("Failed to raise limit.")
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            logger.warning(
                "File descriptor limit {} is too low for production "
                "servers and may result in connection errors. "
                "At least 8192 is recommended. --- "
                "Fix with 'ulimit -n 8192'".format(soft))
    except ImportError:
        logger.debug("Could not import resource module (on Windows)")
        pass

    # RAY_ADDRESS only takes effect when no explicit address (other than
    # "auto") was passed in; an explicit conflicting address is an error.
    if "RAY_ADDRESS" in os.environ:
        if address is None or address == "auto":
            address = os.environ["RAY_ADDRESS"]
        else:
            raise RuntimeError(
                "Cannot use both the RAY_ADDRESS environment variable and "
                "the address argument of ray.init simultaneously. If you "
                "use RAY_ADDRESS to connect to a specific Ray cluster, "
                "please call ray.init() or ray.init(address=\"auto\") on the "
                "driver.")

    # Convert hostnames to numerical IP address.
    if _node_ip_address is not None:
        node_ip_address = services.address_to_ip(_node_ip_address)
    raylet_ip_address = node_ip_address

    if address:
        redis_address, _, _ = services.validate_redis_address(address)
    else:
        redis_address = None

    if configure_logging:
        setup_logger(logging_level, logging_format)

    if redis_address is not None:
        logger.info(
            f"Connecting to existing Ray cluster at address: {redis_address}")

    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE

    if global_worker.connected:
        if ignore_reinit_error:
            logger.info(
                "Calling ray.init() again after it has already been called.")
            return
        else:
            raise RuntimeError("Maybe you called ray.init twice by accident? "
                               "This error can be suppressed by passing in "
                               "'ignore_reinit_error=True' or by calling "
                               "'ray.shutdown()' prior to 'ray.init()'.")

    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")

    global _global_node
    if redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            object_ref_seed=None,
            driver_mode=driver_mode,
            redirect_worker_output=None,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            redis_max_memory=_redis_max_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            java_worker_options=_java_worker_options,
            start_initial_python_workers_for_first_job=True,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided.")
        if resources is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "resources must not be provided.")
        if object_store_memory is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "object_store_memory must not be provided.")
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError("When connecting to an existing cluster, "
                             "_system_config must not be provided.")
        if _lru_evict:
            raise ValueError("When connecting to an existing cluster, "
                             "_lru_evict must not be provided.")
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided.")
        # In this case, we only need to connect the node.
        ray_params = ray.parameter.RayParams(
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            redis_address=redis_address,
            redis_password=_redis_password,
            object_ref_seed=None,
            temp_dir=_temp_dir,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)

    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        job_config=job_config)

    for hook in _post_init_hooks:
        hook()

    node_id = global_worker.core_worker.get_current_node_id()
    return dict(_global_node.address_info, node_id=node_id.hex())
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "debugging.py", line 2, in <module>\\nray.init()\\nFile "/Users/haochen/code/ant_ray/python/ray/worker.py", line 740, in init\\nray_params=ray_params)\\nFile "/Users/haochen/code/ant_ray/python/ray/node.py", line 200, in __init__\\nself.start_head_processes()\\nFile "/Users/haochen/code/ant_ray/python/ray/node.py", line 801, in start_head_processes\\nself.start_redis()\\nFile "/Users/haochen/code/ant_ray/python/ray/node.py", line 580, in start_redis\\nfate_share=self.kernel_fate_share)\\nFile "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 720, in start_redis\\nfate_share=fate_share)\\nFile "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 902, in _start_redis_instance\\nulimit_n - redis_client_buffer)\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 1243, in config_set\\nreturn self.execute_command(\\'CONFIG SET\\', name, value)\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 901, in execute_command\\nreturn self.parse_response(conn, command_name, **options)\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 915, in parse_response\\nresponse = connection.read_response()\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/connection.py", line 747, in read_response\\nraise response\\nredis.exceptions.ResponseError: The operating system is not able to handle the specified number of clients, try with -33'}]
|
Traceback (most recent call last):
File "debugging.py", line 2, in <module>
ray.init()
File "/Users/haochen/code/ant_ray/python/ray/worker.py", line 740, in init
ray_params=ray_params)
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 200, in __init__
self.start_head_processes()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 801, in start_head_processes
self.start_redis()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 580, in start_redis
fate_share=self.kernel_fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 720, in start_redis
fate_share=fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 902, in _start_redis_instance
ulimit_n - redis_client_buffer)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 1243, in config_set
return self.execute_command('CONFIG SET', name, value)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 901, in execute_command
return self.parse_response(conn, command_name, **options)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 915, in parse_response
response = connection.read_response()
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/connection.py", line 747, in read_response
raise response
redis.exceptions.ResponseError: The operating system is not able to handle the specified number of clients, try with -33
|
redis.exceptions.ResponseError
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_temp_dir=None,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_java_worker_options: Overwrite the options to start Java workers.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
"please call ray.init() or ray.init(address=\\"auto\\") on the "
"driver.")
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
java_worker_options=_java_worker_options,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _lru_evict:
raise ValueError("When connecting to an existing cluster, "
"_lru_evict must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
def init(
address=None,
*,
num_cpus=None,
num_gpus=None,
resources=None,
object_store_memory=None,
local_mode=False,
ignore_reinit_error=False,
include_dashboard=None,
dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
job_config=None,
configure_logging=True,
logging_level=logging.INFO,
logging_format=ray_constants.LOGGER_FORMAT,
log_to_driver=True,
# The following are unstable parameters and their use is discouraged.
_enable_object_reconstruction=False,
_redis_max_memory=None,
_plasma_directory=None,
_node_ip_address=ray_constants.NODE_DEFAULT_IP,
_driver_object_store_memory=None,
_memory=None,
_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
_java_worker_options=None,
_temp_dir=None,
_lru_evict=False,
_metrics_export_port=None,
_system_config=None):
"""
Connect to an existing Ray cluster or start one and connect to it.
This method handles two cases; either a Ray cluster already exists and we
just attach this driver to it or we start all of the processes associated
with a Ray cluster and attach to the newly started cluster.
To start Ray and all of the relevant processes, use this as follows:
.. code-block:: python
ray.init()
To connect to an existing Ray cluster, use this as follows (substituting
in the appropriate address):
.. code-block:: python
ray.init(address="123.45.67.89:6379")
You can also define an environment variable called `RAY_ADDRESS` in
the same format as the `address` parameter to connect to an existing
cluster with ray.init().
Args:
address (str): The address of the Ray cluster to connect to. If
this address is not provided, then this command will start Redis,
a raylet, a plasma store, a plasma manager, and some workers.
It will also kill these processes when Python exits. If the driver
is running on a node in a Ray cluster, using `auto` as the value
tells the driver to detect the the cluster, removing the need to
specify a specific node address.
num_cpus (int): Number of CPUs the user wishes to assign to each
raylet. By default, this is set based on virtual cores.
num_gpus (int): Number of GPUs the user wishes to assign to each
raylet. By default, this is set based on detected GPUs.
resources: A dictionary mapping the names of custom resources to the
quantities for them available.
object_store_memory: The amount of memory (in bytes) to start the
object store with. By default, this is automatically set based on
available system memory.
local_mode (bool): If true, the code will be executed serially. This
is useful for debugging.
ignore_reinit_error: If true, Ray suppresses errors from calling
ray.init() a second time. Ray won't be restarted.
include_dashboard: Boolean flag indicating whether or not to start the
Ray dashboard, which displays the status of the Ray
cluster. If this argument is None, then the UI will be started if
the relevant dependencies are present.
dashboard_host: The host to bind the dashboard server to. Can either be
localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
By default, this is set to localhost to prevent access from
external machines.
dashboard_port: The port to bind the dashboard server to. Defaults to
8265.
job_config (ray.job_config.JobConfig): The job configuration.
configure_logging: True (default) if configuration of logging is
allowed here. Otherwise, the user may want to configure it
separately.
logging_level: Logging level, defaults to logging.INFO. Ignored unless
"configure_logging" is true.
logging_format: Logging format, defaults to string containing a
timestamp, filename, line number, and message. See the source file
ray_constants.py for details. Ignored unless "configure_logging"
is true.
log_to_driver (bool): If true, the output from all of the worker
processes on all nodes will be directed to the driver.
_enable_object_reconstruction (bool): If True, when an object stored in
the distributed plasma store is lost due to node failure, Ray will
attempt to reconstruct the object by re-executing the task that
created the object. Arguments to the task will be recursively
reconstructed. If False, then ray.ObjectLostError will be
thrown.
_redis_max_memory: Redis max memory.
_plasma_directory: Override the plasma mmap file directory.
_node_ip_address (str): The IP address of the node that we are on.
_driver_object_store_memory (int): Limit the amount of memory the
driver can use in the object store for creating objects.
_memory: Amount of reservable memory resource to create.
_redis_password (str): Prevents external clients without the password
from connecting to Redis if provided.
_temp_dir (str): If provided, specifies the root temporary
directory for the Ray process. Defaults to an OS-specific
conventional location, e.g., "/tmp/ray".
_java_worker_options: Overwrite the options to start Java workers.
_lru_evict (bool): If True, when an object store is full, it will evict
objects in LRU order to make more space and when under memory
pressure, ray.ObjectLostError may be thrown. If False, then
reference counting will be used to decide which objects are safe
to evict and when under memory pressure, ray.ObjectStoreFullError
may be thrown.
_metrics_export_port(int): Port number Ray exposes system metrics
through a Prometheus endpoint. It is currently under active
development, and the API is subject to change.
_system_config (dict): Configuration for overriding
RayConfig defaults. For testing purposes ONLY.
Returns:
Address information about the started processes.
Raises:
Exception: An exception is raised if an inappropriate combination of
arguments is passed in.
"""
# Try to increase the file descriptor limit, which is too low by
# default for Ray: https://github.com/ray-project/ray/issues/11239
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < hard:
# https://github.com/ray-project/ray/issues/12059
soft = max(soft, min(hard, 65536))
logger.debug("Automatically increasing RLIMIT_NOFILE to max "
"value of {}".format(hard))
try:
resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
except ValueError:
logger.debug("Failed to raise limit.")
soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
logger.warning(
"File descriptor limit {} is too low for production "
"servers and may result in connection errors. "
"At least 8192 is recommended. --- "
"Fix with 'ulimit -n 8192'".format(soft))
except ImportError:
logger.debug("Could not import resource module (on Windows)")
pass
if "RAY_ADDRESS" in os.environ:
if address is None or address == "auto":
address = os.environ["RAY_ADDRESS"]
else:
raise RuntimeError(
"Cannot use both the RAY_ADDRESS environment variable and "
"the address argument of ray.init simultaneously. If you "
"use RAY_ADDRESS to connect to a specific Ray cluster, "
"please call ray.init() or ray.init(address=\\"auto\\") on the "
"driver.")
# Convert hostnames to numerical IP address.
if _node_ip_address is not None:
node_ip_address = services.address_to_ip(_node_ip_address)
raylet_ip_address = node_ip_address
if address:
redis_address, _, _ = services.validate_redis_address(address)
else:
redis_address = None
if configure_logging:
setup_logger(logging_level, logging_format)
if redis_address is not None:
logger.info(
f"Connecting to existing Ray cluster at address: {redis_address}")
if local_mode:
driver_mode = LOCAL_MODE
else:
driver_mode = SCRIPT_MODE
if global_worker.connected:
if ignore_reinit_error:
logger.info(
"Calling ray.init() again after it has already been called.")
return
else:
raise RuntimeError("Maybe you called ray.init twice by accident? "
"This error can be suppressed by passing in "
"'ignore_reinit_error=True' or by calling "
"'ray.shutdown()' prior to 'ray.init()'.")
_system_config = _system_config or {}
if not isinstance(_system_config, dict):
raise TypeError("The _system_config must be a dict.")
global _global_node
if redis_address is None:
# In this case, we need to start a new cluster.
ray_params = ray.parameter.RayParams(
redis_address=redis_address,
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
object_ref_seed=None,
driver_mode=driver_mode,
redirect_worker_output=None,
redirect_output=None,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
num_redis_shards=None,
redis_max_clients=None,
redis_password=_redis_password,
plasma_directory=_plasma_directory,
huge_pages=None,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
memory=_memory,
object_store_memory=object_store_memory,
redis_max_memory=_redis_max_memory,
plasma_store_socket_name=None,
temp_dir=_temp_dir,
java_worker_options=_java_worker_options,
start_initial_python_workers_for_first_job=True,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
# Start the Ray processes. We set shutdown_at_exit=False because we
# shutdown the node in the ray.shutdown call that happens in the atexit
# handler. We still spawn a reaper process in case the atexit handler
# isn't called.
_global_node = ray.node.Node(
head=True,
shutdown_at_exit=False,
spawn_reaper=True,
ray_params=ray_params)
else:
# In this case, we are connecting to an existing cluster.
if num_cpus is not None or num_gpus is not None:
raise ValueError(
"When connecting to an existing cluster, num_cpus "
"and num_gpus must not be provided.")
if resources is not None:
raise ValueError("When connecting to an existing cluster, "
"resources must not be provided.")
if object_store_memory is not None:
raise ValueError("When connecting to an existing cluster, "
"object_store_memory must not be provided.")
if _system_config is not None and len(_system_config) != 0:
raise ValueError("When connecting to an existing cluster, "
"_system_config must not be provided.")
if _lru_evict:
raise ValueError("When connecting to an existing cluster, "
"_lru_evict must not be provided.")
if _enable_object_reconstruction:
raise ValueError(
"When connecting to an existing cluster, "
"_enable_object_reconstruction must not be provided.")
# In this case, we only need to connect the node.
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
raylet_ip_address=raylet_ip_address,
redis_address=redis_address,
redis_password=_redis_password,
object_ref_seed=None,
temp_dir=_temp_dir,
_system_config=_system_config,
lru_evict=_lru_evict,
enable_object_reconstruction=_enable_object_reconstruction,
metrics_export_port=_metrics_export_port)
_global_node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
connect(
_global_node,
mode=driver_mode,
log_to_driver=log_to_driver,
worker=global_worker,
driver_object_store_memory=_driver_object_store_memory,
job_id=None,
job_config=job_config)
for hook in _post_init_hooks:
hook()
node_id = global_worker.core_worker.get_current_node_id()
return dict(_global_node.address_info, node_id=node_id.hex())
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "debugging.py", line 2, in <module>\\nray.init()\\nFile "/Users/haochen/code/ant_ray/python/ray/worker.py", line 740, in init\\nray_params=ray_params)\\nFile "/Users/haochen/code/ant_ray/python/ray/node.py", line 200, in __init__\\nself.start_head_processes()\\nFile "/Users/haochen/code/ant_ray/python/ray/node.py", line 801, in start_head_processes\\nself.start_redis()\\nFile "/Users/haochen/code/ant_ray/python/ray/node.py", line 580, in start_redis\\nfate_share=self.kernel_fate_share)\\nFile "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 720, in start_redis\\nfate_share=fate_share)\\nFile "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 902, in _start_redis_instance\\nulimit_n - redis_client_buffer)\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 1243, in config_set\\nreturn self.execute_command(\\'CONFIG SET\\', name, value)\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 901, in execute_command\\nreturn self.parse_response(conn, command_name, **options)\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 915, in parse_response\\nresponse = connection.read_response()\\nFile "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/connection.py", line 747, in read_response\\nraise response\\nredis.exceptions.ResponseError: The operating system is not able to handle the specified number of clients, try with -33'}]
|
Traceback (most recent call last):
File "debugging.py", line 2, in <module>
ray.init()
File "/Users/haochen/code/ant_ray/python/ray/worker.py", line 740, in init
ray_params=ray_params)
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 200, in __init__
self.start_head_processes()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 801, in start_head_processes
self.start_redis()
File "/Users/haochen/code/ant_ray/python/ray/node.py", line 580, in start_redis
fate_share=self.kernel_fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 720, in start_redis
fate_share=fate_share)
File "/Users/haochen/code/ant_ray/python/ray/_private/services.py", line 902, in _start_redis_instance
ulimit_n - redis_client_buffer)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 1243, in config_set
return self.execute_command('CONFIG SET', name, value)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 901, in execute_command
return self.parse_response(conn, command_name, **options)
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/client.py", line 915, in parse_response
response = connection.read_response()
File "/Users/haochen/.pyenv/versions/3.7.6/lib/python3.7/site-packages/redis/connection.py", line 747, in read_response
raise response
redis.exceptions.ResponseError: The operating system is not able to handle the specified number of clients, try with -33
|
redis.exceptions.ResponseError
|
def shutdown(self) -> None:
    """Completely shut down the connected Serve instance.

    Shuts down all processes and deletes all state associated with the
    instance. No-op when shutdown already ran or Ray is not initialized.
    """
    needs_teardown = (not self._shutdown) and ray.is_initialized()
    if needs_teardown:
        ray.get(self._controller.shutdown.remote())
        ray.kill(self._controller, no_restart=True)
        # Block until the controller's named-actor registration is gone,
        # giving up (with a warning) after five seconds.
        deadline = time.time() + 5
        while True:
            try:
                ray.get_actor(self._controller_name)
            except ValueError:  # actor name is removed
                break
            if time.time() > deadline:
                logger.warning(
                    "Waited 5s for Serve to shutdown gracefully but "
                    "the controller is still not cleaned up. "
                    "You can ignore this warning if you are shutting "
                    "down the Ray cluster.")
                break
    self._shutdown = True
|
def shutdown(self) -> None:
"""Completely shut down the connected Serve instance.
Shuts down all processes and deletes all state associated with the
instance.
"""
if not self._shutdown:
ray.get(self._controller.shutdown.remote())
ray.kill(self._controller, no_restart=True)
self._shutdown = True
|
[{'piece_type': 'source code', 'piece_content': 'from ray import serve\\n\\nclient = serve.start(detached=True)\\nclient.shutdown()\\nclient = serve.start(detached=True)'}, {'piece_type': 'error message', 'piece_content': 'File descriptor limit 256 is too low for production servers and may result in connection errors. At least 8192 is recommended. --- Fix with \\'ulimit -n 8192\\'\\n2020-11-20 10:38:27,065\\tINFO services.py:1173 -- View the Ray dashboard at http://127.0.0.1:8265\\n(pid=raylet) 2020-11-20 10:38:29,628\\tINFO controller.py:313 -- Starting router with name \\'SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:192.168.31.141-0\\' on node \\'node:192.168.31.141-0\\' listening on \\'127.0.0.1:8000\\'\\n(pid=32847) INFO: Started server process [32847]\\nTraceback (most recent call last):\\nFile "detached.py", line 5, in <module>\\nclient = serve.start(detached=True)\\nFile "/Users/simonmo/Desktop/ray/ray/python/ray/serve/api.py", line 414, in start\\nraise RayServeException("Called serve.start(detached=True) but a "\\nray.serve.exceptions.RayServeException: Called serve.start(detached=True) but a detached instance is already running. Please use serve.connect() to connect to the running instance instead.'}]
|
File descriptor limit 256 is too low for production servers and may result in connection errors. At least 8192 is recommended. --- Fix with 'ulimit -n 8192'
2020-11-20 10:38:27,065 INFO services.py:1173 -- View the Ray dashboard at http://127.0.0.1:8265
(pid=raylet) 2020-11-20 10:38:29,628 INFO controller.py:313 -- Starting router with name 'SERVE_CONTROLLER_ACTOR:SERVE_PROXY_ACTOR-node:192.168.31.141-0' on node 'node:192.168.31.141-0' listening on '127.0.0.1:8000'
(pid=32847) INFO: Started server process [32847]
Traceback (most recent call last):
File "detached.py", line 5, in <module>
client = serve.start(detached=True)
File "/Users/simonmo/Desktop/ray/ray/python/ray/serve/api.py", line 414, in start
raise RayServeException("Called serve.start(detached=True) but a "
ray.serve.exceptions.RayServeException: Called serve.start(detached=True) but a detached instance is already running. Please use serve.connect() to connect to the running instance instead.
|
ray.serve.exceptions.RayServeException
|
def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        config: TrainerConfigDict,
        loss_fn: Callable[[
            Policy, ModelV2, Type[TFActionDistribution], SampleBatch
        ], TensorType],
        *,
        stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[
            str, TensorType]]] = None,
        grad_stats_fn: Optional[Callable[[
            Policy, SampleBatch, ModelGradients
        ], Dict[str, TensorType]]] = None,
        before_loss_init: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], None]] = None,
        make_model: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], ModelV2]] = None,
        action_sampler_fn: Optional[Callable[[
            TensorType, List[TensorType]
        ], Tuple[TensorType, TensorType]]] = None,
        action_distribution_fn: Optional[Callable[[
            Policy, ModelV2, TensorType, TensorType, TensorType
        ], Tuple[TensorType, type, List[TensorType]]]] = None,
        existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
        existing_model: Optional[ModelV2] = None,
        get_batch_divisibility_req: Optional[Callable[[Policy],
                                                      int]] = None,
        obs_include_prev_action_reward: bool = True):
    """Initialize a dynamic TF policy.
    Args:
        observation_space (gym.spaces.Space): Observation space of the
            policy.
        action_space (gym.spaces.Space): Action space of the policy.
        config (TrainerConfigDict): Policy-specific configuration data.
        loss_fn (Callable[[Policy, ModelV2, Type[TFActionDistribution],
            SampleBatch], TensorType]): Function that returns a loss tensor
            for the policy graph.
        stats_fn (Optional[Callable[[Policy, SampleBatch],
            Dict[str, TensorType]]]): Optional function that returns a dict
            of TF fetches given the policy and batch input tensors.
        grad_stats_fn (Optional[Callable[[Policy, SampleBatch,
            ModelGradients], Dict[str, TensorType]]]):
            Optional function that returns a dict of TF fetches given the
            policy, sample batch, and loss gradient tensors.
        before_loss_init (Optional[Callable[
            [Policy, gym.spaces.Space, gym.spaces.Space,
            TrainerConfigDict], None]]): Optional function to run prior to
            loss init that takes the same arguments as __init__.
        make_model (Optional[Callable[[Policy, gym.spaces.Space,
            gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional
            function that returns a ModelV2 object given
            policy, obs_space, action_space, and policy config.
            All policy variables should be created in this function. If not
            specified, a default model will be created.
        action_sampler_fn (Optional[Callable[[Policy, ModelV2, Dict[
            str, TensorType], TensorType, TensorType], Tuple[TensorType,
            TensorType]]]): A callable returning a sampled action and its
            log-likelihood given Policy, ModelV2, input_dict, explore,
            timestep, and is_training.
        action_distribution_fn (Optional[Callable[[Policy, ModelV2,
            Dict[str, TensorType], TensorType, TensorType],
            Tuple[TensorType, type, List[TensorType]]]]): A callable
            returning distribution inputs (parameters), a dist-class to
            generate an action distribution object from, and
            internal-state outputs (or an empty list if not applicable).
            Note: No Exploration hooks have to be called from within
            `action_distribution_fn`. It's should only perform a simple
            forward pass through some model.
            If None, pass inputs through `self.model()` to get distribution
            inputs.
            The callable takes as inputs: Policy, ModelV2, input_dict,
            explore, timestep, is_training.
        existing_inputs (Optional[Dict[str, tf1.placeholder]]): When
            copying a policy, this specifies an existing dict of
            placeholders to use instead of defining new ones.
        existing_model (Optional[ModelV2]): When copying a policy, this
            specifies an existing model to clone and share weights with.
        get_batch_divisibility_req (Optional[Callable[[Policy], int]]):
            Optional callable that returns the divisibility requirement for
            sample batches. If None, will assume a value of 1.
        obs_include_prev_action_reward (bool): Whether to include the
            previous action and reward in the model input (default: True).
    """
    self.observation_space = obs_space
    self.action_space = action_space
    self.config = config
    # Static-graph (tf1) policy: all ops below are built as placeholders
    # and graph nodes, not executed eagerly.
    self.framework = "tf"
    self._loss_fn = loss_fn
    self._stats_fn = stats_fn
    self._grad_stats_fn = grad_stats_fn
    self._obs_include_prev_action_reward = obs_include_prev_action_reward
    # dist_class/dist_inputs stay None when a fully custom
    # action_sampler_fn is used (no distribution is constructed then).
    dist_class = dist_inputs = None
    if action_sampler_fn or action_distribution_fn:
        if not make_model:
            raise ValueError(
                "`make_model` is required if `action_sampler_fn` OR "
                "`action_distribution_fn` is given")
    else:
        dist_class, logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"])
    # Setup self.model.
    if existing_model:
        self.model = existing_model
    elif make_model:
        self.model = make_model(self, obs_space, action_space, config)
    else:
        self.model = ModelCatalog.get_model_v2(
            obs_space=obs_space,
            action_space=action_space,
            num_outputs=logit_dim,
            model_config=self.config["model"],
            framework="tf")
    # Auto-update model's inference view requirements, if recurrent.
    self._update_model_inference_view_requirements_from_init_state()
    if existing_inputs:
        # Copy-construction path: reuse the donor policy's RNN state-in
        # placeholders instead of creating fresh ones.
        self._state_inputs = [
            v for k, v in existing_inputs.items()
            if k.startswith("state_in_")
        ]
        if self._state_inputs:
            self._seq_lens = existing_inputs["seq_lens"]
    else:
        if self.config["_use_trajectory_view_api"]:
            # Derive state-in placeholders from the model's declared view
            # requirements (keys named "state_in_*").
            self._state_inputs = [
                tf1.placeholder(
                    shape=(None, ) + vr.space.shape, dtype=vr.space.dtype)
                for k, vr in
                self.model.inference_view_requirements.items()
                if k[:9] == "state_in_"
            ]
        else:
            self._state_inputs = [
                tf1.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)
                for s in self.model.get_initial_state()
            ]
    # Use default settings.
    # Add NEXT_OBS, STATE_IN_0.., and others.
    self.view_requirements = self._get_default_view_requirements()
    # Combine view_requirements for Model and Policy.
    self.view_requirements.update(self.model.inference_view_requirements)
    # Setup standard placeholders.
    if existing_inputs is not None:
        timestep = existing_inputs["timestep"]
        explore = existing_inputs["is_exploring"]
        self._input_dict, self._dummy_batch = \\
            self._get_input_dict_and_dummy_batch(
                self.view_requirements, existing_inputs)
    else:
        action_ph = ModelCatalog.get_action_placeholder(action_space)
        prev_action_ph = ModelCatalog.get_action_placeholder(
            action_space, "prev_action")
        if self.config["_use_trajectory_view_api"]:
            self._input_dict, self._dummy_batch = \\
                self._get_input_dict_and_dummy_batch(
                    self.view_requirements,
                    {SampleBatch.ACTIONS: action_ph,
                     SampleBatch.PREV_ACTIONS: prev_action_ph})
        else:
            self._input_dict = {
                SampleBatch.CUR_OBS: tf1.placeholder(
                    tf.float32,
                    shape=[None] + list(obs_space.shape),
                    name="observation")
            }
            self._input_dict[SampleBatch.ACTIONS] = action_ph
            if self._obs_include_prev_action_reward:
                self._input_dict.update({
                    SampleBatch.PREV_ACTIONS: prev_action_ph,
                    SampleBatch.PREV_REWARDS: tf1.placeholder(
                        tf.float32, [None], name="prev_reward"),
                })
        # Placeholder for (sampling steps) timestep (int).
        timestep = tf1.placeholder_with_default(
            tf.zeros((), dtype=tf.int64), (), name="timestep")
        # Placeholder for `is_exploring` flag.
        explore = tf1.placeholder_with_default(
            True, (), name="is_exploring")
    # Placeholder for RNN time-chunk valid lengths.
    # NOTE(review): this overwrites any `self._seq_lens` set in the
    # existing_inputs branch above — presumably intentional; confirm.
    self._seq_lens = tf1.placeholder(
        dtype=tf.int32, shape=[None], name="seq_lens")
    # Placeholder for `is_training` flag.
    self._input_dict["is_training"] = self._get_is_training_placeholder()
    # Create the Exploration object to use for this Policy.
    self.exploration = self._create_exploration()
    # Fully customized action generation (e.g., custom policy).
    if action_sampler_fn:
        sampled_action, sampled_action_logp = action_sampler_fn(
            self,
            self.model,
            obs_batch=self._input_dict[SampleBatch.CUR_OBS],
            state_batches=self._state_inputs,
            seq_lens=self._seq_lens,
            prev_action_batch=self._input_dict.get(
                SampleBatch.PREV_ACTIONS),
            prev_reward_batch=self._input_dict.get(
                SampleBatch.PREV_REWARDS),
            explore=explore,
            is_training=self._input_dict["is_training"])
    else:
        # Distribution generation is customized, e.g., DQN, DDPG.
        if action_distribution_fn:
            dist_inputs, dist_class, self._state_out = \\
                action_distribution_fn(
                    self, self.model,
                    obs_batch=self._input_dict[SampleBatch.CUR_OBS],
                    state_batches=self._state_inputs,
                    seq_lens=self._seq_lens,
                    prev_action_batch=self._input_dict.get(
                        SampleBatch.PREV_ACTIONS),
                    prev_reward_batch=self._input_dict.get(
                        SampleBatch.PREV_REWARDS),
                    explore=explore,
                    is_training=self._input_dict["is_training"])
        # Default distribution generation behavior:
        # Pass through model. E.g., PG, PPO.
        else:
            dist_inputs, self._state_out = self.model(
                self._input_dict, self._state_inputs, self._seq_lens)
        action_dist = dist_class(dist_inputs, self.model)
        # Using exploration to get final action (e.g. via sampling).
        sampled_action, sampled_action_logp = \\
            self.exploration.get_exploration_action(
                action_distribution=action_dist,
                timestep=timestep,
                explore=explore)
    # Phase 1 init.
    # Reuse an already-active default session if one exists.
    sess = tf1.get_default_session() or tf1.Session()
    batch_divisibility_req = get_batch_divisibility_req(self) if \\
        callable(get_batch_divisibility_req) else \\
        (get_batch_divisibility_req or 1)
    super().__init__(
        observation_space=obs_space,
        action_space=action_space,
        config=config,
        sess=sess,
        obs_input=self._input_dict[SampleBatch.OBS],
        action_input=self._input_dict[SampleBatch.ACTIONS],
        sampled_action=sampled_action,
        sampled_action_logp=sampled_action_logp,
        dist_inputs=dist_inputs,
        dist_class=dist_class,
        loss=None,  # dynamically initialized on run
        loss_inputs=[],
        model=self.model,
        state_inputs=self._state_inputs,
        state_outputs=self._state_out,
        prev_action_input=self._input_dict.get(SampleBatch.PREV_ACTIONS),
        prev_reward_input=self._input_dict.get(SampleBatch.PREV_REWARDS),
        seq_lens=self._seq_lens,
        max_seq_len=config["model"]["max_seq_len"],
        batch_divisibility_req=batch_divisibility_req,
        explore=explore,
        timestep=timestep)
    # Phase 2 init.
    if before_loss_init is not None:
        before_loss_init(self, obs_space, action_space, config)
    # Loss initialization and model/postprocessing test calls.
    if not existing_inputs:
        self._initialize_loss_from_dummy_batch(
            auto_remove_unneeded_view_reqs=True)
|
def __init__(
        self,
        obs_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        config: TrainerConfigDict,
        loss_fn: Callable[[
            Policy, ModelV2, Type[TFActionDistribution], SampleBatch
        ], TensorType],
        *,
        stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[
            str, TensorType]]] = None,
        grad_stats_fn: Optional[Callable[[
            Policy, SampleBatch, ModelGradients
        ], Dict[str, TensorType]]] = None,
        before_loss_init: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], None]] = None,
        make_model: Optional[Callable[[
            Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
        ], ModelV2]] = None,
        action_sampler_fn: Optional[Callable[[
            TensorType, List[TensorType]
        ], Tuple[TensorType, TensorType]]] = None,
        action_distribution_fn: Optional[Callable[[
            Policy, ModelV2, TensorType, TensorType, TensorType
        ], Tuple[TensorType, type, List[TensorType]]]] = None,
        existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
        existing_model: Optional[ModelV2] = None,
        get_batch_divisibility_req: Optional[Callable[[Policy],
                                                      int]] = None,
        obs_include_prev_action_reward: bool = True):
    """Initialize a dynamic TF policy.
    Args:
        observation_space (gym.spaces.Space): Observation space of the
            policy.
        action_space (gym.spaces.Space): Action space of the policy.
        config (TrainerConfigDict): Policy-specific configuration data.
        loss_fn (Callable[[Policy, ModelV2, Type[TFActionDistribution],
            SampleBatch], TensorType]): Function that returns a loss tensor
            for the policy graph.
        stats_fn (Optional[Callable[[Policy, SampleBatch],
            Dict[str, TensorType]]]): Optional function that returns a dict
            of TF fetches given the policy and batch input tensors.
        grad_stats_fn (Optional[Callable[[Policy, SampleBatch,
            ModelGradients], Dict[str, TensorType]]]):
            Optional function that returns a dict of TF fetches given the
            policy, sample batch, and loss gradient tensors.
        before_loss_init (Optional[Callable[
            [Policy, gym.spaces.Space, gym.spaces.Space,
            TrainerConfigDict], None]]): Optional function to run prior to
            loss init that takes the same arguments as __init__.
        make_model (Optional[Callable[[Policy, gym.spaces.Space,
            gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional
            function that returns a ModelV2 object given
            policy, obs_space, action_space, and policy config.
            All policy variables should be created in this function. If not
            specified, a default model will be created.
        action_sampler_fn (Optional[Callable[[Policy, ModelV2, Dict[
            str, TensorType], TensorType, TensorType], Tuple[TensorType,
            TensorType]]]): A callable returning a sampled action and its
            log-likelihood given Policy, ModelV2, input_dict, explore,
            timestep, and is_training.
        action_distribution_fn (Optional[Callable[[Policy, ModelV2,
            Dict[str, TensorType], TensorType, TensorType],
            Tuple[TensorType, type, List[TensorType]]]]): A callable
            returning distribution inputs (parameters), a dist-class to
            generate an action distribution object from, and
            internal-state outputs (or an empty list if not applicable).
            Note: No Exploration hooks have to be called from within
            `action_distribution_fn`. It's should only perform a simple
            forward pass through some model.
            If None, pass inputs through `self.model()` to get distribution
            inputs.
            The callable takes as inputs: Policy, ModelV2, input_dict,
            explore, timestep, is_training.
        existing_inputs (Optional[Dict[str, tf1.placeholder]]): When
            copying a policy, this specifies an existing dict of
            placeholders to use instead of defining new ones.
        existing_model (Optional[ModelV2]): When copying a policy, this
            specifies an existing model to clone and share weights with.
        get_batch_divisibility_req (Optional[Callable[[Policy], int]]):
            Optional callable that returns the divisibility requirement for
            sample batches. If None, will assume a value of 1.
        obs_include_prev_action_reward (bool): Whether to include the
            previous action and reward in the model input (default: True).
    """
    self.observation_space = obs_space
    self.action_space = action_space
    self.config = config
    self.framework = "tf"
    self._loss_fn = loss_fn
    self._stats_fn = stats_fn
    self._grad_stats_fn = grad_stats_fn
    self._obs_include_prev_action_reward = obs_include_prev_action_reward
    # Either custom action-generation callables are used (in which case
    # `make_model` must supply the model) or a default dist-class/model
    # pair is derived from the action space via the ModelCatalog.
    dist_class = dist_inputs = None
    if action_sampler_fn or action_distribution_fn:
        if not make_model:
            raise ValueError(
                "`make_model` is required if `action_sampler_fn` OR "
                "`action_distribution_fn` is given")
    else:
        dist_class, logit_dim = ModelCatalog.get_action_dist(
            action_space, self.config["model"])
    # Setup self.model: reuse an existing one (policy copy), build via
    # the user-provided factory, or fall back to the catalog default.
    if existing_model:
        self.model = existing_model
    elif make_model:
        self.model = make_model(self, obs_space, action_space, config)
    else:
        self.model = ModelCatalog.get_model_v2(
            obs_space=obs_space,
            action_space=action_space,
            num_outputs=logit_dim,
            model_config=self.config["model"],
            framework="tf")
    # Auto-update model's inference view requirements, if recurrent.
    self._update_model_inference_view_requirements_from_init_state()
    # RNN state inputs: take them from the existing placeholder dict when
    # copying a policy; otherwise create fresh placeholders, either from
    # the model's view requirements (trajectory view API) or from the
    # model's initial-state tensors.
    if existing_inputs:
        self._state_inputs = [
            v for k, v in existing_inputs.items()
            if k.startswith("state_in_")
        ]
        if self._state_inputs:
            self._seq_lens = existing_inputs["seq_lens"]
    else:
        if self.config["_use_trajectory_view_api"]:
            self._state_inputs = [
                tf1.placeholder(
                    shape=(None, ) + vr.space.shape, dtype=vr.space.dtype)
                for k, vr in
                self.model.inference_view_requirements.items()
                if k[:9] == "state_in_"
            ]
        else:
            self._state_inputs = [
                tf1.placeholder(shape=(None, ) + s.shape, dtype=s.dtype)
                for s in self.model.get_initial_state()
            ]
    # Use default settings.
    # Add NEXT_OBS, STATE_IN_0.., and others.
    self.view_requirements = self._get_default_view_requirements()
    # Combine view_requirements for Model and Policy.
    self.view_requirements.update(self.model.inference_view_requirements)
    # Setup standard placeholders.
    if existing_inputs is not None:
        # Policy copy: reuse the already-created exploration tensors and
        # derive the input dict from the provided placeholders.
        timestep = existing_inputs["timestep"]
        explore = existing_inputs["is_exploring"]
        self._input_dict, self._dummy_batch = \\
            self._get_input_dict_and_dummy_batch(
                self.view_requirements, existing_inputs)
    else:
        action_ph = ModelCatalog.get_action_placeholder(action_space)
        prev_action_ph = ModelCatalog.get_action_placeholder(
            action_space, "prev_action")
        if self.config["_use_trajectory_view_api"]:
            self._input_dict, self._dummy_batch = \\
                self._get_input_dict_and_dummy_batch(
                    self.view_requirements,
                    {SampleBatch.ACTIONS: action_ph,
                     SampleBatch.PREV_ACTIONS: prev_action_ph})
        else:
            # Legacy (non-trajectory-view) placeholder construction.
            self._input_dict = {
                SampleBatch.CUR_OBS: tf1.placeholder(
                    tf.float32,
                    shape=[None] + list(obs_space.shape),
                    name="observation")
            }
            self._input_dict[SampleBatch.ACTIONS] = action_ph
            if self._obs_include_prev_action_reward:
                self._input_dict.update({
                    SampleBatch.PREV_ACTIONS: prev_action_ph,
                    SampleBatch.PREV_REWARDS: tf1.placeholder(
                        tf.float32, [None], name="prev_reward"),
                })
        # Placeholder for (sampling steps) timestep (int).
        # NOTE(review): this placeholder feeds exploration conditionals
        # downstream; a SavedModel signature that does not include it as
        # an input can fail to load in TF2 (UnliftableError on the
        # "timestep" placeholder) -- confirm against export code.
        timestep = tf1.placeholder(tf.int64, (), name="timestep")
        # Placeholder for `is_exploring` flag.
        explore = tf1.placeholder_with_default(
            True, (), name="is_exploring")
    # Placeholder for RNN time-chunk valid lengths.
    self._seq_lens = tf1.placeholder(
        dtype=tf.int32, shape=[None], name="seq_lens")
    # Placeholder for `is_training` flag.
    self._input_dict["is_training"] = self._get_is_training_placeholder()
    # Create the Exploration object to use for this Policy.
    self.exploration = self._create_exploration()
    # Fully customized action generation (e.g., custom policy).
    if action_sampler_fn:
        sampled_action, sampled_action_logp = action_sampler_fn(
            self,
            self.model,
            obs_batch=self._input_dict[SampleBatch.CUR_OBS],
            state_batches=self._state_inputs,
            seq_lens=self._seq_lens,
            prev_action_batch=self._input_dict.get(
                SampleBatch.PREV_ACTIONS),
            prev_reward_batch=self._input_dict.get(
                SampleBatch.PREV_REWARDS),
            explore=explore,
            is_training=self._input_dict["is_training"])
    else:
        # Distribution generation is customized, e.g., DQN, DDPG.
        if action_distribution_fn:
            dist_inputs, dist_class, self._state_out = \\
                action_distribution_fn(
                    self, self.model,
                    obs_batch=self._input_dict[SampleBatch.CUR_OBS],
                    state_batches=self._state_inputs,
                    seq_lens=self._seq_lens,
                    prev_action_batch=self._input_dict.get(
                        SampleBatch.PREV_ACTIONS),
                    prev_reward_batch=self._input_dict.get(
                        SampleBatch.PREV_REWARDS),
                    explore=explore,
                    is_training=self._input_dict["is_training"])
        # Default distribution generation behavior:
        # Pass through model. E.g., PG, PPO.
        else:
            dist_inputs, self._state_out = self.model(
                self._input_dict, self._state_inputs, self._seq_lens)
        action_dist = dist_class(dist_inputs, self.model)
        # Using exploration to get final action (e.g. via sampling).
        sampled_action, sampled_action_logp = \\
            self.exploration.get_exploration_action(
                action_distribution=action_dist,
                timestep=timestep,
                explore=explore)
    # Phase 1 init.
    sess = tf1.get_default_session() or tf1.Session()
    # `get_batch_divisibility_req` may be a callable, a plain int, or
    # None (-> 1).
    batch_divisibility_req = get_batch_divisibility_req(self) if \\
        callable(get_batch_divisibility_req) else \\
        (get_batch_divisibility_req or 1)
    super().__init__(
        observation_space=obs_space,
        action_space=action_space,
        config=config,
        sess=sess,
        obs_input=self._input_dict[SampleBatch.OBS],
        action_input=self._input_dict[SampleBatch.ACTIONS],
        sampled_action=sampled_action,
        sampled_action_logp=sampled_action_logp,
        dist_inputs=dist_inputs,
        dist_class=dist_class,
        loss=None,  # dynamically initialized on run
        loss_inputs=[],
        model=self.model,
        state_inputs=self._state_inputs,
        state_outputs=self._state_out,
        prev_action_input=self._input_dict.get(SampleBatch.PREV_ACTIONS),
        prev_reward_input=self._input_dict.get(SampleBatch.PREV_REWARDS),
        seq_lens=self._seq_lens,
        max_seq_len=config["model"]["max_seq_len"],
        batch_divisibility_req=batch_divisibility_req,
        explore=explore,
        timestep=timestep)
    # Phase 2 init.
    if before_loss_init is not None:
        before_loss_init(self, obs_space, action_space, config)
    # Loss initialization and model/postprocessing test calls.
    # Skipped when copying a policy (existing_inputs given), since the
    # original policy already ran it.
    if not existing_inputs:
        self._initialize_loss_from_dummy_batch(
            auto_remove_unneeded_view_reqs=True)
|
[{'piece_type': 'other', 'piece_content': 'from gym.spaces import Discrete\\n\\nimport ray\\nfrom ray.rllib.examples.env.rock_paper_scissors import RockPaperScissors\\nfrom ray.rllib.agents import ppo\\n\\n\\nselect_policy = lambda agent_id: "policy_01" if agent_id == "player1" else "policy_02"\\n\\nconfig = {\\n"multiagent": {\\n"policies": {\\n"policy_01": (None, Discrete(3), Discrete(3), {}),\\n"policy_02": (None, Discrete(3), Discrete(3), {}),\\n},\\n"policy_mapping_fn": select_policy,\\n},\\n}\\n\\nray.init()\\ntrainer = ppo.PPOTrainer(env=RockPaperScissors, config=config)\\ntrainer.train() # Train one step\\ntrainer.export_policy_model("exported_model", "policy_01")'}, {'piece_type': 'other', 'piece_content': 'import tensorflow as tf\\ntf.saved_model.load("exported_model")'}, {'piece_type': 'error message', 'piece_content': 'WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.\\nInstructions for updating:\\nThis function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nTraceback (most recent call last):\\nFile "minimal.py", line 27, in <module>\\ntf.saved_model.load("exported_model")\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load\\nreturn load_internal(export_dir, tags, options)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal\\nroot = load_v1_in_v2.load(export_dir, tags)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load\\nreturn loader.load(tags=tags)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load\\nsignature_functions = self._extract_signatures(wrapped, meta_graph_def)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures\\nsignature_fn = wrapped.prune(feeds=feeds, fetches=fetches)\\nFile 
"/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune\\nbase_graph=self._func_graph)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph\\nadd_sources=add_sources))\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph\\n% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))\\ntensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature\\'s outputs use. An output for signature \\'serving_default\\' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).\\n\\nUnable to lift tensor <tf.Tensor \\'policy_01/cond_2/Merge:0\\' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation \\'policy_01/timestep\\' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)'}]
|
WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
Traceback (most recent call last):
File "minimal.py", line 27, in <module>
tf.saved_model.load("exported_model")
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load
return load_internal(export_dir, tags, options)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal
root = load_v1_in_v2.load(export_dir, tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load
return loader.load(tags=tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune
base_graph=self._func_graph)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph
add_sources=add_sources))
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
tensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature's outputs use. An output for signature 'serving_default' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).
Unable to lift tensor <tf.Tensor 'policy_01/cond_2/Merge:0' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation 'policy_01/timestep' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)
|
tensorflow.python.ops.op_selector.UnliftableError
|
def __init__(self,
             observation_space: gym.spaces.Space,
             action_space: gym.spaces.Space,
             config: TrainerConfigDict,
             sess: "tf1.Session",
             obs_input: TensorType,
             sampled_action: TensorType,
             loss: TensorType,
             loss_inputs: List[Tuple[str, TensorType]],
             model: ModelV2 = None,
             sampled_action_logp: Optional[TensorType] = None,
             action_input: Optional[TensorType] = None,
             log_likelihood: Optional[TensorType] = None,
             dist_inputs: Optional[TensorType] = None,
             dist_class: Optional[type] = None,
             state_inputs: Optional[List[TensorType]] = None,
             state_outputs: Optional[List[TensorType]] = None,
             prev_action_input: Optional[TensorType] = None,
             prev_reward_input: Optional[TensorType] = None,
             seq_lens: Optional[TensorType] = None,
             max_seq_len: int = 20,
             batch_divisibility_req: int = 1,
             update_ops: List[TensorType] = None,
             explore: Optional[TensorType] = None,
             timestep: Optional[TensorType] = None):
    """Initializes a Policy object.
    Args:
        observation_space (gym.spaces.Space): Observation space of the env.
        action_space (gym.spaces.Space): Action space of the env.
        config (TrainerConfigDict): The Policy config dict.
        sess (tf1.Session): The TensorFlow session to use.
        obs_input (TensorType): Input placeholder for observations, of
            shape [BATCH_SIZE, obs...].
        sampled_action (TensorType): Tensor for sampling an action, of
            shape [BATCH_SIZE, action...]
        loss (TensorType): Scalar policy loss output tensor.
        loss_inputs (List[Tuple[str, TensorType]]): A (name, placeholder)
            tuple for each loss input argument. Each placeholder name must
            correspond to a SampleBatch column key returned by
            postprocess_trajectory(), and has shape [BATCH_SIZE, data...].
            These keys will be read from postprocessed sample batches and
            fed into the specified placeholders during loss computation.
        model (ModelV2): used to integrate custom losses and
            stats from user-defined RLlib models.
        sampled_action_logp (Optional[TensorType]): log probability of the
            sampled action.
        action_input (Optional[TensorType]): Input placeholder for actions
            for logp/log-likelihood calculations.
        log_likelihood (Optional[TensorType]): Tensor to calculate the
            log_likelihood (given action_input and obs_input).
        dist_class (Optional[type]): An optional ActionDistribution class
            to use for generating a dist object from distribution inputs.
        dist_inputs (Optional[TensorType]): Tensor to calculate the
            distribution inputs/parameters.
        state_inputs (Optional[List[TensorType]]): List of RNN state input
            Tensors.
        state_outputs (Optional[List[TensorType]]): List of RNN state
            output Tensors.
        prev_action_input (Optional[TensorType]): placeholder for previous
            actions.
        prev_reward_input (Optional[TensorType]): placeholder for previous
            rewards.
        seq_lens (Optional[TensorType]): Placeholder for RNN sequence
            lengths, of shape [NUM_SEQUENCES].
            Note that NUM_SEQUENCES << BATCH_SIZE. See
            policy/rnn_sequencing.py for more information.
        max_seq_len (int): Max sequence length for LSTM training.
        batch_divisibility_req (int): pad all agent experiences batches to
            multiples of this value. This only has an effect if not using
            a LSTM model.
        update_ops (List[TensorType]): override the batchnorm update ops to
            run when applying gradients. Otherwise we run all update ops
            found in the current variable scope.
        explore (Optional[TensorType]): Placeholder for `explore` parameter
            into call to Exploration.get_exploration_action.
        timestep (Optional[TensorType]): Placeholder for the global
            sampling timestep.
    """
    self.framework = "tf"
    super().__init__(observation_space, action_space, config)
    # Disable env-info placeholder.
    if SampleBatch.INFOS in self.view_requirements:
        self.view_requirements[SampleBatch.INFOS].used_for_training = False
    assert model is None or isinstance(model, ModelV2), \\
        "Model classes for TFPolicy other than `ModelV2` not allowed! " \\
        "You passed in {}.".format(model)
    self.model = model
    # Auto-update model's inference view requirements, if recurrent.
    if self.model is not None:
        self._update_model_inference_view_requirements_from_init_state()
    self.exploration = self._create_exploration()
    self._sess = sess
    self._obs_input = obs_input
    self._prev_action_input = prev_action_input
    self._prev_reward_input = prev_reward_input
    self._sampled_action = sampled_action
    self._is_training = self._get_is_training_placeholder()
    # If no `explore` tensor was passed in, create a default placeholder
    # that evaluates to True when not fed.
    self._is_exploring = explore if explore is not None else \\
        tf1.placeholder_with_default(True, (), name="is_exploring")
    self._sampled_action_logp = sampled_action_logp
    # Action probability = exp(logp); only computable if a logp tensor
    # was provided.
    self._sampled_action_prob = (tf.math.exp(self._sampled_action_logp)
                                 if self._sampled_action_logp is not None
                                 else None)
    self._action_input = action_input  # For logp calculations.
    self._dist_inputs = dist_inputs
    self.dist_class = dist_class
    self._state_inputs = state_inputs or []
    self._state_outputs = state_outputs or []
    self._seq_lens = seq_lens
    self._max_seq_len = max_seq_len
    # Sanity checks on the RNN state tensor setup.
    if len(self._state_inputs) != len(self._state_outputs):
        raise ValueError(
            "Number of state input and output tensors must match, got: "
            "{} vs {}".format(self._state_inputs, self._state_outputs))
    if len(self.get_initial_state()) != len(self._state_inputs):
        raise ValueError(
            "Length of initial state must match number of state inputs, "
            "got: {} vs {}".format(self.get_initial_state(),
                                   self._state_inputs))
    if self._state_inputs and self._seq_lens is None:
        raise ValueError(
            "seq_lens tensor must be given if state inputs are defined")
    self._batch_divisibility_req = batch_divisibility_req
    self._update_ops = update_ops
    self._apply_op = None
    self._stats_fetches = {}
    # If no `timestep` tensor was passed in, create a default placeholder
    # that evaluates to 0 when not fed.
    self._timestep = timestep if timestep is not None else \\
        tf1.placeholder_with_default(
            tf.zeros((), dtype=tf.int64), (), name="timestep")
    self._optimizer = None
    self._grads_and_vars = None
    self._grads = None
    # Policy tf-variables (weights), whose values to get/set via
    # get_weights/set_weights.
    self._variables = None
    # Local optimizer's tf-variables (e.g. state vars for Adam).
    # Will be stored alongside `self._variables` when checkpointing.
    self._optimizer_variables = None
    # The loss tf-op.
    self._loss = None
    # A batch dict passed into loss function as input.
    self._loss_input_dict = {}
    if loss is not None:
        self._initialize_loss(loss, loss_inputs)
    # The log-likelihood calculator op.
    # If not given explicitly, derive it from dist_inputs + dist_class
    # by evaluating logp of `action_input` under the distribution.
    self._log_likelihood = log_likelihood
    if self._log_likelihood is None and self._dist_inputs is not None and \\
            self.dist_class is not None:
        self._log_likelihood = self.dist_class(
            self._dist_inputs, self.model).logp(self._action_input)
|
def __init__(self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
sess: "tf1.Session",
obs_input: TensorType,
sampled_action: TensorType,
loss: TensorType,
loss_inputs: List[Tuple[str, TensorType]],
model: ModelV2 = None,
sampled_action_logp: Optional[TensorType] = None,
action_input: Optional[TensorType] = None,
log_likelihood: Optional[TensorType] = None,
dist_inputs: Optional[TensorType] = None,
dist_class: Optional[type] = None,
state_inputs: Optional[List[TensorType]] = None,
state_outputs: Optional[List[TensorType]] = None,
prev_action_input: Optional[TensorType] = None,
prev_reward_input: Optional[TensorType] = None,
seq_lens: Optional[TensorType] = None,
max_seq_len: int = 20,
batch_divisibility_req: int = 1,
update_ops: List[TensorType] = None,
explore: Optional[TensorType] = None,
timestep: Optional[TensorType] = None):
"""Initializes a Policy object.
Args:
observation_space (gym.spaces.Space): Observation space of the env.
action_space (gym.spaces.Space): Action space of the env.
config (TrainerConfigDict): The Policy config dict.
sess (tf1.Session): The TensorFlow session to use.
obs_input (TensorType): Input placeholder for observations, of
shape [BATCH_SIZE, obs...].
sampled_action (TensorType): Tensor for sampling an action, of
shape [BATCH_SIZE, action...]
loss (TensorType): Scalar policy loss output tensor.
loss_inputs (List[Tuple[str, TensorType]]): A (name, placeholder)
tuple for each loss input argument. Each placeholder name must
correspond to a SampleBatch column key returned by
postprocess_trajectory(), and has shape [BATCH_SIZE, data...].
These keys will be read from postprocessed sample batches and
fed into the specified placeholders during loss computation.
model (ModelV2): used to integrate custom losses and
stats from user-defined RLlib models.
sampled_action_logp (Optional[TensorType]): log probability of the
sampled action.
action_input (Optional[TensorType]): Input placeholder for actions
for logp/log-likelihood calculations.
log_likelihood (Optional[TensorType]): Tensor to calculate the
log_likelihood (given action_input and obs_input).
dist_class (Optional[type]): An optional ActionDistribution class
to use for generating a dist object from distribution inputs.
dist_inputs (Optional[TensorType]): Tensor to calculate the
distribution inputs/parameters.
state_inputs (Optional[List[TensorType]]): List of RNN state input
Tensors.
state_outputs (Optional[List[TensorType]]): List of RNN state
output Tensors.
prev_action_input (Optional[TensorType]): placeholder for previous
actions.
prev_reward_input (Optional[TensorType]): placeholder for previous
rewards.
seq_lens (Optional[TensorType]): Placeholder for RNN sequence
lengths, of shape [NUM_SEQUENCES].
Note that NUM_SEQUENCES << BATCH_SIZE. See
policy/rnn_sequencing.py for more information.
max_seq_len (int): Max sequence length for LSTM training.
batch_divisibility_req (int): pad all agent experiences batches to
multiples of this value. This only has an effect if not using
a LSTM model.
update_ops (List[TensorType]): override the batchnorm update ops to
run when applying gradients. Otherwise we run all update ops
found in the current variable scope.
explore (Optional[TensorType]): Placeholder for `explore` parameter
into call to Exploration.get_exploration_action.
timestep (Optional[TensorType]): Placeholder for the global
sampling timestep.
"""
self.framework = "tf"
super().__init__(observation_space, action_space, config)
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_training = False
assert model is None or isinstance(model, ModelV2), \\
"Model classes for TFPolicy other than `ModelV2` not allowed! " \\
"You passed in {}.".format(model)
self.model = model
# Auto-update model's inference view requirements, if recurrent.
if self.model is not None:
self._update_model_inference_view_requirements_from_init_state()
self.exploration = self._create_exploration()
self._sess = sess
self._obs_input = obs_input
self._prev_action_input = prev_action_input
self._prev_reward_input = prev_reward_input
self._sampled_action = sampled_action
self._is_training = self._get_is_training_placeholder()
self._is_exploring = explore if explore is not None else \\
tf1.placeholder_with_default(True, (), name="is_exploring")
self._sampled_action_logp = sampled_action_logp
self._sampled_action_prob = (tf.math.exp(self._sampled_action_logp)
if self._sampled_action_logp is not None
else None)
self._action_input = action_input # For logp calculations.
self._dist_inputs = dist_inputs
self.dist_class = dist_class
self._state_inputs = state_inputs or []
self._state_outputs = state_outputs or []
self._seq_lens = seq_lens
self._max_seq_len = max_seq_len
if len(self._state_inputs) != len(self._state_outputs):
raise ValueError(
"Number of state input and output tensors must match, got: "
"{} vs {}".format(self._state_inputs, self._state_outputs))
if len(self.get_initial_state()) != len(self._state_inputs):
raise ValueError(
"Length of initial state must match number of state inputs, "
"got: {} vs {}".format(self.get_initial_state(),
self._state_inputs))
if self._state_inputs and self._seq_lens is None:
raise ValueError(
"seq_lens tensor must be given if state inputs are defined")
self._batch_divisibility_req = batch_divisibility_req
self._update_ops = update_ops
self._apply_op = None
self._stats_fetches = {}
self._timestep = timestep if timestep is not None else \\
tf1.placeholder(tf.int64, (), name="timestep")
self._optimizer = None
self._grads_and_vars = None
self._grads = None
# Policy tf-variables (weights), whose values to get/set via
# get_weights/set_weights.
self._variables = None
# Local optimizer's tf-variables (e.g. state vars for Adam).
# Will be stored alongside `self._variables` when checkpointing.
self._optimizer_variables = None
# The loss tf-op.
self._loss = None
# A batch dict passed into loss function as input.
self._loss_input_dict = {}
if loss is not None:
self._initialize_loss(loss, loss_inputs)
# The log-likelihood calculator op.
self._log_likelihood = log_likelihood
if self._log_likelihood is None and self._dist_inputs is not None and \\
self.dist_class is not None:
self._log_likelihood = self.dist_class(
self._dist_inputs, self.model).logp(self._action_input)
|
[{'piece_type': 'other', 'piece_content': 'from gym.spaces import Discrete\\n\\nimport ray\\nfrom ray.rllib.examples.env.rock_paper_scissors import RockPaperScissors\\nfrom ray.rllib.agents import ppo\\n\\n\\nselect_policy = lambda agent_id: "policy_01" if agent_id == "player1" else "policy_02"\\n\\nconfig = {\\n"multiagent": {\\n"policies": {\\n"policy_01": (None, Discrete(3), Discrete(3), {}),\\n"policy_02": (None, Discrete(3), Discrete(3), {}),\\n},\\n"policy_mapping_fn": select_policy,\\n},\\n}\\n\\nray.init()\\ntrainer = ppo.PPOTrainer(env=RockPaperScissors, config=config)\\ntrainer.train() # Train one step\\ntrainer.export_policy_model("exported_model", "policy_01")'}, {'piece_type': 'other', 'piece_content': 'import tensorflow as tf\\ntf.saved_model.load("exported_model")'}, {'piece_type': 'error message', 'piece_content': 'WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.\\nInstructions for updating:\\nThis function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nTraceback (most recent call last):\\nFile "minimal.py", line 27, in <module>\\ntf.saved_model.load("exported_model")\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load\\nreturn load_internal(export_dir, tags, options)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal\\nroot = load_v1_in_v2.load(export_dir, tags)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load\\nreturn loader.load(tags=tags)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load\\nsignature_functions = self._extract_signatures(wrapped, meta_graph_def)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures\\nsignature_fn = wrapped.prune(feeds=feeds, fetches=fetches)\\nFile 
"/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune\\nbase_graph=self._func_graph)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph\\nadd_sources=add_sources))\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph\\n% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))\\ntensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature\\'s outputs use. An output for signature \\'serving_default\\' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).\\n\\nUnable to lift tensor <tf.Tensor \\'policy_01/cond_2/Merge:0\\' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation \\'policy_01/timestep\\' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)'}]
|
WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
Traceback (most recent call last):
File "minimal.py", line 27, in <module>
tf.saved_model.load("exported_model")
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load
return load_internal(export_dir, tags, options)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal
root = load_v1_in_v2.load(export_dir, tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load
return loader.load(tags=tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune
base_graph=self._func_graph)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph
add_sources=add_sources))
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
tensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature's outputs use. An output for signature 'serving_default' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).
Unable to lift tensor <tf.Tensor 'policy_01/cond_2/Merge:0' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation 'policy_01/timestep' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)
|
tensorflow.python.ops.op_selector.UnliftableError
|
def try_import_tf(error=False):
    """Try to import TensorFlow; return its v1-API, the module, and version.

    Args:
        error (bool): If True, raise ImportError when TensorFlow is not
            installed (instead of returning a Nones-tuple).

    Returns:
        Tuple:
            - tf1.x module (either from tf2.x.compat.v1 OR as tf1.x).
            - tf module (resulting from `import tensorflow`).
              Either tf1.x or 2.x.
            - The actually installed tf version as int: 1 or 2.

    Raises:
        ImportError: If error=True and tf is not installed.
    """
    # Tests can force-disable TF via this env var; make sure it gets reset
    # afterwards: del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow for test purposes")
        return None, None, None

    if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # If tf is already imported, reuse that module and skip the one-time
    # v1-compat setup below: switching off v2 behavior a second time breaks
    # the all-framework tests for eager mode.
    already_imported = "tensorflow" in sys.modules
    if already_imported:
        tf_module = sys.modules["tensorflow"]
    else:
        try:
            import tensorflow as tf_module
        except ImportError as e:
            if error:
                raise e
            return None, None, None

    # Prefer the v1 compatibility API (present on tf 2.x installs).
    try:
        tf1_module = tf_module.compat.v1
        if not already_imported:
            tf1_module.disable_v2_behavior()
            tf1_module.enable_resource_variables()
    except AttributeError:
        # Plain tf 1.x (no compat.v1 attribute) -> use the module as-is.
        tf1_module = tf_module

    if not hasattr(tf_module, "__version__"):
        version = 1  # sphinx doc gen
    else:
        version = 2 if "2." in tf_module.__version__[:2] else 1

    return tf1_module, tf_module, version
|
def try_import_tf(error=False):
    """Tries importing tf and returns the module (or None).

    Args:
        error (bool): Whether to raise an error if tf cannot be imported.

    Returns:
        Tuple:
            - tf1.x module (either from tf2.x.compat.v1 OR as tf1.x).
            - tf module (resulting from `import tensorflow`).
              Either tf1.x or 2.x.
            - The actually installed tf version as int: 1 or 2.

    Raises:
        ImportError: If error=True and tf is not installed.
    """
    # Make sure, these are reset after each test case
    # that uses them: del os.environ["RLLIB_TEST_NO_TF_IMPORT"]
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow for test purposes")
        return None, None, None

    if "TF_CPP_MIN_LOG_LEVEL" not in os.environ:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Try to reuse already imported tf module. This will avoid going through
    # the initial import steps below and thereby switching off v2_behavior
    # (switching off v2 behavior twice breaks all-framework tests for eager).
    was_imported = False
    if "tensorflow" in sys.modules:
        tf_module = sys.modules["tensorflow"]
        was_imported = True
    else:
        try:
            import tensorflow as tf_module
        except ImportError as e:
            if error:
                raise e
            return None, None, None

    # Try "reducing" tf to tf.compat.v1.
    try:
        tf1_module = tf_module.compat.v1
        if not was_imported:
            tf1_module.disable_v2_behavior()
            # Force resource (instead of reference) variables: reference
            # variables cannot be restored by `tf.saved_model.load` in 2.x,
            # so exported policy SavedModels would otherwise be unusable
            # ("Unable to create a python object for variable ... because
            # it is a reference variable").
            tf1_module.enable_resource_variables()
    # No compat.v1 -> return tf as is.
    except AttributeError:
        tf1_module = tf_module

    if not hasattr(tf_module, "__version__"):
        version = 1  # sphinx doc gen
    else:
        version = 2 if "2." in tf_module.__version__[:2] else 1

    return tf1_module, tf_module, version
|
[{'piece_type': 'other', 'piece_content': 'from gym.spaces import Discrete\\n\\nimport ray\\nfrom ray.rllib.examples.env.rock_paper_scissors import RockPaperScissors\\nfrom ray.rllib.agents import ppo\\n\\n\\nselect_policy = lambda agent_id: "policy_01" if agent_id == "player1" else "policy_02"\\n\\nconfig = {\\n"multiagent": {\\n"policies": {\\n"policy_01": (None, Discrete(3), Discrete(3), {}),\\n"policy_02": (None, Discrete(3), Discrete(3), {}),\\n},\\n"policy_mapping_fn": select_policy,\\n},\\n}\\n\\nray.init()\\ntrainer = ppo.PPOTrainer(env=RockPaperScissors, config=config)\\ntrainer.train() # Train one step\\ntrainer.export_policy_model("exported_model", "policy_01")'}, {'piece_type': 'other', 'piece_content': 'import tensorflow as tf\\ntf.saved_model.load("exported_model")'}, {'piece_type': 'error message', 'piece_content': 'WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.\\nInstructions for updating:\\nThis function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/timestep_1:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/kl_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/entropy_coeff:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. 
If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/lr:0\\' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nWARNING:tensorflow:Unable to create a python object for variable <tf.Variable \\'policy_01/global_step:0\\' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().\\nTraceback (most recent call last):\\nFile "minimal.py", line 27, in <module>\\ntf.saved_model.load("exported_model")\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load\\nreturn load_internal(export_dir, tags, options)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal\\nroot = load_v1_in_v2.load(export_dir, tags)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load\\nreturn loader.load(tags=tags)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load\\nsignature_functions = self._extract_signatures(wrapped, meta_graph_def)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures\\nsignature_fn = wrapped.prune(feeds=feeds, fetches=fetches)\\nFile 
"/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune\\nbase_graph=self._func_graph)\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph\\nadd_sources=add_sources))\\nFile "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph\\n% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))\\ntensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature\\'s outputs use. An output for signature \\'serving_default\\' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).\\n\\nUnable to lift tensor <tf.Tensor \\'policy_01/cond_2/Merge:0\\' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation \\'policy_01/timestep\\' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)'}]
|
WARNING:tensorflow:From /Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/ray/rllib/policy/tf_policy.py:653: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.
Instructions for updating:
This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Some variables could not be lifted out of a loaded function. Run the tf.initializers.tables_initializer() operation to restore these variables.
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/timestep_1:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/kl_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/entropy_coeff:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/lr:0' shape=() dtype=float32_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
WARNING:tensorflow:Unable to create a python object for variable <tf.Variable 'policy_01/global_step:0' shape=() dtype=int64_ref> because it is a reference variable. It may not be visible to training APIs. If this is a problem, consider rebuilding the SavedModel after running tf.compat.v1.enable_resource_variables().
Traceback (most recent call last):
File "minimal.py", line 27, in <module>
tf.saved_model.load("exported_model")
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 603, in load
return load_internal(export_dir, tags, options)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load.py", line 649, in load_internal
root = load_v1_in_v2.load(export_dir, tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 263, in load
return loader.load(tags=tags)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 246, in load
signature_functions = self._extract_signatures(wrapped, meta_graph_def)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/saved_model/load_v1_in_v2.py", line 158, in _extract_signatures
signature_fn = wrapped.prune(feeds=feeds, fetches=fetches)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/wrap_function.py", line 338, in prune
base_graph=self._func_graph)
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/eager/lift_to_graph.py", line 260, in lift_to_graph
add_sources=add_sources))
File "/Users/ivallesp/projects/rockpaperscisors/.venv/lib/python3.7/site-packages/tensorflow/python/ops/op_selector.py", line 413, in map_subgraph
% (repr(init_tensor), repr(op), _path_from(op, init_tensor, sources)))
tensorflow.python.ops.op_selector.UnliftableError: A SavedModel signature needs an input for each placeholder the signature's outputs use. An output for signature 'serving_default' depends on a placeholder which is not an input (i.e. the placeholder is not fed a value).
Unable to lift tensor <tf.Tensor 'policy_01/cond_2/Merge:0' shape=(?,) dtype=float32> because it depends transitively on placeholder <tf.Operation 'policy_01/timestep' type=Placeholder> via at least one path, e.g.: policy_01/cond_2/Merge (Merge) <- policy_01/cond_2/Switch_1 (Switch) <- policy_01/cond_2/pred_id (Identity) <- policy_01/LogicalAnd (LogicalAnd) <- policy_01/GreaterEqual (GreaterEqual) <- policy_01/timestep (Placeholder)
|
tensorflow.python.ops.op_selector.UnliftableError
|
def build_trainer(
        name: str,
        *,
        default_config: Optional[TrainerConfigDict] = None,
        validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,
        default_policy: Optional[Type[Policy]] = None,
        get_policy_class: Optional[Callable[[TrainerConfigDict], Optional[Type[
            Policy]]]] = None,
        validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,
        before_init: Optional[Callable[[Trainer], None]] = None,
        after_init: Optional[Callable[[Trainer], None]] = None,
        before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,
        mixins: Optional[List[type]] = None,
        execution_plan: Optional[Callable[[
            WorkerSet, TrainerConfigDict
        ], Iterable[ResultDict]]] = default_execution_plan) -> Type[Trainer]:
    """Helper function for defining a custom trainer.

    Functions will be run in this order to initialize the trainer:
        1. Config setup: validate_config, get_policy
        2. Worker setup: before_init, execution_plan
        3. Post setup: after_init

    Args:
        name (str): name of the trainer (e.g., "PPO")
        default_config (Optional[TrainerConfigDict]): The default config dict
            of the algorithm, otherwise uses the Trainer default config.
        validate_config (Optional[Callable[[TrainerConfigDict], None]]):
            Optional callable that takes the config to check for correctness.
            It may mutate the config as needed.
        default_policy (Optional[Type[Policy]]): The default Policy class to
            use if `get_policy_class` returns None.
        get_policy_class (Optional[Callable[
            TrainerConfigDict, Optional[Type[Policy]]]]): Optional callable
            that takes a config and returns the policy class or None. If None
            is returned, will use `default_policy` (which must be provided
            then).
        validate_env (Optional[Callable[[EnvType, EnvContext], None]]):
            Optional callable to validate the generated environment (only
            on worker=0).
        before_init (Optional[Callable[[Trainer], None]]): Optional callable to
            run before anything is constructed inside Trainer (Workers with
            Policies, execution plan, etc..). Takes the Trainer instance as
            argument.
        after_init (Optional[Callable[[Trainer], None]]): Optional callable to
            run at the end of trainer init (after all Workers and the exec.
            plan have been constructed). Takes the Trainer instance as
            argument.
        before_evaluate_fn (Optional[Callable[[Trainer], None]]): Callback to
            run before evaluation. This takes the trainer instance as argument.
        mixins (list): list of any class mixins for the returned trainer class.
            These mixins will be applied in order and will have higher
            precedence than the Trainer class.
        execution_plan (Optional[Callable[[WorkerSet, TrainerConfigDict],
            Iterable[ResultDict]]]): Optional callable that sets up the
            distributed execution workflow.

    Returns:
        Type[Trainer]: A Trainer sub-class configured by the specified args.
    """
    # Snapshot ALL arguments passed to build_trainer() so that
    # `with_updates()` below can rebuild an equivalent class with selective
    # overrides. NOTE: this must stay the very first statement in the
    # function body - any local variable defined before it would leak into
    # the captured kwargs.
    original_kwargs = locals().copy()
    base = add_mixins(Trainer, mixins)

    class trainer_cls(base):
        _name = name
        _default_config = default_config or COMMON_CONFIG
        _policy_class = default_policy

        def __init__(self, config=None, env=None, logger_creator=None):
            Trainer.__init__(self, config, env, logger_creator)

        def _init(self, config: TrainerConfigDict,
                  env_creator: Callable[[EnvConfigDict], EnvType]):
            # Validate config via custom validation function.
            if validate_config:
                validate_config(config)
            # No `get_policy_class` function.
            if get_policy_class is None:
                # Default_policy must be provided (unless in multi-agent mode,
                # where each policy can have its own default policy class.
                if not config["multiagent"]["policies"]:
                    assert default_policy is not None
                    self._policy_class = default_policy
            # Query the function for a class to use.
            else:
                self._policy_class = get_policy_class(config)
                # If None returned, use default policy (must be provided).
                if self._policy_class is None:
                    assert default_policy is not None
                    self._policy_class = default_policy

            if before_init:
                before_init(self)

            # Creating all workers (excluding evaluation workers).
            self.workers = self._make_workers(
                env_creator=env_creator,
                validate_env=validate_env,
                policy_class=self._policy_class,
                config=config,
                num_workers=self.config["num_workers"])
            self.execution_plan = execution_plan
            self.train_exec_impl = execution_plan(self.workers, config)

            if after_init:
                after_init(self)

        @override(Trainer)
        def step(self):
            # One training iteration == one pull from the execution plan
            # iterator.
            res = next(self.train_exec_impl)
            return res

        @override(Trainer)
        def _before_evaluate(self):
            if before_evaluate_fn:
                before_evaluate_fn(self)

        @override(Trainer)
        def __getstate__(self):
            # Extend the base Trainer state with the execution plan's
            # shared-metrics snapshot so it survives checkpointing.
            state = Trainer.__getstate__(self)
            state["train_exec_impl"] = (
                self.train_exec_impl.shared_metrics.get().save())
            return state

        @override(Trainer)
        def __setstate__(self, state):
            Trainer.__setstate__(self, state)
            self.train_exec_impl.shared_metrics.get().restore(
                state["train_exec_impl"])

        @staticmethod
        @override(Trainer)
        def with_updates(**overrides) -> Type[Trainer]:
            """Build a copy of this trainer class with the specified overrides.

            Keyword Args:
                overrides (dict): use this to override any of the arguments
                    originally passed to build_trainer() for this policy.

            Returns:
                Type[Trainer]: A the Trainer sub-class using `original_kwargs`
                    and `overrides`.

            Examples:
                >>> MyClass = SomeOtherClass.with_updates({"name": "Mine"})
                >>> issubclass(MyClass, SomeOtherClass)
                ... False
                >>> issubclass(MyClass, Trainer)
                ... True
            """
            return build_trainer(**dict(original_kwargs, **overrides))

    # Give the generated class a readable name for logs/repr.
    trainer_cls.__name__ = name
    trainer_cls.__qualname__ = name
    return trainer_cls
|
def build_trainer(
        name: str,
        *,
        default_config: Optional[TrainerConfigDict] = None,
        validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,
        default_policy: Optional[Type[Policy]] = None,
        get_policy_class: Optional[Callable[[TrainerConfigDict], Optional[Type[
            Policy]]]] = None,
        validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,
        before_init: Optional[Callable[[Trainer], None]] = None,
        after_init: Optional[Callable[[Trainer], None]] = None,
        before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,
        mixins: Optional[List[type]] = None,
        execution_plan: Optional[Callable[[
            WorkerSet, TrainerConfigDict
        ], Iterable[ResultDict]]] = default_execution_plan) -> Type[Trainer]:
    """Helper function for defining a custom trainer.

    Functions will be run in this order to initialize the trainer:
        1. Config setup: validate_config, get_policy
        2. Worker setup: before_init, execution_plan
        3. Post setup: after_init

    Args:
        name (str): name of the trainer (e.g., "PPO")
        default_config (Optional[TrainerConfigDict]): The default config dict
            of the algorithm, otherwise uses the Trainer default config.
        validate_config (Optional[Callable[[TrainerConfigDict], None]]):
            Optional callable that takes the config to check for correctness.
            It may mutate the config as needed.
        default_policy (Optional[Type[Policy]]): The default Policy class to
            use when `get_policy_class` is None or returns None (see the
            fallback logic in `_init` below).
        get_policy_class (Optional[Callable[
            TrainerConfigDict, Optional[Type[Policy]]]]): Optional callable
            that takes a config and returns the policy class or None. If None
            is returned, will use `default_policy` (which must be provided
            then).
        validate_env (Optional[Callable[[EnvType, EnvContext], None]]):
            Optional callable to validate the generated environment (only
            on worker=0).
        before_init (Optional[Callable[[Trainer], None]]): Optional callable to
            run before anything is constructed inside Trainer (Workers with
            Policies, execution plan, etc..). Takes the Trainer instance as
            argument.
        after_init (Optional[Callable[[Trainer], None]]): Optional callable to
            run at the end of trainer init (after all Workers and the exec.
            plan have been constructed). Takes the Trainer instance as
            argument.
        before_evaluate_fn (Optional[Callable[[Trainer], None]]): Callback to
            run before evaluation. This takes the trainer instance as argument.
        mixins (list): list of any class mixins for the returned trainer class.
            These mixins will be applied in order and will have higher
            precedence than the Trainer class.
        execution_plan (Optional[Callable[[WorkerSet, TrainerConfigDict],
            Iterable[ResultDict]]]): Optional callable that sets up the
            distributed execution workflow.

    Returns:
        Type[Trainer]: A Trainer sub-class configured by the specified args.
    """
    # Snapshot ALL arguments passed to build_trainer() so that
    # `with_updates()` below can rebuild an equivalent class with selective
    # overrides. NOTE: this must stay the very first statement in the
    # function body - any local variable defined before it would leak into
    # the captured kwargs.
    original_kwargs = locals().copy()
    base = add_mixins(Trainer, mixins)

    class trainer_cls(base):
        _name = name
        _default_config = default_config or COMMON_CONFIG
        _policy_class = default_policy

        def __init__(self, config=None, env=None, logger_creator=None):
            Trainer.__init__(self, config, env, logger_creator)

        def _init(self, config: TrainerConfigDict,
                  env_creator: Callable[[EnvConfigDict], EnvType]):
            # Validate config via custom validation function.
            if validate_config:
                validate_config(config)
            # No `get_policy_class` function.
            if get_policy_class is None:
                # Default_policy must be provided (unless in multi-agent mode,
                # where each policy can have its own default policy class.
                if not config["multiagent"]["policies"]:
                    assert default_policy is not None
                    self._policy_class = default_policy
            # Query the function for a class to use.
            else:
                self._policy_class = get_policy_class(config)
                # If None returned, use default policy (must be provided).
                if self._policy_class is None:
                    assert default_policy is not None
                    self._policy_class = default_policy

            if before_init:
                before_init(self)

            # Creating all workers (excluding evaluation workers).
            self.workers = self._make_workers(
                env_creator=env_creator,
                validate_env=validate_env,
                policy_class=self._policy_class,
                config=config,
                num_workers=self.config["num_workers"])
            self.execution_plan = execution_plan
            self.train_exec_impl = execution_plan(self.workers, config)

            if after_init:
                after_init(self)

        @override(Trainer)
        def step(self):
            # One training iteration == one pull from the execution plan
            # iterator.
            res = next(self.train_exec_impl)
            return res

        @override(Trainer)
        def _before_evaluate(self):
            if before_evaluate_fn:
                before_evaluate_fn(self)

        @override(Trainer)
        def __getstate__(self):
            # Extend the base Trainer state with the execution plan's
            # shared-metrics snapshot so it survives checkpointing.
            state = Trainer.__getstate__(self)
            state["train_exec_impl"] = (
                self.train_exec_impl.shared_metrics.get().save())
            return state

        @override(Trainer)
        def __setstate__(self, state):
            Trainer.__setstate__(self, state)
            self.train_exec_impl.shared_metrics.get().restore(
                state["train_exec_impl"])

        @staticmethod
        @override(Trainer)
        def with_updates(**overrides) -> Type[Trainer]:
            """Build a copy of this trainer class with the specified overrides.

            Keyword Args:
                overrides (dict): use this to override any of the arguments
                    originally passed to build_trainer() for this policy.

            Returns:
                Type[Trainer]: A the Trainer sub-class using `original_kwargs`
                    and `overrides`.

            Examples:
                >>> MyClass = SomeOtherClass.with_updates({"name": "Mine"})
                >>> issubclass(MyClass, SomeOtherClass)
                ... False
                >>> issubclass(MyClass, Trainer)
                ... True
            """
            return build_trainer(**dict(original_kwargs, **overrides))

    # Give the generated class a readable name for logs/repr.
    trainer_cls.__name__ = name
    trainer_cls.__qualname__ = name
    return trainer_cls
|
[{'piece_type': 'other', 'piece_content': 'from or_gym.utils import create_env\\nfrom gym import spaces\\nfrom ray.rllib.utils import try_import_tf\\nfrom ray.rllib.models.tf.fcnet import FullyConnectedNetwork\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray import tune\\nfrom ray.rllib import agents\\nimport ray\\nimport or_gym\\nimport numpy as np\\nenv = or_gym.make(\\'Knapsack-v0\\')\\n\\nprint("Max weight capacity:\\\\t{}kg".format(env.max_weight))\\nprint("Number of items:\\\\t{}".format(env.N))\\n\\nenv_config = {\\'N\\': 5,\\n\\'max_weight\\': 15,\\n\\'item_weights\\': np.array([1, 12, 2, 1, 4]),\\n\\'item_values\\': np.array([2, 4, 2, 1, 10]),\\n\\'mask\\': True}\\nenv = or_gym.make(\\'Knapsack-v0\\', env_config=env_config)\\nprint("Max weight capacity:\\\\t{}kg".format(env.max_weight))\\nprint("Number of items:\\\\t{}".format(env.N))\\n\\ntf = try_import_tf()\\n# tf.compat.v1.disable_eager_execution()\\n\\n\\nclass KP0ActionMaskModel(TFModelV2):\\n\\ndef __init__(self, obs_space, action_space, num_outputs,\\nmodel_config, name, true_obs_shape=(11,),\\naction_embed_size=5, *args, **kwargs):\\n\\nsuper(KP0ActionMaskModel, self).__init__(obs_space,\\naction_space, num_outputs, model_config, name,\\n*args, **kwargs)\\n\\nself.action_embed_model = FullyConnectedNetwork(\\nspaces.Box(0, 1, shape=true_obs_shape),\\naction_space, action_embed_size,\\nmodel_config, name + "_action_embedding")\\nself.register_variables(self.action_embed_model.variables())\\n\\ndef forward(self, input_dict, state, seq_lens):\\navail_actions = input_dict["obs"]["avail_actions"]\\naction_mask = input_dict["obs"]["action_mask"]\\naction_embedding, _ = self.action_embed_model({\\n"obs": input_dict["obs"]["state"]})\\nintent_vector = tf.expand_dims(action_embedding, 1)\\naction_logits = tf.math.reduce_sum(avail_actions * intent_vector,\\naxis=1)\\ninf_mask = tf.math.maximum(tf.math.log(action_mask), tf.float32.min)\\nreturn 
action_logits + inf_mask, state\\n\\ndef value_function(self):\\nreturn self.action_embed_model.value_function()\\n\\n\\nModelCatalog.register_custom_model(\\'kp_mask\\', KP0ActionMaskModel)\\n\\n\\ndef register_env(env_name, env_config={}):\\nenv = create_env(env_name)\\ntune.register_env(env_name, lambda env_name: env(\\nenv_name, env_config=env_config))\\n\\n\\nregister_env(\\'Knapsack-v0\\', env_config=env_config)\\n\\n\\nray.init(ignore_reinit_error=True)\\ntrainer_config = {\\n"model": {\\n"custom_model": "kp_mask"\\n},\\n"env_config": env_config\\n}\\ntrainer = agents.ppo.PPOTrainer(env=\\'Knapsack-v0\\', config=trainer_config)\\n\\nenv = trainer.env_creator(\\'Knapsack-v0\\')\\nstate = env.state\\nstate[\\'action_mask\\'][0] = 0\\n\\n\\nactions = np.array([trainer.compute_action(state) for i in range(10)])\\n\\nprint(actions)'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 60, in check_shape\\nif not self._obs_space.contains(observation):\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/gym/spaces/box.py", line 128, in contains\\nreturn x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\\nAttributeError: \\'dict\\' object has no attribute \\'shape\\'\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nTraceback (most recent call last):\\nFile "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <module>\\nactions = np.array([trainer.compute_action(state) for i in range(10)])\\nFile "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <listcomp>\\nactions = np.array([trainer.compute_action(state) for i in range(10)])\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 819, in compute_action\\npreprocessed = 
self.workers.local_worker().preprocessors[\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 166, in transform\\nself.check_shape(observation)\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 66, in check_shape\\nraise ValueError(\\nValueError: (\\'Observation for a Box/MultiBinary/MultiDiscrete space should be an np.array, not a Python list.\\', {\\'action_mask\\': array([0, 1, 1, 1, 1]), \\'avail_actions\\': array([1., 1., 1., 1., 1.]), \\'state\\': array([ 1, 12, 2, 1, 4, 2, 4, 2, 1, 10, 0])})'}]
|
Traceback (most recent call last):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 60, in check_shape
if not self._obs_space.contains(observation):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/gym/spaces/box.py", line 128, in contains
return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
AttributeError: 'dict' object has no attribute 'shape'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <module>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <listcomp>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 819, in compute_action
preprocessed = self.workers.local_worker().preprocessors[
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 166, in transform
self.check_shape(observation)
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 66, in check_shape
raise ValueError(
ValueError: ('Observation for a Box/MultiBinary/MultiDiscrete space should be an np.array, not a Python list.', {'action_mask': array([0, 1, 1, 1, 1]), 'avail_actions': array([1., 1., 1., 1., 1.]), 'state': array([ 1, 12, 2, 1, 4, 2, 4, 2, 1, 10, 0])})
|
AttributeError
|
def __init__(self,
             *,
             env_creator: Optional[Callable[[EnvContext], EnvType]] = None,
             validate_env: Optional[Callable[[EnvType], None]] = None,
             policy_class: Optional[Type[Policy]] = None,
             trainer_config: Optional[TrainerConfigDict] = None,
             num_workers: int = 0,
             logdir: Optional[str] = None,
             _setup: bool = True):
    """Create a new WorkerSet and initialize its workers.

    Args:
        env_creator (Optional[Callable[[EnvContext], EnvType]]): Function
            that returns env given env config.
        validate_env (Optional[Callable[[EnvType], None]]): Optional
            callable to validate the generated environment (only on
            worker=0).
        policy_class (Optional[Type[Policy]]): A rllib.policy.Policy class.
        trainer_config (Optional[TrainerConfigDict]): Optional dict that
            extends the common config of the Trainer class.
        num_workers (int): Number of remote rollout workers to create.
        logdir (Optional[str]): Optional logging directory for workers.
        _setup (bool): Whether to setup workers. This is only for testing.
    """
    if not trainer_config:
        from ray.rllib.agents.trainer import COMMON_CONFIG
        trainer_config = COMMON_CONFIG

    self._env_creator = env_creator
    self._policy_class = policy_class
    self._remote_config = trainer_config
    self._logdir = logdir

    if _setup:
        self._local_config = merge_dicts(
            trainer_config,
            {"tf_session_args": trainer_config["local_tf_session_args"]})

        # Spin up the requested number of remote rollout workers.
        self._remote_workers = []
        self.add_workers(num_workers)

        # If remote workers exist, pull the per-policy spaces from one of
        # them so the driver is not forced to instantiate an env itself.
        spaces = None
        if self._remote_workers:
            reported = ray.get(
                self.remote_workers()[0].foreach_policy.remote(
                    lambda p, pid: (pid, p.observation_space, p.action_space)))
            spaces = {}
            for pid, obs_space, act_space in reported:
                # Prefer the pre-preprocessing (original) obs space, if set.
                spaces[pid] = (getattr(obs_space, "original_space",
                                       obs_space), act_space)

        # A local worker is always created.
        self._local_worker = self._make_worker(
            cls=RolloutWorker,
            env_creator=env_creator,
            validate_env=validate_env,
            policy_cls=self._policy_class,
            worker_index=0,
            num_workers=num_workers,
            config=self._local_config,
            spaces=spaces,
        )
|
def __init__(self,
             *,
             env_creator: Optional[Callable[[EnvContext], EnvType]] = None,
             validate_env: Optional[Callable[[EnvType], None]] = None,
             policy_class: Optional[Type[Policy]] = None,
             trainer_config: Optional[TrainerConfigDict] = None,
             num_workers: int = 0,
             logdir: Optional[str] = None,
             _setup: bool = True):
    """Create a new WorkerSet and initialize its workers.

    Args:
        env_creator (Optional[Callable[[EnvContext], EnvType]]): Function
            that returns env given env config.
        validate_env (Optional[Callable[[EnvType], None]]): Optional
            callable to validate the generated environment (only on
            worker=0).
        policy_class (Optional[Type[Policy]]): A rllib.policy.Policy class.
        trainer_config (Optional[TrainerConfigDict]): Optional dict that
            extends the common config of the Trainer class.
        num_workers (int): Number of remote rollout workers to create.
        logdir (Optional[str]): Optional logging directory for workers.
        _setup (bool): Whether to setup workers. This is only for testing.
    """
    if not trainer_config:
        from ray.rllib.agents.trainer import COMMON_CONFIG
        trainer_config = COMMON_CONFIG

    self._env_creator = env_creator
    self._policy_class = policy_class
    self._remote_config = trainer_config
    self._logdir = logdir

    if _setup:
        self._local_config = merge_dicts(
            trainer_config,
            {"tf_session_args": trainer_config["local_tf_session_args"]})

        # Create a number of remote workers.
        self._remote_workers = []
        self.add_workers(num_workers)

        # If num_workers > 0, get the action_spaces and observation_spaces
        # to not be forced to create an Env on the driver.
        if self._remote_workers:
            remote_spaces = ray.get(self.remote_workers(
            )[0].foreach_policy.remote(
                lambda p, pid: (pid, p.observation_space, p.action_space)))
            # BUGFIX: a remote policy may report an already-preprocessed
            # (flattened) observation space. Use its `original_space`
            # attribute when present so the local worker builds the correct
            # preprocessor for structured (e.g. Dict) observations;
            # otherwise Dict observations fail shape checks on the driver.
            spaces = {
                e[0]: (getattr(e[1], "original_space", e[1]), e[2])
                for e in remote_spaces
            }
        else:
            spaces = None

        # Always create a local worker.
        self._local_worker = self._make_worker(
            cls=RolloutWorker,
            env_creator=env_creator,
            validate_env=validate_env,
            policy_cls=self._policy_class,
            worker_index=0,
            num_workers=num_workers,
            config=self._local_config,
            spaces=spaces,
        )
|
[{'piece_type': 'other', 'piece_content': 'from or_gym.utils import create_env\\nfrom gym import spaces\\nfrom ray.rllib.utils import try_import_tf\\nfrom ray.rllib.models.tf.fcnet import FullyConnectedNetwork\\nfrom ray.rllib.models.tf.tf_modelv2 import TFModelV2\\nfrom ray.rllib.models import ModelCatalog\\nfrom ray import tune\\nfrom ray.rllib import agents\\nimport ray\\nimport or_gym\\nimport numpy as np\\nenv = or_gym.make(\\'Knapsack-v0\\')\\n\\nprint("Max weight capacity:\\\\t{}kg".format(env.max_weight))\\nprint("Number of items:\\\\t{}".format(env.N))\\n\\nenv_config = {\\'N\\': 5,\\n\\'max_weight\\': 15,\\n\\'item_weights\\': np.array([1, 12, 2, 1, 4]),\\n\\'item_values\\': np.array([2, 4, 2, 1, 10]),\\n\\'mask\\': True}\\nenv = or_gym.make(\\'Knapsack-v0\\', env_config=env_config)\\nprint("Max weight capacity:\\\\t{}kg".format(env.max_weight))\\nprint("Number of items:\\\\t{}".format(env.N))\\n\\ntf = try_import_tf()\\n# tf.compat.v1.disable_eager_execution()\\n\\n\\nclass KP0ActionMaskModel(TFModelV2):\\n\\ndef __init__(self, obs_space, action_space, num_outputs,\\nmodel_config, name, true_obs_shape=(11,),\\naction_embed_size=5, *args, **kwargs):\\n\\nsuper(KP0ActionMaskModel, self).__init__(obs_space,\\naction_space, num_outputs, model_config, name,\\n*args, **kwargs)\\n\\nself.action_embed_model = FullyConnectedNetwork(\\nspaces.Box(0, 1, shape=true_obs_shape),\\naction_space, action_embed_size,\\nmodel_config, name + "_action_embedding")\\nself.register_variables(self.action_embed_model.variables())\\n\\ndef forward(self, input_dict, state, seq_lens):\\navail_actions = input_dict["obs"]["avail_actions"]\\naction_mask = input_dict["obs"]["action_mask"]\\naction_embedding, _ = self.action_embed_model({\\n"obs": input_dict["obs"]["state"]})\\nintent_vector = tf.expand_dims(action_embedding, 1)\\naction_logits = tf.math.reduce_sum(avail_actions * intent_vector,\\naxis=1)\\ninf_mask = tf.math.maximum(tf.math.log(action_mask), tf.float32.min)\\nreturn 
action_logits + inf_mask, state\\n\\ndef value_function(self):\\nreturn self.action_embed_model.value_function()\\n\\n\\nModelCatalog.register_custom_model(\\'kp_mask\\', KP0ActionMaskModel)\\n\\n\\ndef register_env(env_name, env_config={}):\\nenv = create_env(env_name)\\ntune.register_env(env_name, lambda env_name: env(\\nenv_name, env_config=env_config))\\n\\n\\nregister_env(\\'Knapsack-v0\\', env_config=env_config)\\n\\n\\nray.init(ignore_reinit_error=True)\\ntrainer_config = {\\n"model": {\\n"custom_model": "kp_mask"\\n},\\n"env_config": env_config\\n}\\ntrainer = agents.ppo.PPOTrainer(env=\\'Knapsack-v0\\', config=trainer_config)\\n\\nenv = trainer.env_creator(\\'Knapsack-v0\\')\\nstate = env.state\\nstate[\\'action_mask\\'][0] = 0\\n\\n\\nactions = np.array([trainer.compute_action(state) for i in range(10)])\\n\\nprint(actions)'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 60, in check_shape\\nif not self._obs_space.contains(observation):\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/gym/spaces/box.py", line 128, in contains\\nreturn x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)\\nAttributeError: \\'dict\\' object has no attribute \\'shape\\'\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nTraceback (most recent call last):\\nFile "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <module>\\nactions = np.array([trainer.compute_action(state) for i in range(10)])\\nFile "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <listcomp>\\nactions = np.array([trainer.compute_action(state) for i in range(10)])\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 819, in compute_action\\npreprocessed = 
self.workers.local_worker().preprocessors[\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 166, in transform\\nself.check_shape(observation)\\nFile "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 66, in check_shape\\nraise ValueError(\\nValueError: (\\'Observation for a Box/MultiBinary/MultiDiscrete space should be an np.array, not a Python list.\\', {\\'action_mask\\': array([0, 1, 1, 1, 1]), \\'avail_actions\\': array([1., 1., 1., 1., 1.]), \\'state\\': array([ 1, 12, 2, 1, 4, 2, 4, 2, 1, 10, 0])})'}]
|
Traceback (most recent call last):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 60, in check_shape
if not self._obs_space.contains(observation):
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/gym/spaces/box.py", line 128, in contains
return x.shape == self.shape and np.all(x >= self.low) and np.all(x <= self.high)
AttributeError: 'dict' object has no attribute 'shape'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <module>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/notebooks/projects/hanyu/ReferProject/MahjongFastPK/test.py", line 96, in <listcomp>
actions = np.array([trainer.compute_action(state) for i in range(10)])
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/agents/trainer.py", line 819, in compute_action
preprocessed = self.workers.local_worker().preprocessors[
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 166, in transform
self.check_shape(observation)
File "/data2/huangcq/miniconda3/envs/majenv/lib/python3.8/site-packages/ray/rllib/models/preprocessors.py", line 66, in check_shape
raise ValueError(
ValueError: ('Observation for a Box/MultiBinary/MultiDiscrete space should be an np.array, not a Python list.', {'action_mask': array([0, 1, 1, 1, 1]), 'avail_actions': array([1., 1., 1., 1., 1.]), 'state': array([ 1, 12, 2, 1, 4, 2, 4, 2, 1, 10, 0])})
|
AttributeError
|
def memory_summary():
    """Return a formatted string describing cluster-wide memory usage.

    Any Raylet can serve the global memory info, so the first node from
    ``ray.nodes()`` is queried over gRPC.
    """
    import grpc
    from ray.core.generated import node_manager_pb2
    from ray.core.generated import node_manager_pb2_grpc

    node = ray.nodes()[0]
    raylet_address = "{}:{}".format(node["NodeManagerAddress"],
                                    node["NodeManagerPort"])

    # Lift gRPC's default 4 MiB message cap; the memory table of a large
    # cluster can be much bigger.
    channel_options = [
        ("grpc.max_send_message_length", MAX_MESSAGE_LENGTH),
        ("grpc.max_receive_message_length", MAX_MESSAGE_LENGTH),
    ]
    channel = grpc.insecure_channel(raylet_address, options=channel_options)
    stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
    reply = stub.FormatGlobalMemoryInfo(
        node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)
    return reply.memory_summary
|
def memory_summary():
    """Returns a formatted string describing memory usage in the cluster.

    Raises:
        grpc.RpcError: If the Raylet RPC fails or times out.
    """
    import grpc
    from ray.core.generated import node_manager_pb2
    from ray.core.generated import node_manager_pb2_grpc

    # BUGFIX: gRPC caps received messages at 4 MiB by default; the memory
    # table of a busy cluster easily exceeds that and the call fails with
    # RESOURCE_EXHAUSTED ("Received message larger than max"). Raise both
    # send and receive limits explicitly.
    max_message_length = 512 * 1024 * 1024  # 512 MiB

    # We can ask any Raylet for the global memory info.
    raylet = ray.nodes()[0]
    raylet_address = "{}:{}".format(raylet["NodeManagerAddress"],
                                    raylet["NodeManagerPort"])
    channel = grpc.insecure_channel(
        raylet_address,
        options=[
            ("grpc.max_send_message_length", max_message_length),
            ("grpc.max_receive_message_length", max_message_length),
        ],
    )
    stub = node_manager_pb2_grpc.NodeManagerServiceStub(channel)
    reply = stub.FormatGlobalMemoryInfo(
        node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)
    return reply.memory_summary
|
[{'piece_type': 'error message', 'piece_content': '2020-05-19 02:13:32,283\\tINFO scripts.py:976 -- Connecting to Ray instance at 172.31.6.12:34940.\\n2020-05-19 02:13:32,284\\tWARNING worker.py:809 -- When connecting to an existing cluster, _internal_config must match the cluster\\'s _internal_config.\\n(pid=5906) E0519 02:13:32.383447 5906 plasma_store_provider.cc:108] Failed to put object d47fe8ca624da001ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.\\n(pid=5906) Waiting 1000ms for space to free up...\\n(pid=5906) 2020-05-19 02:13:32,594\\tINFO (unknown file):0 -- gc.collect() freed 10 refs in 0.11551751299975876 seconds\\n(pid=5771) E0519 02:13:32.686894 5771 plasma_store_provider.cc:118] Failed to put object 72e67d09154b35b1ffffffff010000c801000000 after 6 attempts. Plasma store status:\\n(pid=5771) num clients with quota: 0\\n(pid=5771) quota map size: 0\\n(pid=5771) pinned quota map size: 0\\n(pid=5771) allocated bytes: 19130609999\\n(pid=5771) allocation limit: 19130641612\\n(pid=5771) pinned bytes: 19130609999\\n(pid=5771) (global lru) capacity: 19130641612\\n(pid=5771) (global lru) used: 0%\\n(pid=5771) (global lru) num objects: 0\\n(pid=5771) (global lru) num evictions: 0\\n(pid=5771) (global lru) bytes evicted: 0\\n(pid=5771) ---\\n(pid=5771) --- Tip: Use the `ray memory` command to list active objects in the cluster.\\n(pid=5771) ---\\n(pid=5771) E0519 02:13:32.880080 5771 plasma_store_provider.cc:108] Failed to put object 1f5c36abed661dbeffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.\\n(pid=5771) Waiting 1000ms for space to free up...\\n(pid=5769) E0519 02:13:32.882894 5769 plasma_store_provider.cc:108] Failed to put object cb31822e7f0e3c70ffffffff010000c801000000 in object store because it is full. 
Object size is 196886 bytes.\\n(pid=5769) Waiting 2000ms for space to free up...\\n(pid=5771) 2020-05-19 02:13:33,215\\tINFO (unknown file):0 -- gc.collect() freed 10 refs in 0.23763301200006026 seconds\\n(pid=5906) E0519 02:13:33.383901 5906 plasma_store_provider.cc:108] Failed to put object d47fe8ca624da001ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.\\n(pid=5906) Waiting 2000ms for space to free up...\\nTraceback (most recent call last):\\nFile "/home/ubuntu/src/seeweed/ml/bin/ray", line 8, in <module>\\nsys.exit(main())\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/scripts/scripts.py", line 1028, in main\\nreturn cli()\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 829, in __call__\\nreturn self.main(*args, **kwargs)\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 782, in main\\nrv = self.invoke(ctx)\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 1259, in invoke\\nreturn _process_result(sub_ctx.command.invoke(sub_ctx))\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 1066, in invoke\\nreturn ctx.invoke(self.callback, **ctx.params)\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 610, in invoke\\nreturn callback(*args, **kwargs)\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/scripts/scripts.py", line 978, in memory\\nprint(ray.internal.internal_api.memory_summary())\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/internal/internal_api.py", line 28, in memory_summary\\nnode_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)\\nFile "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/grpc/_channel.py", line 826, in __call__\\nreturn _end_unary_response_blocking(state, call, False, None)\\nFile 
"/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/grpc/_channel.py", line 729, in _end_unary_response_blocking\\nraise _InactiveRpcError(state)\\ngrpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:\\nstatus = StatusCode.RESOURCE_EXHAUSTED\\ndetails = "Received message larger than max (28892999 vs. 4194304)"\\ndebug_error_string = "{"created":"@1589854413.712252174","description":"Received message larger than max (28892999 vs. 4194304)","file":"src/core/ext/filters/message_size/message_size_filter.cc","file_line":188,"grpc_status":8}"\\n\\n(pid=5771) E0519 02:13:33.880635 5771 plasma_store_provider.cc:108] Failed to put object 1f5c36abed661dbeffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.\\n(pid=5771) Waiting 2000ms for space to free up...'}]
|
2020-05-19 02:13:32,283 INFO scripts.py:976 -- Connecting to Ray instance at 172.31.6.12:34940.
2020-05-19 02:13:32,284 WARNING worker.py:809 -- When connecting to an existing cluster, _internal_config must match the cluster's _internal_config.
(pid=5906) E0519 02:13:32.383447 5906 plasma_store_provider.cc:108] Failed to put object d47fe8ca624da001ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5906) Waiting 1000ms for space to free up...
(pid=5906) 2020-05-19 02:13:32,594 INFO (unknown file):0 -- gc.collect() freed 10 refs in 0.11551751299975876 seconds
(pid=5771) E0519 02:13:32.686894 5771 plasma_store_provider.cc:118] Failed to put object 72e67d09154b35b1ffffffff010000c801000000 after 6 attempts. Plasma store status:
(pid=5771) num clients with quota: 0
(pid=5771) quota map size: 0
(pid=5771) pinned quota map size: 0
(pid=5771) allocated bytes: 19130609999
(pid=5771) allocation limit: 19130641612
(pid=5771) pinned bytes: 19130609999
(pid=5771) (global lru) capacity: 19130641612
(pid=5771) (global lru) used: 0%
(pid=5771) (global lru) num objects: 0
(pid=5771) (global lru) num evictions: 0
(pid=5771) (global lru) bytes evicted: 0
(pid=5771) ---
(pid=5771) --- Tip: Use the `ray memory` command to list active objects in the cluster.
(pid=5771) ---
(pid=5771) E0519 02:13:32.880080 5771 plasma_store_provider.cc:108] Failed to put object 1f5c36abed661dbeffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5771) Waiting 1000ms for space to free up...
(pid=5769) E0519 02:13:32.882894 5769 plasma_store_provider.cc:108] Failed to put object cb31822e7f0e3c70ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5769) Waiting 2000ms for space to free up...
(pid=5771) 2020-05-19 02:13:33,215 INFO (unknown file):0 -- gc.collect() freed 10 refs in 0.23763301200006026 seconds
(pid=5906) E0519 02:13:33.383901 5906 plasma_store_provider.cc:108] Failed to put object d47fe8ca624da001ffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5906) Waiting 2000ms for space to free up...
Traceback (most recent call last):
File "/home/ubuntu/src/seeweed/ml/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/scripts/scripts.py", line 1028, in main
return cli()
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/scripts/scripts.py", line 978, in memory
print(ray.internal.internal_api.memory_summary())
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/ray/internal/internal_api.py", line 28, in memory_summary
node_manager_pb2.FormatGlobalMemoryInfoRequest(), timeout=30.0)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/grpc/_channel.py", line 826, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/home/ubuntu/src/seeweed/ml/lib/python3.7/site-packages/grpc/_channel.py", line 729, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.RESOURCE_EXHAUSTED
details = "Received message larger than max (28892999 vs. 4194304)"
debug_error_string = "{"created":"@1589854413.712252174","description":"Received message larger than max (28892999 vs. 4194304)","file":"src/core/ext/filters/message_size/message_size_filter.cc","file_line":188,"grpc_status":8}"
(pid=5771) E0519 02:13:33.880635 5771 plasma_store_provider.cc:108] Failed to put object 1f5c36abed661dbeffffffff010000c801000000 in object store because it is full. Object size is 196886 bytes.
(pid=5771) Waiting 2000ms for space to free up...
|
grpc._channel._InactiveRpcError
|
def custom_excepthook(type, value, tb):
    """Excepthook that records driver exceptions in the GCS worker table.

    Always falls through to the default excepthook afterwards so the
    traceback is still printed normally.
    """
    # Only drivers (SCRIPT_MODE) report their exception to GCS.
    if global_worker.mode == SCRIPT_MODE:
        formatted_tb = "".join(traceback.format_tb(tb))
        # Ensure the global state accessor is connected before writing.
        ray.state.state._check_connected()
        ray.state.state.add_worker(global_worker.worker_id,
                                   ray.gcs_utils.DRIVER,
                                   {"exception": formatted_tb})
    # Delegate to the original excepthook for printing.
    normal_excepthook(type, value, tb)
|
def custom_excepthook(type, value, tb):
    """sys.excepthook replacement that pushes driver exceptions to GCS.

    If the current process is a driver (SCRIPT_MODE), the formatted
    traceback is stored in the GCS worker table before the default
    excepthook prints it.
    """
    # If this is a driver, push the exception to GCS worker table.
    if global_worker.mode == SCRIPT_MODE:
        error_message = "".join(traceback.format_tb(tb))
        worker_id = global_worker.worker_id
        worker_type = ray.gcs_utils.DRIVER
        worker_info = {"exception": error_message}

        # BUGFIX: ensure the global state accessor is initialized first;
        # otherwise add_worker() fails inside the excepthook with
        # AttributeError: 'NoneType' object has no attribute
        # 'add_worker_info', masking the user's original exception.
        ray.state.state._check_connected()
        ray.state.state.add_worker(worker_id, worker_type, worker_info)

    # Call the normal excepthook.
    normal_excepthook(type, value, tb)
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nray.init()\\n\\nraise Exception("foobar")'}, {'piece_type': 'error message', 'piece_content': 'raise Exception("hello")\\nError in sys.excepthook:\\nTraceback (most recent call last):\\nFile "/Users/eoakes/code/ray/python/ray/worker.py", line 836, in custom_excepthook\\nray.state.state.add_worker(worker_id, worker_type, worker_info)\\nFile "/Users/eoakes/code/ray/python/ray/state.py", line 733, in add_worker\\nreturn self.global_state_accessor.add_worker_info(\\nAttributeError: \\'NoneType\\' object has no attribute \\'add_worker_info\\'\\n\\nOriginal exception was:\\nTraceback (most recent call last):\\nFile "<stdin>", line 1, in <module>\\nException: hello'}]
|
raise Exception("hello")
Error in sys.excepthook:
Traceback (most recent call last):
File "/Users/eoakes/code/ray/python/ray/worker.py", line 836, in custom_excepthook
ray.state.state.add_worker(worker_id, worker_type, worker_info)
File "/Users/eoakes/code/ray/python/ray/state.py", line 733, in add_worker
return self.global_state_accessor.add_worker_info(
AttributeError: 'NoneType' object has no attribute 'add_worker_info'
Original exception was:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
Exception: hello
|
AttributeError
|
def __init__(self,
             sync_up_template,
             sync_down_template,
             delete_template=noop_template):
    """Shell-command based syncer between two directories.

    Arguments:
        sync_up_template (str): A runnable string template; needs to
            include replacement fields '{source}' and '{target}'.
        sync_down_template (str): A runnable string template; needs to
            include replacement fields '{source}' and '{target}'.
        delete_template (Optional[str]): A runnable string template; needs
            to include replacement field '{target}'. Noop by default.
    """
    # Fail fast on malformed command templates.
    for template in (sync_up_template, sync_down_template):
        self._validate_sync_string(template)

    self.sync_up_template = sync_up_template
    self.sync_down_template = sync_down_template
    self.delete_template = delete_template

    # Runtime state: log file handle, closed flag, running subprocess.
    self.logfile = None
    self._closed = False
    self.cmd_process = None
|
def __init__(self,
             sync_up_template,
             sync_down_template,
             delete_template=noop_template):
    """Syncs between two directories with the given command.

    Arguments:
        sync_up_template (str): A runnable string template; needs to
            include replacement fields '{source}' and '{target}'.
        sync_down_template (str): A runnable string template; needs to
            include replacement fields '{source}' and '{target}'.
        delete_template (Optional[str]): A runnable string template; needs
            to include replacement field '{target}'. Noop by default.
    """
    self._validate_sync_string(sync_up_template)
    self._validate_sync_string(sync_down_template)
    self.sync_up_template = sync_up_template
    self.sync_down_template = sync_down_template
    self.delete_template = delete_template
    self.logfile = None
    # Track whether the client's log file has been closed so later calls
    # can tell a stale handle from a live one. Without this flag,
    # long-running sessions leak file descriptors across repeated syncs
    # (OSError: [Errno 24] Too many open files).
    self._closed = False
    self.cmd_process = None
|
[{'piece_type': 'error message', 'piece_content': '2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down\\nself._local_dir)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down\\nreturn self._execute(self.sync_down_template, source, target)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute\\nfinal_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__\\nrestore_signals, start_new_session)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child\\nerrpipe_read, errpipe_write = os.pipe()\\nOSError: [Errno 24] Too many open files\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, 
in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__\\nOSError: [Errno 24] Too many open files'}]
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def set_logdir(self, logdir):
    """Sets the directory to log sync execution output in.

    Args:
        logdir (str): Log directory.
    """
    # Open a persistent (delete=False) log file inside ``logdir`` and
    # mark the client as usable again.
    logfile_opts = dict(
        prefix="log_sync_out", dir=logdir, suffix=".log", delete=False)
    self.logfile = tempfile.NamedTemporaryFile(**logfile_opts)
    self._closed = False
|
def set_logdir(self, logdir):
    """Sets the directory to log sync execution output in.

    Args:
        logdir (str): Log directory.
    """
    self.logfile = tempfile.NamedTemporaryFile(
        prefix="log_sync_out", dir=logdir, suffix=".log", delete=False)
    # A fresh log file was just opened, so the client is no longer in a
    # closed state. Failing to reset this flag leaves stale handles
    # around, leaking descriptors across repeated sync cycles
    # (OSError: [Errno 24] Too many open files).
    self._closed = False
|
[{'piece_type': 'error message', 'piece_content': '2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down\\nself._local_dir)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down\\nreturn self._execute(self.sync_down_template, source, target)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute\\nfinal_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__\\nrestore_signals, start_new_session)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child\\nerrpipe_read, errpipe_write = os.pipe()\\nOSError: [Errno 24] Too many open files\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, 
in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__\\nOSError: [Errno 24] Too many open files'}]
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def delete(self, target):
    """Launch the configured delete command against ``target``.

    Skips (returning False) while a previous client command is still
    running; otherwise starts the delete as a background subprocess and
    returns True immediately.
    """
    if self.is_running:
        logger.warning("Last sync client cmd still in progress, skipping.")
        return False
    cmd = self.delete_template.format(target=quote(target))
    logger.debug("Running delete: {}".format(cmd))
    # stdout goes to the client's (lazily re-opened) log file; stderr is
    # captured for error reporting.
    self.cmd_process = subprocess.Popen(
        cmd, shell=True, stderr=subprocess.PIPE, stdout=self._get_logfile())
    return True
|
def delete(self, target):
    """Kick off the delete command for ``target``.

    Returns False (without running anything) if a previous command is
    still in flight; otherwise spawns the delete in the background and
    returns True.
    """
    if self.is_running:
        logger.warning("Last sync client cmd still in progress, skipping.")
        return False
    cmd = self.delete_template.format(target=quote(target))
    logger.debug("Running delete: {}".format(cmd))
    self.cmd_process = subprocess.Popen(
        cmd,
        shell=True,
        stderr=subprocess.PIPE,
        stdout=self.logfile)
    return True
|
[{'piece_type': 'error message', 'piece_content': '2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down\\nself._local_dir)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down\\nreturn self._execute(self.sync_down_template, source, target)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute\\nfinal_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__\\nrestore_signals, start_new_session)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child\\nerrpipe_read, errpipe_write = os.pipe()\\nOSError: [Errno 24] Too many open files\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, 
in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__\\nOSError: [Errno 24] Too many open files'}]
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def _execute(self, sync_template, source, target):
    """Executes sync_template on source and target."""
    if self.is_running:
        logger.warning("Last sync client cmd still in progress, skipping.")
        return False
    # Shell-quote both endpoints before substituting them into the
    # user-provided template.
    cmd = sync_template.format(source=quote(source), target=quote(target))
    logger.debug("Running sync: {}".format(cmd))
    self.cmd_process = subprocess.Popen(
        cmd, shell=True, stderr=subprocess.PIPE, stdout=self._get_logfile())
    return True
|
def _execute(self, sync_template, source, target):
    """Executes sync_template on source and target."""
    if self.is_running:
        logger.warning("Last sync client cmd still in progress, skipping.")
        return False
    cmd = sync_template.format(
        source=quote(source), target=quote(target))
    logger.debug("Running sync: {}".format(cmd))
    # Run the sync in the background; stdout is redirected to the
    # client's log file and stderr is captured for error reporting.
    self.cmd_process = subprocess.Popen(
        cmd,
        shell=True,
        stderr=subprocess.PIPE,
        stdout=self.logfile)
    return True
|
[{'piece_type': 'error message', 'piece_content': '2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down\\nself._local_dir)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down\\nreturn self._execute(self.sync_down_template, source, target)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute\\nfinal_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__\\nrestore_signals, start_new_session)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child\\nerrpipe_read, errpipe_write = os.pipe()\\nOSError: [Errno 24] Too many open files\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, 
in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__\\nOSError: [Errno 24] Too many open files'}]
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def on_trial_complete(self, iteration: int, trials: List["Trial"],
                      trial: "Trial", **info):
    """Sync down the finished trial's results and release its syncer."""
    syncer = self._get_trial_syncer(trial)
    # Prefer the node IP recorded in the trial's last result; otherwise
    # ask the trainable actor for its current IP.
    if NODE_IP in trial.last_result:
        worker_ip = trial.last_result[NODE_IP]
    else:
        worker_ip = ray.get(trial.runner.get_current_ip.remote())
    syncer.set_worker_ip(worker_ip)
    syncer.sync_down_if_needed()
    syncer.close()
|
def on_trial_complete(self, iteration: int, trials: List["Trial"],
                      trial: "Trial", **info):
    """Sync down the completed trial's results and close its syncer.

    Closing the syncer releases the file handles it holds; without
    this, long experiments leak one open file per completed trial and
    eventually fail with ``OSError: [Errno 24] Too many open files``.
    """
    trial_syncer = self._get_trial_syncer(trial)
    # Prefer the IP recorded in the last result; fall back to asking
    # the (possibly remote) trainable for its current IP.
    if NODE_IP in trial.last_result:
        trainable_ip = trial.last_result[NODE_IP]
    else:
        trainable_ip = ray.get(trial.runner.get_current_ip.remote())
    trial_syncer.set_worker_ip(trainable_ip)
    trial_syncer.sync_down_if_needed()
    # Release the syncer's file descriptors now that the trial is done.
    trial_syncer.close()
|
[{'piece_type': 'error message', 'piece_content': '2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down\\nself._local_dir)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down\\nreturn self._execute(self.sync_down_template, source, target)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute\\nfinal_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__\\nrestore_signals, start_new_session)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child\\nerrpipe_read, errpipe_write = os.pipe()\\nOSError: [Errno 24] Too many open files\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, 
in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__\\nOSError: [Errno 24] Too many open files'}]
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
def mock_storage_client():
    """Mocks storage client that treats a local dir as durable storage."""
    sync_client = get_sync_client(LOCAL_SYNC_TEMPLATE, LOCAL_DELETE_TEMPLATE)
    # Give every mock client its own short-lived directory under the
    # user's temp dir so log output does not collide between clients.
    logdir = os.path.join(ray.utils.get_user_temp_dir(),
                          f"mock-client-{uuid.uuid4().hex[:4]}")
    os.makedirs(logdir, exist_ok=True)
    sync_client.set_logdir(logdir)
    return sync_client
|
def mock_storage_client():
    """Mocks storage client that treats a local dir as durable storage.

    Each client is given its own unique temp log directory so the sync
    commands it spawns have a dedicated place to write output, instead
    of sharing (and leaking) handles on a common location.
    """
    client = get_sync_client(LOCAL_SYNC_TEMPLATE, LOCAL_DELETE_TEMPLATE)
    # Unique per-client directory under the user's temp dir.
    path = os.path.join(ray.utils.get_user_temp_dir(),
                        f"mock-client-{uuid.uuid4().hex[:4]}")
    os.makedirs(path, exist_ok=True)
    client.set_logdir(path)
    return client
|
[{'piece_type': 'error message', 'piece_content': '2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down\\nself._local_dir)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down\\nreturn self._execute(self.sync_down_template, source, target)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute\\nfinal_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__\\nrestore_signals, start_new_session)\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child\\nerrpipe_read, errpipe_write = os.pipe()\\nOSError: [Errno 24] Too many open files\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, 
in on_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__\\nFile "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__\\nOSError: [Errno 24] Too many open files'}]
|
2020-11-21 02:02:05,077 ERROR syncer.py:190 -- Sync execution failed.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/syncer.py", line 187, in sync_down
self._local_dir)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 197, in sync_down
return self._execute(self.sync_down_template, source, target)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/sync_client.py", line 243, in _execute
final_cmd, shell=True, stderr=subprocess.PIPE, stdout=self.logfile)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 729, in __init__
restore_signals, start_new_session)
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/subprocess.py", line 1254, in _execute_child
errpipe_read, errpipe_write = os.pipe()
OSError: [Errno 24] Too many open files
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/bin/rllib", line 8, in <module>
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/scripts.py", line 34, in cli
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/rllib/train.py", line 215, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 496, in run_experiments
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/tune.py", line 415, in run
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 361, in step
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/callback.py", line 180, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 428, in on_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/ray/tune/logger.py", line 635, in log_trial_start
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 275, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 327, in _get_file_writer
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/writer.py", line 95, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/site-packages/tensorboardX/event_file_writer.py", line 105, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 102, in Queue
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/queues.py", line 42, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/context.py", line 67, in Lock
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 162, in __init__
File "/home/ubuntu/anaconda3/envs/tensorflow2_p36/lib/python3.6/multiprocessing/synchronize.py", line 59, in __init__
OSError: [Errno 24] Too many open files
|
OSError
|
async def _do_long_poll(self):
    """Continuously long-poll the host for updates and invoke callbacks.

    Exits the loop gracefully when the host actor has died (e.g. the
    controller was intentionally killed during shutdown).
    """
    while True:
        try:
            updates: Dict[str, UpdatedObject] = await self._poll_once()
            self._update(updates)
            # Typo fixed in the debug message ("udpates" -> "updates").
            logger.debug(f"LongPollerClient received updates: {updates}")
            for key, updated_object in updates.items():
                # NOTE(simon):
                # This blocks the loop from doing another poll. Consider
                # use loop.create_task here or poll first then call the
                # callbacks.
                callback = self.key_listeners[key]
                await callback(updated_object.object_snapshot)
        except ray.exceptions.RayActorError:
            # This can happen during shutdown where the controller is
            # intentionally killed, the client should just gracefully
            # exit.
            logger.debug("LongPollerClient failed to connect to host. "
                         "Shutting down.")
            break
|
async def _do_long_poll(self):
    """Continuously long-poll the host for updates and invoke callbacks.

    The host (controller) actor can be intentionally killed during
    shutdown; without handling ``RayActorError`` the poll task crashes
    instead of exiting cleanly, so treat that error as a stop signal.
    """
    while True:
        try:
            updates: Dict[str, UpdatedObject] = await self._poll_once()
            self._update(updates)
            logger.debug(f"LongPollerClient received updates: {updates}")
            for key, updated_object in updates.items():
                # NOTE(simon): This blocks the loop from doing another poll.
                # Consider use loop.create_task here or poll first then call
                # the callbacks.
                callback = self.key_listeners[key]
                await callback(updated_object.object_snapshot)
        except ray.exceptions.RayActorError:
            # Controller death during shutdown is expected; exit the
            # polling loop gracefully instead of propagating the error.
            logger.debug("LongPollerClient failed to connect to host. "
                         "Shutting down.")
            break
|
[{'piece_type': 'source code', 'piece_content': 'import ray\\nimport asyncio\\n\\nray.init()\\n@ray.remote\\nclass A:\\ndef hi(self): return "hi"\\n\\na = A.remote()\\nasync def main():\\nawait a.hi.remote()\\nray.kill(a)\\nawait a.hi.remote()\\n\\nasyncio.get_event_loop().run_until_complete(main())'}, {'piece_type': 'error message', 'piece_content': 'Exception in callback async_set_result.<locals>.set_future()\\nhandle: <Handle async_set_result.<locals>.set_future()>\\nTraceback (most recent call last):\\nFile "/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py", line 145, in _run\\nself._callback(*self._args)\\nFile "python/ray/_raylet.pyx", line 1530, in ray._raylet.async_set_result.set_future\\nAttributeError: \\'RayActorError\\' object has no attribute \\'as_instanceof_cause\\''}]
|
Exception in callback async_set_result.<locals>.set_future()
handle: <Handle async_set_result.<locals>.set_future()>
Traceback (most recent call last):
File "/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py", line 145, in _run
self._callback(*self._args)
File "python/ray/_raylet.pyx", line 1530, in ray._raylet.async_set_result.set_future
AttributeError: 'RayActorError' object has no attribute 'as_instanceof_cause'
|
AttributeError
|
async def _get_actor(actor):
    """Return a copy of the actor record enriched with worker/GPU stats."""
    actor = dict(actor)
    worker_id = actor["address"]["workerId"]
    stats = DataSource.core_worker_stats.get(worker_id, {})
    actor["actorConstructor"] = stats.get("actorTitle",
                                          "Unknown actor constructor")
    actor.update(stats)
    # TODO(fyrestone): remove this, give a link from actor
    # info to worker info in front-end.
    node_id = actor["address"]["rayletId"]
    pid = stats.get("pid")
    physical = DataSource.node_physical_stats.get(node_id, {})
    process_stats = None
    gpu_stats_for_pid = None
    if pid:
        # Find this actor's worker process stats, if reported.
        process_stats = next(
            (w for w in physical.get("workers", []) if w["pid"] == pid),
            None)
        # Find the first GPU whose process list contains this pid.
        for gpu in physical.get("gpus", []):
            if any(p["pid"] == pid for p in gpu.get("processes", [])):
                gpu_stats_for_pid = gpu
                break
    actor["gpus"] = gpu_stats_for_pid
    actor["processStats"] = process_stats
    return actor
|
async def _get_actor(actor):
    """Return a copy of ``actor`` enriched with worker and GPU stats.

    ``node_physical_stats`` entries may lack the "workers" or "gpus"
    keys entirely (e.g. right after a node registers); default to empty
    lists so we never iterate over ``None`` (previously raised
    ``TypeError: 'NoneType' object is not iterable``).
    """
    actor = dict(actor)
    worker_id = actor["address"]["workerId"]
    core_worker_stats = DataSource.core_worker_stats.get(worker_id, {})
    actor_constructor = core_worker_stats.get("actorTitle",
                                              "Unknown actor constructor")
    actor["actorConstructor"] = actor_constructor
    actor.update(core_worker_stats)
    # TODO(fyrestone): remove this, give a link from actor
    # info to worker info in front-end.
    node_id = actor["address"]["rayletId"]
    pid = core_worker_stats.get("pid")
    node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
    actor_process_stats = None
    actor_process_gpu_stats = None
    if pid:
        for process_stats in node_physical_stats.get("workers", []):
            if process_stats["pid"] == pid:
                actor_process_stats = process_stats
                break
        for gpu_stats in node_physical_stats.get("gpus", []):
            for process in gpu_stats.get("processes", []):
                if process["pid"] == pid:
                    actor_process_gpu_stats = gpu_stats
                    break
            if actor_process_gpu_stats is not None:
                break
    actor["gpus"] = actor_process_gpu_stats
    actor["processStats"] = actor_process_stats
    return actor
|
[{'piece_type': 'error message', 'piece_content': 'Error: Traceback (most recent call last): File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 347, in _update_cache response = task.result() File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 77, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details() File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 182, in get_all_node_details node_details.append(await cls.get_node_info(node_id)) File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 159, in get_node_info node_info["actors"] = await cls.get_node_actors(node_id) File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 74, in get_node_actors for process_stats in node_physical_stats.get("workers"): TypeError: \\'NoneType\\' object is not iterable'}]
|
Error: Traceback (most recent call last): File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 347, in _update_cache response = task.result() File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 77, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details() File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 182, in get_all_node_details node_details.append(await cls.get_node_info(node_id)) File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 159, in get_node_info node_info["actors"] = await cls.get_node_actors(node_id) File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 74, in get_node_actors for process_stats in node_physical_stats.get("workers"): TypeError: 'NoneType' object is not iterable
|
TypeError
|
def get_address_info_from_redis_helper(redis_address,
                                       node_ip_address,
                                       redis_password=None):
    """Look up this node's raylet addresses from the Redis node table."""
    redis_ip_address, redis_port = redis_address.split(":")
    # Fetch the node table through the global state accessor.
    state = ray.state.GlobalState()
    state._initialize_global_state(redis_address, redis_password)
    client_table = state.node_table()
    if not client_table:
        raise RuntimeError(
            "Redis has started but no raylets have registered yet.")

    def _is_local(info):
        # A raylet counts as local if it registered under this node's
        # IP, under loopback on the Redis host, or under the Redis IP.
        addr = info["NodeManagerAddress"]
        if addr == node_ip_address or addr == redis_ip_address:
            return True
        return (addr == "127.0.0.1"
                and redis_ip_address == get_node_ip_address())

    relevant_client = next(
        (info for info in client_table if _is_local(info)), None)
    if relevant_client is None:
        raise RuntimeError(
            f"This node has an IP address of {node_ip_address}, and Ray "
            "expects this IP address to be either the Redis address or one of"
            f" the Raylet addresses. Connected to Redis at {redis_address} and"
            " found raylets at "
            f"{', '.join(c['NodeManagerAddress'] for c in client_table)} but "
            f"none of these match this node's IP {node_ip_address}. Are any of"
            " these actually a different IP address for the same node?"
            "You might need to provide --node-ip-address to specify the IP "
            "address that the head should use when sending to this node.")
    return {
        "object_store_address": relevant_client["ObjectStoreSocketName"],
        "raylet_socket_name": relevant_client["RayletSocketName"],
        "node_manager_port": relevant_client["NodeManagerPort"],
    }
|
def get_address_info_from_redis_helper(redis_address,
                                       node_ip_address,
                                       redis_password=None):
    """Look up this node's raylet addresses from the Redis node table.

    Also matches a raylet that registered under the Redis (head) IP:
    on the head node, the raylet may have registered with the address
    used for Redis while this process resolved a different local IP,
    which previously caused a spurious RuntimeError even though the
    raylet was running.
    """
    redis_ip_address, redis_port = redis_address.split(":")
    # Get node table from global state accessor.
    global_state = ray.state.GlobalState()
    global_state._initialize_global_state(redis_address, redis_password)
    client_table = global_state.node_table()
    if len(client_table) == 0:
        raise RuntimeError(
            "Redis has started but no raylets have registered yet.")
    relevant_client = None
    for client_info in client_table:
        client_node_ip_address = client_info["NodeManagerAddress"]
        # Accept: exact IP match, loopback on the Redis host, or a
        # raylet registered under the Redis/head IP itself.
        if (client_node_ip_address == node_ip_address
                or (client_node_ip_address == "127.0.0.1"
                    and redis_ip_address == get_node_ip_address())
                or client_node_ip_address == redis_ip_address):
            relevant_client = client_info
            break
    if relevant_client is None:
        raise RuntimeError(
            f"This node has an IP address of {node_ip_address}, and Ray "
            "expects this IP address to be either the Redis address or one of"
            f" the Raylet addresses. Connected to Redis at {redis_address} and"
            " found raylets at "
            f"{', '.join(c['NodeManagerAddress'] for c in client_table)} but "
            f"none of these match this node's IP {node_ip_address}. Are any of"
            " these actually a different IP address for the same node?"
            "You might need to provide --node-ip-address to specify the IP "
            "address that the head should use when sending to this node.")
    return {
        "object_store_address": relevant_client["ObjectStoreSocketName"],
        "raylet_socket_name": relevant_client["RayletSocketName"],
        "node_manager_port": relevant_client["NodeManagerPort"],
    }
|
[{'piece_type': 'error message', 'piece_content': '020-11-11 14:48:28,960\\tINFO worker.py:672 -- Connecting to existing Ray cluster at address: ***:***\\n2020-11-11 14:48:28,968\\tWARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run \\'ray start\\' on this node?\\n2020-11-11 14:48:29,977\\tWARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run \\'ray start\\' on this node?\\n2020-11-11 14:48:30,986\\tWARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run \\'ray start\\' on this node?\\n2020-11-11 14:48:31,996\\tWARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run \\'ray start\\' on this node?\\n2020-11-11 14:48:33,005\\tWARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run \\'ray start\\' on this node?\\nTraceback (most recent call last):\\nFile "***", line 39, in <module>\\nray.init(address=\\'***:***\\', _redis_password=\\'***\\')\\nFile "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 779, in init\\nconnect_only=True)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/node.py", line 179, in __init__\\nredis_password=self.redis_password))\\nFile "/usr/local/lib/python3.6/dist-packages/ray/_private/services.py", line 211, in get_address_info_from_redis\\nredis_address, node_ip_address, redis_password=redis_password)\\nFile "/usr/local/lib/python3.6/dist-packages/ray/_private/services.py", line 194, in get_address_info_from_redis_helper\\n"Redis has started but no raylets have registered yet.")\\nRuntimeError: Redis has started but no raylets have registered yet.'}]
|
020-11-11 14:48:28,960 INFO worker.py:672 -- Connecting to existing Ray cluster at address: ***:***
2020-11-11 14:48:28,968 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:29,977 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:30,986 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:31,996 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
2020-11-11 14:48:33,005 WARNING services.py:218 -- Some processes that the driver needs to connect to have not registered with Redis, so retrying. Have you run 'ray start' on this node?
Traceback (most recent call last):
File "***", line 39, in <module>
ray.init(address='***:***', _redis_password='***')
File "/usr/local/lib/python3.6/dist-packages/ray/worker.py", line 779, in init
connect_only=True)
File "/usr/local/lib/python3.6/dist-packages/ray/node.py", line 179, in __init__
redis_password=self.redis_password))
File "/usr/local/lib/python3.6/dist-packages/ray/_private/services.py", line 211, in get_address_info_from_redis
redis_address, node_ip_address, redis_password=redis_password)
File "/usr/local/lib/python3.6/dist-packages/ray/_private/services.py", line 194, in get_address_info_from_redis_helper
"Redis has started but no raylets have registered yet.")
RuntimeError: Redis has started but no raylets have registered yet.
|
RuntimeError
|
def __init__(self):
    """Initialize output-formatting state with quiet, auto-detect defaults."""
    # Current indentation depth for nested log output.
    self.indent_level = 0
    # Verbosity level; 0 means only the most important messages.
    self._verbosity = 0
    # True once verbosity has been explicitly set by the user,
    # so later auto-detection does not override it.
    self._verbosity_overriden = False
    # "auto" lets colorful decide whether the terminal supports color.
    self._color_mode = "auto"
    self._log_style = "record"
    self.pretty = False
    self.interactive = False
    # store whatever colorful has detected for future use if
    # the color output is toggled (colorful detects # of supported colors,
    # so it has some non-trivial logic to determine this)
    self._autodetected_cf_colormode = cf.colorful.colormode
    self.set_format()
|
def __init__(self):
    """Initialize formatting state to the plain (non-pretty) defaults."""
    self.indent_level = 0
    self.pretty = False
    self.interactive = False
    self._verbosity = 0
    self._color_mode = "auto"
    self._log_style = "record"
    # colorful works out how many colors the terminal supports when it is
    # imported (some non-trivial logic); stash that result so toggling
    # color output can restore it later.
    self._autodetected_cf_colormode = cf.colorful.colormode
    self.set_format()
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def verbosity(self):
    """Return the effective verbosity level.

    An explicitly overridden verbosity always wins; otherwise, non-pretty
    (plain) output logs everything, i.e. behaves as unlimited verbosity.
    """
    if self._verbosity_overriden or self.pretty:
        return self._verbosity
    return 999
|
def verbosity(self):
    """Return the verbosity level; plain (non-pretty) output is unlimited."""
    return self._verbosity if self.pretty else 999
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def _set_verbosity(self, x):
self._verbosity = x
self._verbosity_overriden = True
|
def _set_verbosity(self, x):
self._verbosity = x
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def rsync(config_file: str,
          source: Optional[str],
          target: Optional[str],
          override_cluster_name: Optional[str],
          down: bool,
          ip_address: Optional[str] = None,
          use_internal_ip: bool = False,
          no_config_cache: bool = False,
          all_nodes: bool = False,
          _runner: ModuleType = subprocess) -> None:
    """Rsyncs files between the local machine and cluster nodes.

    Arguments:
        config_file: path to the cluster yaml
        source: source dir (must be provided together with ``target``)
        target: target dir (must be provided together with ``source``)
        override_cluster_name: set the name of the cluster
        down: whether we're syncing remote -> local
        ip_address (str): Address of node. Raise Exception
            if both ip_address and 'all_nodes' are provided.
        use_internal_ip (bool): Whether the provided ip_address is
            public or private.
        no_config_cache (bool): whether to bypass the cached bootstrap
            config.
        all_nodes: whether to sync worker nodes in addition to the head node
        _runner: process runner module; injectable for testing.
    """
    if bool(source) != bool(target):
        cli_logger.abort(
            "Expected either both a source and a target, or neither.")
    assert bool(source) == bool(target), (
        "Must either provide both or neither source and target.")
    if ip_address and all_nodes:
        cli_logger.abort("Cannot provide both ip_address and 'all_nodes'.")

    # Use a context manager so the config file handle is always closed
    # (the previous bare open() leaked the file object).
    with open(config_file) as f:
        config = yaml.safe_load(f.read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config, no_config_cache=no_config_cache)

    # A path that lives under a declared file mount is rsynced differently
    # (e.g. the mount location inside a docker container may differ from
    # the host path), so detect that up front.
    is_file_mount = False
    if source and target:
        for remote_mount in config.get("file_mounts", {}).keys():
            if (source if down else target).startswith(remote_mount):
                is_file_mount = True
                break

    provider = _get_node_provider(config["provider"], config["cluster_name"])

    def rsync_to_node(node_id, is_head_node):
        # Only the rsync machinery of the updater is exercised here, so
        # the setup/start command lists are intentionally empty.
        updater = NodeUpdaterThread(
            node_id=node_id,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=[],
            setup_commands=[],
            ray_start_commands=[],
            runtime_hash="",
            use_internal_ip=use_internal_ip,
            process_runner=_runner,
            file_mounts_contents_hash="",
            is_head_node=is_head_node,
            rsync_options={
                "rsync_exclude": config.get("rsync_exclude"),
                "rsync_filter": config.get("rsync_filter")
            },
            docker_config=config.get("docker"))
        if down:
            rsync = updater.rsync_down
        else:
            rsync = updater.rsync_up
        if source and target:
            # Print rsync progress for a single-path sync, but only when
            # the user asked for verbose output.
            if cli_logger.verbosity > 0:
                cmd_output_util.set_output_redirected(False)
                set_rsync_silent(False)
            rsync(source, target, is_file_mount)
        else:
            # No explicit paths given: sync every configured file mount.
            updater.sync_file_mounts(rsync)

    head_node = _get_head_node(
        config, config_file, override_cluster_name, create_if_needed=False)
    if ip_address:
        nodes = [
            provider.get_node_id(ip_address, use_internal_ip=use_internal_ip)
        ]
    else:
        nodes = [head_node]
        if all_nodes:
            nodes.extend(_get_worker_nodes(config, override_cluster_name))
    for node_id in nodes:
        rsync_to_node(node_id, is_head_node=(node_id == head_node))
|
def rsync(config_file: str,
          source: Optional[str],
          target: Optional[str],
          override_cluster_name: Optional[str],
          down: bool,
          ip_address: Optional[str] = None,
          use_internal_ip: bool = False,
          no_config_cache: bool = False,
          all_nodes: bool = False,
          _runner: ModuleType = subprocess) -> None:
    """Rsyncs files between the local machine and cluster nodes.

    Arguments:
        config_file: path to the cluster yaml
        source: source dir (must be provided together with ``target``)
        target: target dir (must be provided together with ``source``)
        override_cluster_name: set the name of the cluster
        down: whether we're syncing remote -> local
        ip_address (str): Address of node. Raise Exception
            if both ip_address and 'all_nodes' are provided.
        use_internal_ip (bool): Whether the provided ip_address is
            public or private.
        no_config_cache (bool): whether to bypass the cached bootstrap
            config.
        all_nodes: whether to sync worker nodes in addition to the head node
        _runner: process runner module; injectable for testing.
    """
    if bool(source) != bool(target):
        cli_logger.abort(
            "Expected either both a source and a target, or neither.")
    assert bool(source) == bool(target), (
        "Must either provide both or neither source and target.")
    if ip_address and all_nodes:
        cli_logger.abort("Cannot provide both ip_address and 'all_nodes'.")

    # Use a context manager so the config file handle is always closed
    # (the previous bare open() leaked the file object).
    with open(config_file) as f:
        config = yaml.safe_load(f.read())
    if override_cluster_name is not None:
        config["cluster_name"] = override_cluster_name
    config = _bootstrap_config(config, no_config_cache=no_config_cache)

    # A path that lives under a declared file mount is rsynced differently
    # (e.g. the mount location inside a docker container may differ from
    # the host path), so detect that up front.
    is_file_mount = False
    if source and target:
        for remote_mount in config.get("file_mounts", {}).keys():
            if (source if down else target).startswith(remote_mount):
                is_file_mount = True
                break

    provider = _get_node_provider(config["provider"], config["cluster_name"])

    def rsync_to_node(node_id, is_head_node):
        # Only the rsync machinery of the updater is exercised here, so
        # the setup/start command lists are intentionally empty.
        updater = NodeUpdaterThread(
            node_id=node_id,
            provider_config=config["provider"],
            provider=provider,
            auth_config=config["auth"],
            cluster_name=config["cluster_name"],
            file_mounts=config["file_mounts"],
            initialization_commands=[],
            setup_commands=[],
            ray_start_commands=[],
            runtime_hash="",
            use_internal_ip=use_internal_ip,
            process_runner=_runner,
            file_mounts_contents_hash="",
            is_head_node=is_head_node,
            rsync_options={
                "rsync_exclude": config.get("rsync_exclude"),
                "rsync_filter": config.get("rsync_filter")
            },
            docker_config=config.get("docker"))
        if down:
            rsync = updater.rsync_down
        else:
            rsync = updater.rsync_up
        if source and target:
            # Print rsync progress for a single-path sync, but ONLY when
            # verbose output was requested: unconditionally disabling
            # output redirection and rsync silencing floods the logs for
            # every background sync (e.g. Tune trial checkpoint syncing).
            if cli_logger.verbosity > 0:
                cmd_output_util.set_output_redirected(False)
                set_rsync_silent(False)
            rsync(source, target, is_file_mount)
        else:
            # No explicit paths given: sync every configured file mount.
            updater.sync_file_mounts(rsync)

    head_node = _get_head_node(
        config, config_file, override_cluster_name, create_if_needed=False)
    if ip_address:
        nodes = [
            provider.get_node_id(ip_address, use_internal_ip=use_internal_ip)
        ]
    else:
        nodes = [head_node]
        if all_nodes:
            nodes.extend(_get_worker_nodes(config, override_cluster_name))
    for node_id in nodes:
        rsync_to_node(node_id, is_head_node=(node_id == head_node))
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def rsync_to_node(node_id, is_head_node):
    """Run the requested rsync against a single cluster node.

    Args:
        node_id: Provider id of the node to sync with.
        is_head_node: Whether this node is the cluster head.
    """
    # Build an updater for this node; only its rsync machinery is used,
    # so all setup/start command lists are left empty.
    updater = NodeUpdaterThread(
        node_id=node_id,
        provider=provider,
        provider_config=config["provider"],
        auth_config=config["auth"],
        cluster_name=config["cluster_name"],
        file_mounts=config["file_mounts"],
        initialization_commands=[],
        setup_commands=[],
        ray_start_commands=[],
        runtime_hash="",
        file_mounts_contents_hash="",
        use_internal_ip=use_internal_ip,
        process_runner=_runner,
        is_head_node=is_head_node,
        rsync_options={
            "rsync_exclude": config.get("rsync_exclude"),
            "rsync_filter": config.get("rsync_filter")
        },
        docker_config=config.get("docker"))

    # Direction decides which transfer primitive we use.
    sync_fn = updater.rsync_down if down else updater.rsync_up

    if not (source and target):
        # No explicit paths: sync every configured file mount instead.
        updater.sync_file_mounts(sync_fn)
        return

    # Single-path sync: surface rsync progress when verbose output
    # was requested.
    if cli_logger.verbosity > 0:
        cmd_output_util.set_output_redirected(False)
        set_rsync_silent(False)
    sync_fn(source, target, is_file_mount)
|
def rsync_to_node(node_id, is_head_node):
    """Run the requested rsync against a single cluster node.

    Args:
        node_id: Provider id of the node to sync with.
        is_head_node: Whether this node is the cluster head.
    """
    # Only the rsync machinery of the updater is exercised here, so the
    # setup/start command lists are intentionally empty.
    updater = NodeUpdaterThread(
        node_id=node_id,
        provider_config=config["provider"],
        provider=provider,
        auth_config=config["auth"],
        cluster_name=config["cluster_name"],
        file_mounts=config["file_mounts"],
        initialization_commands=[],
        setup_commands=[],
        ray_start_commands=[],
        runtime_hash="",
        use_internal_ip=use_internal_ip,
        process_runner=_runner,
        file_mounts_contents_hash="",
        is_head_node=is_head_node,
        rsync_options={
            "rsync_exclude": config.get("rsync_exclude"),
            "rsync_filter": config.get("rsync_filter")
        },
        docker_config=config.get("docker"))
    if down:
        rsync = updater.rsync_down
    else:
        rsync = updater.rsync_up
    if source and target:
        # Print rsync progress for a single-path sync, but ONLY when
        # verbose output was requested: unconditionally disabling output
        # redirection and rsync silencing floods the logs for every
        # background sync.
        if cli_logger.verbosity > 0:
            cmd_output_util.set_output_redirected(False)
            set_rsync_silent(False)
        rsync(source, target, is_file_mount)
    else:
        # No explicit paths given: sync every configured file mount.
        updater.sync_file_mounts(rsync)
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def __init__(self,
             local_dir: str,
             remote_dir: str,
             sync_client: Optional[SyncClient] = None):
    """Create a node syncer that transfers through docker containers.

    Args:
        local_dir: Directory on this node to sync.
        remote_dir: Directory on the remote node to sync.
        sync_client: Optional client performing the actual transfer;
            a ``DockerSyncClient`` is created when not supplied.
    """
    # Syncer log verbosity is tunable via the TUNE_SYNCER_VERBOSITY
    # environment variable (default 0).
    configure_logging(
        log_style="record",
        verbosity=env_integer("TUNE_SYNCER_VERBOSITY", 0))
    self.local_ip = services.get_node_ip_address()
    self.worker_ip = None
    client = sync_client or DockerSyncClient()
    # The client needs the cluster config to locate/reach the containers.
    client.configure(self._cluster_config_file)
    super(NodeSyncer, self).__init__(local_dir, remote_dir, client)
|
def __init__(self,
             local_dir: str,
             remote_dir: str,
             sync_client: Optional[SyncClient] = None):
    """Initialize a Docker-aware node syncer.

    Args:
        local_dir: Directory on this node to sync from/to.
        remote_dir: Directory on the remote node to sync from/to.
        sync_client: Client that carries out the actual sync commands.
            Defaults to a ``DockerSyncClient`` when not given.
    """
    self.local_ip = services.get_node_ip_address()
    self.worker_ip = None
    # Fall back to the Docker sync client when the caller supplied none.
    sync_client = sync_client or DockerSyncClient()
    # NOTE(review): `_cluster_config_file` is not set in this method; it
    # is presumably attached to the class elsewhere -- confirm with callers.
    sync_client.configure(self._cluster_config_file)
    # NOTE(review): `super(NodeSyncer, self)` starts MRO lookup *after*
    # NodeSyncer; verify whether skipping NodeSyncer.__init__ is intended.
    super(NodeSyncer, self).__init__(local_dir, remote_dir, sync_client)
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def reset_trial(self,
                trial,
                new_config,
                new_experiment_tag,
                logger_creator=None):
    """Tries to invoke `Trainable.reset()` to reset trial.

    Args:
        trial (Trial): Trial to be reset.
        new_config (dict): New configuration for Trial trainable.
        new_experiment_tag (str): New experiment name for trial.
        logger_creator (Optional[Callable[[Dict], Logger]]): Function
            that instantiates a logger on the actor process.

    Returns:
        True if `reset_config` is successful else False.
    """
    trial.set_experiment_tag(new_experiment_tag)
    trial.set_config(new_config)
    runner = trial.runner
    with self._change_working_directory(trial):
        with warn_if_slow("reset"):
            try:
                # Bound the remote reset so a hung actor cannot stall
                # the whole trial runner.
                return ray.get(
                    runner.reset.remote(new_config, logger_creator),
                    timeout=DEFAULT_GET_TIMEOUT)
            except GetTimeoutError:
                logger.exception("Trial %s: reset timed out.", trial)
                return False
|
def reset_trial(self,
                trial,
                new_config,
                new_experiment_tag,
                logger_creator=None):
    """Tries to invoke `Trainable.reset()` to reset trial.

    Args:
        trial (Trial): Trial to be reset.
        new_config (dict): New configuration for Trial trainable.
        new_experiment_tag (str): New experiment name for trial.
        logger_creator (Optional[Callable[[Dict], Logger]]): A function
            that instantiates a logger on the actor process.

    Returns:
        True if `reset_config` is successful else False.
    """
    trial.set_experiment_tag(new_experiment_tag)
    trial.set_config(new_config)
    trainable = trial.runner
    with self._change_working_directory(trial):
        with warn_if_slow("reset"):
            try:
                # Time-bound the remote call so a hung actor does not
                # block the trial runner indefinitely.
                reset_val = ray.get(
                    trainable.reset.remote(new_config, logger_creator),
                    timeout=DEFAULT_GET_TIMEOUT)
            except GetTimeoutError:
                logger.exception("Trial %s: reset timed out.", trial)
                return False
    return reset_val
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def reset(self, new_config, logger_creator=None):
    """Resets trial for use with new config.

    Subclasses should override reset_config() to actually
    reset actor behavior for the new config.
    """
    self.config = new_config

    # Tear down the current result logger before deciding whether to
    # rebuild it.
    self._result_logger.flush()
    self._result_logger.close()
    if not logger_creator:
        logger.debug("Did not reset logger. Got: "
                     f"trainable.reset(logger_creator={logger_creator}).")
    else:
        logger.debug("Logger reset.")
        self._create_logger(new_config.copy(), logger_creator)

    # Re-open stdout/stderr capture files according to the new config.
    stdout_file = new_config.pop(STDOUT_FILE, None)
    stderr_file = new_config.pop(STDERR_FILE, None)
    self._close_logfiles()
    self._open_logfiles(stdout_file, stderr_file)

    if not self.reset_config(new_config):
        return False

    # Clear progress bookkeeping; `restore` overwrites these when a
    # checkpoint is supplied afterwards.
    self._iteration = 0
    self._time_total = 0.0
    self._timesteps_total = None
    self._episodes_total = None
    self._time_since_restore = 0.0
    self._timesteps_since_restore = 0
    self._iterations_since_restore = 0
    self._restored = False
    return True
|
def reset(self, new_config, logger_creator=None):
    """Resets trial for use with new config.

    Subclasses should override reset_config() to actually
    reset actor behavior for the new config.

    Args:
        new_config (dict): New configuration to run with.
        logger_creator (Optional[Callable]): If given, used to build a
            fresh result logger on this actor; otherwise the existing
            logging destination is left in place.

    Returns:
        True if the reset succeeded, False if `reset_config` failed.
    """
    self.config = new_config
    self._result_logger.flush()
    self._result_logger.close()
    # Bug fix: only rebuild the logger when a creator is actually
    # supplied.  Previously `_create_logger` ran unconditionally with
    # `logger_creator=None`, discarding the logdir wiring set up by the
    # caller (visible e.g. when actors are reused across trials).
    if logger_creator:
        logger.debug("Logger reset.")
        self._create_logger(new_config.copy(), logger_creator)
    else:
        logger.debug("Did not reset logger. Got: "
                     f"trainable.reset(logger_creator={logger_creator}).")
    # Re-open stdout/stderr capture files according to the new config.
    stdout_file = new_config.pop(STDOUT_FILE, None)
    stderr_file = new_config.pop(STDERR_FILE, None)
    self._close_logfiles()
    self._open_logfiles(stdout_file, stderr_file)
    success = self.reset_config(new_config)
    if not success:
        return False
    # Reset attributes. Will be overwritten by `restore` if a checkpoint
    # is provided.
    self._iteration = 0
    self._time_total = 0.0
    self._timesteps_total = None
    self._episodes_total = None
    self._time_since_restore = 0.0
    self._timesteps_since_restore = 0
    self._iterations_since_restore = 0
    self._restored = False
    return True
|
[{'piece_type': 'error message', 'piece_content': '2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn\\nist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)\\n2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.\\n2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.'}, {'piece_type': 'other', 'piece_content': '# spawns 1 head and 1 worker\\nray up ray_cluster.yaml -y\\n\\n# submits ray_cluster.yaml. 
after the first epoch of the worker node the error can be seen in the terminal\\nray submit ray_cluster.yaml tune_pl.py --start'}, {'piece_type': 'other', 'piece_content': 'cluster_name: default\\nmin_workers: 1\\nmax_workers: 1\\ninitial_workers: 1\\nautoscaling_mode: default\\n\\ndocker:\\nimage: "rayproject/ray:latest-gpu"\\ncontainer_name: "ray_container"\\n\\npull_before_run: True\\nrun_options: []\\n\\ntarget_utilization_fraction: 0.8\\nidle_timeout_minutes: 10\\n\\nprovider:\\ntype: gcp\\nregion: europe-west4\\navailability_zone: europe-west4-b\\nproject_id: null\\n\\nauth:\\nssh_user: ubuntu\\n\\nhead_node:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\nworker_nodes:\\nmachineType: n1-standard-2\\ndisks:\\n- boot: true\\nautoDelete: true\\ntype: PERSISTENT\\ninitializeParams:\\ndiskSizeGb: 50\\nsourceImage: projects/deeplearning-platform-release/global/images/family/tf-1-13-cpu\\nscheduling:\\n- onHostMaintenance: TERMINATE\\n\\ncluster_synced_files: []\\nfile_mounts_sync_continuously: True\\ninitialization_commands: []\\n\\nrsync_exclude:\\n- "**/.git"\\n- "**/.git/**"\\n\\n\\nrsync_filter:\\n- ".gitignore"\\n\\nsetup_commands:\\n- pip install torch==1.7.0\\n- pip install pytorch-lightning==1.0.6\\n- pip install torchvision==0.8.1\\n\\nhead_setup_commands:\\n- pip install google-api-python-client==1.7.8\\n\\nworker_setup_commands: []\\n\\nhead_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--head\\n--port=6379\\n--object-manager-port=8076\\n--autoscaling-config=~/ray_bootstrap_config.yaml\\n\\nworker_start_ray_commands:\\n- ray stop\\n- >-\\nulimit -n 65536;\\nray start\\n--address=$RAY_HEAD_IP:6379\\n--object-manager-port=8076'}, {'piece_type': 'other', 'piece_content': 'import torch\\nimport pytorch_lightning as 
pl\\nfrom torch.utils.data import DataLoader, random_split\\nfrom torch.nn import functional as F\\nfrom torchvision.datasets import MNIST\\nfrom torchvision import transforms\\nimport os\\n\\nimport shutil\\nfrom functools import partial\\nfrom tempfile import mkdtemp\\nfrom pytorch_lightning.loggers import TensorBoardLogger\\nfrom pytorch_lightning.utilities.cloud_io import load as pl_load\\nfrom ray import tune\\nimport ray\\nfrom ray.tune.integration.docker import DockerSyncer\\nfrom ray.tune import CLIReporter\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.integration.pytorch_lightning import TuneReportCallback, TuneReportCheckpointCallback\\n\\nclass LightningMNISTClassifier(pl.LightningModule):\\n"""\\nThis has been adapted from\\nhttps://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09\\n"""\\n\\ndef __init__(self, config, data_dir=None):\\nsuper(LightningMNISTClassifier, self).__init__()\\n\\nself.data_dir = data_dir or os.getcwd()\\n\\nself.layer_1_size = config["layer_1_size"]\\nself.layer_2_size = config["layer_2_size"]\\nself.lr = config["lr"]\\nself.batch_size = config["batch_size"]\\n\\n# mnist images are (1, 28, 28) (channels, width, height)\\nself.layer_1 = torch.nn.Linear(28 * 28, self.layer_1_size)\\nself.layer_2 = torch.nn.Linear(self.layer_1_size, self.layer_2_size)\\nself.layer_3 = torch.nn.Linear(self.layer_2_size, 10)\\n\\ndef forward(self, x):\\nbatch_size, channels, width, height = x.size()\\nx = x.view(batch_size, -1)\\n\\nx = self.layer_1(x)\\nx = torch.relu(x)\\n\\nx = self.layer_2(x)\\nx = torch.relu(x)\\n\\nx = self.layer_3(x)\\nx = torch.log_softmax(x, dim=1)\\n\\nreturn x\\n\\ndef cross_entropy_loss(self, logits, labels):\\nreturn F.nll_loss(logits, labels)\\n\\ndef accuracy(self, logits, labels):\\n_, predicted = torch.max(logits.data, 1)\\ncorrect = (predicted == labels).sum().item()\\naccuracy = correct / len(labels)\\nreturn torch.tensor(accuracy)\\n\\ndef 
training_step(self, train_batch, batch_idx):\\nx, y = train_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\n\\nself.log("ptl/train_loss", loss)\\nself.log("ptl/train_accuracy", accuracy)\\nreturn loss\\n\\ndef validation_step(self, val_batch, batch_idx):\\nx, y = val_batch\\nlogits = self.forward(x)\\nloss = self.cross_entropy_loss(logits, y)\\naccuracy = self.accuracy(logits, y)\\nreturn {"val_loss": loss, "val_accuracy": accuracy}\\n\\ndef validation_epoch_end(self, outputs):\\navg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()\\navg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()\\nself.log("ptl/val_loss", avg_loss)\\nself.log("ptl/val_accuracy", avg_acc)\\n\\n\\n@staticmethod\\ndef download_data(data_dir):\\ntransform = transforms.Compose([\\ntransforms.ToTensor(),\\ntransforms.Normalize((0.1307, ), (0.3081, ))\\n])\\nreturn MNIST(data_dir, train=True, download=True, transform=transform)\\n\\ndef prepare_data(self):\\nmnist_train = self.download_data(self.data_dir)\\n\\nself.mnist_train, self.mnist_val = random_split(\\nmnist_train, [55000, 5000])\\n\\ndef train_dataloader(self):\\nreturn DataLoader(self.mnist_train, batch_size=int(self.batch_size))\\n\\ndef val_dataloader(self):\\nreturn DataLoader(self.mnist_val, batch_size=int(self.batch_size))\\n\\ndef configure_optimizers(self):\\noptimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\\nreturn optimizer\\n\\ndef train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):\\nmodel = LightningMNISTClassifier(config, data_dir)\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=num_gpus,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCallback(\\n{\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\non="validation_end")\\n])\\ntrainer.fit(model)\\n\\n\\ndef 
train_mnist_tune_checkpoint(config,\\ncheckpoint_dir=None,\\ndata_dir=None,\\nnum_epochs=10):\\n\\ntrainer = pl.Trainer(\\nmax_epochs=num_epochs,\\ngpus=0,\\nlogger=TensorBoardLogger(\\nsave_dir=tune.get_trial_dir(), name="", version="."),\\nprogress_bar_refresh_rate=0,\\ncallbacks=[\\nTuneReportCheckpointCallback(\\nmetrics={\\n"loss": "ptl/val_loss",\\n"mean_accuracy": "ptl/val_accuracy"\\n},\\nfilename="checkpoint",\\non="validation_end")\\n])\\nif checkpoint_dir:\\nckpt = pl_load(\\nos.path.join(checkpoint_dir, "checkpoint"),\\nmap_location=lambda storage, loc: storage)\\nmodel = LightningMNISTClassifier._load_model_state(ckpt, config=config)\\ntrainer.current_epoch = ckpt["epoch"]\\nelse:\\nmodel = LightningMNISTClassifier(config=config, data_dir=data_dir)\\n\\ntrainer.fit(model)\\n\\n\\ndef tune_mnist_pbt(num_samples=64, num_epochs=3):\\n\\ndata_dir = mkdtemp(prefix="mnist_data_")\\n\\nLightningMNISTClassifier.download_data(data_dir)\\n\\nray.init(address=\\'auto\\')\\n\\nconfig = {\\n"layer_1_size": tune.choice([32, 64, 128]),\\n"layer_2_size": tune.choice([64, 128, 256]),\\n"lr": 1e-3,\\n"batch_size": 64,\\n}\\n\\nscheduler = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="loss",\\nmode="min",\\nperturbation_interval=1, # setting this to 1 so that the sync issue happens immediately after the first epoch\\nhyperparam_mutations={\\n"lr": tune.loguniform(1e-4, 1e-1),\\n"batch_size": [32, 64, 128]\\n})\\n\\nreporter = CLIReporter(\\nparameter_columns=["layer_1_size", "layer_2_size", "lr", "batch_size"],\\nmetric_columns=["loss", "mean_accuracy", "training_iteration"])\\n\\nsync_config = tune.SyncConfig(\\nsync_to_driver=DockerSyncer\\n)\\n\\ntune.run(\\npartial(\\ntrain_mnist_tune_checkpoint,\\ndata_dir=data_dir,\\nnum_epochs=num_epochs),\\nresources_per_trial={\\n"cpu": 1,\\n"gpu": 
0\\n},\\nconfig=config,\\nsync_config=sync_config,\\nnum_samples=num_samples,\\nscheduler=scheduler,\\nprogress_reporter=reporter,\\nfail_fast=True,\\nqueue_trials=True,\\nreuse_actors=True,\\nname="tune_mnist_pbt")\\n\\nshutil.rmtree(data_dir)\\n\\nif __name__ == \\'__main__\\':\\ntune_mnist_pbt()'}]
|
2020-11-19 19:42:27,243 VINFO updater.py:460 -- `rsync`ed /root/ray_results/tune_mnist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (remote) to /root/ray_results/tune_mn
ist_pbt/DEFAULT_1d092_00006_6_layer_1_size=128,layer_2_size=64_2020-11-19_19-41-58/ (local)
2020-11-19 19:42:27,247 ERROR trial_runner.py:868 -- Trial DEFAULT_1d092_00006: Error handling checkpoint /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 864, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/root/anaconda3/lib/python3.7/site-packages/ray/tune/trial.py", line 498, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial DEFAULT_1d092_00006: Checkpoint path /root/ray_results/2020-11-19_19-42-24w11y5gdl/checkpoint_1/ not found after successful sync down.
2020-11-19 19:42:27,250 WARNING util.py:140 -- The `process_trial_save` operation took 1.05049467086792 seconds to complete, which may be a performance bottleneck.
|
ray.tune.error.TuneError
|
def _bootstrap_config(config: Dict[str, Any],
                      no_config_cache: bool = False) -> Dict[str, Any]:
    """Resolve a cluster config through its node provider, caching on disk.

    The fully resolved config is cached in the system temp directory, keyed
    by a SHA-1 of the prepared input config, so repeated CLI invocations can
    skip provider round-trips. Pass ``no_config_cache=True`` to force a
    fresh resolution (the cache file is still rewritten afterwards).

    Raises:
        NotImplementedError: if the provider type has no registered importer.
    """
    config = prepare_config(config)
    # Content-addressed cache key: identical prepared configs map to the
    # same temp file.
    hasher = hashlib.sha1()
    hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
    cache_key = os.path.join(tempfile.gettempdir(),
                             "ray-config-{}".format(hasher.hexdigest()))
    if os.path.exists(cache_key) and not no_config_cache:
        config_cache = json.loads(open(cache_key).read())
        # Only trust cache entries written by this launcher version.
        if config_cache.get("_version", -1) == CONFIG_CACHE_VERSION:
            # todo: is it fine to re-resolve? afaik it should be.
            # we can have migrations otherwise or something
            # but this seems overcomplicated given that resolving is
            # relatively cheap
            try_reload_log_state(config_cache["config"]["provider"],
                                 config_cache.get("provider_log_info"))
            if log_once("_printed_cached_config_warning"):
                cli_logger.verbose_warning(
                    "Loaded cached provider configuration "
                    "from " + cf.bold("{}"), cache_key)
                if cli_logger.verbosity == 0:
                    cli_logger.warning("Loaded cached provider configuration")
                cli_logger.warning(
                    "If you experience issues with "
                    "the cloud provider, try re-running "
                    "the command with {}.", cf.bold("--no-config-cache"))
            return config_cache["config"]
        else:
            cli_logger.warning(
                "Found cached cluster config "
                "but the version " + cf.bold("{}") + " "
                "(expected " + cf.bold("{}") + ") does not match.\n"
                "This is normal if cluster launcher was updated.\n"
                "Config will be re-resolved.",
                config_cache.get("_version", "none"), CONFIG_CACHE_VERSION)
    importer = _NODE_PROVIDERS.get(config["provider"]["type"])
    if not importer:
        raise NotImplementedError("Unsupported provider {}".format(
            config["provider"]))
    provider_cls = importer(config["provider"])
    cli_logger.print("Checking {} environment settings",
                     _PROVIDER_PRETTY_NAMES.get(config["provider"]["type"]))
    # Resource autodetection is best-effort: a failure here (e.g. an
    # outdated cloud SDK) must not abort cluster launch.
    try:
        config = provider_cls.fillout_available_node_types_resources(config)
    except Exception as exc:
        if cli_logger.verbosity > 2:
            logger.exception("Failed to autodetect node resources.")
        else:
            cli_logger.warning(
                f"Failed to autodetect node resources: {str(exc)}. "
                "You can see full stack trace with higher verbosity.")
    # NOTE: if `resources` field is missing, validate_config for non-AWS will
    # fail (the schema error will ask the user to manually fill the resources)
    # as we currently support autofilling resources for AWS instances only.
    validate_config(config)
    resolved_config = provider_cls.bootstrap_config(config)
    if not no_config_cache:
        with open(cache_key, "w") as f:
            config_cache = {
                "_version": CONFIG_CACHE_VERSION,
                "provider_log_info": try_get_log_state(config["provider"]),
                "config": resolved_config
            }
            f.write(json.dumps(config_cache))
    return resolved_config
|
def _bootstrap_config(config: Dict[str, Any],
                      no_config_cache: bool = False) -> Dict[str, Any]:
    """Resolve a cluster config through its node provider, caching on disk.

    The fully resolved config is cached in the system temp directory, keyed
    by a SHA-1 of the prepared input config, so repeated CLI invocations can
    skip provider round-trips. Pass ``no_config_cache=True`` to force a
    fresh resolution (the cache file is still rewritten afterwards).

    Raises:
        NotImplementedError: if the provider type has no registered importer.
    """
    config = prepare_config(config)
    # Content-addressed cache key: identical prepared configs map to the
    # same temp file.
    hasher = hashlib.sha1()
    hasher.update(json.dumps([config], sort_keys=True).encode("utf-8"))
    cache_key = os.path.join(tempfile.gettempdir(),
                             "ray-config-{}".format(hasher.hexdigest()))
    if os.path.exists(cache_key) and not no_config_cache:
        config_cache = json.loads(open(cache_key).read())
        # Only trust cache entries written by this launcher version.
        if config_cache.get("_version", -1) == CONFIG_CACHE_VERSION:
            # todo: is it fine to re-resolve? afaik it should be.
            # we can have migrations otherwise or something
            # but this seems overcomplicated given that resolving is
            # relatively cheap
            try_reload_log_state(config_cache["config"]["provider"],
                                 config_cache.get("provider_log_info"))
            if log_once("_printed_cached_config_warning"):
                cli_logger.verbose_warning(
                    "Loaded cached provider configuration "
                    "from " + cf.bold("{}"), cache_key)
                if cli_logger.verbosity == 0:
                    cli_logger.warning("Loaded cached provider configuration")
                cli_logger.warning(
                    "If you experience issues with "
                    "the cloud provider, try re-running "
                    "the command with {}.", cf.bold("--no-config-cache"))
            return config_cache["config"]
        else:
            cli_logger.warning(
                "Found cached cluster config "
                "but the version " + cf.bold("{}") + " "
                "(expected " + cf.bold("{}") + ") does not match.\n"
                "This is normal if cluster launcher was updated.\n"
                "Config will be re-resolved.",
                config_cache.get("_version", "none"), CONFIG_CACHE_VERSION)
    importer = _NODE_PROVIDERS.get(config["provider"]["type"])
    if not importer:
        raise NotImplementedError("Unsupported provider {}".format(
            config["provider"]))
    provider_cls = importer(config["provider"])
    cli_logger.print("Checking {} environment settings",
                     _PROVIDER_PRETTY_NAMES.get(config["provider"]["type"]))
    # FIX: resource autodetection is best-effort. Previously an exception
    # here (e.g. AttributeError from an outdated botocore that lacks
    # `describe_instance_types`) aborted `ray up` entirely. Warn and
    # continue instead; validate_config below will surface a clear schema
    # error if `resources` ends up missing for non-AWS providers.
    try:
        config = provider_cls.fillout_available_node_types_resources(config)
    except Exception as exc:
        if cli_logger.verbosity > 2:
            logger.exception("Failed to autodetect node resources.")
        else:
            cli_logger.warning(
                f"Failed to autodetect node resources: {str(exc)}. "
                "You can see full stack trace with higher verbosity.")
    # NOTE: if `resources` field is missing, validate_config for non-AWS will
    # fail (the schema error will ask the user to manually fill the resources)
    # as we currently support autofilling resources for AWS instances only.
    validate_config(config)
    resolved_config = provider_cls.bootstrap_config(config)
    if not no_config_cache:
        with open(cache_key, "w") as f:
            config_cache = {
                "_version": CONFIG_CACHE_VERSION,
                "provider_log_info": try_get_log_state(config["provider"]),
                "config": resolved_config
            }
            f.write(json.dumps(config_cache))
    return resolved_config
|
[{'piece_type': 'other', 'piece_content': '(base) ➜ tune git:(fix-kubernetes-dep) ✗ pip list | grep boto\\nboto 2.49.0\\nboto3 1.4.8\\nbotocore 1.8.50'}, {'piece_type': 'error message', 'piece_content': '(base) ➜ tune git:(fix-kubernetes-dep) ✗ ray up $CFG -y\\nCluster: basic\\n\\nChecking AWS environment settings\\nTraceback (most recent call last):\\nFile "/Users/rliaw/miniconda3/bin/ray", line 8, in <module>\\nsys.exit(main())\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/scripts/scripts.py", line 1471, in main\\nreturn cli()\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__\\nreturn self.main(*args, **kwargs)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main\\nrv = self.invoke(ctx)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke\\nreturn _process_result(sub_ctx.command.invoke(sub_ctx))\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke\\nreturn ctx.invoke(self.callback, **ctx.params)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke\\nreturn callback(*args, **kwargs)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/scripts/scripts.py", line 860, in up\\nuse_login_shells=use_login_shells)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 213, in create_or_update_cluster\\nconfig = _bootstrap_config(config, no_config_cache=no_config_cache)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 274, in _bootstrap_config\\nconfig = provider_cls.fillout_available_node_types_resources(config)\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py", line 487, in fillout_available_node_types_resources\\ncluster_config["provider"].get("aws_credentials"))\\nFile 
"/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py", line 76, in list_ec2_instances\\ninstance_types = ec2.describe_instance_types()\\nFile "/Users/rliaw/miniconda3/lib/python3.7/site-packages/botocore/client.py", line 565, in __getattr__\\nself.__class__.__name__, item)\\nAttributeError: \\'EC2\\' object has no attribute \\'describe_instance_types\\''}]
|
(base) ➜ tune git:(fix-kubernetes-dep) ✗ ray up $CFG -y
Cluster: basic
Checking AWS environment settings
Traceback (most recent call last):
File "/Users/rliaw/miniconda3/bin/ray", line 8, in <module>
sys.exit(main())
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/scripts/scripts.py", line 1471, in main
return cli()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/scripts/scripts.py", line 860, in up
use_login_shells=use_login_shells)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 213, in create_or_update_cluster
config = _bootstrap_config(config, no_config_cache=no_config_cache)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/commands.py", line 274, in _bootstrap_config
config = provider_cls.fillout_available_node_types_resources(config)
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py", line 487, in fillout_available_node_types_resources
cluster_config["provider"].get("aws_credentials"))
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/ray/autoscaler/_private/aws/node_provider.py", line 76, in list_ec2_instances
instance_types = ec2.describe_instance_types()
File "/Users/rliaw/miniconda3/lib/python3.7/site-packages/botocore/client.py", line 565, in __getattr__
self.__class__.__name__, item)
AttributeError: 'EC2' object has no attribute 'describe_instance_types'
|
AttributeError
|
async def get_node_workers(cls, node_id):
    """Build the worker list for one node, merging physical and core stats.

    Joins per-pid data from several DataSource tables: physical process
    stats, raylet core-worker stats, and log/error entries collected per
    (node_ip, pid).
    """
    workers = []
    node_ip = DataSource.node_id_to_ip[node_id]
    # Logs/errors are keyed by node IP, then by pid as a *string*.
    node_logs = DataSource.ip_and_pid_to_logs.get(node_ip, {})
    node_errs = DataSource.ip_and_pid_to_errors.get(node_ip, {})
    node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
    node_stats = DataSource.node_stats.get(node_id, {})
    # Merge coreWorkerStats (node stats) to workers (node physical stats)
    pid_to_worker_stats = {}
    pid_to_language = {}
    pid_to_job_id = {}
    for core_worker_stats in node_stats.get("coreWorkersStats", []):
        pid = core_worker_stats["pid"]
        # A pid can report multiple core-worker stats entries.
        pid_to_worker_stats.setdefault(pid, []).append(core_worker_stats)
        pid_to_language[pid] = core_worker_stats["language"]
        pid_to_job_id[pid] = core_worker_stats["jobId"]
    for worker in node_physical_stats.get("workers", []):
        # Shallow-copy so enrichment does not mutate the cached stats.
        worker = dict(worker)
        pid = worker["pid"]
        worker["logCount"] = len(node_logs.get(str(pid), []))
        worker["errorCount"] = len(node_errs.get(str(pid), []))
        worker["coreWorkerStats"] = pid_to_worker_stats.get(pid, [])
        worker["language"] = pid_to_language.get(
            pid, dashboard_consts.DEFAULT_LANGUAGE)
        worker["jobId"] = pid_to_job_id.get(
            pid, dashboard_consts.DEFAULT_JOB_ID)
        # Let subscribers post-process each worker payload.
        await GlobalSignals.worker_info_fetched.send(node_id, worker)
        workers.append(worker)
    return workers
|
async def get_node_workers(cls, node_id):
    """Build the worker list for one node, merging physical and core stats.

    Joins per-pid data from several DataSource tables: physical process
    stats, raylet core-worker stats, and log/error entries collected per
    (node_ip, pid).
    """
    # FIX: removed leftover debug `logger.error(...)` calls that dumped the
    # full log/error tables and every pid at ERROR level on each refresh.
    workers = []
    node_ip = DataSource.node_id_to_ip[node_id]
    # Logs/errors are keyed by node IP, then by pid as a *string*.
    node_logs = DataSource.ip_and_pid_to_logs.get(node_ip, {})
    node_errs = DataSource.ip_and_pid_to_errors.get(node_ip, {})
    node_physical_stats = DataSource.node_physical_stats.get(node_id, {})
    node_stats = DataSource.node_stats.get(node_id, {})
    # Merge coreWorkerStats (node stats) to workers (node physical stats)
    pid_to_worker_stats = {}
    pid_to_language = {}
    pid_to_job_id = {}
    for core_worker_stats in node_stats.get("coreWorkersStats", []):
        pid = core_worker_stats["pid"]
        # A pid can report multiple core-worker stats entries.
        pid_to_worker_stats.setdefault(pid, []).append(core_worker_stats)
        pid_to_language[pid] = core_worker_stats["language"]
        pid_to_job_id[pid] = core_worker_stats["jobId"]
    for worker in node_physical_stats.get("workers", []):
        # Shallow-copy so enrichment does not mutate the cached stats.
        worker = dict(worker)
        pid = worker["pid"]
        worker["logCount"] = len(node_logs.get(str(pid), []))
        worker["errorCount"] = len(node_errs.get(str(pid), []))
        worker["coreWorkerStats"] = pid_to_worker_stats.get(pid, [])
        worker["language"] = pid_to_language.get(
            pid, dashboard_consts.DEFAULT_LANGUAGE)
        worker["jobId"] = pid_to_job_id.get(
            pid, dashboard_consts.DEFAULT_JOB_ID)
        # Let subscribers post-process each worker payload.
        await GlobalSignals.worker_info_fetched.send(node_id, worker)
        workers.append(worker)
    return workers
|
[{'piece_type': 'error message', 'piece_content': 'Error: Traceback (most recent call last): File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 351, in _update_cache response = task.result()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 89, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in get_all_node_details for node_id in DataSource.nodes.keys()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in <listcomp> for node_id in DataSource.nodes.keys()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 149, in get_node_info node_stats["viewData"], KeyError: \\'viewData\\''}]
|
Error: Traceback (most recent call last): File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 351, in _update_cache response = task.result()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 89, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in get_all_node_details for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in <listcomp> for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 149, in get_node_info node_stats["viewData"], KeyError: 'viewData'
|
KeyError
|
async def get_node_info(cls, node_id):
    """Assemble the full dashboard payload for a single node.

    Combines physical stats, raylet stats, GcsNodeInfo, actors, workers,
    and per-node log/error counts into one dict.
    """
    # Shallow-copy cached entries so the merges below do not mutate
    # DataSource state.
    node_physical_stats = dict(
        DataSource.node_physical_stats.get(node_id, {}))
    node_stats = dict(DataSource.node_stats.get(node_id, {}))
    node = DataSource.nodes.get(node_id, {})
    node_ip = DataSource.node_id_to_ip.get(node_id)
    # Merge node log count information into the payload
    log_info = DataSource.ip_and_pid_to_logs.get(node_ip, {})
    node_log_count = 0
    for entries in log_info.values():
        node_log_count += len(entries)
    error_info = DataSource.ip_and_pid_to_errors.get(node_ip, {})
    node_err_count = 0
    for entries in error_info.values():
        node_err_count += len(entries)
    # Per-worker stats are surfaced separately (see get_node_workers),
    # so drop them from the node-level payload.
    node_stats.pop("coreWorkersStats", None)
    # "viewData" can be missing from node_stats; default to empty.
    view_data = node_stats.get("viewData", [])
    ray_stats = cls._extract_view_data(
        view_data,
        {"object_store_used_memory", "object_store_available_memory"})
    node_info = node_physical_stats
    # Merge node stats to node physical stats under raylet
    node_info["raylet"] = node_stats
    node_info["raylet"].update(ray_stats)
    # Merge GcsNodeInfo to node physical stats
    node_info["raylet"].update(node)
    # Merge actors to node physical stats
    node_info["actors"] = await cls.get_node_actors(node_id)
    # Update workers to node physical stats
    node_info["workers"] = DataSource.node_workers.get(node_id, [])
    node_info["logCount"] = node_log_count
    node_info["errorCount"] = node_err_count
    await GlobalSignals.node_info_fetched.send(node_info)
    return node_info
|
async def get_node_info(cls, node_id):
    """Assemble the full dashboard payload for a single node.

    Combines physical stats, raylet stats, GcsNodeInfo, actors, workers,
    and per-node log/error counts into one dict.
    """
    # Shallow-copy cached entries so the merges below do not mutate
    # DataSource state.
    node_physical_stats = dict(
        DataSource.node_physical_stats.get(node_id, {}))
    node_stats = dict(DataSource.node_stats.get(node_id, {}))
    node = DataSource.nodes.get(node_id, {})
    node_ip = DataSource.node_id_to_ip.get(node_id)
    # Merge node log count information into the payload
    log_info = DataSource.ip_and_pid_to_logs.get(node_ip, {})
    node_log_count = 0
    for entries in log_info.values():
        node_log_count += len(entries)
    error_info = DataSource.ip_and_pid_to_errors.get(node_ip, {})
    node_err_count = 0
    for entries in error_info.values():
        node_err_count += len(entries)
    # Per-worker stats are surfaced separately (see get_node_workers),
    # so drop them from the node-level payload.
    node_stats.pop("coreWorkersStats", None)
    # FIX: "viewData" may be absent from node_stats (e.g. before the first
    # stats report for this node lands); indexing it directly raised
    # KeyError: 'viewData' and broke the whole nodes view. Default to [].
    view_data = node_stats.get("viewData", [])
    ray_stats = cls._extract_view_data(
        view_data,
        {"object_store_used_memory", "object_store_available_memory"})
    node_info = node_physical_stats
    # Merge node stats to node physical stats under raylet
    node_info["raylet"] = node_stats
    node_info["raylet"].update(ray_stats)
    # Merge GcsNodeInfo to node physical stats
    node_info["raylet"].update(node)
    # Merge actors to node physical stats
    node_info["actors"] = await cls.get_node_actors(node_id)
    # Update workers to node physical stats
    node_info["workers"] = DataSource.node_workers.get(node_id, [])
    node_info["logCount"] = node_log_count
    node_info["errorCount"] = node_err_count
    await GlobalSignals.node_info_fetched.send(node_info)
    return node_info
|
[{'piece_type': 'error message', 'piece_content': 'Error: Traceback (most recent call last): File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 351, in _update_cache response = task.result()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 89, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in get_all_node_details for node_id in DataSource.nodes.keys()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in <listcomp> for node_id in DataSource.nodes.keys()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 149, in get_node_info node_stats["viewData"], KeyError: \\'viewData\\''}]
|
Error: Traceback (most recent call last): File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/utils.py", line 351, in _update_cache response = task.result()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/modules/stats_collector/stats_collector_head.py", line 89, in get_all_nodes all_node_details = await DataOrganizer.get_all_node_details()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in get_all_node_details for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 201, in <listcomp> for node_id in DataSource.nodes.keys()
File "/root/anaconda3/lib/python3.7/site-packages/ray/new_dashboard/datacenter.py", line 149, in get_node_info node_stats["viewData"], KeyError: 'viewData'
|
KeyError
|
def unflatten_dict(dt, delimiter="/"):
    """Expand delimiter-joined keys of `dt` into nested mappings.

    The inverse of flattening: ``{"a/b": 1}`` becomes ``{"a": {"b": 1}}``.
    List values are not expanded. The result and every nested mapping are
    created with the same type as the input mapping.
    """
    mapping_cls = type(dt)
    result = mapping_cls()
    for flat_key, value in dt.items():
        *parents, leaf = flat_key.split(delimiter)
        node = result
        for part in parents:
            # Create intermediate mappings on demand, reusing existing ones.
            if part not in node:
                node[part] = mapping_cls()
            node = node[part]
        node[leaf] = value
    return result
|
def unflatten_dict(dt, delimiter="/"):
    """Expand delimiter-joined keys of `dt` into nested mappings.

    The inverse of flattening: ``{"a/b": 1}`` becomes ``{"a": {"b": 1}}``.
    Does not support unflattening lists.

    FIX: the previous ``defaultdict(dict)`` only auto-created mappings at
    the top level, so keys nested more than one level deep (e.g. "a/b/c")
    raised ``KeyError`` on the inner plain dict. Intermediate mappings are
    now created explicitly at every depth, and the result preserves the
    input mapping's type instead of collapsing it to ``dict``.
    """
    dict_type = type(dt)
    out = dict_type()
    for key, val in dt.items():
        path = key.split(delimiter)
        item = out
        for k in path[:-1]:
            # setdefault creates missing intermediates at any depth.
            item = item.setdefault(k, dict_type())
        item[path[-1]] = val
    return out
|
[{'piece_type': 'error message', 'piece_content': '$ python ./python/ray/tune/examples/bohb_example.py\\nFile descriptor limit 256 is too low for production servers and may result in connection errors. At least 8192 is recommended. --- Fix with \\'ulimit -n 8192\\'\\n2020-11-11 20:38:44,944 INFO services.py:1110 -- View the Ray dashboard at http://127.0.0.1:8265\\nTraceback (most recent call last):\\nFile "./python/ray/tune/examples/bohb_example.py", line 87, in <module>\\nstop={"training_iteration": 100})\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/tune.py", line 416, in run\\nrunner.step()\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 380, in step\\nnext_trial = self._get_next_trial() # blocking\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 479, in _get_next_trial\\nself._update_trial_queue(blocking=wait_for_trial)\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 855, in _update_trial_queue\\ntrial = self._search_alg.next_trial()\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/search_generator.py", line 114, in next_trial\\nself._experiment.dir_name)\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/search_generator.py", line 121, in create_trial_if_possible\\nsuggested_config = self.searcher.suggest(trial_id)\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/bohb.py", line 175, in suggest\\nreturn unflatten_dict(config)\\nFile "/Users/hartikainen/github/ray-project/ray/python/ray/tune/utils/util.py", line 276, in unflatten_dict\\nitem = item[k]\\nKeyError: \\'b\\''}]
|
$ python ./python/ray/tune/examples/bohb_example.py
File descriptor limit 256 is too low for production servers and may result in connection errors. At least 8192 is recommended. --- Fix with 'ulimit -n 8192'
2020-11-11 20:38:44,944 INFO services.py:1110 -- View the Ray dashboard at http://127.0.0.1:8265
Traceback (most recent call last):
File "./python/ray/tune/examples/bohb_example.py", line 87, in <module>
stop={"training_iteration": 100})
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/tune.py", line 416, in run
runner.step()
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 380, in step
next_trial = self._get_next_trial() # blocking
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 479, in _get_next_trial
self._update_trial_queue(blocking=wait_for_trial)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/trial_runner.py", line 855, in _update_trial_queue
trial = self._search_alg.next_trial()
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/search_generator.py", line 114, in next_trial
self._experiment.dir_name)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/search_generator.py", line 121, in create_trial_if_possible
suggested_config = self.searcher.suggest(trial_id)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/suggest/bohb.py", line 175, in suggest
return unflatten_dict(config)
File "/Users/hartikainen/github/ray-project/ray/python/ray/tune/utils/util.py", line 276, in unflatten_dict
item = item[k]
KeyError: 'b'
|
KeyError
|
def find_redis_address(address=None):
    """Scan running processes for Redis addresses advertised by raylets.

    Looks only at processes whose executable name contains "raylet" and
    collects the values of their `--redis-address=` arguments. If `address`
    is given, only that address is collected when present.

    Returns:
        A set of "host:port" strings (possibly empty).
    """
    redis_addresses = set()
    for pid in psutil.pids():
        try:
            # HACK: Workaround for UNIX idiosyncrasy
            # Normally, cmdline() is supposed to return the argument list.
            # But it in some cases (such as when setproctitle is called),
            # an arbitrary string resembling a command-line is stored in
            # the first argument.
            # Explanation: https://unix.stackexchange.com/a/432681
            # More info: https://github.com/giampaolo/psutil/issues/1179
            cmdline = psutil.Process(pid).cmdline()
            # NOTE(kfstorm): To support Windows, we can't use
            # `os.path.basename(cmdline[0]) == "raylet"` here.
            if not cmdline or "raylet" not in os.path.basename(cmdline[0]):
                continue
            for arglist in cmdline:
                # Given we're merely seeking --redis-address, we just split
                # every argument on spaces for now.
                for arg in arglist.split(" "):
                    # TODO(ekl): Find a robust solution for locating Redis.
                    if not arg.startswith("--redis-address="):
                        continue
                    proc_addr = arg.split("=")[1]
                    if address is not None and address != proc_addr:
                        continue
                    redis_addresses.add(proc_addr)
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            pass
    return redis_addresses
|
def find_redis_address(address=None):
    """Scan running processes for Redis addresses advertised by raylets.

    FIX: previously every process's cmdline was searched, so any process
    that merely *mentioned* `--redis-address=` (an editor, a shell, the
    `ray start` command itself) was mistaken for a live Ray instance,
    producing spurious "Ray is already running" errors. Only processes
    whose executable name contains "raylet" are considered now.

    Args:
        address: if given, only collect this specific "host:port".

    Returns:
        A set of "host:port" strings (possibly empty).
    """
    pids = psutil.pids()
    redis_addresses = set()
    for pid in pids:
        try:
            proc = psutil.Process(pid)
            # HACK: Workaround for UNIX idiosyncrasy
            # Normally, cmdline() is supposed to return the argument list.
            # But it in some cases (such as when setproctitle is called),
            # an arbitrary string resembling a command-line is stored in
            # the first argument.
            # Explanation: https://unix.stackexchange.com/a/432681
            # More info: https://github.com/giampaolo/psutil/issues/1179
            cmdline = proc.cmdline()
            # NOTE: To support Windows, we can't use
            # `os.path.basename(cmdline[0]) == "raylet"` here.
            if len(cmdline) > 0 and "raylet" in os.path.basename(cmdline[0]):
                for arglist in cmdline:
                    # Given we're merely seeking --redis-address, we just
                    # split every argument on spaces for now.
                    for arg in arglist.split(" "):
                        # TODO(ekl): Find a robust solution for locating
                        # Redis.
                        if arg.startswith("--redis-address="):
                            proc_addr = arg.split("=")[1]
                            if address is not None and address != proc_addr:
                                continue
                            redis_addresses.add(proc_addr)
        except psutil.AccessDenied:
            pass
        except psutil.NoSuchProcess:
            pass
    return redis_addresses
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/home/swang/anaconda3/envs/ray-36/bin/ray", line 8, in <module>\\nsys.exit(main())\\nFile "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1462, in main\\nreturn cli()\\nFile "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 829, in __call__\\nreturn self.main(*args, **kwargs)\\nFile "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 782, in main\\nrv = self.invoke(ctx)\\nFile "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 1259, in invoke\\nreturn _process_result(sub_ctx.command.invoke(sub_ctx))\\nFile "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 1066, in invoke\\nreturn ctx.invoke(self.callback, **ctx.params)\\nFile "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 610, in invoke\\nreturn callback(*args, **kwargs)\\nFile "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/ray/scripts/scripts.py", line 479, in start\\nf"Ray is already running at {default_address}. "\\nConnectionError: Ray is already running at 192.168.1.46:6379. Please specify a different port using the `--port` command to `ray start`.'}]
|
Traceback (most recent call last):
File "/home/swang/anaconda3/envs/ray-36/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1462, in main
return cli()
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/swang/anaconda3/envs/ray-36/lib/python3.6/site-packages/ray/scripts/scripts.py", line 479, in start
f"Ray is already running at {default_address}. "
ConnectionError: Ray is already running at 192.168.1.46:6379. Please specify a different port using the `--port` command to `ray start`.
|
ConnectionError
|
def _run_helper(self,
                final_cmd,
                with_output=False,
                exit_on_fail=False,
                silent=False):
    """Run a command that was already setup with SSH and `bash` settings.

    Args:
        final_cmd (List[str]):
            Full command to run. Should include SSH options and other
            processing that we do.
        with_output (bool):
            If `with_output` is `True`, command stdout and stderr
            will be captured and returned.
        exit_on_fail (bool):
            If `exit_on_fail` is `True`, the process will exit
            if the command fails (exits with a code other than 0).
        silent (bool):
            Forwarded to `run_cmd_redirected` on the new-logging path.

    Returns:
        Captured output when `with_output` is True, otherwise the return
        value of the underlying process-runner call.

    Raises:
        ProcessRunnerError if using new log style and disabled
        login shells.
        click.ClickException if using login shells.
    """
    try:
        # For now, if the output is needed we just skip the new logic.
        # In the future we could update the new logic to support
        # capturing output, but it is probably not needed.
        if not cli_logger.old_style and not with_output:
            return run_cmd_redirected(
                final_cmd,
                process_runner=self.process_runner,
                silent=silent,
                use_login_shells=is_using_login_shells())
        if with_output:
            return self.process_runner.check_output(final_cmd)
        else:
            return self.process_runner.check_call(final_cmd)
    except subprocess.CalledProcessError as e:
        # Render the command for error reporting without re-quoting:
        # `final_cmd` is already fully processed.
        joined_cmd = " ".join(final_cmd)
        if not cli_logger.old_style and not is_using_login_shells():
            raise ProcessRunnerError(
                "Command failed",
                "ssh_command_failed",
                code=e.returncode,
                command=joined_cmd)
        if exit_on_fail:
            raise click.ClickException(
                "Command failed:\n\n {}\n".format(joined_cmd)) from None
        else:
            fail_msg = "SSH command failed."
            if is_output_redirected():
                fail_msg += " See above for the output from the failure."
            # `from None` suppresses the noisy CalledProcessError chain.
            raise click.ClickException(fail_msg) from None
|
def _run_helper(self,
                final_cmd,
                with_output=False,
                exit_on_fail=False,
                silent=False):
    """Run a command that was already setup with SSH and `bash` settings.

    Args:
        final_cmd (List[str]):
            Full command to run. Should include SSH options and other
            processing that we do.
        with_output (bool):
            If `with_output` is `True`, command stdout and stderr
            will be captured and returned.
        exit_on_fail (bool):
            If `exit_on_fail` is `True`, the process will exit
            if the command fails (exits with a code other than 0).
        silent (bool):
            Forwarded to `run_cmd_redirected` on the new-logging path.

    Returns:
        Captured output when `with_output` is True, otherwise the return
        value of the underlying process-runner call.

    Raises:
        ProcessRunnerError if using new log style and disabled
        login shells.
        click.ClickException if using login shells.
    """
    try:
        # For now, if the output is needed we just skip the new logic.
        # In the future we could update the new logic to support
        # capturing output, but it is probably not needed.
        if not cli_logger.old_style and not with_output:
            return run_cmd_redirected(
                final_cmd,
                process_runner=self.process_runner,
                silent=silent,
                use_login_shells=is_using_login_shells())
        if with_output:
            return self.process_runner.check_output(final_cmd)
        else:
            return self.process_runner.check_call(final_cmd)
    except subprocess.CalledProcessError as e:
        # FIX: the previous `" ".join(final_cmd[:-1] + [quote(final_cmd[-1])])`
        # raised `TypeError: can only concatenate str (not "list") to str`
        # when a command runner (e.g. kubectl) supplied a non-string last
        # element, masking the real command failure. `final_cmd` is already
        # fully processed, so render it with a plain join instead.
        joined_cmd = " ".join(final_cmd)
        if not cli_logger.old_style and not is_using_login_shells():
            raise ProcessRunnerError(
                "Command failed",
                "ssh_command_failed",
                code=e.returncode,
                command=joined_cmd)
        if exit_on_fail:
            raise click.ClickException(
                "Command failed:\n\n {}\n".format(joined_cmd)) from None
        else:
            fail_msg = "SSH command failed."
            if is_output_redirected():
                fail_msg += " See above for the output from the failure."
            # `from None` suppresses the noisy CalledProcessError chain.
            raise click.ClickException(fail_msg) from None
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "XXX/lib/python3.7/site-packages/ray/autoscaler/command_runner.py", line 248, in run\\nself.process_runner.check_call(final_cmd, shell=True)\\nFile "/Users/mkoh/.pyenv/versions/3.7.7/lib/python3.7/subprocess.py", line 363, in check_call\\nraise CalledProcessError(retcode, cmd)\\nsubprocess.CalledProcessError: Command \\'kubectl -n nlp exec -it ray-head-22r7w -- bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (tmux kill-session -t flambe)\\'\\' returned non-zero exit status 1.\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nTraceback (most recent call last):\\nFile "/Users/mkoh/projects/flambe-internal/flambe/runner/run.py", line 86, in main\\nsave_to_db=args.save,\\nFile "/Users/mkoh/projects/flambe-internal/flambe/workflow/workflow.py", line 124, in run_remote_experiment\\nsave_to_db=save_to_db,\\nFile "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 263, in run\\nself.kill("flambe")\\nFile "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 163, in kill\\nself.exec_cluster(cmd=cmd)\\nFile "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 60, in exec_cluster\\nwith_output=with_output,\\nFile "XXX/lib/python3.7/site-packages/ray/autoscaler/commands.py", line 868, in exec_cluster\\nshutdown_after_run=shutdown_after_run)\\nFile "XXX/lib/python3.7/site-packages/ray/autoscaler/commands.py", line 919, in _exec\\nshutdown_after_run=shutdown_after_run)\\nFile "XXX/lib/python3.7/site-packages/ray/autoscaler/command_runner.py", line 252, in run\\n[quote(final_cmd[-1])])\\nTypeError: can only concatenate str (not "list") to str'}]
|
Traceback (most recent call last):
File "XXX/lib/python3.7/site-packages/ray/autoscaler/command_runner.py", line 248, in run
self.process_runner.check_call(final_cmd, shell=True)
File "/Users/mkoh/.pyenv/versions/3.7.7/lib/python3.7/subprocess.py", line 363, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command 'kubectl -n nlp exec -it ray-head-22r7w -- bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (tmux kill-session -t flambe)'' returned non-zero exit status 1.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/mkoh/projects/flambe-internal/flambe/runner/run.py", line 86, in main
save_to_db=args.save,
File "/Users/mkoh/projects/flambe-internal/flambe/workflow/workflow.py", line 124, in run_remote_experiment
save_to_db=save_to_db,
File "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 263, in run
self.kill("flambe")
File "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 163, in kill
self.exec_cluster(cmd=cmd)
File "/Users/mkoh/projects/flambe-internal/flambe/cluster/ray/ray_util/ray_cluster.py", line 60, in exec_cluster
with_output=with_output,
File "XXX/lib/python3.7/site-packages/ray/autoscaler/commands.py", line 868, in exec_cluster
shutdown_after_run=shutdown_after_run)
File "XXX/lib/python3.7/site-packages/ray/autoscaler/commands.py", line 919, in _exec
shutdown_after_run=shutdown_after_run)
File "XXX/lib/python3.7/site-packages/ray/autoscaler/command_runner.py", line 252, in run
[quote(final_cmd[-1])])
TypeError: can only concatenate str (not "list") to str
|
subprocess.CalledProcessError
|
def __init__(self,
space: Optional[Union[Dict, List[Dict]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
parameter_constraints: Optional[List] = None,
outcome_constraints: Optional[List] = None,
ax_client: Optional[AxClient] = None,
use_early_stopped_trials: Optional[bool] = None,
max_concurrent: Optional[int] = None):
assert ax is not None, "Ax must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(AxSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
self._ax = ax_client
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space)
self._space = space
self._parameter_constraints = parameter_constraints
self._outcome_constraints = outcome_constraints
self.max_concurrent = max_concurrent
self._objective_name = metric
self._parameters = []
self._live_trial_mapping = {}
if self._ax or self._space:
self.setup_experiment()
|
def __init__(self,
space: Optional[List[Dict]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
parameter_constraints: Optional[List] = None,
outcome_constraints: Optional[List] = None,
ax_client: Optional[AxClient] = None,
use_early_stopped_trials: Optional[bool] = None,
max_concurrent: Optional[int] = None):
assert ax is not None, "Ax must be installed!"
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(AxSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
self._ax = ax_client
self._space = space
self._parameter_constraints = parameter_constraints
self._outcome_constraints = outcome_constraints
self.max_concurrent = max_concurrent
self._objective_name = metric
self._parameters = []
self._live_trial_mapping = {}
if self._ax or self._space:
self.setup_experiment()
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
utility_kwargs: Optional[Dict] = None,
random_state: int = 42,
random_search_steps: int = 10,
verbose: int = 0,
patience: int = 5,
skip_duplicate: bool = True,
analysis: Optional[ExperimentAnalysis] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None):
"""Instantiate new BayesOptSearch object.
Args:
space (dict): Continuous search space.
Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
Must provide values for the keys `kind`, `kappa`, and `xi`.
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
patience (int): Must be > 0. If the optimizer suggests a set of
hyperparameters more than 'patience' times,
then the whole experiment will stop.
skip_duplicate (bool): If true, BayesOptSearch will not create
a trial with a previously seen set of hyperparameters. By
default, floating values will be reduced to a digit precision
of 5. You can override this by setting
``searcher.repeat_float_precision``.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
"""
assert byo is not None, (
"BayesOpt must be installed!. You can install BayesOpt with"
" the command: `pip install bayesian-optimization`.")
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
self._config_counter = defaultdict(int)
self._patience = patience
# int: Precision at which to hash values.
self.repeat_float_precision = 5
if self._patience <= 0:
raise ValueError("patience must be set to a value greater than 0!")
self._skip_duplicate = skip_duplicate
super(BayesOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
if utility_kwargs is None:
# The defaults arguments are the same
# as in the package BayesianOptimization
utility_kwargs = dict(
kind="ucb",
kappa=2.576,
xi=0.0,
)
if mode == "max":
self._metric_op = 1.
elif mode == "min":
self._metric_op = -1.
self._live_trial_mapping = {}
self._buffered_trial_results = []
self.random_search_trials = random_search_steps
self._total_random_search_trials = 0
self.utility = byo.UtilityFunction(**utility_kwargs)
# Registering the provided analysis, if given
if analysis is not None:
self.register_analysis(analysis)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space, join=True)
self._space = space
self._verbose = verbose
self._random_state = random_state
self.optimizer = None
if space:
self.setup_optimizer()
|
def __init__(self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
utility_kwargs: Optional[Dict] = None,
random_state: int = 42,
random_search_steps: int = 10,
verbose: int = 0,
patience: int = 5,
skip_duplicate: bool = True,
analysis: Optional[ExperimentAnalysis] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None):
"""Instantiate new BayesOptSearch object.
Args:
space (dict): Continuous search space.
Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
Must provide values for the keys `kind`, `kappa`, and `xi`.
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
patience (int): Must be > 0. If the optimizer suggests a set of
hyperparameters more than 'patience' times,
then the whole experiment will stop.
skip_duplicate (bool): If true, BayesOptSearch will not create
a trial with a previously seen set of hyperparameters. By
default, floating values will be reduced to a digit precision
of 5. You can override this by setting
``searcher.repeat_float_precision``.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
"""
assert byo is not None, (
"BayesOpt must be installed!. You can install BayesOpt with"
" the command: `pip install bayesian-optimization`.")
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
self._config_counter = defaultdict(int)
self._patience = patience
# int: Precision at which to hash values.
self.repeat_float_precision = 5
if self._patience <= 0:
raise ValueError("patience must be set to a value greater than 0!")
self._skip_duplicate = skip_duplicate
super(BayesOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
if utility_kwargs is None:
# The defaults arguments are the same
# as in the package BayesianOptimization
utility_kwargs = dict(
kind="ucb",
kappa=2.576,
xi=0.0,
)
if mode == "max":
self._metric_op = 1.
elif mode == "min":
self._metric_op = -1.
self._live_trial_mapping = {}
self._buffered_trial_results = []
self.random_search_trials = random_search_steps
self._total_random_search_trials = 0
self.utility = byo.UtilityFunction(**utility_kwargs)
# Registering the provided analysis, if given
if analysis is not None:
self.register_analysis(analysis)
self._space = space
self._verbose = verbose
self._random_state = random_state
self.optimizer = None
if space:
self.setup_optimizer()
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
    """Convert a Tune search space into BayesOpt parameter bounds.

    Args:
        spec: Tune search space (possibly nested); nested keys are
            flattened to "a/b/c"-style parameter names.
        join: If True, merge the resolved bounds back into the flattened
            spec and return that dict instead of the bounds alone.

    Returns:
        Dict mapping parameter name to a ``(lower, upper)`` bounds tuple
        (or the merged spec when ``join`` is True).

    Raises:
        ValueError: If the spec contains grid-search parameters, or a
            domain type other than Float.
    """
    spec = flatten_dict(spec, prevent_delimiter=True)
    resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

    # BayesOpt has no notion of grid search; fail fast rather than guess.
    if grid_vars:
        raise ValueError(
            "Grid search parameters cannot be automatically converted "
            "to a BayesOpt search space.")

    def _to_bounds(domain: Domain) -> Tuple[float, float]:
        # Quantization and custom samplers cannot be expressed in
        # BayesOpt's (lower, upper) bounds model — warn and drop them.
        sampler = domain.get_sampler()
        if isinstance(sampler, Quantized):
            logger.warning(
                "BayesOpt search does not support quantization. "
                "Dropped quantization.")
            sampler = sampler.get_sampler()

        if not isinstance(domain, Float):
            raise ValueError("BayesOpt does not support parameters of type "
                             "`{}`".format(type(domain).__name__))

        if domain.sampler is not None:
            logger.warning(
                "BayesOpt does not support specific sampling methods. "
                "The {} sampler will be dropped.".format(sampler))
        return (domain.lower, domain.upper)

    # Parameter name is e.g. "a/b/c" for nested dicts.
    bounds = {}
    for path, domain in domain_vars:
        bounds["/".join(path)] = _to_bounds(domain)

    if join:
        spec.update(bounds)
        bounds = spec

    return bounds
|
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
    """Convert a Tune search space into BayesOpt parameter bounds.

    Generalized (backward-compatibly) with a ``join`` flag: when True,
    the resolved bounds are merged back into the flattened spec so that
    non-searchable (resolved) entries are preserved alongside the bounds.

    Args:
        spec: Tune search space (possibly nested); nested keys are
            flattened to "a/b/c"-style parameter names.
        join: If True, return the flattened spec updated with the bounds
            instead of the bounds dict alone. Defaults to False, which
            preserves the original behavior.

    Returns:
        Dict mapping parameter name to a ``(lower, upper)`` bounds tuple
        (or the merged spec when ``join`` is True).

    Raises:
        ValueError: If the spec contains grid-search parameters, or a
            domain type other than Float.
    """
    spec = flatten_dict(spec, prevent_delimiter=True)
    resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

    # BayesOpt has no notion of grid search; fail fast rather than guess.
    if grid_vars:
        raise ValueError(
            "Grid search parameters cannot be automatically converted "
            "to a BayesOpt search space.")

    def resolve_value(domain: Domain) -> Tuple[float, float]:
        # Quantization and custom samplers cannot be expressed in
        # BayesOpt's (lower, upper) bounds model — warn and drop them.
        sampler = domain.get_sampler()
        if isinstance(sampler, Quantized):
            logger.warning(
                "BayesOpt search does not support quantization. "
                "Dropped quantization.")
            sampler = sampler.get_sampler()

        if isinstance(domain, Float):
            if domain.sampler is not None:
                logger.warning(
                    "BayesOpt does not support specific sampling methods. "
                    "The {} sampler will be dropped.".format(sampler))
            return (domain.lower, domain.upper)

        raise ValueError("BayesOpt does not support parameters of type "
                         "`{}`".format(type(domain).__name__))

    # Parameter name is e.g. "a/b/c" for nested dicts.
    bounds = {
        "/".join(path): resolve_value(domain)
        for path, domain in domain_vars
    }

    if join:
        spec.update(bounds)
        bounds = spec

    return bounds
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(self,
             space: Optional[Union[Dict,
                                   ConfigSpace.ConfigurationSpace]] = None,
             bohb_config: Optional[Dict] = None,
             max_concurrent: int = 10,
             metric: Optional[str] = None,
             mode: Optional[str] = None):
    """Set up the BOHB searcher state.

    Args:
        space: Search space, either a plain Tune dict (which is
            converted to a ConfigSpace) or an already-built
            ``ConfigSpace.ConfigurationSpace``.
        bohb_config: Extra keyword arguments forwarded to the
            underlying BOHB config generator.
        max_concurrent: Maximum number of concurrently running trials.
        metric: Metric to optimize.
        mode: Either "min" or "max".
    """
    # Import inside the constructor so the module can be imported
    # without HpBandSter installed.
    from hpbandster.optimizers.config_generators.bohb import BOHB
    assert BOHB is not None, "HpBandSter must be installed!"
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."

    self._max_concurrent = max_concurrent
    self.trial_to_params = {}
    self.running = set()
    self.paused = set()
    self._metric = metric
    self._bohb_config = bohb_config

    # A non-empty plain dict may still hold unresolved Tune sampling
    # primitives; convert those into a ConfigSpace before use.
    if space and isinstance(space, dict):
        _, domain_vars, grid_vars = parse_spec_vars(space)
        if domain_vars or grid_vars:
            logger.warning(
                UNRESOLVED_SEARCH_SPACE.format(
                    par="space", cls=type(self)))
            space = self.convert_search_space(space)

    self._space = space

    super(TuneBOHB, self).__init__(metric=self._metric, mode=mode)

    if self._space:
        self.setup_bohb()
|
def __init__(self,
             space: Optional[ConfigSpace.ConfigurationSpace] = None,
             bohb_config: Optional[Dict] = None,
             max_concurrent: int = 10,
             metric: Optional[str] = None,
             mode: Optional[str] = None):
    """Set up the BOHB searcher state.

    Args:
        space: Pre-built ``ConfigSpace.ConfigurationSpace`` to sample
            from.
        bohb_config: Extra keyword arguments forwarded to the
            underlying BOHB config generator.
        max_concurrent: Maximum number of concurrently running trials.
        metric: Metric to optimize.
        mode: Either "min" or "max".
    """
    # Import inside the constructor so the module can be imported
    # without HpBandSter installed.
    from hpbandster.optimizers.config_generators.bohb import BOHB
    assert BOHB is not None, "HpBandSter must be installed!"
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."

    self._max_concurrent = max_concurrent
    self.trial_to_params = {}
    self.running = set()
    self.paused = set()
    self._metric = metric
    self._bohb_config = bohb_config
    self._space = space

    super(TuneBOHB, self).__init__(metric=self._metric, mode=mode)

    # Defer optimizer construction until a space is known.
    if self._space:
        self.setup_bohb()
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(self,
optimizer: Optional[BlackboxOptimiser] = None,
domain: Optional[str] = None,
space: Optional[Union[Dict, List[Dict]]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[List]] = None,
evaluated_rewards: Optional[List] = None,
**kwargs):
assert dragonfly is not None, """dragonfly must be installed!
You can install Dragonfly with the command:
`pip install dragonfly-opt`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(DragonflySearch, self).__init__(
metric=metric, mode=mode, **kwargs)
self._opt_arg = optimizer
self._domain = domain
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space)
self._space = space
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._initial_points = []
self._live_trial_mapping = {}
self._opt = None
if isinstance(optimizer, BlackboxOptimiser):
if domain or space:
raise ValueError(
"If you pass an optimizer instance to dragonfly, do not "
"pass a `domain` or `space`.")
self._opt = optimizer
self.init_dragonfly()
elif self._space:
self.setup_dragonfly()
|
def __init__(self,
optimizer: Optional[BlackboxOptimiser] = None,
domain: Optional[str] = None,
space: Optional[List[Dict]] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[List]] = None,
evaluated_rewards: Optional[List] = None,
**kwargs):
assert dragonfly is not None, """dragonfly must be installed!
You can install Dragonfly with the command:
`pip install dragonfly-opt`."""
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
super(DragonflySearch, self).__init__(
metric=metric, mode=mode, **kwargs)
self._opt_arg = optimizer
self._domain = domain
self._space = space
self._points_to_evaluate = points_to_evaluate
self._evaluated_rewards = evaluated_rewards
self._initial_points = []
self._live_trial_mapping = {}
self._opt = None
if isinstance(optimizer, BlackboxOptimiser):
if domain or space:
raise ValueError(
"If you pass an optimizer instance to dragonfly, do not "
"pass a `domain` or `space`.")
self._opt = optimizer
self.init_dragonfly()
elif self._space:
self.setup_dragonfly()
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(
self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
points_to_evaluate: Optional[List[Dict]] = None,
n_initial_points: int = 20,
random_state_seed: Optional[int] = None,
gamma: float = 0.25,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None,
):
assert hpo is not None, (
"HyperOpt must be installed! Run `pip install hyperopt`.")
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
from hyperopt.fmin import generate_trials_to_calculate
super(HyperOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
self.max_concurrent = max_concurrent
# hyperopt internally minimizes, so "max" => -1
if mode == "max":
self.metric_op = -1.
elif mode == "min":
self.metric_op = 1.
if n_initial_points is None:
self.algo = hpo.tpe.suggest
else:
self.algo = partial(
hpo.tpe.suggest, n_startup_jobs=n_initial_points)
if gamma is not None:
self.algo = partial(self.algo, gamma=gamma)
if points_to_evaluate is None:
self._hpopt_trials = hpo.Trials()
self._points_to_evaluate = 0
else:
assert isinstance(points_to_evaluate, (list, tuple))
self._hpopt_trials = generate_trials_to_calculate(
points_to_evaluate)
self._hpopt_trials.refresh()
self._points_to_evaluate = len(points_to_evaluate)
self._live_trial_mapping = {}
if random_state_seed is None:
self.rstate = np.random.RandomState()
else:
self.rstate = np.random.RandomState(random_state_seed)
self.domain = None
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space)
self.domain = hpo.Domain(lambda spc: spc, space)
|
def __init__(
        self,
        space: Optional[Dict] = None,
        metric: Optional[str] = None,
        mode: Optional[str] = None,
        points_to_evaluate: Optional[List[Dict]] = None,
        n_initial_points: int = 20,
        random_state_seed: Optional[int] = None,
        gamma: float = 0.25,
        max_concurrent: Optional[int] = None,
        use_early_stopped_trials: Optional[bool] = None,
):
    """Initialize the HyperOpt-based searcher.

    Args:
        space: Either a native HyperOpt configuration space, or a Tune
            search-space dict (built with e.g. ``tune.choice`` /
            ``tune.uniform``). Tune dicts are converted to HyperOpt
            format here before the sampling domain is created.
        metric: Name of the metric to optimize.
        mode: Either "min" or "max".
        points_to_evaluate: Initial parameter configurations to try
            before sampling from the prior.
        n_initial_points: Number of random startup jobs for TPE; if
            None, the plain ``hpo.tpe.suggest`` default is used.
        random_state_seed: Seed for reproducible suggestions; if None,
            a fresh unseeded RandomState is used.
        gamma: TPE ``gamma`` parameter; if None, hyperopt's default.
        max_concurrent: Forwarded to the base searcher (legacy).
        use_early_stopped_trials: Forwarded to the base searcher
            (legacy).
    """
    assert hpo is not None, (
        "HyperOpt must be installed! Run `pip install hyperopt`.")
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
    from hyperopt.fmin import generate_trials_to_calculate
    super(HyperOptSearch, self).__init__(
        metric=metric,
        mode=mode,
        max_concurrent=max_concurrent,
        use_early_stopped_trials=use_early_stopped_trials)
    self.max_concurrent = max_concurrent
    # hyperopt internally minimizes, so "max" => -1
    if mode == "max":
        self.metric_op = -1.
    elif mode == "min":
        self.metric_op = 1.
    if n_initial_points is None:
        self.algo = hpo.tpe.suggest
    else:
        self.algo = partial(
            hpo.tpe.suggest, n_startup_jobs=n_initial_points)
    if gamma is not None:
        self.algo = partial(self.algo, gamma=gamma)
    if points_to_evaluate is None:
        self._hpopt_trials = hpo.Trials()
        self._points_to_evaluate = 0
    else:
        assert isinstance(points_to_evaluate, (list, tuple))
        self._hpopt_trials = generate_trials_to_calculate(
            points_to_evaluate)
        self._hpopt_trials.refresh()
        self._points_to_evaluate = len(points_to_evaluate)
    self._live_trial_mapping = {}
    if random_state_seed is None:
        self.rstate = np.random.RandomState()
    else:
        self.rstate = np.random.RandomState(random_state_seed)
    self.domain = None
    if isinstance(space, dict) and space:
        # A Tune search-space dict contains Domain objects (e.g.
        # ``tune.choice``) that hyperopt cannot sample from. Passing
        # them through unconverted makes the raw sampler objects show
        # up verbatim in trial configs (TypeError at train time), so
        # translate the dict into hyperopt's format first.
        space = self.convert_search_space(space)
    if space:
        self.domain = hpo.Domain(lambda spc: spc, space)
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(self,
             optimizer: Union[None, Optimizer, ConfiguredOptimizer] = None,
             space: Optional[Union[Dict, Parameter]] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             max_concurrent: Optional[int] = None,
             **kwargs):
    """Initialize the Nevergrad-based searcher.

    Args:
        optimizer: Either an already-instantiated Nevergrad
            ``Optimizer`` or a ``ConfiguredOptimizer`` factory.
        space: A Nevergrad ``Parameter``, a Tune search-space dict
            (converted to Nevergrad format here), or — when a
            pre-instantiated optimizer is passed — a list of parameter
            names or None.
        metric: Name of the metric to optimize.
        mode: Either "min" or "max".
        max_concurrent: Forwarded to the base searcher (legacy).
        **kwargs: Forwarded to the base searcher.

    Raises:
        ValueError: If `optimizer` is neither an ``Optimizer`` nor a
            ``ConfiguredOptimizer``, or if an instantiated optimizer is
            combined with a `space` that is not a list of parameter
            names or None.
    """
    assert ng is not None, "Nevergrad must be installed!"
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
    super(NevergradSearch, self).__init__(
        metric=metric, mode=mode, max_concurrent=max_concurrent, **kwargs)
    self._space = None
    self._opt_factory = None
    self._nevergrad_opt = None
    if isinstance(space, dict) and space:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
        if domain_vars or grid_vars:
            logger.warning(
                UNRESOLVED_SEARCH_SPACE.format(
                    par="space", cls=type(self)))
        space = self.convert_search_space(space)
    if isinstance(optimizer, Optimizer):
        # With a pre-instantiated optimizer, `space` may only name the
        # parameters (list) or be None. The previous check used
        # `space is not None or isinstance(space, list)`, which also
        # raised for lists — contradicting the error message below —
        # and whose isinstance clause was dead (a list is never None).
        if space is not None and not isinstance(space, list):
            raise ValueError(
                "If you pass a configured optimizer to Nevergrad, either "
                "pass a list of parameter names or None as the `space` "
                "parameter.")
        self._parameters = space
        self._nevergrad_opt = optimizer
    elif isinstance(optimizer, ConfiguredOptimizer):
        self._opt_factory = optimizer
        self._parameters = None
        self._space = space
    else:
        raise ValueError(
            "The `optimizer` argument passed to NevergradSearch must be "
            "either an `Optimizer` or a `ConfiguredOptimizer`.")
    self._live_trial_mapping = {}
    self.max_concurrent = max_concurrent
    if self._nevergrad_opt or self._space:
        self.setup_nevergrad()
|
def __init__(self,
             optimizer: Union[None, Optimizer, ConfiguredOptimizer] = None,
             space: Optional[Parameter] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             max_concurrent: Optional[int] = None,
             **kwargs):
    """Initialize the Nevergrad-backed searcher.

    Args:
        optimizer: An instantiated nevergrad ``Optimizer``, a
            ``ConfiguredOptimizer`` factory, or None.
        space: A nevergrad ``Parameter``, or — together with an
            instantiated optimizer — a list of parameter names or None.
        metric: Name of the metric to optimize.
        mode: Either "min" or "max".
        max_concurrent: Maximum number of concurrent trials.
        **kwargs: Forwarded to the base ``Searcher``.

    Raises:
        ValueError: If ``optimizer`` is neither an ``Optimizer`` nor a
            ``ConfiguredOptimizer``, or if an instantiated optimizer is
            combined with a ``space`` that is neither None nor a list.
    """
    assert ng is not None, "Nevergrad must be installed!"
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
    super(NevergradSearch, self).__init__(
        metric=metric, mode=mode, max_concurrent=max_concurrent, **kwargs)

    self._space = None
    self._opt_factory = None
    self._nevergrad_opt = None

    if isinstance(optimizer, Optimizer):
        # BUGFIX: the previous guard (`space is not None or
        # isinstance(space, list)`) raised even for the explicitly
        # allowed list of parameter names. Reject only values that are
        # neither None nor a list.
        if space is not None and not isinstance(space, list):
            raise ValueError(
                "If you pass a configured optimizer to Nevergrad, either "
                "pass a list of parameter names or None as the `space` "
                "parameter.")
        self._parameters = space
        self._nevergrad_opt = optimizer
    elif isinstance(optimizer, ConfiguredOptimizer):
        self._opt_factory = optimizer
        self._parameters = None
        self._space = space
    else:
        raise ValueError(
            "The `optimizer` argument passed to NevergradSearch must be "
            "either an `Optimizer` or a `ConfiguredOptimizer`.")

    self._live_trial_mapping = {}
    self.max_concurrent = max_concurrent

    # Only set up nevergrad when an optimizer or a space is already known.
    if self._nevergrad_opt or self._space:
        self.setup_nevergrad()
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(self,
             space: Optional[Union[Dict, List[Tuple]]] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             sampler: Optional[BaseSampler] = None):
    """Initialize the Optuna-backed searcher.

    Args:
        space: A list of Optuna parameter tuples, or a Tune search-space
            dict (converted automatically).
        metric: Name of the metric to optimize.
        mode: Either "min" or "max".
        sampler: An ``optuna.samplers.BaseSampler`` instance; defaults
            to ``TPESampler``.
    """
    assert ot is not None, (
        "Optuna must be installed! Run `pip install optuna`.")
    super(OptunaSearch, self).__init__(
        metric=metric,
        mode=mode,
        max_concurrent=None,
        use_early_stopped_trials=None)

    if isinstance(space, dict) and space:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
        if domain_vars or grid_vars:
            # Tune-style search space passed in: warn and convert it to
            # Optuna's parameter definitions before use.
            logger.warning(
                UNRESOLVED_SEARCH_SPACE.format(
                    par="space", cls=type(self)))
            space = self.convert_search_space(space)

    self._space = space

    self._study_name = "optuna"  # Fixed study name for in-memory storage
    self._sampler = sampler or ot.samplers.TPESampler()
    # BUGFIX: the original continuation used a literal `\\`, which is a
    # syntax error; parenthesize the assertion message instead.
    assert isinstance(self._sampler, BaseSampler), (
        "You can only pass an instance of `optuna.samplers.BaseSampler` "
        "as a sampler to `OptunaSearcher`.")

    self._pruner = ot.pruners.NopPruner()
    self._storage = ot.storages.InMemoryStorage()

    self._ot_trials = {}
    self._ot_study = None
    # Metric/mode may arrive later via set_search_properties; only build
    # the study when a space is already available.
    if self._space:
        self.setup_study(mode)
|
def __init__(self,
             space: Optional[List[Tuple]] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             sampler: Optional[BaseSampler] = None):
    """Initialize the Optuna-backed searcher.

    Args:
        space: Optuna parameter-definition list.
        metric: Name of the objective metric reported by trials.
        mode: One of "min" or "max".
        sampler: Optuna sampler instance; defaults to ``TPESampler``.
    """
    assert ot is not None, (
        "Optuna must be installed! Run `pip install optuna`.")
    super(OptunaSearch, self).__init__(
        metric=metric,
        mode=mode,
        max_concurrent=None,
        use_early_stopped_trials=None)

    self._space = space
    self._study_name = "optuna"  # Fixed study name for in-memory storage
    self._sampler = sampler or ot.samplers.TPESampler()
    assert isinstance(self._sampler, BaseSampler), (
        "You can only pass an instance of `optuna.samplers.BaseSampler` "
        "as a sampler to `OptunaSearcher`.")
    self._pruner = ot.pruners.NopPruner()
    self._storage = ot.storages.InMemoryStorage()
    self._ot_trials = {}
    self._ot_study = None
    if self._space:
        self.setup_study(mode)
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(self,
             optimizer: Optional[sko.optimizer.Optimizer] = None,
             space: Union[List[str], Dict[str, Union[Tuple, List]]] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             points_to_evaluate: Optional[List[List]] = None,
             evaluated_rewards: Optional[List] = None,
             max_concurrent: Optional[int] = None,
             use_early_stopped_trials: Optional[bool] = None):
    """Initialize the scikit-optimize searcher.

    Args:
        optimizer: Pre-built ``skopt.Optimizer``. If given, ``space``
            must be a list of parameter names.
        space: Tune search-space dict (converted automatically), a
            skopt parameter-range dict, or (with ``optimizer``) a list
            of parameter names.
        metric: Name of the objective metric reported by trials.
        mode: One of "min" or "max".
        points_to_evaluate: Initial parameter points to try first.
        evaluated_rewards: Rewards matching ``points_to_evaluate``.
        max_concurrent: Deprecated concurrency cap, forwarded to base.
        use_early_stopped_trials: Deprecated, forwarded to base.

    Raises:
        ValueError: If ``optimizer`` is passed but ``space`` is not a
            list of parameter names.
    """
    assert sko is not None, """skopt must be installed!
    You can install Skopt with the command:
    `pip install scikit-optimize`."""
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
    self.max_concurrent = max_concurrent
    super(SkOptSearch, self).__init__(
        metric=metric,
        mode=mode,
        max_concurrent=max_concurrent,
        use_early_stopped_trials=use_early_stopped_trials)

    self._initial_points = []
    self._parameters = None
    self._parameter_names = None
    self._parameter_ranges = None

    # A non-empty dict is treated as a Tune search space; convert it to
    # skopt's format, warning if it still contains unresolved tune.*
    # domains or grid-search values.
    if isinstance(space, dict) and space:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
        if domain_vars or grid_vars:
            logger.warning(
                UNRESOLVED_SEARCH_SPACE.format(par="space", cls=type(self)))
        space = self.convert_search_space(space, join=True)

    self._space = space

    if self._space:
        if isinstance(optimizer, sko.Optimizer):
            if not isinstance(space, list):
                # Fixed message: previously rendered "parameternames."
                # because the concatenated fragments lacked a space.
                raise ValueError(
                    "You passed an optimizer instance to SkOpt. Your "
                    "`space` parameter should be a list of parameter "
                    "names.")
            self._parameter_names = space
        else:
            self._parameter_names = list(space.keys())
            self._parameter_ranges = space.values()

    self._points_to_evaluate = points_to_evaluate
    self._evaluated_rewards = evaluated_rewards
    self._skopt_opt = optimizer
    if self._skopt_opt or self._space:
        self.setup_skopt()
    self._live_trial_mapping = {}
|
def __init__(self,
             optimizer: Optional[sko.optimizer.Optimizer] = None,
             space: Union[List[str], Dict[str, Union[Tuple, List]]] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             points_to_evaluate: Optional[List[List]] = None,
             evaluated_rewards: Optional[List] = None,
             max_concurrent: Optional[int] = None,
             use_early_stopped_trials: Optional[bool] = None):
        """Set up the SkOpt searcher state.

        Fix: a dict ``space`` that still contains Tune ``Domain`` objects
        (e.g. ``tune.choice``/``tune.qrandint``) used to be stored as-is,
        so trials received the Domain objects themselves instead of
        sampled values (manifesting as ``TypeError: Expected float32, got
        <ray.tune.sample.Categorical ...>``). Such spaces are now
        converted to skopt dimensions before being stored, with resolved
        constant entries preserved.

        Args:
            optimizer: Pre-built skopt optimizer instance. When given,
                ``space`` must be a list of parameter names.
            space: Either a dict mapping parameter names to skopt
                ranges/choices (Tune ``Domain`` entries are auto-converted),
                or a list of parameter names when ``optimizer`` is passed.
            metric: Name of the objective metric, forwarded to the base
                class.
            mode: Optimization direction; must be "min" or "max" if set.
            points_to_evaluate: Initial parameter points to suggest first.
            evaluated_rewards: Rewards corresponding to
                ``points_to_evaluate``.
            max_concurrent: Stored and forwarded to the base class.
            use_early_stopped_trials: Forwarded to the base class.
        """
        assert sko is not None, """skopt must be installed!
            You can install Skopt with the command:
            `pip install scikit-optimize`."""
        if mode:
            assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
        self.max_concurrent = max_concurrent
        super(SkOptSearch, self).__init__(
            metric=metric,
            mode=mode,
            max_concurrent=max_concurrent,
            use_early_stopped_trials=use_early_stopped_trials)
        self._initial_points = []
        self._parameters = None
        self._parameter_names = None
        self._parameter_ranges = None
        if isinstance(space, dict) and space:
            resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
            if domain_vars or grid_vars:
                logger.warning(
                    "The `space` parameter contains unresolved search "
                    "space definitions; converting them to a SkOpt "
                    "search space automatically.")
                # Convert the Tune domains to skopt dimensions, then merge
                # them over the flattened spec so resolved constant values
                # are kept alongside the converted entries.
                converted = self.convert_search_space(space)
                flat_spec = flatten_dict(space, prevent_delimiter=True)
                flat_spec.update(converted)
                space = flat_spec
        self._space = space
        if self._space:
            if isinstance(optimizer, sko.Optimizer):
                if not isinstance(space, list):
                    raise ValueError(
                        "You passed an optimizer instance to SkOpt. Your "
                        "`space` parameter should be a list of parameter"
                        "names.")
                self._parameter_names = space
            else:
                self._parameter_names = list(space.keys())
                self._parameter_ranges = space.values()
        self._points_to_evaluate = points_to_evaluate
        self._evaluated_rewards = evaluated_rewards
        self._skopt_opt = optimizer
        if self._skopt_opt or self._space:
            self.setup_skopt()
        self._live_trial_mapping = {}
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
        """Convert a Tune search space dict into skopt dimensions.

        Args:
            spec: Possibly nested search space dict. Nested keys are
                flattened to "/"-joined parameter names.
            join: If True, merge the converted dimensions over the
                flattened spec so resolved (constant) entries are kept.

        Returns:
            Dict mapping parameter names to skopt-compatible
            ``(lower, upper)`` tuples or category lists.

        Raises:
            ValueError: If the spec contains grid search parameters or a
                domain type skopt cannot represent.
        """
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to a SkOpt search space.")

        def to_skopt_dimension(domain: Domain) -> Union[Tuple, List]:
            # Quantization has no skopt equivalent; warn and strip it.
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                logger.warning("SkOpt search does not support quantization. "
                               "Dropped quantization.")
                sampler = sampler.get_sampler()

            # Float and Integer both map to a (lower, upper) bound pair;
            # any explicit sampler is dropped with a warning.
            if isinstance(domain, (Float, Integer)):
                if domain.sampler is not None:
                    logger.warning(
                        "SkOpt does not support specific sampling methods."
                        " The {} sampler will be dropped.".format(sampler))
                return domain.lower, domain.upper
            if isinstance(domain, Categorical):
                return domain.categories

            raise ValueError("SkOpt does not support parameters of type "
                             "`{}`".format(type(domain).__name__))

        # Parameter name is e.g. "a/b/c" for nested dicts.
        converted = {}
        for path, domain in domain_vars:
            converted["/".join(path)] = to_skopt_dimension(domain)

        if not join:
            return converted

        spec.update(converted)
        return spec
|
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
        """Convert a Tune search space dict into skopt dimensions.

        Generalized with a backward-compatible ``join`` flag: callers that
        auto-convert a user-provided space need the converted dimensions
        merged back over the flattened spec so resolved (constant) entries
        are not lost. The default ``join=False`` preserves the previous
        behavior of returning only the converted entries.

        Args:
            spec: Possibly nested search space dict. Nested keys are
                flattened to "/"-joined parameter names.
            join: If True, merge the converted dimensions over the
                flattened spec and return the merged dict.

        Returns:
            Dict mapping parameter names to skopt-compatible
            ``(lower, upper)`` tuples or category lists.

        Raises:
            ValueError: If the spec contains grid search parameters or a
                domain type skopt cannot represent.
        """
        spec = flatten_dict(spec, prevent_delimiter=True)
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

        if grid_vars:
            raise ValueError(
                "Grid search parameters cannot be automatically converted "
                "to a SkOpt search space.")

        def resolve_value(domain: Domain) -> Union[Tuple, List]:
            # Quantization has no skopt equivalent; warn and strip it.
            sampler = domain.get_sampler()
            if isinstance(sampler, Quantized):
                logger.warning("SkOpt search does not support quantization. "
                               "Dropped quantization.")
                sampler = sampler.get_sampler()

            if isinstance(domain, Float):
                if domain.sampler is not None:
                    logger.warning(
                        "SkOpt does not support specific sampling methods."
                        " The {} sampler will be dropped.".format(sampler))
                return domain.lower, domain.upper

            if isinstance(domain, Integer):
                if domain.sampler is not None:
                    logger.warning(
                        "SkOpt does not support specific sampling methods."
                        " The {} sampler will be dropped.".format(sampler))
                return domain.lower, domain.upper

            if isinstance(domain, Categorical):
                return domain.categories

            raise ValueError("SkOpt does not support parameters of type "
                             "`{}`".format(type(domain).__name__))

        # Parameter name is e.g. "a/b/c" for nested dicts
        space = {
            "/".join(path): resolve_value(domain)
            for path, domain in domain_vars
        }

        if join:
            spec.update(space)
            space = spec

        return space
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def __init__(self,
             algo: str = "asracos",
             budget: Optional[int] = None,
             dim_dict: Optional[Dict] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             **kwargs):
    """Initialize a ZOOpt-backed searcher.

    Args:
        algo: ZOOpt algorithm; one of "asracos" or "sracos"
            (case-insensitive).
        budget: Total number of suggestions ZOOpt may make. Required.
        dim_dict: Search space; unresolved Tune domains are converted
            to ZOOpt dimensions.
        metric: Metric to optimize.
        mode: "min" or "max".
        **kwargs: Forwarded to the ZOOpt optimizer.
    """
    assert zoopt is not None, (
        "ZOOpt not found - please install zoopt "
        "by `pip install -U zoopt`.")
    assert budget is not None, "`budget` should not be None!"
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."

    chosen_algo = algo.lower()
    assert chosen_algo in [
        "asracos", "sracos"
    ], "`algo` must be in ['asracos', 'sracos'] currently"
    self._algo = chosen_algo

    # A non-empty dict may still contain unresolved Tune domains or
    # grid-search entries; convert those into ZOOpt dimensions.
    if dim_dict and isinstance(dim_dict, dict):
        _, domain_vars, grid_vars = parse_spec_vars(dim_dict)
        if domain_vars or grid_vars:
            logger.warning(
                UNRESOLVED_SEARCH_SPACE.format(
                    par="dim_dict", cls=type(self)))
            dim_dict = self.convert_search_space(dim_dict, join=True)

    self._dim_dict = dim_dict
    self._budget = budget
    self._metric = metric
    # NOTE: _metric_op is deliberately left unset when mode is None,
    # matching the original behavior.
    if mode in ("max", "min"):
        # ZOOpt minimizes internally; flip the sign for maximization.
        self._metric_op = -1. if mode == "max" else 1.

    self._live_trial_mapping = {}
    self._dim_keys = []
    self.solution_dict = {}
    self.best_solution_list = []
    self.optimizer = None
    self.kwargs = kwargs

    super(ZOOptSearch, self).__init__(metric=self._metric, mode=mode)

    if self._dim_dict:
        self.setup_zoopt()
|
def __init__(self,
             algo: str = "asracos",
             budget: Optional[int] = None,
             dim_dict: Optional[Dict] = None,
             metric: Optional[str] = None,
             mode: Optional[str] = None,
             **kwargs):
    """Initialize the ZOOpt searcher.

    Fix: previously ``dim_dict`` was stored as-is, so a Tune search space
    (``tune.choice``, ``tune.qrandint``, ...) was handed to the trainable
    untranslated, yielding errors such as
    ``TypeError: Expected float32, got <ray.tune.sample.Categorical ...>``.
    Unresolved Tune domains are now converted to ZOOpt dimension tuples
    before use.
    """
    # Hard requirements: the zoopt package must be importable and a
    # sampling budget must be supplied.
    assert zoopt is not None, "ZOOpt not found - please install zoopt " \
        "by `pip install -U zoopt`."
    assert budget is not None, "`budget` should not be None!"
    if mode:
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."

    _algo = algo.lower()
    assert _algo in ["asracos", "sracos"
                     ], "`algo` must be in ['asracos', 'sracos'] currently"
    self._algo = _algo

    # Convert unresolved Tune domains into ZOOpt tuples.
    # NOTE(review): parse_spec_vars / UNRESOLVED_SEARCH_SPACE / logger are
    # module-level names of this module — confirm they are imported.
    if isinstance(dim_dict, dict) and dim_dict:
        resolved_vars, domain_vars, grid_vars = parse_spec_vars(dim_dict)
        if domain_vars or grid_vars:
            logger.warning(
                UNRESOLVED_SEARCH_SPACE.format(
                    par="dim_dict", cls=type(self)))
            dim_dict = self.convert_search_space(dim_dict, join=True)

    self._dim_dict = dim_dict
    self._budget = budget
    self._metric = metric
    # ZOOpt minimizes, so maximization flips the sign; left unset when no
    # mode is given (matches prior behavior).
    if mode == "max":
        self._metric_op = -1.
    elif mode == "min":
        self._metric_op = 1.
    self._live_trial_mapping = {}
    self._dim_keys = []
    self.solution_dict = {}
    self.best_solution_list = []
    self.optimizer = None
    self.kwargs = kwargs

    super(ZOOptSearch, self).__init__(metric=self._metric, mode=mode)

    if self._dim_dict:
        self.setup_zoopt()
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def convert_search_space(spec: Dict,
                         join: bool = False) -> Dict[str, Tuple]:
    """Translate a Tune search space into ZOOpt parameter tuples.

    Args:
        spec: Tune search-space dict (may contain Domain objects).
        join: If True, merge the converted parameters back into a deep
            copy of ``spec`` and return that; otherwise return only the
            converted parameters.

    Raises:
        ValueError: On grid-search parameters or unsupported
            domain/sampler combinations.
    """
    spec = copy.deepcopy(spec)
    resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

    if not (domain_vars or grid_vars):
        return {}

    if grid_vars:
        raise ValueError(
            "Grid search parameters cannot be automatically converted "
            "to a ZOOpt search space.")

    def resolve_value(domain: Domain) -> Tuple:
        sampler = domain.get_sampler()
        quantize = None
        if isinstance(sampler, Quantized):
            quantize = sampler.q
            sampler = sampler.sampler

        if isinstance(domain, Float) and isinstance(sampler, Uniform):
            return (ValueType.CONTINUOUS, [domain.lower, domain.upper],
                    quantize or 1e-12)
        if isinstance(domain, Integer) and isinstance(sampler, Uniform):
            return (ValueType.DISCRETE, [domain.lower, domain.upper],
                    True)
        if isinstance(domain, Categorical) and isinstance(sampler, Uniform):
            # Categorical variables would use ValueType.DISCRETE with
            # has_partial_order=False, however, currently we do not
            # keep track of category values and cannot automatically
            # translate back and forth between them.
            return (ValueType.GRID, domain.categories)

        raise ValueError("ZOOpt does not support parameters of type "
                         "`{}` with samplers of type `{}`".format(
                             type(domain).__name__,
                             type(domain.sampler).__name__))

    converted = {}
    for path, domain in domain_vars:
        converted["/".join(path)] = resolve_value(domain)

    if not join:
        return converted

    spec.update(converted)
    return spec
|
def convert_search_space(spec: Dict,
                         join: bool = False) -> Dict[str, Tuple]:
    """Translate a Tune search space into ZOOpt parameter tuples.

    Fixes: the empty-space case previously returned ``[]``, contradicting
    the declared ``Dict[str, Tuple]`` return type; it now returns ``{}``.
    A backward-compatible ``join`` flag is added so resolved (constant)
    entries of ``spec`` can be kept alongside the converted parameters.

    Args:
        spec: Tune search-space dict (may contain Domain objects).
        join: If True, merge the converted parameters back into a deep
            copy of ``spec`` and return that; otherwise (default, the
            previous behavior) return only the converted parameters.

    Raises:
        ValueError: On grid-search parameters or unsupported
            domain/sampler combinations.
    """
    spec = copy.deepcopy(spec)
    resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)

    if not domain_vars and not grid_vars:
        return {}

    if grid_vars:
        raise ValueError(
            "Grid search parameters cannot be automatically converted "
            "to a ZOOpt search space.")

    def resolve_value(domain: Domain) -> Tuple:
        quantize = None

        sampler = domain.get_sampler()
        if isinstance(sampler, Quantized):
            quantize = sampler.q
            sampler = sampler.sampler

        if isinstance(domain, Float):
            precision = quantize or 1e-12
            if isinstance(sampler, Uniform):
                return (ValueType.CONTINUOUS, [domain.lower, domain.upper],
                        precision)
        elif isinstance(domain, Integer):
            if isinstance(sampler, Uniform):
                return (ValueType.DISCRETE, [domain.lower, domain.upper],
                        True)
        elif isinstance(domain, Categorical):
            # Categorical variables would use ValueType.DISCRETE with
            # has_partial_order=False, however, currently we do not
            # keep track of category values and cannot automatically
            # translate back and forth between them.
            if isinstance(sampler, Uniform):
                return (ValueType.GRID, domain.categories)

        raise ValueError("ZOOpt does not support parameters of type "
                         "`{}` with samplers of type `{}`".format(
                             type(domain).__name__,
                             type(domain.sampler).__name__))

    # Use a separate name so the deep-copied spec is not clobbered and
    # can still be joined below.
    conv_spec = {
        "/".join(path): resolve_value(domain)
        for path, domain in domain_vars
    }

    if join:
        spec.update(conv_spec)
        conv_spec = spec

    return conv_spec
|
[{'piece_type': 'other', 'piece_content': 'config = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}'}, {'piece_type': 'other', 'piece_content': 'from hyperopt import hp\\nconfig = {\\n"learning_rate": hp.choice("learning_rate", [0.001, 0.0001]),\\n"batch_size": tune.choice("batch_size", [32, 64, 128, 256]),\\n"neurons1": hp.choice("neurons1", [32, 64]),\\n"neurons2": hp.choice("neurons2", [32, 64]),\\n"dropout": hp.choice("dropout", [0.1, 0.2, 0.3,]),\\n}\\n\\n[mnist.txt](https://github.com/ray-project/ray/files/5391092/mnist.txt)'}, {'piece_type': 'other', 'piece_content': 'import os\\n\\nos.environ["CUDA_VISIBLE_DEVICES"] = "3,4,5"\\nos.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"\\n# loglevel : 0 all printed, 1 I not printed, 2 I and W not printed, 3 nothing printed\\nos.environ[\\'TF_CPP_MIN_LOG_LEVEL\\'] = \\'1\\'\\n\\nfrom tensorflow import keras\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\nfrom ray.tune.suggest import ConcurrencyLimiter\\n\\nray.init(configure_logging=False)\\n\\nEPOCHS = 20\\nnum_samples = 100\\nexperiment_name = "test_1"\\n\\nconfig = {\\n"learning_rate": tune.qloguniform(1e-4, 1e-1, 5e-5),\\n"batch_size": tune.choice([32, 64, 128, 256]),\\n"neurons1": tune.qrandint(32, 1024, 32),\\n"neurons2": tune.qrandint(32, 1024, 32),\\n"dropout": tune.choice([0.1, 0.2, 0.3,]),\\n}\\n\\nclass TuneReporter(keras.callbacks.Callback):\\n"""Tune Callback for Keras."""\\ndef on_epoch_end(self, epoch, logs=None):\\ntune.report(keras_info=logs, val_loss=logs[\\'val_loss\\'], val_accuracy=logs["val_accuracy"])\\n\\n\\n\\ndef trainer(config):\\n\\n# Load MNIST dataset as NumPy arrays\\n(x_train, y_train), (x_test, y_test) = 
keras.datasets.mnist.load_data()\\n\\n# Preprocess the data\\nx_train = x_train.reshape(-1, 784).astype(\\'float32\\') / 255\\nx_test = x_test.reshape(-1, 784).astype(\\'float32\\') / 255\\n\\nmodel = keras.Sequential([\\nkeras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\nkeras.layers.Dropout(config[\\'dropout\\']),\\nkeras.layers.Dense(config["neurons2"], activation=\\'relu\\', name=\\'dense_2\\'),\\nkeras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n])\\n\\nmodel.compile(optimizer=optimizers.Adam(learning_rate = config[\\'learning_rate\\']),\\nloss=keras.losses.SparseCategoricalCrossentropy(),\\nmetrics=[\\'accuracy\\'])\\n\\nearlystopping = keras.callbacks.EarlyStopping(monitor="val_loss",\\npatience=10,\\nmin_delta=1e-4,\\nmode=\\'min\\',\\nrestore_best_weights=True,\\nverbose=1)\\n\\ntunerrep = TuneReporter()\\ncallbacks_ = [earlystopping, tunerrep,]\\n\\n\\n\\nhistory = model.fit(\\nx_train,\\ny_train,\\nbatch_size=config["batch_size"],\\nvalidation_data=(x_test, y_test),\\nepochs=EPOCHS,\\ncallbacks=callbacks_)\\n\\nreturn history\\n\\n\\nscheduler = AsyncHyperBandScheduler(time_attr=\\'training_iteration\\',\\nmetric="val_loss",\\nmode="min",\\ngrace_period=10)\\n\\n#Use bayesian optimisation with TPE implemented by hyperopt\\nsearch_alg = HyperOptSearch(config,\\nmetric="val_loss",\\nmode="min",\\n)\\n\\nsearch_alg = ConcurrencyLimiter(search_alg, max_concurrent=4)\\n\\nanalysis = tune.run(trainer,\\nverbose=1,\\nlocal_dir="ray_results",\\nname=experiment_name,\\nnum_samples=num_samples,\\nsearch_alg=search_alg,\\nscheduler=scheduler,\\nraise_on_failed_trial=False,\\nresources_per_trial={"cpu": 2, "gpu": 1},\\nlog_to_file=("stdout.log", "stderr.log"),\\nfail_fast=True,\\n)\\n\\nbest_config = analysis.get_best_config(metric="val_loss", mode=\\'min\\')\\nprint(f\\'Best config: {best_config}\\')'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call 
last):\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=82997) self.run()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=82997) raise e\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=82997) self._entrypoint()\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=82997) self._status_reporter.get_checkpoint())\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=82997) output = train_func(config)\\n(pid=82997) File "test_ray.py", line 49, in trainer\\n(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation=\\'relu\\', name=\\'dense_1\\'),\\n(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__\\n(pid=82997) self.units = int(units) if not isinstance(units, int) else units\\n(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not \\'Integer\\'\\n------------------------------------------------------------\\n(pid=84324) Traceback (most recent call last):\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\n(pid=84324) self.run()\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run\\n(pid=84324) raise e\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run\\n(pid=84324) self._entrypoint()\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint\\n(pid=84324) self._status_reporter.get_checkpoint())\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func\\n(pid=84324) output = train_func(config)\\n(pid=84324) File "test_ray.py", line 50, in trainer\\n(pid=84324) keras.layers.Dense(10, activation=\\'softmax\\', name=\\'predictions\\'),\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__\\n(pid=84324) self.add(layer)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper\\n(pid=84324) result = method(self, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add\\n(pid=84324) output_tensor = layer(self.outputs[0])\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__\\n(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call\\n(pid=84324) lambda: array_ops.identity(inputs))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond\\n(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)\\n(pid=84324) File 
"/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond\\n(pid=84324) name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond\\n(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2\\n(pid=84324) op_return_value=pred)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func\\n(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs\\n(pid=84324) rate=self.rate)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func\\n(pid=84324) return func(*args, **kwargs)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout\\n(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2\\n(pid=84324) rate, dtype=x.dtype, name="rate")\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor\\n(pid=84324) ret = conversion_func(value, dtype=dtype, 
name=name, as_ref=as_ref)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function\\n(pid=84324) return constant(v, dtype=dtype, name=name)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant\\n(pid=84324) allow_broadcast=True)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl\\n(pid=84324) allow_broadcast=allow_broadcast))\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto\\n(pid=84324) _AssertCompatible(values, dtype)\\n(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible\\n(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))\\n(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type \\'Categorical\\' instead.'}, {'piece_type': 'other', 'piece_content': 'name: ray-test\\nchannels:\\n- defaults\\ndependencies:\\n- _libgcc_mutex=0.1=main\\n- _tflow_select=2.3.0=mkl\\n- absl-py=0.10.0=py36_0\\n- aiohttp=3.6.3=py36h7b6447c_0\\n- astor=0.8.1=py36_0\\n- async-timeout=3.0.1=py36_0\\n- attrs=20.2.0=py_0\\n- blas=1.0=mkl\\n- blinker=1.4=py36_0\\n- brotlipy=0.7.0=py36h7b6447c_1000\\n- c-ares=1.16.1=h7b6447c_0\\n- ca-certificates=2020.10.14=0\\n- cachetools=4.1.1=py_0\\n- certifi=2020.6.20=py36_0\\n- cffi=1.14.3=py36he30daa8_0\\n- chardet=3.0.4=py36_1003\\n- click=7.1.2=py_0\\n- cryptography=3.1.1=py36h1ba5d50_0\\n- dataclasses=0.7=py36_0\\n- gast=0.2.2=py36_0\\n- google-auth=1.22.1=py_0\\n- google-auth-oauthlib=0.4.1=py_2\\n- google-pasta=0.2.0=py_0\\n- 
grpcio=1.31.0=py36hf8bcb03_0\\n- h5py=2.10.0=py36hd6299e0_1\\n- hdf5=1.10.6=hb1b8bf9_0\\n- idna=2.10=py_0\\n- idna_ssl=1.1.0=py36_0\\n- importlib-metadata=2.0.0=py_1\\n- intel-openmp=2020.2=254\\n- keras-applications=1.0.8=py_1\\n- keras-preprocessing=1.1.0=py_1\\n- ld_impl_linux-64=2.33.1=h53a641e_7\\n- libedit=3.1.20191231=h14c3975_1\\n- libffi=3.3=he6710b0_2\\n- libgcc-ng=9.1.0=hdf63c60_0\\n- libgfortran-ng=7.3.0=hdf63c60_0\\n- libprotobuf=3.13.0.1=hd408876_0\\n- libstdcxx-ng=9.1.0=hdf63c60_0\\n- markdown=3.3.1=py36_0\\n- mkl=2020.2=256\\n- mkl-service=2.3.0=py36he904b0f_0\\n- mkl_fft=1.2.0=py36h23d657b_0\\n- mkl_random=1.1.1=py36h0573a6f_0\\n- multidict=4.7.6=py36h7b6447c_1\\n- ncurses=6.2=he6710b0_1\\n- numpy=1.19.1=py36hbc911f0_0\\n- numpy-base=1.19.1=py36hfa32c7d_0\\n- oauthlib=3.1.0=py_0\\n- openssl=1.1.1h=h7b6447c_0\\n- opt_einsum=3.1.0=py_0\\n- pandas=1.1.3=py36he6710b0_0\\n- pip=20.2.3=py36_0\\n- protobuf=3.13.0.1=py36he6710b0_1\\n- pyasn1=0.4.8=py_0\\n- pyasn1-modules=0.2.8=py_0\\n- pycparser=2.20=py_2\\n- pyjwt=1.7.1=py36_0\\n- pyopenssl=19.1.0=py_1\\n- pysocks=1.7.1=py36_0\\n- python=3.6.12=hcff3b4d_2\\n- python-dateutil=2.8.1=py_0\\n- pytz=2020.1=py_0\\n- readline=8.0=h7b6447c_0\\n- requests=2.24.0=py_0\\n- requests-oauthlib=1.3.0=py_0\\n- rsa=4.6=py_0\\n- scipy=1.5.2=py36h0b6359f_0\\n- setuptools=50.3.0=py36hb0f4dca_1\\n- six=1.15.0=py_0\\n- sqlite=3.33.0=h62c20be_0\\n- tensorboard=2.2.1=pyh532a8cf_0\\n- tensorboard-plugin-wit=1.6.0=py_0\\n- tensorflow=2.1.0=mkl_py36h23468d9_0\\n- tensorflow-base=2.1.0=mkl_py36h6d63fb7_0\\n- tensorflow-estimator=2.1.0=pyhd54b08b_0\\n- termcolor=1.1.0=py36_1\\n- tk=8.6.10=hbc83047_0\\n- typing_extensions=3.7.4.3=py_0\\n- urllib3=1.25.10=py_0\\n- werkzeug=1.0.1=py_0\\n- wheel=0.35.1=py_0\\n- wrapt=1.12.1=py36h7b6447c_1\\n- xz=5.2.5=h7b6447c_0\\n- zipp=3.3.0=py_0\\n- zlib=1.2.11=h7b6447c_3\\n- pip:\\n- aiohttp-cors==0.7.0\\n- aioredis==1.3.1\\n- beautifulsoup4==4.9.3\\n- blessings==1.7\\n- cloudpickle==1.6.0\\n- 
colorama==0.4.4\\n- colorful==0.5.4\\n- contextvars==2.4\\n- decorator==4.4.2\\n- filelock==3.0.12\\n- future==0.18.2\\n- google==3.0.0\\n- google-api-core==1.22.4\\n- googleapis-common-protos==1.52.0\\n- gpustat==0.6.0\\n- hiredis==1.1.0\\n- hyperopt==0.2.5\\n- immutables==0.14\\n- jsonschema==3.2.0\\n- msgpack==1.0.0\\n- networkx==2.5\\n- nvidia-ml-py3==7.352.0\\n- opencensus==0.7.11\\n- opencensus-context==0.1.2\\n- prometheus-client==0.8.0\\n- psutil==5.7.2\\n- py-spy==0.3.3\\n- pyrsistent==0.17.3\\n- pyyaml==5.3.1\\n- ray==1.0.0\\n- redis==3.4.1\\n- soupsieve==2.0.1\\n- tabulate==0.8.7\\n- tensorboardx==2.1\\n- tqdm==4.50.2\\n- yarl==1.5.1'}]
|
Traceback (most recent call last):
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=82997) self.run()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=82997) raise e
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=82997) self._entrypoint()
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=82997) self._status_reporter.get_checkpoint())
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=82997) output = train_func(config)
(pid=82997) File "test_ray.py", line 49, in trainer
(pid=82997) keras.layers.Dense(config["neurons1"], input_shape=(784,), activation='relu', name='dense_1'),
(pid=82997) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 1081, in __init__
(pid=82997) self.units = int(units) if not isinstance(units, int) else units
(pid=82997) TypeError: int() argument must be a string, a bytes-like object or a number, not 'Integer'
------------------------------------------------------------
(pid=84324) Traceback (most recent call last):
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/threading.py", line 916, in _bootstrap_inner
(pid=84324) self.run()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 246, in run
(pid=84324) raise e
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 227, in run
(pid=84324) self._entrypoint()
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 290, in entrypoint
(pid=84324) self._status_reporter.get_checkpoint())
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/ray/tune/function_runner.py", line 497, in _trainable_func
(pid=84324) output = train_func(config)
(pid=84324) File "test_ray.py", line 50, in trainer
(pid=84324) keras.layers.Dense(10, activation='softmax', name='predictions'),
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 116, in __init__
(pid=84324) self.add(layer)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
(pid=84324) result = method(self, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/sequential.py", line 203, in add
(pid=84324) output_tensor = layer(self.outputs[0])
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 773, in __call__
(pid=84324) outputs = call_fn(cast_inputs, *args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 183, in call
(pid=84324) lambda: array_ops.identity(inputs))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/utils/tf_utils.py", line 59, in smart_cond
(pid=84324) pred, true_fn=true_fn, false_fn=false_fn, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/smart_cond.py", line 59, in smart_cond
(pid=84324) name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/control_flow_ops.py", line 1174, in cond
(pid=84324) return cond_v2.cond_v2(pred, true_fn, false_fn, name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/cond_v2.py", line 83, in cond_v2
(pid=84324) op_return_value=pred)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/func_graph.py", line 978, in func_graph_from_py_func
(pid=84324) func_outputs = python_func(*func_args, **func_kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/layers/core.py", line 179, in dropped_inputs
(pid=84324) rate=self.rate)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/util/deprecation.py", line 507, in new_func
(pid=84324) return func(*args, **kwargs)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4289, in dropout
(pid=84324) return dropout_v2(x, rate, noise_shape=noise_shape, seed=seed, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/ops/nn_ops.py", line 4383, in dropout_v2
(pid=84324) rate, dtype=x.dtype, name="rate")
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py", line 1314, in convert_to_tensor
(pid=84324) ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 317, in _constant_tensor_conversion_function
(pid=84324) return constant(v, dtype=dtype, name=name)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 258, in constant
(pid=84324) allow_broadcast=True)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/constant_op.py", line 296, in _constant_impl
(pid=84324) allow_broadcast=allow_broadcast))
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 451, in make_tensor_proto
(pid=84324) _AssertCompatible(values, dtype)
(pid=84324) File "/home/gsukhorukov/.conda/envs/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/tensor_util.py", line 331, in _AssertCompatible
(pid=84324) (dtype.name, repr(mismatch), type(mismatch).__name__))
(pid=84324) TypeError: Expected float32, got <ray.tune.sample.Categorical object at 0x7fe7d1835a90> of type 'Categorical' instead.
|
TypeError
|
def init(
        address=None,
        *,
        num_cpus=None,
        num_gpus=None,
        resources=None,
        object_store_memory=None,
        local_mode=False,
        ignore_reinit_error=False,
        include_dashboard=None,
        dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
        job_config=None,
        configure_logging=True,
        logging_level=logging.INFO,
        logging_format=ray_constants.LOGGER_FORMAT,
        log_to_driver=True,
        # The following are unstable parameters and their use is discouraged.
        _enable_object_reconstruction=False,
        _redis_max_memory=None,
        _plasma_directory=None,
        _node_ip_address=ray_constants.NODE_DEFAULT_IP,
        _driver_object_store_memory=None,
        _memory=None,
        _redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
        _java_worker_options=None,
        _code_search_path=None,
        _temp_dir=None,
        _load_code_from_local=False,
        _lru_evict=False,
        _metrics_export_port=None,
        _object_spilling_config=None,
        _system_config=None):
    """
    Connect to an existing Ray cluster or start one and connect to it.

    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    To start Ray and all of the relevant processes, use this as follows:

    .. code-block:: python

        ray.init()

    To connect to an existing Ray cluster, use this as follows (substituting
    in the appropriate address):

    .. code-block:: python

        ray.init(address="123.45.67.89:6379")

    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init().

    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis,
            a raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits. If the driver
            is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the the cluster, removing the need to
            specify a specific node address.
        num_cpus (int): Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus (int): Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is automatically set based on
            available system memory.
        local_mode (bool): If true, the code will be executed serially. This
            is useful for debugging.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port: The port to bind the dashboard server to. Defaults to
            8265.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level, defaults to logging.INFO. Ignored unless
            "configure_logging" is true.
        logging_format: Logging format, defaults to string containing a
            timestamp, filename, line number, and message. See the source file
            ray_constants.py for details. Ignored unless "configure_logging"
            is true.
        log_to_driver (bool): If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        _enable_object_reconstruction (bool): If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _redis_max_memory: Redis max memory.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address (str): The IP address of the node that we are on.
        _driver_object_store_memory (int): Limit the amount of memory the
            driver can use in the object store for creating objects.
        _memory: Amount of reservable memory resource to create.
        _redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir (str): If provided, specifies the root temporary
            directory for the Ray process. Defaults to an OS-specific
            conventional location, e.g., "/tmp/ray".
        _load_code_from_local: Whether code should be loaded from a local
            module or from the GCS.
        _java_worker_options: Overwrite the options to start Java workers.
        _code_search_path (list): Java classpath or python import path.
        _lru_evict (bool): If True, when an object store is full, it will evict
            objects in LRU order to make more space and when under memory
            pressure, ray.ObjectLostError may be thrown. If False, then
            reference counting will be used to decide which objects are safe
            to evict and when under memory pressure, ray.ObjectStoreFullError
            may be thrown.
        _metrics_export_port(int): Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _object_spilling_config (str): The configuration json string for object
            spilling I/O worker.
        _system_config (str): JSON configuration for overriding
            RayConfig defaults. For testing purposes ONLY.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # Try to increase the file descriptor limit, which is too low by
    # default for Ray: https://github.com/ray-project/ray/issues/11239
    try:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            logger.debug("Automatically increasing RLIMIT_NOFILE to max "
                         "value of {}".format(hard))
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))
            except ValueError:
                # Raising the limit may be disallowed (e.g. inside some
                # containers); proceed with the current soft limit.
                logger.debug("Failed to raise limit.")
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            logger.warning(
                "File descriptor limit {} is too low for production "
                "servers and may result in connection errors. "
                "At least 8192 is recommended. --- "
                "Fix with 'ulimit -n 8192'".format(soft))
    except ImportError:
        # The `resource` module is POSIX-only.
        logger.debug("Could not import resource module (on Windows)")
        pass

    # RAY_ADDRESS overrides a missing/`auto` address argument, but an
    # explicit, different address argument is a conflicting configuration.
    if "RAY_ADDRESS" in os.environ:
        if address is None or address == "auto":
            address = os.environ["RAY_ADDRESS"]
        else:
            raise RuntimeError(
                "Cannot use both the RAY_ADDRESS environment variable and "
                "the address argument of ray.init simultaneously. If you "
                "use RAY_ADDRESS to connect to a specific Ray cluster, "
                "please call ray.init() or ray.init(address=\"auto\") on the "
                "driver.")

    # Convert hostnames to numerical IP address.
    if _node_ip_address is not None:
        node_ip_address = services.address_to_ip(_node_ip_address)
    raylet_ip_address = node_ip_address

    # A non-empty address means we attach to an existing cluster's Redis.
    if address:
        redis_address, _, _ = services.validate_redis_address(address)
    else:
        redis_address = None

    if configure_logging:
        setup_logger(logging_level, logging_format)

    if redis_address is not None:
        logger.info(
            f"Connecting to existing Ray cluster at address: {redis_address}")

    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE

    # Guard against double initialization of this driver process.
    if global_worker.connected:
        if ignore_reinit_error:
            logger.error("Calling ray.init() again after it has already been "
                         "called.")
            return
        else:
            raise RuntimeError("Maybe you called ray.init twice by accident? "
                               "This error can be suppressed by passing in "
                               "'ignore_reinit_error=True' or by calling "
                               "'ray.shutdown()' prior to 'ray.init()'.")

    # Normalize _system_config to a dict so downstream code can assume one.
    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")

    global _global_node
    if redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            object_ref_seed=None,
            driver_mode=driver_mode,
            redirect_worker_output=None,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            redis_max_memory=_redis_max_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            load_code_from_local=_load_code_from_local,
            java_worker_options=_java_worker_options,
            code_search_path=_code_search_path,
            start_initial_python_workers_for_first_job=True,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port,
            object_spilling_config=_object_spilling_config)
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster.
        # Cluster-level resource settings must come from the head node, so
        # reject them here rather than silently ignoring them.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided.")
        if resources is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "resources must not be provided.")
        if object_store_memory is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "object_store_memory must not be provided.")
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError("When connecting to an existing cluster, "
                             "_system_config must not be provided.")
        if _lru_evict:
            raise ValueError("When connecting to an existing cluster, "
                             "_lru_evict must not be provided.")
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided.")

        # In this case, we only need to connect the node.
        ray_params = ray.parameter.RayParams(
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            redis_address=redis_address,
            redis_password=_redis_password,
            object_ref_seed=None,
            temp_dir=_temp_dir,
            load_code_from_local=_load_code_from_local,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)

    # Attach this driver process to the (new or existing) cluster.
    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        job_config=job_config)

    # Run any registered post-init hooks (e.g. from external integrations).
    for hook in _post_init_hooks:
        hook()

    node_id = global_worker.core_worker.get_current_node_id()
    return dict(_global_node.address_info, node_id=node_id.hex())
|
def init(
        address=None,
        *,
        num_cpus=None,
        num_gpus=None,
        resources=None,
        object_store_memory=None,
        local_mode=False,
        ignore_reinit_error=False,
        include_dashboard=None,
        dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port=ray_constants.DEFAULT_DASHBOARD_PORT,
        job_config=None,
        configure_logging=True,
        logging_level=logging.INFO,
        logging_format=ray_constants.LOGGER_FORMAT,
        log_to_driver=True,
        # The following are unstable parameters and their use is discouraged.
        _enable_object_reconstruction=False,
        _redis_max_memory=None,
        _plasma_directory=None,
        _node_ip_address=ray_constants.NODE_DEFAULT_IP,
        _driver_object_store_memory=None,
        _memory=None,
        _redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
        _java_worker_options=None,
        _code_search_path=None,
        _temp_dir=None,
        _load_code_from_local=False,
        _lru_evict=False,
        _metrics_export_port=None,
        _object_spilling_config=None,
        _system_config=None):
    """Connect to an existing Ray cluster or start one and connect to it.

    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    To start Ray and all of the relevant processes, use this as follows:

    .. code-block:: python

        ray.init()

    To connect to an existing Ray cluster, use this as follows (substituting
    in the appropriate address):

    .. code-block:: python

        ray.init(address="123.45.67.89:6379")

    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init().

    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis,
            a raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits. If the driver
            is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the the cluster, removing the need to
            specify a specific node address.
        num_cpus (int): Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus (int): Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is automatically set based on
            available system memory.
        local_mode (bool): If true, the code will be executed serially. This
            is useful for debugging.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port: The port to bind the dashboard server to. Defaults to
            8265.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level, defaults to logging.INFO. Ignored unless
            "configure_logging" is true.
        logging_format: Logging format, defaults to string containing a
            timestamp, filename, line number, and message. See the source file
            ray_constants.py for details. Ignored unless "configure_logging"
            is true.
        log_to_driver (bool): If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        _enable_object_reconstruction (bool): If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _redis_max_memory: Redis max memory.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address (str): The IP address of the node that we are on.
        _driver_object_store_memory (int): Limit the amount of memory the
            driver can use in the object store for creating objects.
        _memory: Amount of reservable memory resource to create.
        _redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir (str): If provided, specifies the root temporary
            directory for the Ray process. Defaults to an OS-specific
            conventional location, e.g., "/tmp/ray".
        _load_code_from_local: Whether code should be loaded from a local
            module or from the GCS.
        _java_worker_options: Overwrite the options to start Java workers.
        _code_search_path (list): Java classpath or python import path.
        _lru_evict (bool): If True, when an object store is full, it will evict
            objects in LRU order to make more space and when under memory
            pressure, ray.ObjectLostError may be thrown. If False, then
            reference counting will be used to decide which objects are safe
            to evict and when under memory pressure, ray.ObjectStoreFullError
            may be thrown.
        _metrics_export_port(int): Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _object_spilling_config (str): The configuration json string for object
            spilling I/O worker.
        _system_config (str): JSON configuration for overriding
            RayConfig defaults. For testing purposes ONLY.

    Returns:
        Address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # RAY_ADDRESS takes effect only when the user did not pin a specific
    # address; combining both is ambiguous, so fail loudly.
    if "RAY_ADDRESS" in os.environ:
        if address is None or address == "auto":
            address = os.environ["RAY_ADDRESS"]
        else:
            raise RuntimeError(
                "Cannot use both the RAY_ADDRESS environment variable and "
                "the address argument of ray.init simultaneously. If you "
                "use RAY_ADDRESS to connect to a specific Ray cluster, "
                "please call ray.init() or ray.init(address=\"auto\") on the "
                "driver.")

    # Convert hostnames to numerical IP address.
    if _node_ip_address is not None:
        node_ip_address = services.address_to_ip(_node_ip_address)
    else:
        # Bug fix: previously `node_ip_address` (and thus
        # `raylet_ip_address`) was only bound inside the conditional, so an
        # explicit `_node_ip_address=None` raised UnboundLocalError below.
        node_ip_address = None
    raylet_ip_address = node_ip_address

    if address:
        redis_address, _, _ = services.validate_redis_address(address)
    else:
        redis_address = None

    if configure_logging:
        setup_logger(logging_level, logging_format)

    if redis_address is not None:
        logger.info(
            f"Connecting to existing Ray cluster at address: {redis_address}")

    driver_mode = LOCAL_MODE if local_mode else SCRIPT_MODE

    if global_worker.connected:
        if ignore_reinit_error:
            logger.error("Calling ray.init() again after it has already been "
                         "called.")
            return
        else:
            raise RuntimeError("Maybe you called ray.init twice by accident? "
                               "This error can be suppressed by passing in "
                               "'ignore_reinit_error=True' or by calling "
                               "'ray.shutdown()' prior to 'ray.init()'.")

    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")

    global _global_node
    if redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            object_ref_seed=None,
            driver_mode=driver_mode,
            redirect_worker_output=None,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            redis_max_memory=_redis_max_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            load_code_from_local=_load_code_from_local,
            java_worker_options=_java_worker_options,
            code_search_path=_code_search_path,
            start_initial_python_workers_for_first_job=True,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port,
            object_spilling_config=_object_spilling_config)
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster, so cluster
        # resource configuration must not be overridden by the driver.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided.")
        if resources is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "resources must not be provided.")
        if object_store_memory is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "object_store_memory must not be provided.")
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError("When connecting to an existing cluster, "
                             "_system_config must not be provided.")
        if _lru_evict:
            raise ValueError("When connecting to an existing cluster, "
                             "_lru_evict must not be provided.")
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided.")
        # In this case, we only need to connect the node.
        ray_params = ray.parameter.RayParams(
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            redis_address=redis_address,
            redis_password=_redis_password,
            object_ref_seed=None,
            temp_dir=_temp_dir,
            load_code_from_local=_load_code_from_local,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)

    # Attach this driver process to the (new or existing) cluster.
    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        job_config=job_config)

    for hook in _post_init_hooks:
        hook()

    node_id = global_worker.core_worker.get_current_node_id()
    return dict(_global_node.address_info, node_id=node_id.hex())
|
[{'piece_type': 'other', 'piece_content': "(pid=136534) F1009 07:02:26.344231 136534 136534 service_based_gcs_client.cc:207] Couldn't reconnect to GCS server. The last attempted GCS server address was 192.168.1.98:39953\\n(pid=136534) *** Check failure stack trace: ***\\n(pid=136534) @ 0x7f56afdb7f5d google::LogMessage::Fail()\\n(pid=136534) @ 0x7f56afdb90bc google::LogMessage::SendToLog()\\n(pid=136534) @ 0x7f56afdb7c39 google::LogMessage::Flush()\\n(pid=136534) @ 0x7f56afdb7e51 google::LogMessage::~LogMessage()\\n(pid=136534) @ 0x7f56afd6eff9 ray::RayLog::~RayLog()\\n(pid=136534) @ 0x7f56afab2a5a ray::gcs::ServiceBasedGcsClient::ReconnectGcsServer()\\n(pid=136534) @ 0x7f56afab2b5f ray::gcs::ServiceBasedGcsClient::GcsServiceFailureDetected()\\n(pid=136534) @ 0x7f56afab2d01 ray::gcs::ServiceBasedGcsClient::PeriodicallyCheckGcsServerAddress()\\n(pid=136534) @ 0x7f56afab5071 ray::gcs::ServiceBasedGcsClient::Connect()\\n(pid=136534) @ 0x7f56afa36746 ray::CoreWorker::CoreWorker()\\n(pid=136534) @ 0x7f56afa3a484 ray::CoreWorkerProcess::CreateWorker()\\n(pid=136534) @ 0x7f56afa3b6f2 ray::CoreWorkerProcess::CoreWorkerProcess()\\n(pid=136534) @ 0x7f56afa3c0bb ray::CoreWorkerProcess::Initialize()\\n(pid=136534) @ 0x7f56af975dce __pyx_pw_3ray_7_raylet_10CoreWorker_1__cinit__()\\n(pid=136534) @ 0x7f56af9775b5 __pyx_tp_new_3ray_7_raylet_CoreWorker()\\n(pid=136534) @ 0x556d5bd4e909 _PyObject_FastCallKeywords\\n(pid=136534) @ 0x556d5bdb439e _PyEval_EvalFrameDefault\\n(pid=136534) @ 0x556d5bcf7160 _PyEval_EvalCodeWithName\\n(pid=136534) @ 0x556d5bd47107 _PyFunction_FastCallKeywords\\n(pid=136534) @ 0x556d5bdb0585 _PyEval_EvalFrameDefault\\n(pid=136534) @ 0x556d5bcf6829 _PyEval_EvalCodeWithName\\n(pid=136534) @ 0x556d5bcf7714 PyEval_EvalCodeEx\\n(pid=136534) @ 0x556d5bcf773c PyEval_EvalCode\\n(pid=136534) @ 0x556d5be0ef14 run_mod\\n(pid=136534) @ 0x556d5be19331 PyRun_FileExFlags\\n(pid=136534) @ 0x556d5be19523 PyRun_SimpleFileExFlags\\n(pid=136534) @ 0x556d5be1a655 
pymain_main\\n(pid=136534) @ 0x556d5be1a77c _Py_UnixMain\\n(pid=136534) @ 0x7f56b0fb90b3 __libc_start_main\\n(pid=136534) @ 0x556d5bdbeff0 (unknown)\\nA worker died or was killed while executing task ffffffffffffffff92c1835601000000."}, {'piece_type': 'error message', 'piece_content': '^CTraceback (most recent call last):\\nFile "test.py", line 72, in <module>\\n**tune_kwargs)\\nFile "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/tune.py", line 405, in run\\nrunner.step()\\nFile "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 375, in step\\nself._process_events() # blocking\\nFile "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 475, in _process_events\\ntrial = self.trial_executor.get_next_available_trial() # blocking\\nFile "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 463, in get_next_available_trial\\n[result_id], _ = ray.wait(shuffled_results)\\nFile "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/worker.py", line 1558, in wait\\nworker.current_task_id,\\nFile "python/ray/_raylet.pyx", line 939, in ray._raylet.CoreWorker.wait\\nFile "python/ray/_raylet.pyx", line 144, in ray._raylet.check_status\\nKeyboardInterrupt\\n^CError in atexit._run_exitfuncs:\\nTraceback (most recent call last):\\nFile "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/worker.py", line 784, in shutdown\\ntime.sleep(0.5)\\nKeyboardInterrupt'}, {'piece_type': 'other', 'piece_content': "I1009 07:02:24.829182 112317 112317 gcs_worker_manager.cc:31] Reporting worker failure, worker id = e9f747a7936a76818495\\\\\\n506b3f11c238bda56539, node id = 2041b2a79d53a7b4776d1553e99f625ccd26c15b, address = 192.168.1.98\\nW1009 07:02:24.829221 112317 112317 gcs_actor_manager.cc:544] Actor 35557ddd01000000 is out of scope,, destroying actor 
\\\\\\nchild.\\nW1009 07:02:24.829221 112317 112317 gcs_actor_manager.cc:544] Actor 35557ddd01000000 is out of scope,, destroying actor \\\\\\nchild.\\nI1009 07:02:24.829226 112317 112317 gcs_actor_manager.cc:558] Destroying actor, actor id = 35557ddd01000000\\nI1009 07:02:24.829228 112317 112317 gcs_actor_manager.cc:1065] Erasing actor 35557ddd01000000 owned by 01000000fffffffff\\\\\\nfffffffffffffffffffffff\\nW1009 07:02:24.829443 112317 112317 gcs_worker_manager.cc:67] Failed to report worker failure, the worker doesn't exist,\\\\\\nworker id = e9f747a7936a76818495506b3f11c238bda56539, node id = 2041b2a79d53a7b4776d1553e99f625ccd26c15b, address = 192\\\\\\n.168.1.98\\nW1009 07:02:24.829443 112317 112317 gcs_worker_manager.cc:67] Failed to report worker failure, the worker doesn't exist,\\\\\\nworker id = e9f747a7936a76818495506b3f11c238bda56539, node id = 2041b2a79d53a7b4776d1553e99f625ccd26c15b, address = 192\\\\\\n.168.1.98"}, {'piece_type': 'other', 'piece_content': 'I1009 07:03:00.041805 112317 112317 gcs_actor_scheduler.cc:387] Retry creating actor 92c1835601000000 on worker 7bb2509f\\\\\\n320bd6f298c0f4a49bb9debecd2349e2 at node 2041b2a79d53a7b4776d1553e99f625ccd26c15b\\nI1009 07:03:00.041824 112317 112317 gcs_actor_scheduler.cc:322] Start creating actor 92c1835601000000 on worker 7bb2509f\\\\\\n320bd6f298c0f4a49bb9debecd2349e2 at node 2041b2a79d53a7b4776d1553e99f625ccd26c15b\\nI1009 07:03:00.111589 112317 112317 gcs_actor_scheduler.cc:387] Retry creating actor 7bb0b57001000000 on worker 3a3c4734\\\\\\n91f3af24c7eb1796c06b1ed67a13690e at node 2041b2a79d53a7b4776d1553e99f625ccd26c15b\\nI1009 07:03:00.111609 112317 112317 gcs_actor_scheduler.cc:322] Start creating actor 7bb0b57001000000 on worker 3a3c4734\\\\\\n91f3af24c7eb1796c06b1ed67a13690e at node 2041b2a79d53a7b4776d1553e99f625ccd26c15b\\nI1009 07:03:00.128782 112317 112317 gcs_actor_scheduler.cc:387] Retry creating actor 4d95c66301000000 on worker af65091d\\\\\\ne52553f02a58a926af9ce296da7b45ac at node 
2041b2a79d53a7b4776d1553e99f625ccd26c15b'}, {'piece_type': 'other', 'piece_content': 'aiohttp==3.6.2\\naiohttp-cors==0.7.0\\naioredis==1.3.1\\nasync-timeout==3.0.1\\nattrs==20.2.0\\nbeautifulsoup4==4.9.3\\nblessings==1.7\\ncachetools==4.1.1\\ncertifi==2020.6.20\\nchardet==3.0.4\\nclick==7.1.2\\ncloudpickle==1.6.0\\ncolorama==0.4.3\\ncolorful==0.5.4\\ndecorator==4.4.2\\nfilelock==3.0.12\\nflake8==3.8.4\\nfuture==0.18.2\\ngoogle==3.0.0\\ngoogle-api-core==1.22.4\\ngoogle-auth==1.22.1\\ngoogleapis-common-protos==1.52.0\\ngpustat==0.6.0\\ngrpcio==1.32.0\\nhiredis==1.1.0\\nhyperopt==0.2.5\\nidna==2.10\\nimportlib-metadata==2.0.0\\njsonschema==3.2.0\\nmccabe==0.6.1\\nmsgpack==1.0.0\\nmultidict==4.7.6\\nnetworkx==2.5\\nnumpy==1.19.2\\nnvidia-ml-py3==7.352.0\\nopencensus==0.7.10\\nopencensus-context==0.1.1\\npandas==1.1.3\\nPillow==7.2.0\\nprometheus-client==0.8.0\\nprotobuf==3.13.0\\npsutil==5.7.2\\npy-spy==0.3.3\\npyasn1==0.4.8\\npyasn1-modules==0.2.8\\npycodestyle==2.6.0\\npyflakes==2.2.0\\npyrsistent==0.17.3\\npython-dateutil==2.8.1\\npytz==2020.1\\nPyYAML==5.3.1\\nray==1.0.0\\nredis==3.4.1\\nrequests==2.24.0\\nrsa==4.6\\nscipy==1.5.2\\nsix==1.15.0\\nsoupsieve==2.0.1\\ntabulate==0.8.7\\ntensorboardX==2.1\\ntorch==1.6.0\\ntorchvision==0.7.0\\ntqdm==4.50.2\\ntyping-extensions==3.7.4.3\\nurllib3==1.25.10\\nyarl==1.6.0\\nzipp==3.3.0'}, {'piece_type': 'other', 'piece_content': '"""This test checks that HyperOpt is functional.\\n\\nIt also checks that it is usable with a separate scheduler.\\n"""\\nimport time\\n\\nimport ray\\nfrom ray import tune\\nfrom ray.tune.suggest import ConcurrencyLimiter\\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\\nfrom ray.tune.suggest.hyperopt import HyperOptSearch\\n\\n\\ndef evaluation_fn(step, width, height):\\nreturn (0.1 + width * step / 100)**(-1) + height * 0.1\\n\\n\\ndef easy_objective(config):\\n# Hyperparameters\\nwidth, height = config["width"], config["height"]\\n\\nfor step in range(config["steps"]):\\n# Iterative 
training function - can be any arbitrary training procedure\\nintermediate_score = evaluation_fn(step, width, height)\\n# Feed the score back back to Tune.\\ntune.report(iterations=step, mean_loss=intermediate_score)\\ntime.sleep(0.1)\\n\\n\\nif __name__ == "__main__":\\nimport argparse\\n\\nparser = argparse.ArgumentParser()\\nparser.add_argument(\\n"--smoke-test", action="store_true", help="Finish quickly for testing")\\nargs, _ = parser.parse_known_args()\\nray.init(configure_logging=False)\\n\\ncurrent_best_params = [\\n{\\n"width": 1,\\n"height": 2,\\n"activation": 0 # Activation will be relu\\n},\\n{\\n"width": 4,\\n"height": 2,\\n"activation": 1 # Activation will be tanh\\n}\\n]\\n\\ntune_kwargs = {\\n"num_samples": 10 if args.smoke_test else 10000,\\n"config": {\\n"steps": 10,\\n"width": tune.uniform(0, 20),\\n"height": tune.uniform(-100, 100),\\n# This is an ignored parameter.\\n"activation": tune.choice(["relu", "tanh"])\\n}\\n}\\nalgo = HyperOptSearch(points_to_evaluate=current_best_params)\\nalgo = ConcurrencyLimiter(algo, max_concurrent=4)\\n\\nscheduler = AsyncHyperBandScheduler()\\ntune.run(\\neasy_objective,\\nsearch_alg=algo,\\nscheduler=scheduler,\\nmetric="mean_loss",\\nmode="min",\\n**tune_kwargs)'}]
|
^CTraceback (most recent call last):
File "test.py", line 72, in <module>
**tune_kwargs)
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/tune.py", line 405, in run
runner.step()
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 375, in step
self._process_events() # blocking
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 475, in _process_events
trial = self.trial_executor.get_next_available_trial() # blocking
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/tune/ray_trial_executor.py", line 463, in get_next_available_trial
[result_id], _ = ray.wait(shuffled_results)
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/worker.py", line 1558, in wait
worker.current_task_id,
File "python/ray/_raylet.pyx", line 939, in ray._raylet.CoreWorker.wait
File "python/ray/_raylet.pyx", line 144, in ray._raylet.check_status
KeyboardInterrupt
^CError in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/home/demattia/miniconda3/envs/test_tune/lib/python3.7/site-packages/ray/worker.py", line 784, in shutdown
time.sleep(0.5)
KeyboardInterrupt
|
CError
|
def with_parameters(fn, **kwargs):
    """Wrap a function trainable so it can receive large data objects.

    Each keyword argument is stored in the Ray object store under a key
    derived from ``fn``, and fetched back when the wrapped trainable is
    invoked. This makes it possible to hand arbitrary data (even whole
    datasets) to Tune trainable functions, and also serves as an
    alternative to ``functools.partial`` for supplying default arguments.

    Args:
        fn: Trainable *function* to wrap.
        **kwargs: Parameters to store in the object store.

    Raises:
        ValueError: If ``fn`` is not callable (e.g. a Trainable class).

    .. code-block:: python

        from ray import tune

        def train(config, data=None):
            for sample in data:
                # ...
                tune.report(loss=loss)

        data = HugeDataset(download=True)

        tune.run(
            tune.with_parameters(train, data=data),
            #...
        )
    """
    # Fail fast: this helper only supports the function API.
    if not callable(fn):
        raise ValueError(
            "`tune.with_parameters()` only works with the function API. "
            "If you want to pass parameters to Trainable _classes_, consider "
            "passing them via the `config` parameter.")

    key_prefix = f"{str(fn)}_"
    for name, value in kwargs.items():
        parameter_registry.put(key_prefix + name, value)

    wants_checkpoint = detect_checkpoint_function(fn)

    def inner(config, checkpoint_dir=None):
        call_kwargs = {}
        if wants_checkpoint:
            resolved_dir = checkpoint_dir
            signature = inspect.signature(fn)
            if "checkpoint_dir" in signature.parameters:
                declared = signature.parameters["checkpoint_dir"].default
                resolved_dir = declared or resolved_dir
            call_kwargs["checkpoint_dir"] = resolved_dir
        # Pull the stored objects back out of the object store at call time.
        for name in kwargs:
            call_kwargs[name] = parameter_registry.get(key_prefix + name)
        fn(config, **call_kwargs)

    if wants_checkpoint:
        return inner

    # Expose the correct signature when `fn` takes no `checkpoint_dir`.
    def _inner(config):
        inner(config, checkpoint_dir=None)

    return _inner
|
def with_parameters(fn, **kwargs):
    """Wrapper for function trainables to pass arbitrary large data objects.

    This wrapper function will store all passed parameters in the Ray
    object store and retrieve them when calling the function. It can thus
    be used to pass arbitrary data, even datasets, to Tune trainable
    functions.

    This can also be used as an alternative to ``functools.partial`` to pass
    default arguments to trainables.

    Args:
        fn: Trainable *function* to wrap.
        **kwargs: parameters to store in object store.

    Raises:
        ValueError: If ``fn`` is not callable (e.g. a Trainable class was
            passed instead of a function).

    .. code-block:: python

        from ray import tune

        def train(config, data=None):
            for sample in data:
                # ...
                tune.report(loss=loss)

        data = HugeDataset(download=True)

        tune.run(
            tune.with_parameters(train, data=data),
            #...
        )
    """
    # Bug fix: without this guard, passing a non-function (e.g. a Trainable
    # class) only failed much later inside `inner` via fn(config, **fn_kwargs)
    # with a confusing "TypeError: __init__() got multiple values for
    # argument ...". Fail fast with an actionable message instead.
    if not callable(fn):
        raise ValueError(
            "`tune.with_parameters()` only works with the function API. "
            "If you want to pass parameters to Trainable _classes_, consider "
            "passing them via the `config` parameter.")

    prefix = f"{str(fn)}_"
    for k, v in kwargs.items():
        parameter_registry.put(prefix + k, v)

    use_checkpoint = detect_checkpoint_function(fn)

    def inner(config, checkpoint_dir=None):
        fn_kwargs = {}
        if use_checkpoint:
            default = checkpoint_dir
            sig = inspect.signature(fn)
            if "checkpoint_dir" in sig.parameters:
                default = sig.parameters["checkpoint_dir"].default \
                    or default
            fn_kwargs["checkpoint_dir"] = default

        # Retrieve the stored objects from the object store at call time.
        for k in kwargs:
            fn_kwargs[k] = parameter_registry.get(prefix + k)
        fn(config, **fn_kwargs)

    # Use correct function signature if no `checkpoint_dir` parameter is set
    if not use_checkpoint:

        def _inner(config):
            inner(config, checkpoint_dir=None)

        return _inner
    return inner
|
[{'piece_type': 'error message', 'piece_content': 'Failure # 1 (occurred at 2020-09-26_16-50-01)\\nTraceback (most recent call last):\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/trial_runner.py", line 518, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/ray_trial_executor.py", line 488, in fetch_result\\nresult = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)\\nFile "/home/karol/PycharmProjects/ray/python/ray/worker.py", line 1438, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(TuneError): �[36mray::ImplicitFunc.train()�[39m (pid=25150, ip=141.12.239.114)\\nFile "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/trainable.py", line 336, in train\\nresult = self.step()\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 346, in step\\nself._report_thread_runner_error(block=True)\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 464, in _report_thread_runner_error\\nraise TuneError(("Trial raised an exception. Traceback:\\\\n{}"\\nray.tune.error.TuneError: Trial raised an exception. 
Traceback:\\n�[36mray::ImplicitFunc.train()�[39m (pid=25150, ip=141.12.239.114)\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 233, in run\\nself._entrypoint()\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 295, in entrypoint\\nreturn self._trainable_func(self.config, self._status_reporter,\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 527, in _trainable_func\\noutput = fn()\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 595, in _inner\\ninner(config, checkpoint_dir=None)\\nFile "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 589, in inner\\nfn(config, **fn_kwargs)\\nTypeError: __init__() got multiple values for argument \\'arch\\''}, {'piece_type': 'reproducing source code', 'piece_content': 'from ray import tune\\nfrom ray.tune.schedulers import PopulationBasedTraining\\nfrom ray.tune.function_runner import with_parameters\\n\\nray.init(num_cpus=3, num_gpus=3, _memory=7516192768, object_store_memory=7516192768)\\npbt = PopulationBasedTraining(\\ntime_attr="training_iteration",\\nmetric="val_dice",\\nmode="max",\\nperturbation_interval=2,\\nhyperparam_mutations={\\n"lr": lambda: np.random.uniform(0.001, 0.0001)\\n})\\nanalysis = tune.run(with_parameters(MyTrainable,\\narch="FPN",\\nencoder=encoder,\\ntrain_loader=train_loader,\\nval_loader=val_loader,\\noptimizer="adam",\\ndevice=device),\\nscheduler=pbt,\\nreuse_actors=True,\\nkeep_checkpoints_num=3,\\nverbose=1,\\nconfig={"lr": tune.uniform(0.001, 0.0001)},\\nnum_samples=2,\\nresources_per_trial={"gpu": 1, "cpu": 1})\\n\\nclass MyTrainable(tune.Trainable):\\ndef __init__(self, arch, encoder, train_loader, val_loader, optimizer="adam", device="cuda"):\\nsuper().__init__()\\nself.train_loader = train_loader\\nself.val_loader = val_loader\\nself.device = device\\nself.model = NoiseSegModel(arch=arch, encoder=encoder)\\nif str(self.device) != 
"cpu":\\nself.model = self.model.cuda()\\nself.optimizer = optimizer\\n\\ndef _setup(self, config):\\nif self.optimizer == "adam":\\nself.optimizer = torch.optim.Adam(self.model.parameters(), lr=config.get("lr"))\\nelif self.optimizer == "RMSprop":\\nself.optimizer = torch.optim.RMSprop(self.model.parameters(), lr=config.get("lr"), weight_decay=1e-8, momentum=0.9)\\nelse:\\nraise NotImplementedError\\n\\nself.criterion = nn.BCEWithLogitsLoss()\\nself.best_val_score = 0\\nself.best_global_step = 0\\nself.start_epoch = 0\\n\\n# Other methods from MyTrainable'}]
|
Failure # 1 (occurred at 2020-09-26_16-50-01)
Traceback (most recent call last):
File "/home/karol/PycharmProjects/ray/python/ray/tune/trial_runner.py", line 518, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "/home/karol/PycharmProjects/ray/python/ray/tune/ray_trial_executor.py", line 488, in fetch_result
result = ray.get(trial_future[0], timeout=DEFAULT_GET_TIMEOUT)
File "/home/karol/PycharmProjects/ray/python/ray/worker.py", line 1438, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(TuneError): �[36mray::ImplicitFunc.train()�[39m (pid=25150, ip=141.12.239.114)
File "python/ray/_raylet.pyx", line 484, in ray._raylet.execute_task
File "python/ray/_raylet.pyx", line 438, in ray._raylet.execute_task.function_executor
File "/home/karol/PycharmProjects/ray/python/ray/tune/trainable.py", line 336, in train
result = self.step()
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 346, in step
self._report_thread_runner_error(block=True)
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 464, in _report_thread_runner_error
raise TuneError(("Trial raised an exception. Traceback:\\n{}"
ray.tune.error.TuneError: Trial raised an exception. Traceback:
�[36mray::ImplicitFunc.train()�[39m (pid=25150, ip=141.12.239.114)
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 233, in run
self._entrypoint()
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 295, in entrypoint
return self._trainable_func(self.config, self._status_reporter,
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 527, in _trainable_func
output = fn()
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 595, in _inner
inner(config, checkpoint_dir=None)
File "/home/karol/PycharmProjects/ray/python/ray/tune/function_runner.py", line 589, in inner
fn(config, **fn_kwargs)
TypeError: __init__() got multiple values for argument 'arch'
|
ray.tune.error.TuneError
|
def wandb_mixin(func: Callable):
    """Decorator enabling Weights & Biases logging for function trainables.

    Weights and biases (https://www.wandb.com/) is a tool for experiment
    tracking, model optimization, and dataset versioning. Prepending a
    training function with ``@wandb_mixin`` initializes the Wandb API for
    use inside the function, mirroring what the ``WandbTrainableMixin``
    provides for ``Trainable`` classes:

    .. code-block:: python

        from ray.tune.integration.wandb import wandb_mixin

        @wandb_mixin
        def train_fn(config):
            wandb.log()

    Wandb is configured through a ``wandb`` key inside the ``config``
    passed to ``tune.run()``; its contents are forwarded to ``wandb.init()``
    as keyword arguments, except for these mixin settings:

    Args:
        api_key_file (str): Path to file containing the Wandb API KEY. This
            file must be on all nodes if using the `wandb_mixin`.
        api_key (str): Wandb API Key. Alternative to setting `api_key_file`.

    Wandb's ``group``, ``run_id`` and ``run_name`` are automatically selected
    by Tune, but can be overwritten by filling out the respective
    configuration values. All other valid settings are listed at
    https://docs.wandb.com/library/init

    Example:

    .. code-block:: python

        from ray import tune
        from ray.tune.integration.wandb import wandb_mixin

        @wandb_mixin
        def train_fn(config):
            for i in range(10):
                loss = self.config["a"] + self.config["b"]
                wandb.log({"loss": loss})
            tune.report(loss=loss, done=True)

        tune.run(
            train_fn,
            config={
                # define search space here
                "a": tune.choice([1, 2, 3]),
                "b": tune.choice([4, 5, 6]),
                # wandb configuration
                "wandb": {
                    "project": "Optimization_Project",
                    "api_key_file": "/path/to/file"
                }
            })
    """
    # Tag the function so the trainable wrapper applies the Wandb mixin.
    setattr(func, "__mixins__", (WandbTrainableMixin, ))
    return func
|
def wandb_mixin(func: Callable):
    """wandb_mixin

    Weights and biases (https://www.wandb.com/) is a tool for experiment
    tracking, model optimization, and dataset versioning. This Ray Tune
    Trainable mixin helps initializing the Wandb API for use with the
    ``Trainable`` class or with `@wandb_mixin` for the function API.

    For basic usage, just prepend your training function with the
    ``@wandb_mixin`` decorator:

    .. code-block:: python

        from ray.tune.integration.wandb import wandb_mixin

        @wandb_mixin
        def train_fn(config):
            wandb.log()

    Wandb configuration is done by passing a ``wandb`` key to
    the ``config`` parameter of ``tune.run()`` (see example below).
    The content of the ``wandb`` config entry is passed to ``wandb.init()``
    as keyword arguments. The exception are the following settings, which
    are used to configure the ``WandbTrainableMixin`` itself:

    Args:
        api_key_file (str): Path to file containing the Wandb API KEY. This
            file must be on all nodes if using the `wandb_mixin`.
        api_key (str): Wandb API Key. Alternative to setting `api_key_file`.

    Wandb's ``group``, ``run_id`` and ``run_name`` are automatically selected
    by Tune, but can be overwritten by filling out the respective configuration
    values.

    Please see here for all other valid configuration settings:
    https://docs.wandb.com/library/init

    Example:

    .. code-block:: python

        from ray import tune
        from ray.tune.integration.wandb import wandb_mixin

        @wandb_mixin
        def train_fn(config):
            for i in range(10):
                loss = self.config["a"] + self.config["b"]
                wandb.log({"loss": loss})
            tune.report(loss=loss, done=True)

        tune.run(
            train_fn,
            config={
                # define search space here
                "a": tune.choice([1, 2, 3]),
                "b": tune.choice([4, 5, 6]),
                # wandb configuration
                "wandb": {
                    "project": "Optimization_Project",
                    "api_key_file": "/path/to/file"
                }
            })
    """
    func.__mixins__ = (WandbTrainableMixin, )
    # `func` may be any callable, e.g. a `functools.partial`, which does not
    # define `__name__` (this previously raised an AttributeError). Fall back
    # to the wrapped callable's name, or None if no name can be determined.
    func.__wandb_group__ = getattr(
        func, "__name__",
        getattr(getattr(func, "func", None), "__name__", None))
    return func
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/home/ubuntu/run_ray_tune.py", line 222, in <module>\\ntune_helsinki_(args)\\nFile "/home/ubuntu/run_ray_tune.py", line 106, in tune_helsinki_\\nray_wandb_func = wandb_mixin(ray_func)\\nFile "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/tune/integration/wandb.py", line 142, in wandb_mixin\\nfunc.__wandb_group__ = func.__name__\\nAttributeError: \\'functools.partial\\' object has no attribute \\'__name__\\'\\nShared connection to 34.220.26.193 closed.\\nError: Command failed:'}, {'piece_type': 'other', 'piece_content': 'ray_func = partial(ray_main, args)\\nray_wandb_func = wandb_mixin(ray_func)'}]
|
Traceback (most recent call last):
File "/home/ubuntu/run_ray_tune.py", line 222, in <module>
tune_helsinki_(args)
File "/home/ubuntu/run_ray_tune.py", line 106, in tune_helsinki_
ray_wandb_func = wandb_mixin(ray_func)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/tune/integration/wandb.py", line 142, in wandb_mixin
func.__wandb_group__ = func.__name__
AttributeError: 'functools.partial' object has no attribute '__name__'
Shared connection to 34.220.26.193 closed.
Error: Command failed:
|
AttributeError
|
def _init(self):
    """Set up and start the background Wandb logging process.

    Extracts the wandb settings from the trial config (either from a
    top-level ``wandb`` key or from ``logger_config["wandb"]``), resolves
    the API key, and launches ``self._logger_process_cls`` fed through a
    multiprocessing queue.

    Raises:
        ValueError: If no wandb configuration or no ``project`` was given.
    """
    config = self.config.copy()
    config.pop("callbacks", None)  # Remove callbacks
    try:
        # Wandb settings can come from `logger_config["wandb"]` (logger
        # API) or from a top-level `wandb` key (mixin/callback API).
        if config.get("logger_config", {}).get("wandb"):
            logger_config = config.pop("logger_config")
            wandb_config = logger_config.get("wandb").copy()
        else:
            wandb_config = config.pop("wandb").copy()
    except KeyError:
        raise ValueError(
            "Wandb logger specified but no configuration has been passed. "
            "Make sure to include a `wandb` key in your `config` dict "
            "containing at least a `project` specification.")
    _set_api_key(wandb_config)
    # Result keys that are never forwarded to wandb.
    exclude_results = self._exclude_results.copy()
    # Additional excludes
    additional_excludes = wandb_config.pop("excludes", [])
    exclude_results += additional_excludes
    # Log config keys on each result?
    log_config = wandb_config.pop("log_config", False)
    if not log_config:
        exclude_results += ["config"]
    # Fill trial ID and name; both may be None when there is no Tune trial
    # attached (e.g. when the mixin is used outside a trial context).
    trial_id = self.trial.trial_id if self.trial else None
    trial_name = str(self.trial) if self.trial else None
    # Project name for Wandb (mandatory).
    try:
        wandb_project = wandb_config.pop("project")
    except KeyError:
        raise ValueError(
            "You need to specify a `project` in your wandb `config` dict.")
    # Grouping: defaults to the trainable name when not set explicitly.
    wandb_group = wandb_config.pop(
        "group", self.trial.trainable_name if self.trial else None)
    # remove unpickleable items!
    config = _clean_log(config)
    wandb_init_kwargs = dict(
        id=trial_id,
        name=trial_name,
        resume=True,
        reinit=True,
        allow_val_change=True,
        group=wandb_group,
        project=wandb_project,
        config=config)
    # Any remaining user-supplied keys are forwarded to wandb.init().
    wandb_init_kwargs.update(wandb_config)
    self._queue = Queue()
    # The logger runs as a separate process consuming results from a queue.
    self._wandb = self._logger_process_cls(
        queue=self._queue,
        exclude=exclude_results,
        to_config=self._config_results,
        **wandb_init_kwargs)
    self._wandb.start()
|
def _init(self):
    """Set up and start the background Wandb logging process.

    Extracts the wandb settings from the trial config (either from a
    top-level ``wandb`` key or from ``logger_config["wandb"]``), resolves
    the API key, and launches ``self._logger_process_cls`` fed through a
    multiprocessing queue.

    Raises:
        ValueError: If no wandb configuration or no ``project`` was given.
    """
    config = self.config.copy()
    config.pop("callbacks", None)  # Remove callbacks
    try:
        if config.get("logger_config", {}).get("wandb"):
            logger_config = config.pop("logger_config")
            wandb_config = logger_config.get("wandb").copy()
        else:
            wandb_config = config.pop("wandb").copy()
    except KeyError:
        raise ValueError(
            "Wandb logger specified but no configuration has been passed. "
            "Make sure to include a `wandb` key in your `config` dict "
            "containing at least a `project` specification.")
    _set_api_key(wandb_config)
    exclude_results = self._exclude_results.copy()
    # Additional excludes
    additional_excludes = wandb_config.pop("excludes", [])
    exclude_results += additional_excludes
    # Log config keys on each result?
    log_config = wandb_config.pop("log_config", False)
    if not log_config:
        exclude_results += ["config"]
    # Fill trial ID and name. `self.trial` can legitimately be None (e.g.
    # when used outside a Tune trial context), so guard every access —
    # previously this raised an AttributeError on `None.trial_id`.
    trial_id = self.trial.trial_id if self.trial else None
    trial_name = str(self.trial) if self.trial else None
    # Project name for Wandb
    try:
        wandb_project = wandb_config.pop("project")
    except KeyError:
        raise ValueError(
            "You need to specify a `project` in your wandb `config` dict.")
    # Grouping: defaults to the trainable name when a trial is attached.
    wandb_group = wandb_config.pop(
        "group", self.trial.trainable_name if self.trial else None)
    # remove unpickleable items!
    config = _clean_log(config)
    wandb_init_kwargs = dict(
        id=trial_id,
        name=trial_name,
        resume=True,
        reinit=True,
        allow_val_change=True,
        group=wandb_group,
        project=wandb_project,
        config=config)
    # Any remaining user-supplied keys are forwarded to wandb.init().
    wandb_init_kwargs.update(wandb_config)
    self._queue = Queue()
    self._wandb = self._logger_process_cls(
        queue=self._queue,
        exclude=exclude_results,
        to_config=self._config_results,
        **wandb_init_kwargs)
    self._wandb.start()
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/home/ubuntu/run_ray_tune.py", line 222, in <module>\\ntune_helsinki_(args)\\nFile "/home/ubuntu/run_ray_tune.py", line 106, in tune_helsinki_\\nray_wandb_func = wandb_mixin(ray_func)\\nFile "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/tune/integration/wandb.py", line 142, in wandb_mixin\\nfunc.__wandb_group__ = func.__name__\\nAttributeError: \\'functools.partial\\' object has no attribute \\'__name__\\'\\nShared connection to 34.220.26.193 closed.\\nError: Command failed:'}, {'piece_type': 'other', 'piece_content': 'ray_func = partial(ray_main, args)\\nray_wandb_func = wandb_mixin(ray_func)'}]
|
Traceback (most recent call last):
File "/home/ubuntu/run_ray_tune.py", line 222, in <module>
tune_helsinki_(args)
File "/home/ubuntu/run_ray_tune.py", line 106, in tune_helsinki_
ray_wandb_func = wandb_mixin(ray_func)
File "/home/ubuntu/anaconda3/lib/python3.7/site-packages/ray/tune/integration/wandb.py", line 142, in wandb_mixin
func.__wandb_group__ = func.__name__
AttributeError: 'functools.partial' object has no attribute '__name__'
Shared connection to 34.220.26.193 closed.
Error: Command failed:
|
AttributeError
|
def Concurrently(ops: List[LocalIterator],
                 *,
                 mode="round_robin",
                 output_indexes=None,
                 round_robin_weights=None):
    """Combine several parent iterators into one concurrent iterator.

    Args:
        ops (list): Parent iterators to run together (at least two).
        mode (str): One of 'round_robin', 'async'. In 'round_robin' mode,
            we alternate between pulling items from each parent iterator in
            order deterministically. In 'async' mode, we pull from each
            parent iterator as fast as they are produced. This is
            non-deterministic.
        output_indexes (list): If specified, only output results from the
            given ops. For example, if ``output_indexes=[0]``, only results
            from the first op in ops will be returned.
        round_robin_weights (list): List of weights to use for round robin
            mode. For example, ``[2, 1]`` will cause the iterator to pull
            twice as many items from the first iterator as the second.
            ``[2, 1, *]`` will cause as many items to be pulled as possible
            from the third iterator without blocking. This is only allowed
            in round robin mode.

    Examples:
        >>> sim_op = ParallelRollouts(...).for_each(...)
        >>> replay_op = LocalReplay(...).for_each(...)
        >>> combined_op = Concurrently([sim_op, replay_op], mode="async")
    """
    if len(ops) < 2:
        raise ValueError("Should specify at least 2 ops.")
    if mode == "round_robin":
        deterministic = True
    elif mode == "async":
        deterministic = False
        if round_robin_weights:
            raise ValueError(
                "round_robin_weights cannot be specified in async mode")
    else:
        raise ValueError("Unknown mode {}".format(mode))
    if round_robin_weights and all(r == "*" for r in round_robin_weights):
        raise ValueError("Cannot specify all round robin weights = *")
    if output_indexes:
        for idx in output_indexes:
            assert idx in range(len(ops)), ("Index out of range", idx)
    # Tag each item with the index of the op that produced it, so the
    # output filter below can tell the sources apart.
    tagged = [
        op.for_each(lambda item, idx=idx: (idx, item))
        for idx, op in enumerate(ops)
    ]
    merged = tagged[0].union(
        *tagged[1:],
        deterministic=deterministic,
        round_robin_weights=round_robin_weights)
    if output_indexes:
        merged = merged.filter(lambda tup: tup[0] in output_indexes)
        merged = merged.for_each(lambda tup: tup[1])
    return merged
|
def Concurrently(ops: List[LocalIterator],
                 *,
                 mode="round_robin",
                 output_indexes=None,
                 round_robin_weights=None):
    """Operator that runs the given parent iterators concurrently.

    Args:
        mode (str): One of {'round_robin', 'async'}.
            - In 'round_robin' mode, we alternate between pulling items from
              each parent iterator in order deterministically.
            - In 'async' mode, we pull from each parent iterator as fast as
              they are produced. This is non-deterministic.
        output_indexes (list): If specified, only output results from the
            given ops. For example, if output_indexes=[0], only results from
            the first op in ops will be returned.
        round_robin_weights (list): List of weights to use for round robin
            mode. For example, [2, 1] will cause the iterator to pull twice
            as many items from the first iterator as the second. [2, 1, *]
            will cause as many items to be pulled as possible from the third
            iterator without blocking. This is only allowed in round robin
            mode.

    >>> sim_op = ParallelRollouts(...).for_each(...)
    >>> replay_op = LocalReplay(...).for_each(...)
    >>> combined_op = Concurrently([sim_op, replay_op], mode="async")
    """
    # Validate arguments up front with guard clauses.
    if len(ops) < 2:
        raise ValueError("Should specify at least 2 ops.")
    if mode not in ("round_robin", "async"):
        raise ValueError("Unknown mode {}".format(mode))
    deterministic = mode == "round_robin"
    if mode == "async" and round_robin_weights:
        raise ValueError(
            "round_robin_weights cannot be specified in async mode")
    if round_robin_weights:
        if all(weight == "*" for weight in round_robin_weights):
            raise ValueError("Cannot specify all round robin weights = *")
    if output_indexes:
        for position in output_indexes:
            assert position in range(len(ops)), \
                ("Index out of range", position)

    def _tag(op, position):
        # Pair every produced item with its source op's index.
        return op.for_each(lambda item: (position, item))

    streams = [_tag(op, position) for position, op in enumerate(ops)]
    combined = streams[0].union(
        *streams[1:],
        deterministic=deterministic,
        round_robin_weights=round_robin_weights)
    if output_indexes:
        # Keep only items from the requested ops, then strip the index tag.
        combined = (combined.filter(lambda tup: tup[0] in output_indexes)
                    .for_each(lambda tup: tup[1]))
    return combined
|
[{'piece_type': 'other', 'piece_content': 'from ray.tune.logger import pretty_print\\nfrom ray.rllib.agents.dqn.dqn import DEFAULT_CONFIG\\nfrom ray.rllib.agents.dqn.dqn import DQNTrainer\\nimport numpy as np\\n\\nconfig = DEFAULT_CONFIG.copy()\\nconfig["env"] = "CartPole-v0"\\nconfig["input"] = {"demo-out": 0.3, "sampler": 0.7}\\nconfig["explore"] = False\\ntrainer = DQNTrainer(config=config)\\nbest_eps_length_avg = np.PINF\\nwhile True:\\nresults = trainer.train()\\nprint(pretty_print(results))'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/home/enes/ws/code/arl/mt/test/consume_experiences.py", line 16, in <module>\\nresults = trainer.train()\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 522, in train\\nraise e\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 508, in train\\nresult = Trainable.train(self)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/tune/trainable.py", line 332, in train\\nresult = self.step()\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 110, in step\\nres = next(self.train_exec_impl)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter\\nfor item in 
it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 1078, in build_union\\nitem = next(it)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\n[Previous line repeated 2 more times]\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_ops.py", line 89, in gen_replay\\nitem = local_buffer.replay()\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 331, in replay\\nbeta=self.prioritized_replay_beta)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 173, in sample\\nbatch = self._encode_sample(idxes)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 64, in _encode_sample\\nout = SampleBatch.concat_samples([self._storage[i] for i in idxes])\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in concat_samples\\nout[k] = concat_aligned([s[k] for s in samples])\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in <listcomp>\\nout[k] = concat_aligned([s[k] for s in samples])\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 294, in __getitem__\\nreturn self.data[key]\\nKeyError: \\'action_logp\\''}]
|
Traceback (most recent call last):
File "/home/enes/ws/code/arl/mt/test/consume_experiences.py", line 16, in <module>
results = trainer.train()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 522, in train
raise e
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 508, in train
result = Trainable.train(self)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/tune/trainable.py", line 332, in train
result = self.step()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 110, in step
res = next(self.train_exec_impl)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 1078, in build_union
item = next(it)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
[Previous line repeated 2 more times]
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_ops.py", line 89, in gen_replay
item = local_buffer.replay()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 331, in replay
beta=self.prioritized_replay_beta)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 173, in sample
batch = self._encode_sample(idxes)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 64, in _encode_sample
out = SampleBatch.concat_samples([self._storage[i] for i in idxes])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in concat_samples
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in <listcomp>
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 294, in __getitem__
return self.data[key]
KeyError: 'action_logp'
|
KeyError
|
def ParallelRollouts(workers: WorkerSet, *, mode="bulk_sync",
                     num_async=1) -> LocalIterator[SampleBatch]:
    """Collect experiences from rollout workers in parallel.

    Falls back to serial sampling from the local worker instance when the
    worker set has no remote workers.

    Args:
        workers (WorkerSet): set of rollout workers to use.
        mode (str): One of 'async', 'bulk_sync', 'raw'. In 'async' mode,
            batches are returned as soon as they are computed by rollout
            workers with no order guarantees. In 'bulk_sync' mode, we
            collect one batch from each worker and concatenate them
            together into a large batch to return. In 'raw' mode, the
            ParallelIterator object is returned directly and the caller is
            responsible for implementing gather and updating the timesteps
            counter.
        num_async (int): In async mode, the max number of async
            requests in flight per actor.

    Returns:
        A local iterator over experiences collected in parallel.

    Examples:
        >>> rollouts = ParallelRollouts(workers, mode="async")
        >>> batch = next(rollouts)
        >>> print(batch.count)
        50  # config.rollout_fragment_length

        >>> rollouts = ParallelRollouts(workers, mode="bulk_sync")
        >>> batch = next(rollouts)
        >>> print(batch.count)
        200  # config.rollout_fragment_length * config.num_workers

    Updates the STEPS_SAMPLED_COUNTER counter in the local iterator context.
    """
    # Start from a consistent set of weights on every worker.
    workers.sync_weights()

    def _count_steps(batch):
        # Account for the sampled timesteps in the shared metrics context.
        metrics = _get_shared_metrics()
        metrics.counters[STEPS_SAMPLED_COUNTER] += batch.count
        return batch

    if not workers.remote_workers():
        # No remote workers: sample serially from the local worker.
        def _local_sampler(_):
            while True:
                yield workers.local_worker().sample()

        return LocalIterator(_local_sampler,
                             SharedMetrics()).for_each(_count_steps)

    # Create a parallel iterator over generated experiences.
    rollouts = from_actors(workers.remote_workers())
    if mode == "bulk_sync":
        batched = rollouts.batch_across_shards()
        concatenated = batched.for_each(
            lambda batches: SampleBatch.concat_samples(batches))
        return concatenated.for_each(_count_steps)
    if mode == "async":
        return rollouts.gather_async(
            num_async=num_async).for_each(_count_steps)
    if mode == "raw":
        return rollouts
    raise ValueError("mode must be one of 'bulk_sync', 'async', 'raw', "
                     "got '{}'".format(mode))
|
def ParallelRollouts(workers: WorkerSet, *, mode="bulk_sync",
                     num_async=1) -> LocalIterator[SampleBatch]:
    """Operator to collect experiences in parallel from rollout workers.

    If there are no remote workers, experiences will be collected serially from
    the local worker instance instead.

    Args:
        workers (WorkerSet): set of rollout workers to use.
        mode (str): One of {'async', 'bulk_sync', 'raw'}.
            - In 'async' mode, batches are returned as soon as they are
              computed by rollout workers with no order guarantees.
            - In 'bulk_sync' mode, we collect one batch from each worker
              and concatenate them together into a large batch to return.
            - In 'raw' mode, the ParallelIterator object is returned directly
              and the caller is responsible for implementing gather and
              updating the timesteps counter.
        num_async (int): In async mode, the max number of async
            requests in flight per actor.

    Returns:
        A local iterator over experiences collected in parallel.

    Examples:
        >>> rollouts = ParallelRollouts(workers, mode="async")
        >>> batch = next(rollouts)
        >>> print(batch.count)
        50  # config.rollout_fragment_length

        >>> rollouts = ParallelRollouts(workers, mode="bulk_sync")
        >>> batch = next(rollouts)
        >>> print(batch.count)
        200  # config.rollout_fragment_length * config.num_workers

    Updates the STEPS_SAMPLED_COUNTER counter in the local iterator context.
    """
    # Ensure workers are initially in sync.
    workers.sync_weights()

    def report_timesteps(batch):
        # Side effect: account for sampled timesteps in the shared metrics
        # context before passing the batch through unchanged.
        metrics = _get_shared_metrics()
        metrics.counters[STEPS_SAMPLED_COUNTER] += batch.count
        return batch

    if not workers.remote_workers():
        # Handle the serial sampling case: an infinite generator pulling
        # batches from the local worker only.
        def sampler(_):
            while True:
                yield workers.local_worker().sample()

        return (LocalIterator(sampler, SharedMetrics())
                .for_each(report_timesteps))
    # Create a parallel iterator over generated experiences.
    rollouts = from_actors(workers.remote_workers())
    if mode == "bulk_sync":
        # One batch per worker per step, concatenated into a single batch.
        return rollouts \
            .batch_across_shards() \
            .for_each(lambda batches: SampleBatch.concat_samples(batches)) \
            .for_each(report_timesteps)
    elif mode == "async":
        return rollouts.gather_async(
            num_async=num_async).for_each(report_timesteps)
    elif mode == "raw":
        # Caller takes over gathering and timestep accounting.
        return rollouts
    else:
        raise ValueError("mode must be one of 'bulk_sync', 'async', 'raw', "
                         "got '{}'".format(mode))
|
[{'piece_type': 'other', 'piece_content': 'from ray.tune.logger import pretty_print\\nfrom ray.rllib.agents.dqn.dqn import DEFAULT_CONFIG\\nfrom ray.rllib.agents.dqn.dqn import DQNTrainer\\nimport numpy as np\\n\\nconfig = DEFAULT_CONFIG.copy()\\nconfig["env"] = "CartPole-v0"\\nconfig["input"] = {"demo-out": 0.3, "sampler": 0.7}\\nconfig["explore"] = False\\ntrainer = DQNTrainer(config=config)\\nbest_eps_length_avg = np.PINF\\nwhile True:\\nresults = trainer.train()\\nprint(pretty_print(results))'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "/home/enes/ws/code/arl/mt/test/consume_experiences.py", line 16, in <module>\\nresults = trainer.train()\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 522, in train\\nraise e\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 508, in train\\nresult = Trainable.train(self)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/tune/trainable.py", line 332, in train\\nresult = self.step()\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 110, in step\\nres = next(self.train_exec_impl)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter\\nfor item in 
it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 1078, in build_union\\nitem = next(it)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__\\nreturn next(self.built_iterator)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach\\nfor item in it:\\n[Previous line repeated 2 more times]\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_ops.py", line 89, in gen_replay\\nitem = local_buffer.replay()\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 331, in replay\\nbeta=self.prioritized_replay_beta)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 173, in sample\\nbatch = self._encode_sample(idxes)\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 64, in _encode_sample\\nout = SampleBatch.concat_samples([self._storage[i] for i in idxes])\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in concat_samples\\nout[k] = concat_aligned([s[k] for s in samples])\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in <listcomp>\\nout[k] = concat_aligned([s[k] for s in samples])\\nFile "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 294, in __getitem__\\nreturn self.data[key]\\nKeyError: \\'action_logp\\''}]
|
Traceback (most recent call last):
File "/home/enes/ws/code/arl/mt/test/consume_experiences.py", line 16, in <module>
results = trainer.train()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 522, in train
raise e
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer.py", line 508, in train
result = Trainable.train(self)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/tune/trainable.py", line 332, in train
result = self.step()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/agents/trainer_template.py", line 110, in step
res = next(self.train_exec_impl)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 845, in apply_filter
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 1078, in build_union
item = next(it)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 758, in __next__
return next(self.built_iterator)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/util/iter.py", line 785, in apply_foreach
for item in it:
[Previous line repeated 2 more times]
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_ops.py", line 89, in gen_replay
item = local_buffer.replay()
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 331, in replay
beta=self.prioritized_replay_beta)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 173, in sample
batch = self._encode_sample(idxes)
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/execution/replay_buffer.py", line 64, in _encode_sample
out = SampleBatch.concat_samples([self._storage[i] for i in idxes])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in concat_samples
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 93, in <listcomp>
out[k] = concat_aligned([s[k] for s in samples])
File "/home/enes/ws/envs/rlws/lib/python3.7/site-packages/ray/rllib/policy/sample_batch.py", line 294, in __getitem__
return self.data[key]
KeyError: 'action_logp'
|
KeyError
|
def _get_node_specific_docker_config(self, node_id):
if "docker" not in self.config:
return {}
docker_config = copy.deepcopy(self.config.get("docker", {}))
node_specific_docker = self._get_node_type_specific_fields(
node_id, "docker")
docker_config.update(node_specific_docker)
return docker_config
|
def _get_node_specific_docker_config(self, node_id):
docker_config = copy.deepcopy(self.config.get("docker", {}))
node_specific_docker = self._get_node_type_specific_fields(
node_id, "docker")
docker_config.update(node_specific_docker)
return docker_config
|
[{'piece_type': 'other', 'piece_content': 'min_workers: 1\\nmax_workers: 1\\ninitial_workers: 1'}, {'piece_type': 'other', 'piece_content': '(myenv) tcw@pop-os:~/projects/ray$ ray up example-full.yaml\\nCommands running under a login shell can produce more output than special processing can handle.\\nThus, the output from subcommands will be logged as is.\\nConsider using --use-normal-shells, if you tested your workflow and it is compatible.\\n\\nCluster configuration valid\\nCluster: default\\n2020-09-09 20:16:16,175 INFO config.py:68 -- KubernetesNodeProvider: namespace \\'ray\\' not found, attempting to create it\\n2020-09-09 20:16:16,213 INFO config.py:72 -- KubernetesNodeProvider: successfully created namespace \\'ray\\'\\n2020-09-09 20:16:16,250 INFO config.py:97 -- KubernetesNodeProvider: autoscaler_service_account \\'autoscaler\\' not found, attempting to create it\\n2020-09-09 20:16:16,291 INFO config.py:99 -- KubernetesNodeProvider: successfully created autoscaler_service_account \\'autoscaler\\'\\n2020-09-09 20:16:16,401 INFO config.py:123 -- KubernetesNodeProvider: autoscaler_role \\'autoscaler\\' not found, attempting to create it\\n2020-09-09 20:16:16,436 INFO config.py:125 -- KubernetesNodeProvider: successfully created autoscaler_role \\'autoscaler\\'\\n2020-09-09 20:16:16,473 INFO config.py:156 -- KubernetesNodeProvider: autoscaler_role_binding \\'autoscaler\\' not found, attempting to create it\\n2020-09-09 20:16:16,518 INFO config.py:158 -- KubernetesNodeProvider: successfully created autoscaler_role_binding \\'autoscaler\\'\\n2020-09-09 20:16:16,554 INFO config.py:189 -- KubernetesNodeProvider: service \\'ray-head\\' not found, attempting to create it\\n2020-09-09 20:16:16,613 INFO config.py:191 -- KubernetesNodeProvider: successfully created service \\'ray-head\\'\\n2020-09-09 20:16:16,653 INFO config.py:189 -- KubernetesNodeProvider: service \\'ray-workers\\' not found, attempting to create it\\n2020-09-09 20:16:16,702 INFO config.py:191 -- 
KubernetesNodeProvider: successfully created service \\'ray-workers\\'\\nThis will create a new cluster [y/N]: y\\n2020-09-09 20:16:18,507 INFO commands.py:592 -- get_or_create_head_node: Launching new head node...\\n2020-09-09 20:16:18,507 INFO node_provider.py:87 -- KubernetesNodeProvider: calling create_namespaced_pod (count=1).\\n2020-09-09 20:16:18,606 INFO commands.py:631 -- get_or_create_head_node: Updating files on head node...\\n2020-09-09 20:16:18,609 INFO updater.py:97 -- NodeUpdater: ray-head-xn4b5: Updating to c1ad72dff590afc61824084810dc92021a0f3d39\\n2020-09-09 20:16:18,697 INFO updater.py:219 -- NodeUpdater: ray-head-xn4b5: Waiting for remote shell...\\n2020-09-09 20:16:18,729 INFO command_runner.py:223 -- NodeUpdater: ray-head-xn4b5: Running [\\'kubectl\\', \\'-n\\', \\'ray\\', \\'exec\\', \\'-it\\', \\'ray-head-xn4b5\\', \\'--\\', \\'bash\\', \\'--login\\', \\'-c\\', \\'-i\\', "\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (uptime)\\'"]\\nkubectl controls the Kubernetes cluster manager.\\n\\nFind more information at: https://kubernetes.io/docs/reference/kubectl/overview/\\n\\nBasic Commands (Beginner):\\ncreate Create a resource from a file or from stdin.\\nexpose Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service\\nrun Run a particular image on the cluster\\nset Set specific features on objects\\n\\nBasic Commands (Intermediate):\\nexplain Documentation of resources\\nget Display one or many resources\\nedit Edit a resource on the server\\ndelete Delete resources by filenames, stdin, resources and names, or by resources and label selector\\n\\nDeploy Commands:\\nrollout Manage the rollout of a resource\\nscale Set a new size for a Deployment, ReplicaSet or Replication Controller\\nautoscale Auto-scale a Deployment, ReplicaSet, or ReplicationController\\n\\nCluster Management Commands:\\ncertificate Modify certificate resources.\\ncluster-info Display cluster 
info\\ntop Display Resource (CPU/Memory/Storage) usage.\\ncordon Mark node as unschedulable\\nuncordon Mark node as schedulable\\ndrain Drain node in preparation for maintenance\\ntaint Update the taints on one or more nodes\\n\\nTroubleshooting and Debugging Commands:\\ndescribe Show details of a specific resource or group of resources\\nlogs Print the logs for a container in a pod\\nattach Attach to a running container\\nexec Execute a command in a container\\nport-forward Forward one or more local ports to a pod\\nproxy Run a proxy to the Kubernetes API server\\ncp Copy files and directories to and from containers.\\nauth Inspect authorization\\n\\nAdvanced Commands:\\ndiff Diff live version against would-be applied version\\napply Apply a configuration to a resource by filename or stdin\\npatch Update field(s) of a resource using strategic merge patch\\nreplace Replace a resource by filename or stdin\\nwait Experimental: Wait for a specific condition on one or many resources.\\nconvert Convert config files between different API versions\\nkustomize Build a kustomization target from a directory or a remote url.\\n\\nSettings Commands:\\nlabel Update the labels on a resource\\nannotate Update the annotations on a resource\\ncompletion Output shell completion code for the specified shell (bash or zsh)\\n\\nOther Commands:\\napi-resources Print the supported API resources on the server\\napi-versions Print the supported API versions on the server, in the form of "group/version"\\nconfig Modify kubeconfig files\\nplugin Provides utilities for interacting with plugins.\\nversion Print the client and server version information\\n\\nUsage:\\nkubectl [flags] [options]\\n\\nUse "kubectl <command> --help" for more information about a given command.\\nUse "kubectl options" for a list of global command-line options (applies to all commands).\\n2020-09-09 20:16:18,777 INFO log_timer.py:27 -- NodeUpdater: ray-head-xn4b5: Got remote shell [LogTimer=80ms]\\n2020-09-09 
20:16:18,905 INFO command_runner.py:223 -- NodeUpdater: ray-head-xn4b5: Running [\\'kubectl\\', \\'-n\\', \\'ray\\', \\'exec\\', \\'-it\\', \\'ray-head-xn4b5\\', \\'--\\', \\'bash\\', \\'--login\\', \\'-c\\', \\'-i\\', "\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (mkdir -p ~)\\'"]\\nkubectl controls the Kubernetes cluster manager.\\n\\nFind more information at: https://kubernetes.io/docs/reference/kubectl/overview/\\n\\nBasic Commands (Beginner):\\ncreate Create a resource from a file or from stdin.\\nexpose Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service\\nrun Run a particular image on the cluster\\nset Set specific features on objects\\n\\nBasic Commands (Intermediate):\\nexplain Documentation of resources\\nget Display one or many resources\\nedit Edit a resource on the server\\ndelete Delete resources by filenames, stdin, resources and names, or by resources and label selector\\n\\nDeploy Commands:\\nrollout Manage the rollout of a resource\\nscale Set a new size for a Deployment, ReplicaSet or Replication Controller\\nautoscale Auto-scale a Deployment, ReplicaSet, or ReplicationController\\n\\nCluster Management Commands:\\ncertificate Modify certificate resources.\\ncluster-info Display cluster info\\ntop Display Resource (CPU/Memory/Storage) usage.\\ncordon Mark node as unschedulable\\nuncordon Mark node as schedulable\\ndrain Drain node in preparation for maintenance\\ntaint Update the taints on one or more nodes\\n\\nTroubleshooting and Debugging Commands:\\ndescribe Show details of a specific resource or group of resources\\nlogs Print the logs for a container in a pod\\nattach Attach to a running container\\nexec Execute a command in a container\\nport-forward Forward one or more local ports to a pod\\nproxy Run a proxy to the Kubernetes API server\\ncp Copy files and directories to and from containers.\\nauth Inspect authorization\\n\\nAdvanced Commands:\\ndiff Diff 
live version against would-be applied version\\napply Apply a configuration to a resource by filename or stdin\\npatch Update field(s) of a resource using strategic merge patch\\nreplace Replace a resource by filename or stdin\\nwait Experimental: Wait for a specific condition on one or many resources.\\nconvert Convert config files between different API versions\\nkustomize Build a kustomization target from a directory or a remote url.\\n\\nSettings Commands:\\nlabel Update the labels on a resource\\nannotate Update the annotations on a resource\\ncompletion Output shell completion code for the specified shell (bash or zsh)\\n\\nOther Commands:\\napi-resources Print the supported API resources on the server\\napi-versions Print the supported API versions on the server, in the form of "group/version"\\nconfig Modify kubeconfig files\\nplugin Provides utilities for interacting with plugins.\\nversion Print the client and server version information\\n\\nUsage:\\nkubectl [flags] [options]\\n\\nUse "kubectl <command> --help" for more information about a given command.\\nUse "kubectl options" for a list of global command-line options (applies to all commands).\\n2020-09-09 20:16:18,985 INFO updater.py:420 -- NodeUpdater: ray-head-xn4b5: Syncing /tmp/ray-bootstrap-ixliccej to ~/ray_bootstrap_config.yaml...\\nerror: unable to upgrade connection: container not found ("ray-node")\\nrsync: connection unexpectedly closed (0 bytes received so far) [sender]\\nrsync error: error in rsync protocol data stream (code 12) at io.c(235) [sender=3.1.3]\\n2020-09-09 20:16:19,670 WARNING command_runner.py:255 -- NodeUpdater: ray-head-xn4b5: rsync failed: \\'Command \\'[\\'/home/tcw/projects/mu0_minimal/myenv/lib/python3.7/site-packages/ray/autoscaler/kubernetes/kubectl-rsync.sh\\', \\'-avz\\', \\'/tmp/ray-bootstrap-ixliccej\\', \\'ray-head-xn4b5@ray:/root/ray_bootstrap_config.yaml\\']\\' returned non-zero exit status 12.\\'. 
Falling back to \\'kubectl cp\\'\\n2020-09-09 20:16:20,912 INFO log_timer.py:27 -- NodeUpdater: ray-head-xn4b5: Synced /tmp/ray-bootstrap-ixliccej to ~/ray_bootstrap_config.yaml [LogTimer=2006ms]\\n2020-09-09 20:16:21,012 INFO command_runner.py:223 -- NodeUpdater: ray-head-xn4b5: Running [\\'kubectl\\', \\'-n\\', \\'ray\\', \\'exec\\', \\'-it\\', \\'ray-head-xn4b5\\', \\'--\\', \\'bash\\', \\'--login\\', \\'-c\\', \\'-i\\', "\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (ray stop)\\'"]\\nkubectl controls the Kubernetes cluster manager.\\n\\nFind more information at: https://kubernetes.io/docs/reference/kubectl/overview/\\n\\nBasic Commands (Beginner):\\ncreate Create a resource from a file or from stdin.\\nexpose Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service\\nrun Run a particular image on the cluster\\nset Set specific features on objects\\n\\nBasic Commands (Intermediate):\\nexplain Documentation of resources\\nget Display one or many resources\\nedit Edit a resource on the server\\ndelete Delete resources by filenames, stdin, resources and names, or by resources and label selector\\n\\nDeploy Commands:\\nrollout Manage the rollout of a resource\\nscale Set a new size for a Deployment, ReplicaSet or Replication Controller\\nautoscale Auto-scale a Deployment, ReplicaSet, or ReplicationController\\n\\nCluster Management Commands:\\ncertificate Modify certificate resources.\\ncluster-info Display cluster info\\ntop Display Resource (CPU/Memory/Storage) usage.\\ncordon Mark node as unschedulable\\nuncordon Mark node as schedulable\\ndrain Drain node in preparation for maintenance\\ntaint Update the taints on one or more nodes\\n\\nTroubleshooting and Debugging Commands:\\ndescribe Show details of a specific resource or group of resources\\nlogs Print the logs for a container in a pod\\nattach Attach to a running container\\nexec Execute a command in a container\\nport-forward 
Forward one or more local ports to a pod\\nproxy Run a proxy to the Kubernetes API server\\ncp Copy files and directories to and from containers.\\nauth Inspect authorization\\n\\nAdvanced Commands:\\ndiff Diff live version against would-be applied version\\napply Apply a configuration to a resource by filename or stdin\\npatch Update field(s) of a resource using strategic merge patch\\nreplace Replace a resource by filename or stdin\\nwait Experimental: Wait for a specific condition on one or many resources.\\nconvert Convert config files between different API versions\\nkustomize Build a kustomization target from a directory or a remote url.\\n\\nSettings Commands:\\nlabel Update the labels on a resource\\nannotate Update the annotations on a resource\\ncompletion Output shell completion code for the specified shell (bash or zsh)\\n\\nOther Commands:\\napi-resources Print the supported API resources on the server\\napi-versions Print the supported API versions on the server, in the form of "group/version"\\nconfig Modify kubeconfig files\\nplugin Provides utilities for interacting with plugins.\\nversion Print the client and server version information\\n\\nUsage:\\nkubectl [flags] [options]\\n\\nUse "kubectl <command> --help" for more information about a given command.\\nUse "kubectl options" for a list of global command-line options (applies to all commands).\\n2020-09-09 20:16:21,057 INFO command_runner.py:223 -- NodeUpdater: ray-head-xn4b5: Running [\\'kubectl\\', \\'-n\\', \\'ray\\', \\'exec\\', \\'-it\\', \\'ray-head-xn4b5\\', \\'--\\', \\'bash\\', \\'--login\\', \\'-c\\', \\'-i\\', "\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && (ulimit -n 65536; ray start --head --num-cpus=$MY_CPU_REQUEST --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --webui-host 0.0.0.0)\\'"]\\nkubectl controls the Kubernetes cluster manager.\\n\\nFind more information at: 
https://kubernetes.io/docs/reference/kubectl/overview/\\n\\nBasic Commands (Beginner):\\ncreate Create a resource from a file or from stdin.\\nexpose Take a replication controller, service, deployment or pod and expose it as a new Kubernetes Service\\nrun Run a particular image on the cluster\\nset Set specific features on objects\\n\\nBasic Commands (Intermediate):\\nexplain Documentation of resources\\nget Display one or many resources\\nedit Edit a resource on the server\\ndelete Delete resources by filenames, stdin, resources and names, or by resources and label selector\\n\\nDeploy Commands:\\nrollout Manage the rollout of a resource\\nscale Set a new size for a Deployment, ReplicaSet or Replication Controller\\nautoscale Auto-scale a Deployment, ReplicaSet, or ReplicationController\\n\\nCluster Management Commands:\\ncertificate Modify certificate resources.\\ncluster-info Display cluster info\\ntop Display Resource (CPU/Memory/Storage) usage.\\ncordon Mark node as unschedulable\\nuncordon Mark node as schedulable\\ndrain Drain node in preparation for maintenance\\ntaint Update the taints on one or more nodes\\n\\nTroubleshooting and Debugging Commands:\\ndescribe Show details of a specific resource or group of resources\\nlogs Print the logs for a container in a pod\\nattach Attach to a running container\\nexec Execute a command in a container\\nport-forward Forward one or more local ports to a pod\\nproxy Run a proxy to the Kubernetes API server\\ncp Copy files and directories to and from containers.\\nauth Inspect authorization\\n\\nAdvanced Commands:\\ndiff Diff live version against would-be applied version\\napply Apply a configuration to a resource by filename or stdin\\npatch Update field(s) of a resource using strategic merge patch\\nreplace Replace a resource by filename or stdin\\nwait Experimental: Wait for a specific condition on one or many resources.\\nconvert Convert config files between different API versions\\nkustomize Build a kustomization 
target from a directory or a remote url.\\n\\nSettings Commands:\\nlabel Update the labels on a resource\\nannotate Update the annotations on a resource\\ncompletion Output shell completion code for the specified shell (bash or zsh)\\n\\nOther Commands:\\napi-resources Print the supported API resources on the server\\napi-versions Print the supported API versions on the server, in the form of "group/version"\\nconfig Modify kubeconfig files\\nplugin Provides utilities for interacting with plugins.\\nversion Print the client and server version information\\n\\nUsage:\\nkubectl [flags] [options]\\n\\nUse "kubectl <command> --help" for more information about a given command.\\nUse "kubectl options" for a list of global command-line options (applies to all commands).\\n2020-09-09 20:16:21,096 INFO log_timer.py:27 -- NodeUpdater: ray-head-xn4b5: Ray start commands succeeded [LogTimer=84ms]\\n2020-09-09 20:16:21,096 INFO log_timer.py:27 -- NodeUpdater: ray-head-xn4b5: Applied config c1ad72dff590afc61824084810dc92021a0f3d39 [LogTimer=2486ms]\\n2020-09-09 20:16:21,254 INFO commands.py:718 -- get_or_create_head_node: Head node up-to-date, IP address is: 192.168.6.142\\nTo monitor autoscaling activity, you can run:\\n\\nray exec /home/tcw/projects/mu0_minimal/ray/example-full.yaml \\'tail -n 100 -f /tmp/ray/session_*/logs/monitor*\\'\\n\\nTo open a console on the cluster:\\n\\nray attach /home/tcw/projects/mu0_minimal/ray/example-full.yaml\\n\\nTo get a remote shell to the cluster manually, run:\\n\\nkubectl -n ray exec -it ray-head-xn4b5 bash'}, {'piece_type': 'other', 'piece_content': '$ kubectl -n ray exec -it ray-head-xn4b5 bash\\n(base) root@ray-head-xn4b5:/# ray start --head --num-cpus=3 --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --webui-host 0.0.0.0'}, {'piece_type': 'other', 'piece_content': '(base) root@ray-head-xn4b5:/# tail -n 100 -f /tmp/ray/session_*/logs/monitor*'}, {'piece_type': 'error message', 'piece_content': 
'==> /tmp/ray/session_2020-09-09_17-20-39_779593_74/logs/monitor.err <==\\ndocker_config = self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n2020-09-09 17:21:11,100 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)\\n- MostDelayedHeartbeats: {\\'192.168.6.142\\': 0.13077020645141602}\\n- NodeIdleSeconds: Min=29 Mean=29 Max=29\\n- NumNodesConnected: 1\\n- NumNodesUsed: 0.0\\n- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory\\n- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0\\n\\n2020-09-09 17:21:11,112 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)\\n- MostDelayedHeartbeats: {\\'192.168.6.142\\': 0.14269304275512695}\\n- NodeIdleSeconds: Min=29 Mean=29 Max=29\\n- NumNodesConnected: 1\\n- NumNodesUsed: 0.0\\n- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory\\n- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0\\n\\n2020-09-09 17:21:11,128 ERROR autoscaler.py:123 -- StandardAutoscaler: Error during autoscaling.\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update\\nself._update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update\\ndocker_config = 
self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n2020-09-09 17:21:11,129 CRITICAL autoscaler.py:130 -- StandardAutoscaler: Too many errors, abort.\\nError in monitor loop\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run\\nself._run()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run\\nself.autoscaler.update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update\\nraise e\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update\\nself._update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update\\ndocker_config = self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n2020-09-09 17:21:11,129 ERROR autoscaler.py:554 -- StandardAutoscaler: kill_workers triggered\\n2020-09-09 17:21:11,134 INFO node_provider.py:121 -- KubernetesNodeProvider: 
calling delete_namespaced_pod\\n2020-09-09 17:21:11,148 ERROR autoscaler.py:559 -- StandardAutoscaler: terminated 1 node(s)\\nError in sys.excepthook:\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/worker.py", line 834, in custom_excepthook\\nworker_id = global_worker.worker_id\\nAttributeError: \\'Worker\\' object has no attribute \\'worker_id\\'\\n\\nOriginal exception was:\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 368, in <module>\\nmonitor.run()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run\\nself._run()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run\\nself.autoscaler.update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update\\nraise e\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update\\nself._update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update\\ndocker_config = self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nTraceback (most recent call last):\\nFile 
"/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 380, in <module>\\nredis_client, ray_constants.MONITOR_DIED_ERROR, message)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/utils.py", line 128, in push_error_to_driver_through_redis\\npubsub_msg.SerializeAsString())\\nAttributeError: SerializeAsString\\n\\n==> /tmp/ray/session_2020-09-09_17-20-39_779593_74/logs/monitor.out <==\\nDestroying cluster. Confirm [y/N]: y [automatic, due to --yes]\\n1 random worker nodes will not be shut down. (due to --keep-min-workers)\\nThe head node will not be shut down. (due to --workers-only)\\nNo nodes remaining.\\n\\n==> /tmp/ray/session_latest/logs/monitor.err <==\\ndocker_config = self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n2020-09-09 17:21:11,100 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)\\n- MostDelayedHeartbeats: {\\'192.168.6.142\\': 0.13077020645141602}\\n- NodeIdleSeconds: Min=29 Mean=29 Max=29\\n- NumNodesConnected: 1\\n- NumNodesUsed: 0.0\\n- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory\\n- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0\\n\\n2020-09-09 17:21:11,112 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)\\n- MostDelayedHeartbeats: {\\'192.168.6.142\\': 0.14269304275512695}\\n- NodeIdleSeconds: Min=29 Mean=29 Max=29\\n- NumNodesConnected: 1\\n- NumNodesUsed: 0.0\\n- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory\\n- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0\\n\\n2020-09-09 17:21:11,128 ERROR autoscaler.py:123 -- StandardAutoscaler: Error during autoscaling.\\nTraceback 
(most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update\\nself._update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update\\ndocker_config = self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n2020-09-09 17:21:11,129 CRITICAL autoscaler.py:130 -- StandardAutoscaler: Too many errors, abort.\\nError in monitor loop\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run\\nself._run()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run\\nself.autoscaler.update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update\\nraise e\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update\\nself._update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in 
should_update\\ndocker_config = self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n2020-09-09 17:21:11,129 ERROR autoscaler.py:554 -- StandardAutoscaler: kill_workers triggered\\n2020-09-09 17:21:11,134 INFO node_provider.py:121 -- KubernetesNodeProvider: calling delete_namespaced_pod\\n2020-09-09 17:21:11,148 ERROR autoscaler.py:559 -- StandardAutoscaler: terminated 1 node(s)\\nError in sys.excepthook:\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/worker.py", line 834, in custom_excepthook\\nworker_id = global_worker.worker_id\\nAttributeError: \\'Worker\\' object has no attribute \\'worker_id\\'\\n\\nOriginal exception was:\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 368, in <module>\\nmonitor.run()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run\\nself._run()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run\\nself.autoscaler.update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update\\nraise e\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update\\nself._update()\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update\\nself.should_update(node_id) for node_id in nodes):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>\\nself.should_update(node_id) for node_id in nodes):\\nFile 
"/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update\\ndocker_config = self._get_node_specific_docker_config(node_id)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config\\nnode_id, "docker")\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields\\nfields = self.config[fields_key]\\nKeyError: \\'docker\\'\\n\\nDuring handling of the above exception, another exception occurred:\\n\\nTraceback (most recent call last):\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 380, in <module>\\nredis_client, ray_constants.MONITOR_DIED_ERROR, message)\\nFile "/root/anaconda3/lib/python3.7/site-packages/ray/utils.py", line 128, in push_error_to_driver_through_redis\\npubsub_msg.SerializeAsString())\\nAttributeError: SerializeAsString\\n\\n==> /tmp/ray/session_latest/logs/monitor.out <==\\nDestroying cluster. Confirm [y/N]: y [automatic, due to --yes]\\n1 random worker nodes will not be shut down. (due to --keep-min-workers)\\nThe head node will not be shut down. (due to --workers-only)\\nNo nodes remaining.'}]
|
==> /tmp/ray/session_2020-09-09_17-20-39_779593_74/logs/monitor.err <==
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,100 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.13077020645141602}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,112 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.14269304275512695}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,128 ERROR autoscaler.py:123 -- StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 CRITICAL autoscaler.py:130 -- StandardAutoscaler: Too many errors, abort.
Error in monitor loop
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 ERROR autoscaler.py:554 -- StandardAutoscaler: kill_workers triggered
2020-09-09 17:21:11,134 INFO node_provider.py:121 -- KubernetesNodeProvider: calling delete_namespaced_pod
2020-09-09 17:21:11,148 ERROR autoscaler.py:559 -- StandardAutoscaler: terminated 1 node(s)
Error in sys.excepthook:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/worker.py", line 834, in custom_excepthook
worker_id = global_worker.worker_id
AttributeError: 'Worker' object has no attribute 'worker_id'
Original exception was:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 368, in <module>
monitor.run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 380, in <module>
redis_client, ray_constants.MONITOR_DIED_ERROR, message)
File "/root/anaconda3/lib/python3.7/site-packages/ray/utils.py", line 128, in push_error_to_driver_through_redis
pubsub_msg.SerializeAsString())
AttributeError: SerializeAsString
==> /tmp/ray/session_2020-09-09_17-20-39_779593_74/logs/monitor.out <==
Destroying cluster. Confirm [y/N]: y [automatic, due to --yes]
1 random worker nodes will not be shut down. (due to --keep-min-workers)
The head node will not be shut down. (due to --workers-only)
No nodes remaining.
==> /tmp/ray/session_latest/logs/monitor.err <==
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,100 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.13077020645141602}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,112 INFO autoscaler.py:520 -- Cluster status: 1/1 target nodes (0 pending)
- MostDelayedHeartbeats: {'192.168.6.142': 0.14269304275512695}
- NodeIdleSeconds: Min=29 Mean=29 Max=29
- NumNodesConnected: 1
- NumNodesUsed: 0.0
- ResourceUsage: 0.0/3.0 CPU, 0.0 GiB/1.21 GiB memory, 0.0 GiB/0.42 GiB object_store_memory
- TimeSinceLastHeartbeat: Min=0 Mean=0 Max=0
2020-09-09 17:21:11,128 ERROR autoscaler.py:123 -- StandardAutoscaler: Error during autoscaling.
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 CRITICAL autoscaler.py:130 -- StandardAutoscaler: Too many errors, abort.
Error in monitor loop
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
2020-09-09 17:21:11,129 ERROR autoscaler.py:554 -- StandardAutoscaler: kill_workers triggered
2020-09-09 17:21:11,134 INFO node_provider.py:121 -- KubernetesNodeProvider: calling delete_namespaced_pod
2020-09-09 17:21:11,148 ERROR autoscaler.py:559 -- StandardAutoscaler: terminated 1 node(s)
Error in sys.excepthook:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/worker.py", line 834, in custom_excepthook
worker_id = global_worker.worker_id
AttributeError: 'Worker' object has no attribute 'worker_id'
Original exception was:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 368, in <module>
monitor.run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 313, in run
self._run()
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 268, in _run
self.autoscaler.update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 132, in update
raise e
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 121, in update
self._update()
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in _update
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 250, in <genexpr>
self.should_update(node_id) for node_id in nodes):
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 456, in should_update
docker_config = self._get_node_specific_docker_config(node_id)
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 431, in _get_node_specific_docker_config
node_id, "docker")
File "/root/anaconda3/lib/python3.7/site-packages/ray/autoscaler/autoscaler.py", line 417, in _get_node_type_specific_fields
fields = self.config[fields_key]
KeyError: 'docker'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/root/anaconda3/lib/python3.7/site-packages/ray/monitor.py", line 380, in <module>
redis_client, ray_constants.MONITOR_DIED_ERROR, message)
File "/root/anaconda3/lib/python3.7/site-packages/ray/utils.py", line 128, in push_error_to_driver_through_redis
pubsub_msg.SerializeAsString())
AttributeError: SerializeAsString
==> /tmp/ray/session_latest/logs/monitor.out <==
Destroying cluster. Confirm [y/N]: y [automatic, due to --yes]
1 random worker nodes will not be shut down. (due to --keep-min-workers)
The head node will not be shut down. (due to --workers-only)
No nodes remaining.
|
KeyError
|
def _clean_log(obj):
    """Return *obj* if it is safely serializable, else a sanitized copy.

    Fixes https://github.com/ray-project/ray/issues/10631

    Containers are cleaned recursively; each leaf value must survive
    both pickling and a safe YAML dump, otherwise it is replaced by its
    string representation (mirroring _SafeFallBackEncoder).
    """
    # Recurse into containers so only the offending leaves are replaced.
    if isinstance(obj, dict):
        return {key: _clean_log(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [_clean_log(item) for item in obj]

    try:
        # Both checks must pass: the value is pickled across processes
        # and later dumped to YAML with the safe dumper by wandb.
        pickle.dumps(obj)
        yaml.dump(
            obj,
            Dumper=yaml.SafeDumper,
            default_flow_style=False,
            allow_unicode=True,
            encoding="utf-8")
        return obj
    except Exception:
        # give up, similar to _SafeFallBackEncoder
        return str(obj)
|
def _clean_log(obj):
# Fixes https://github.com/ray-project/ray/issues/10631
if isinstance(obj, dict):
return {k: _clean_log(v) for k, v in obj.items()}
elif isinstance(obj, list):
return [_clean_log(v) for v in obj]
# Else
try:
pickle.dumps(obj)
return obj
except Exception:
# give up, similar to _SafeFallBackEncoder
return str(obj)
|
[{'piece_type': 'error message', 'piece_content': 'Process _WandbLoggingProcess-1:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap\\nself.run()\\nFile "[...]/ray/tune/integration/wandb.py", line 127, in run\\nwandb.init(*self.args, **self.kwargs)\\nFile "[...]/wandb/__init__.py", line 1303, in init\\nas_defaults=not allow_val_change)\\nFile "[...]/wandb/wandb_config.py", line 333, in _update\\nself.persist()\\nFile "[...]/wandb/wandb_config.py", line 238, in persist\\nconf_file.write(str(self))\\nFile "[...]/wandb/wandb_config.py", line 374, in __str__\\nallow_unicode=True, encoding=\\'utf-8\\')\\nFile "[...]/yaml/__init__.py", line 290, in dump\\nreturn dump_all([data], stream, Dumper=Dumper, **kwds)\\nFile "[...]/yaml/__init__.py", line 278, in dump_all\\ndumper.represent(data)\\nFile "[...]/yaml/representer.py", line 27, in represent\\nnode = self.represent_data(data)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 58, in represent_data\\nnode = self.yaml_representers[None](self, data)\\nFile "[...]/yaml/representer.py", line 231, in represent_undefined\\nraise RepresenterError("cannot represent an object", data)\\nyaml.representer.RepresenterError: (\\'cannot represent an 
object\\', <class \\'__main__.MyCallbacks\\'>)'}, {'piece_type': 'other', 'piece_content': 'from ray import tune\\nfrom ray.rllib.agents.ppo import PPOTrainer\\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\\nfrom ray.tune.integration.wandb import WandbLogger\\n\\nclass MyCallbacks(DefaultCallbacks):\\ndef on_episode_end(self, worker, base_env, policies, episode, **kwargs):\\nprint("Episode ended")\\n\\ntune.run(\\nPPOTrainer,\\ncheckpoint_freq=1,\\nconfig={\\n"framework": "torch",\\n"num_workers": 8,\\n"num_gpus": 1,\\n"env": "CartPole-v0",\\n"callbacks": MyCallbacks,\\n"logger_config": {\\n"wandb": {\\n"project": "test",\\n"api_key_file": "./wandb_api_key_file",\\n}\\n}\\n},\\nstop={\\n"training_iteration":10\\n},\\nloggers=[WandbLogger]\\n)'}]
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def _init(self):
    """Start the background wandb logging process for this trial.

    Pulls the wandb settings out of the trial config, resolves the API
    key, computes which result keys to exclude, and launches the logger
    process that forwards results to wandb.

    Raises:
        ValueError: If no wandb configuration or no ``project`` key was
            supplied.
    """
    config = self.config.copy()
    config.pop("callbacks", None)  # Remove callbacks

    # The wandb settings may live nested under "logger_config" (e.g.
    # RLlib) or directly under a top-level "wandb" key.
    try:
        if config.get("logger_config", {}).get("wandb"):
            wandb_config = config.pop("logger_config").get("wandb").copy()
        else:
            wandb_config = config.pop("wandb").copy()
    except KeyError:
        raise ValueError(
            "Wandb logger specified but no configuration has been passed. "
            "Make sure to include a `wandb` key in your `config` dict "
            "containing at least a `project` specification.")

    _set_api_key(wandb_config)

    # Result keys that are never forwarded as metrics.
    exclude_results = self._exclude_results.copy()
    exclude_results += wandb_config.pop("excludes", [])

    # Unless explicitly requested, do not re-log the config with every
    # result.
    if not wandb_config.pop("log_config", False):
        exclude_results += ["config"]

    # Identify the run by trial ID and name.
    trial_id = self.trial.trial_id
    trial_name = str(self.trial)

    try:
        wandb_project = wandb_config.pop("project")
    except KeyError:
        raise ValueError(
            "You need to specify a `project` in your wandb `config` dict.")

    # Group runs by trainable name unless the user chose a group.
    wandb_group = wandb_config.pop("group", self.trial.trainable_name)

    wandb_init_kwargs = dict(
        id=trial_id,
        name=trial_name,
        resume=True,
        reinit=True,
        allow_val_change=True,
        group=wandb_group,
        project=wandb_project,
        config=config)
    # Remaining user-supplied keys override the defaults above.
    wandb_init_kwargs.update(wandb_config)

    self._queue = Queue()
    self._wandb = self._logger_process_cls(
        queue=self._queue,
        exclude=exclude_results,
        to_config=self._config_results,
        **wandb_init_kwargs)
    self._wandb.start()
def _init(self):
    """Start the background wandb logging process for this trial.

    Extracts the wandb settings from the trial config (either nested
    under ``logger_config`` or under a top-level ``wandb`` key), resolves
    the API key, computes result-key exclusions, and launches the logger
    process that forwards results to wandb.

    Raises:
        ValueError: If no wandb configuration or no ``project`` key was
            supplied.
    """
    config = self.config.copy()
    try:
        # The wandb settings may live nested under "logger_config"
        # (e.g. RLlib) or directly under a top-level "wandb" key.
        if config.get("logger_config", {}).get("wandb"):
            logger_config = config.pop("logger_config")
            wandb_config = logger_config.get("wandb").copy()
        else:
            wandb_config = config.pop("wandb").copy()
    except KeyError:
        raise ValueError(
            "Wandb logger specified but no configuration has been passed. "
            "Make sure to include a `wandb` key in your `config` dict "
            "containing at least a `project` specification.")
    _set_api_key(wandb_config)
    # Result keys that are never forwarded as metrics.
    exclude_results = self._exclude_results.copy()
    # Additional excludes
    additional_excludes = wandb_config.pop("excludes", [])
    exclude_results += additional_excludes
    # Log config keys on each result?
    log_config = wandb_config.pop("log_config", False)
    if not log_config:
        exclude_results += ["config"]
    # Fill trial ID and name
    trial_id = self.trial.trial_id
    trial_name = str(self.trial)
    # Project name for Wandb
    try:
        wandb_project = wandb_config.pop("project")
    except KeyError:
        raise ValueError(
            "You need to specify a `project` in your wandb `config` dict.")
    # Grouping
    wandb_group = wandb_config.pop("group", self.trial.trainable_name)
    wandb_init_kwargs = dict(
        id=trial_id,
        name=trial_name,
        resume=True,
        reinit=True,
        allow_val_change=True,
        group=wandb_group,
        project=wandb_project,
        config=config)
    # Remaining user-supplied keys override the defaults above.
    wandb_init_kwargs.update(wandb_config)
    self._queue = Queue()
    self._wandb = self._logger_process_cls(
        queue=self._queue,
        exclude=exclude_results,
        to_config=self._config_results,
        **wandb_init_kwargs)
    self._wandb.start()
|
[{'piece_type': 'error message', 'piece_content': 'Process _WandbLoggingProcess-1:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap\\nself.run()\\nFile "[...]/ray/tune/integration/wandb.py", line 127, in run\\nwandb.init(*self.args, **self.kwargs)\\nFile "[...]/wandb/__init__.py", line 1303, in init\\nas_defaults=not allow_val_change)\\nFile "[...]/wandb/wandb_config.py", line 333, in _update\\nself.persist()\\nFile "[...]/wandb/wandb_config.py", line 238, in persist\\nconf_file.write(str(self))\\nFile "[...]/wandb/wandb_config.py", line 374, in __str__\\nallow_unicode=True, encoding=\\'utf-8\\')\\nFile "[...]/yaml/__init__.py", line 290, in dump\\nreturn dump_all([data], stream, Dumper=Dumper, **kwds)\\nFile "[...]/yaml/__init__.py", line 278, in dump_all\\ndumper.represent(data)\\nFile "[...]/yaml/representer.py", line 27, in represent\\nnode = self.represent_data(data)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 58, in represent_data\\nnode = self.yaml_representers[None](self, data)\\nFile "[...]/yaml/representer.py", line 231, in represent_undefined\\nraise RepresenterError("cannot represent an object", data)\\nyaml.representer.RepresenterError: (\\'cannot represent an 
object\\', <class \\'__main__.MyCallbacks\\'>)'}, {'piece_type': 'other', 'piece_content': 'from ray import tune\\nfrom ray.rllib.agents.ppo import PPOTrainer\\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\\nfrom ray.tune.integration.wandb import WandbLogger\\n\\nclass MyCallbacks(DefaultCallbacks):\\ndef on_episode_end(self, worker, base_env, policies, episode, **kwargs):\\nprint("Episode ended")\\n\\ntune.run(\\nPPOTrainer,\\ncheckpoint_freq=1,\\nconfig={\\n"framework": "torch",\\n"num_workers": 8,\\n"num_gpus": 1,\\n"env": "CartPole-v0",\\n"callbacks": MyCallbacks,\\n"logger_config": {\\n"wandb": {\\n"project": "test",\\n"api_key_file": "./wandb_api_key_file",\\n}\\n}\\n},\\nstop={\\n"training_iteration":10\\n},\\nloggers=[WandbLogger]\\n)'}]
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def _clean_log(obj):
    """Sanitize *obj* so it can be safely pickled and YAML-dumped.

    Fixes https://github.com/ray-project/ray/issues/10631

    Containers are cleaned recursively and explicitly allowed types are
    passed through. Any other value must survive both pickling and a
    safe YAML dump; if it does not, it is replaced by its string
    representation (re-parsed as int/float when possible).
    """
    if isinstance(obj, dict):
        return {key: _clean_log(value) for key, value in obj.items()}
    if isinstance(obj, list):
        return [_clean_log(item) for item in obj]
    if _is_allowed_type(obj):
        return obj

    try:
        # Both checks must pass: the value is pickled across processes
        # and later dumped to YAML with the safe dumper by wandb.
        pickle.dumps(obj)
        yaml.dump(
            obj,
            Dumper=yaml.SafeDumper,
            default_flow_style=False,
            allow_unicode=True,
            encoding="utf-8")
        return obj
    except Exception:
        # give up, similar to _SafeFallBackEncoder
        fallback = str(obj)
        # Prefer a numeric value when the string form parses as one.
        try:
            return int(fallback)
        except ValueError:
            pass
        try:
            return float(fallback)
        except ValueError:
            pass
        # Else, return string
        return fallback
|
def _clean_log(obj):
    """Sanitize *obj* for wandb: keep only pickle- and YAML-safe values.

    Fixes https://github.com/ray-project/ray/issues/10631
    """
    # Clean containers element by element.
    if isinstance(obj, dict):
        cleaned = {}
        for key in obj:
            cleaned[key] = _clean_log(obj[key])
        return cleaned
    if isinstance(obj, list):
        return list(map(_clean_log, obj))

    # Leaf value: it must survive pickling and a safe YAML dump.
    try:
        pickle.dumps(obj)
        yaml.dump(
            obj,
            Dumper=yaml.SafeDumper,
            default_flow_style=False,
            allow_unicode=True,
            encoding="utf-8")
    except Exception:
        # give up, similar to _SafeFallBackEncoder
        return str(obj)
    return obj
|
[{'piece_type': 'error message', 'piece_content': 'Process _WandbLoggingProcess-1:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap\\nself.run()\\nFile "[...]/ray/tune/integration/wandb.py", line 127, in run\\nwandb.init(*self.args, **self.kwargs)\\nFile "[...]/wandb/__init__.py", line 1303, in init\\nas_defaults=not allow_val_change)\\nFile "[...]/wandb/wandb_config.py", line 333, in _update\\nself.persist()\\nFile "[...]/wandb/wandb_config.py", line 238, in persist\\nconf_file.write(str(self))\\nFile "[...]/wandb/wandb_config.py", line 374, in __str__\\nallow_unicode=True, encoding=\\'utf-8\\')\\nFile "[...]/yaml/__init__.py", line 290, in dump\\nreturn dump_all([data], stream, Dumper=Dumper, **kwds)\\nFile "[...]/yaml/__init__.py", line 278, in dump_all\\ndumper.represent(data)\\nFile "[...]/yaml/representer.py", line 27, in represent\\nnode = self.represent_data(data)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 58, in represent_data\\nnode = self.yaml_representers[None](self, data)\\nFile "[...]/yaml/representer.py", line 231, in represent_undefined\\nraise RepresenterError("cannot represent an object", data)\\nyaml.representer.RepresenterError: (\\'cannot represent an 
object\\', <class \\'__main__.MyCallbacks\\'>)'}, {'piece_type': 'other', 'piece_content': 'from ray import tune\\nfrom ray.rllib.agents.ppo import PPOTrainer\\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\\nfrom ray.tune.integration.wandb import WandbLogger\\n\\nclass MyCallbacks(DefaultCallbacks):\\ndef on_episode_end(self, worker, base_env, policies, episode, **kwargs):\\nprint("Episode ended")\\n\\ntune.run(\\nPPOTrainer,\\ncheckpoint_freq=1,\\nconfig={\\n"framework": "torch",\\n"num_workers": 8,\\n"num_gpus": 1,\\n"env": "CartPole-v0",\\n"callbacks": MyCallbacks,\\n"logger_config": {\\n"wandb": {\\n"project": "test",\\n"api_key_file": "./wandb_api_key_file",\\n}\\n}\\n},\\nstop={\\n"training_iteration":10\\n},\\nloggers=[WandbLogger]\\n)'}]
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def _handle_result(self, result):
    """Split a result dict into loggable metrics and config updates.

    Returns:
        Tuple of (log, config_update): metrics to send to wandb and
        key/value pairs to merge into the run config.
    """
    config_update = result.get("config", {}).copy()
    log = {}

    def _matches(key, items):
        # True when *key* equals an entry or is nested underneath it.
        return any(
            key.startswith(item + "/") or key == item for item in items)

    for key, value in flatten_dict(result, delimiter="/").items():
        if _matches(key, self._to_config):
            # Route these keys into the run config instead of the log.
            config_update[key] = value
        elif _matches(key, self._exclude):
            continue
        elif not _is_allowed_type(value):
            # Drop values wandb cannot log.
            continue
        else:
            log[key] = value

    config_update.pop("callbacks", None)  # Remove callbacks
    return log, config_update
|
def _handle_result(self, result):
config_update = result.get("config", {}).copy()
log = {}
flat_result = flatten_dict(result, delimiter="/")
for k, v in flat_result.items():
if any(
k.startswith(item + "/") or k == item
for item in self._to_config):
config_update[k] = v
elif any(
k.startswith(item + "/") or k == item
for item in self._exclude):
continue
elif not isinstance(v, Number):
continue
else:
log[k] = v
config_update.pop("callbacks", None) # Remove callbacks
return log, config_update
|
[{'piece_type': 'error message', 'piece_content': 'Process _WandbLoggingProcess-1:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap\\nself.run()\\nFile "[...]/ray/tune/integration/wandb.py", line 127, in run\\nwandb.init(*self.args, **self.kwargs)\\nFile "[...]/wandb/__init__.py", line 1303, in init\\nas_defaults=not allow_val_change)\\nFile "[...]/wandb/wandb_config.py", line 333, in _update\\nself.persist()\\nFile "[...]/wandb/wandb_config.py", line 238, in persist\\nconf_file.write(str(self))\\nFile "[...]/wandb/wandb_config.py", line 374, in __str__\\nallow_unicode=True, encoding=\\'utf-8\\')\\nFile "[...]/yaml/__init__.py", line 290, in dump\\nreturn dump_all([data], stream, Dumper=Dumper, **kwds)\\nFile "[...]/yaml/__init__.py", line 278, in dump_all\\ndumper.represent(data)\\nFile "[...]/yaml/representer.py", line 27, in represent\\nnode = self.represent_data(data)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 48, in represent_data\\nnode = self.yaml_representers[data_types[0]](self, data)\\nFile "[...]/yaml/representer.py", line 207, in represent_dict\\nreturn self.represent_mapping(\\'tag:yaml.org,2002:map\\', data)\\nFile "[...]/yaml/representer.py", line 118, in represent_mapping\\nnode_value = self.represent_data(item_value)\\nFile "[...]/yaml/representer.py", line 58, in represent_data\\nnode = self.yaml_representers[None](self, data)\\nFile "[...]/yaml/representer.py", line 231, in represent_undefined\\nraise RepresenterError("cannot represent an object", data)\\nyaml.representer.RepresenterError: (\\'cannot represent an 
object\\', <class \\'__main__.MyCallbacks\\'>)'}, {'piece_type': 'other', 'piece_content': 'from ray import tune\\nfrom ray.rllib.agents.ppo import PPOTrainer\\nfrom ray.rllib.agents.callbacks import DefaultCallbacks\\nfrom ray.tune.integration.wandb import WandbLogger\\n\\nclass MyCallbacks(DefaultCallbacks):\\ndef on_episode_end(self, worker, base_env, policies, episode, **kwargs):\\nprint("Episode ended")\\n\\ntune.run(\\nPPOTrainer,\\ncheckpoint_freq=1,\\nconfig={\\n"framework": "torch",\\n"num_workers": 8,\\n"num_gpus": 1,\\n"env": "CartPole-v0",\\n"callbacks": MyCallbacks,\\n"logger_config": {\\n"wandb": {\\n"project": "test",\\n"api_key_file": "./wandb_api_key_file",\\n}\\n}\\n},\\nstop={\\n"training_iteration":10\\n},\\nloggers=[WandbLogger]\\n)'}]
|
Process _WandbLoggingProcess-1:
Traceback (most recent call last):
File "/usr/lib/python3.7/multiprocessing/process.py", line 297, in _bootstrap
self.run()
File "[...]/ray/tune/integration/wandb.py", line 127, in run
wandb.init(*self.args, **self.kwargs)
File "[...]/wandb/__init__.py", line 1303, in init
as_defaults=not allow_val_change)
File "[...]/wandb/wandb_config.py", line 333, in _update
self.persist()
File "[...]/wandb/wandb_config.py", line 238, in persist
conf_file.write(str(self))
File "[...]/wandb/wandb_config.py", line 374, in __str__
allow_unicode=True, encoding='utf-8')
File "[...]/yaml/__init__.py", line 290, in dump
return dump_all([data], stream, Dumper=Dumper, **kwds)
File "[...]/yaml/__init__.py", line 278, in dump_all
dumper.represent(data)
File "[...]/yaml/representer.py", line 27, in represent
node = self.represent_data(data)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 48, in represent_data
node = self.yaml_representers[data_types[0]](self, data)
File "[...]/yaml/representer.py", line 207, in represent_dict
return self.represent_mapping('tag:yaml.org,2002:map', data)
File "[...]/yaml/representer.py", line 118, in represent_mapping
node_value = self.represent_data(item_value)
File "[...]/yaml/representer.py", line 58, in represent_data
node = self.yaml_representers[None](self, data)
File "[...]/yaml/representer.py", line 231, in represent_undefined
raise RepresenterError("cannot represent an object", data)
yaml.representer.RepresenterError: ('cannot represent an object', <class '__main__.MyCallbacks'>)
|
yaml.representer.RepresenterError
|
def start(node_ip_address, redis_address, address, redis_port, port,
num_redis_shards, redis_max_clients, redis_password,
redis_shard_ports, object_manager_port, node_manager_port,
gcs_server_port, min_worker_port, max_worker_port, memory,
object_store_memory, redis_max_memory, num_cpus, num_gpus, resources,
head, include_webui, webui_host, include_dashboard, dashboard_host,
dashboard_port, block, plasma_directory, huge_pages,
autoscaling_config, no_redirect_worker_output, no_redirect_output,
plasma_store_socket_name, raylet_socket_name, temp_dir,
java_worker_options, code_search_path, load_code_from_local,
system_config, lru_evict, enable_object_reconstruction,
metrics_export_port, log_style, log_color, verbose):
"""Start Ray processes manually on the local machine."""
cli_logger.log_style = log_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
cli_logger.detect_colors()
if gcs_server_port and not head:
raise ValueError(
"gcs_server_port can be only assigned when you specify --head.")
if redis_address is not None:
cli_logger.abort("{} is deprecated. Use {} instead.",
cf.bold("--redis-address"), cf.bold("--address"))
raise DeprecationWarning("The --redis-address argument is "
"deprecated. Please use --address instead.")
if redis_port is not None:
cli_logger.warning("{} is being deprecated. Use {} instead.",
cf.bold("--redis-port"), cf.bold("--port"))
cli_logger.old_warning(
logger, "The --redis-port argument will be deprecated soon. "
"Please use --port instead.")
if port is not None and port != redis_port:
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--port"), cf.bold("--redis-port"), cf.bold("--port"))
raise ValueError("Cannot specify both --port and --redis-port "
"as port is a rename of deprecated redis-port")
if include_webui is not None:
cli_logger.warning("{} is being deprecated. Use {} instead.",
cf.bold("--include-webui"),
cf.bold("--include-dashboard"))
cli_logger.old_warning(
logger, "The --include-webui argument will be deprecated soon"
"Please use --include-dashboard instead.")
if include_dashboard is not None:
include_dashboard = include_webui
dashboard_host_default = "localhost"
if webui_host != dashboard_host_default:
cli_logger.warning("{} is being deprecated. Use {} instead.",
cf.bold("--webui-host"),
cf.bold("--dashboard-host"))
cli_logger.old_warning(
logger, "The --webui-host argument will be deprecated"
" soon. Please use --dashboard-host instead.")
if webui_host != dashboard_host and dashboard_host != "localhost":
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--dashboard-host"), cf.bold("--webui-host"),
cf.bold("--dashboard-host"))
raise ValueError(
"Cannot specify both --webui-host and --dashboard-host,"
" please specify only the latter")
else:
dashboard_host = webui_host
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
if address is not None:
(redis_address, redis_address_ip,
redis_address_port) = services.validate_redis_address(address)
try:
resources = json.loads(resources)
except Exception:
cli_logger.error("`{}` is not a valid JSON string.",
cf.bold("--resources"))
cli_logger.abort(
"Valid values look like this: `{}`",
cf.bold("--resources='\\"CustomResource3\\": 1, "
"\\"CustomResource2\\": 2}'"))
raise Exception("Unable to parse the --resources argument using "
"json.loads. Try using a format like\\n\\n"
" --resources='{\\"CustomResource1\\": 3, "
"\\"CustomReseource2\\": 2}'")
redirect_worker_output = None if not no_redirect_worker_output else True
redirect_output = None if not no_redirect_output else True
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
min_worker_port=min_worker_port,
max_worker_port=max_worker_port,
object_manager_port=object_manager_port,
node_manager_port=node_manager_port,
gcs_server_port=gcs_server_port,
memory=memory,
object_store_memory=object_store_memory,
redis_password=redis_password,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
java_worker_options=java_worker_options,
load_code_from_local=load_code_from_local,
code_search_path=code_search_path,
_system_config=system_config,
lru_evict=lru_evict,
enable_object_reconstruction=enable_object_reconstruction,
metrics_export_port=metrics_export_port)
if head:
# Start Ray on the head node.
if redis_shard_ports is not None:
redis_shard_ports = redis_shard_ports.split(",")
# Infer the number of Redis shards from the ports if the number is
# not provided.
if num_redis_shards is None:
num_redis_shards = len(redis_shard_ports)
# Check that the arguments match.
if len(redis_shard_ports) != num_redis_shards:
cli_logger.error(
"`{}` must be a comma-separated list of ports, "
"with length equal to `{}` (which defaults to {})",
cf.bold("--redis-shard-ports"),
cf.bold("--num-redis-shards"), cf.bold("1"))
cli_logger.abort(
"Example: `{}`",
cf.bold("--num-redis-shards 3 "
"--redis_shard_ports 6380,6381,6382"))
raise Exception("If --redis-shard-ports is provided, it must "
"have the form '6380,6381,6382', and the "
"number of ports provided must equal "
"--num-redis-shards (which is 1 if not "
"provided)")
if redis_address is not None:
cli_logger.abort(
"`{}` starts a new Redis server, `{}` should not be set.",
cf.bold("--head"), cf.bold("--address"))
raise Exception("If --head is passed in, a Redis server will be "
"started, so a Redis address should not be "
"provided.")
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address())
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(logger, "Using IP address {} for this node.",
ray_params.node_ip_address)
ray_params.update_if_absent(
redis_port=port or redis_port,
redis_shard_ports=redis_shard_ports,
redis_max_memory=redis_max_memory,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
autoscaling_config=autoscaling_config,
)
node = ray.node.Node(
ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block)
redis_address = node.redis_address
# this is a noop if new-style is not set, so the old logger calls
# are still in place
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
with cli_logger.group("Next steps"):
cli_logger.print(
"To connect to this Ray runtime from another node, run")
cli_logger.print(
cf.bold(" ray start --address='{}'{}"), redis_address,
f" --redis-password='{redis_password}'"
if redis_password else "")
cli_logger.newline()
cli_logger.print("Alternatively, use the following Python code:")
with cli_logger.indented():
with cf.with_style("monokai") as c:
cli_logger.print("{} ray", c.magenta("import"))
cli_logger.print(
"ray{}init(address{}{}{})", c.magenta("."),
c.magenta("="), c.yellow("'auto'"),
", redis_password{}{}".format(
c.magenta("="),
c.yellow("'" + redis_password + "'"))
if redis_password else "")
cli_logger.newline()
cli_logger.print(
cf.underlined("If connection fails, check your "
"firewall settings other "
"network configuration."))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger,
"\\nStarted Ray on this node. You can add additional nodes to "
"the cluster by calling\\n\\n"
" ray start --address='{}'{}\\n\\n"
"from the node you wish to add. You can connect a driver to the "
"cluster from Python by running\\n\\n"
" import ray\\n"
" ray.init(address='auto'{})\\n\\n"
"If you have trouble connecting from a different machine, check "
"that your firewall is configured properly. If you wish to "
"terminate the processes that have been started, run\\n\\n"
" ray stop".format(
redis_address, " --redis-password='" + redis_password + "'"
if redis_password else "",
", _redis_password='" + redis_password + "'"
if redis_password else ""))
else:
# Start Ray on a non-head node.
if not (redis_port is None and port is None):
cli_logger.abort("`{}/{}` should not be specified without `{}`.",
cf.bold("--port"), cf.bold("--redis-port"),
cf.bold("--head"))
raise Exception(
"If --head is not passed in, --port and --redis-port are not "
"allowed.")
if redis_shard_ports is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--redis-shard-ports"), cf.bold("--head"))
raise Exception("If --head is not passed in, --redis-shard-ports "
"is not allowed.")
if redis_address is None:
cli_logger.abort("`{}` is required unless starting with `{}`.",
cf.bold("--address"), cf.bold("--head"))
raise Exception("If --head is not passed in, --address must "
"be provided.")
if num_redis_shards is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--num-redis-shards"), cf.bold("--head"))
raise Exception("If --head is not passed in, --num-redis-shards "
"must not be provided.")
if redis_max_clients is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--redis-max-clients"), cf.bold("--head"))
raise Exception("If --head is not passed in, --redis-max-clients "
"must not be provided.")
if include_webui:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--include-web-ui"), cf.bold("--head"))
raise Exception("If --head is not passed in, the --include-webui"
"flag is not relevant.")
if include_dashboard:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--include-dashboard"), cf.bold("--head"))
raise ValueError(
"If --head is not passed in, the --include-dashboard"
"flag is not relevant.")
# Wait for the Redis server to be started. And throw an exception if we
# can't connect to it.
services.wait_for_redis_to_start(
redis_address_ip, redis_address_port, password=redis_password)
# Create a Redis client.
redis_client = services.create_redis_client(
redis_address, password=redis_password)
# Check that the version information on this node matches the version
# information that the cluster was started with.
services.check_version_info(redis_client)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address(redis_address))
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(logger, "Using IP address {} for this node.",
ray_params.node_ip_address)
# Check that there aren't already Redis clients with the same IP
# address connected with this Redis instance. This raises an exception
# if the Redis server already has clients on this node.
check_no_existing_redis_clients(ray_params.node_ip_address,
redis_client)
ray_params.update(redis_address=redis_address)
node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block)
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger, "\\nStarted Ray on this node. If you wish to terminate the "
"processes that have been started, run\\n\\n"
" ray stop")
if block:
cli_logger.newline()
with cli_logger.group(cf.bold("--block")):
cli_logger.print(
"This command will now block until terminated by a signal.")
cli_logger.print(
"Runing subprocesses are monitored and a message will be "
"printed if any of them terminate unexpectedly.")
while True:
time.sleep(1)
deceased = node.dead_processes()
if len(deceased) > 0:
cli_logger.newline()
cli_logger.error("Some Ray subprcesses exited unexpectedly:")
cli_logger.old_error(logger,
"Ray processes died unexpectedly:")
with cli_logger.indented():
for process_type, process in deceased:
cli_logger.error(
"{}",
cf.bold(str(process_type)),
_tags={"exit code": str(process.returncode)})
cli_logger.old_error(
logger, "\\t{} died with exit code {}".format(
process_type, process.returncode))
# shutdown_at_exit will handle cleanup.
cli_logger.newline()
cli_logger.error("Remaining processes will be killed.")
cli_logger.old_error(
logger, "Killing remaining processes and exiting...")
sys.exit(1)
|
def start(node_ip_address, redis_address, address, redis_port, port,
num_redis_shards, redis_max_clients, redis_password,
redis_shard_ports, object_manager_port, node_manager_port,
gcs_server_port, min_worker_port, max_worker_port, memory,
object_store_memory, redis_max_memory, num_cpus, num_gpus, resources,
head, include_webui, webui_host, include_dashboard, dashboard_host,
dashboard_port, block, plasma_directory, huge_pages,
autoscaling_config, no_redirect_worker_output, no_redirect_output,
plasma_store_socket_name, raylet_socket_name, temp_dir,
java_worker_options, code_search_path, load_code_from_local,
system_config, lru_evict, enable_object_reconstruction,
metrics_export_port, log_style, log_color, verbose):
"""Start Ray processes manually on the local machine."""
cli_logger.log_style = log_style
cli_logger.color_mode = log_color
cli_logger.verbosity = verbose
cli_logger.detect_colors()
if gcs_server_port and not head:
raise ValueError(
"gcs_server_port can be only assigned when you specify --head.")
if redis_address is not None:
cli_logger.abort("{} is deprecated. Use {} instead.",
cf.bold("--redis-address"), cf.bold("--address"))
raise DeprecationWarning("The --redis-address argument is "
"deprecated. Please use --address instead.")
if redis_port is not None:
cli_logger.warning("{} is being deprecated. Use {} instead.",
cf.bold("--redis-port"), cf.bold("--port"))
cli_logger.old_warning(
logger, "The --redis-port argument will be deprecated soon. "
"Please use --port instead.")
if port is not None and port != redis_port:
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--port"), cf.bold("--redis-port"), cf.bold("--port"))
raise ValueError("Cannot specify both --port and --redis-port "
"as port is a rename of deprecated redis-port")
if include_webui is not None:
cli_logger.warning("{} is being deprecated. Use {} instead.",
cf.bold("--include-webui"),
cf.bold("--include-dashboard"))
cli_logger.old_warning(
logger, "The --include-webui argument will be deprecated soon"
"Please use --include-dashboard instead.")
if include_dashboard is not None:
include_dashboard = include_webui
dashboard_host_default = "localhost"
if webui_host != dashboard_host_default:
cli_logger.warning("{} is being deprecated. Use {} instead.",
cf.bold("--webui-host"),
cf.bold("--dashboard-host"))
cli_logger.old_warning(
logger, "The --webui-host argument will be deprecated"
" soon. Please use --dashboard-host instead.")
if webui_host != dashboard_host and dashboard_host != "localhost":
cli_logger.abort(
"Incompatible values for {} and {}. Use only {} instead.",
cf.bold("--dashboard-host"), cf.bold("--webui-host"),
cf.bold("--dashboard-host"))
raise ValueError(
"Cannot specify both --webui-host and --dashboard-host,"
" please specify only the latter")
else:
dashboard_host = webui_host
# Convert hostnames to numerical IP address.
if node_ip_address is not None:
node_ip_address = services.address_to_ip(node_ip_address)
if address is not None:
(redis_address, redis_address_ip,
redis_address_port) = services.validate_redis_address(address)
try:
resources = json.loads(resources)
except Exception:
cli_logger.error("`{}` is not a valid JSON string.",
cf.bold("--resources"))
cli_logger.abort(
"Valid values look like this: `{}`",
cf.bold("--resources='\\"CustomResource3\\": 1, "
"\\"CustomResource2\\": 2}'"))
raise Exception("Unable to parse the --resources argument using "
"json.loads. Try using a format like\\n\\n"
" --resources='{\\"CustomResource1\\": 3, "
"\\"CustomReseource2\\": 2}'")
redirect_worker_output = None if not no_redirect_worker_output else True
redirect_output = None if not no_redirect_output else True
ray_params = ray.parameter.RayParams(
node_ip_address=node_ip_address,
min_worker_port=min_worker_port,
max_worker_port=max_worker_port,
object_manager_port=object_manager_port,
node_manager_port=node_manager_port,
gcs_server_port=gcs_server_port,
memory=memory,
object_store_memory=object_store_memory,
redis_password=redis_password,
redirect_worker_output=redirect_worker_output,
redirect_output=redirect_output,
num_cpus=num_cpus,
num_gpus=num_gpus,
resources=resources,
plasma_directory=plasma_directory,
huge_pages=huge_pages,
plasma_store_socket_name=plasma_store_socket_name,
raylet_socket_name=raylet_socket_name,
temp_dir=temp_dir,
include_dashboard=include_dashboard,
dashboard_host=dashboard_host,
dashboard_port=dashboard_port,
java_worker_options=java_worker_options,
load_code_from_local=load_code_from_local,
code_search_path=code_search_path,
_system_config=system_config,
lru_evict=lru_evict,
enable_object_reconstruction=enable_object_reconstruction,
metrics_export_port=metrics_export_port)
if head:
# Start Ray on the head node.
if redis_shard_ports is not None:
redis_shard_ports = redis_shard_ports.split(",")
# Infer the number of Redis shards from the ports if the number is
# not provided.
if num_redis_shards is None:
num_redis_shards = len(redis_shard_ports)
# Check that the arguments match.
if len(redis_shard_ports) != num_redis_shards:
cli_logger.error(
"`{}` must be a comma-separated list of ports, "
"with length equal to `{}` (which defaults to {})",
cf.bold("--redis-shard-ports"),
cf.bold("--num-redis-shards"), cf.bold("1"))
cli_logger.abort(
"Example: `{}`",
cf.bold("--num-redis-shards 3 "
"--redis_shard_ports 6380,6381,6382"))
raise Exception("If --redis-shard-ports is provided, it must "
"have the form '6380,6381,6382', and the "
"number of ports provided must equal "
"--num-redis-shards (which is 1 if not "
"provided)")
if redis_address is not None:
cli_logger.abort(
"`{}` starts a new Redis server, `{}` should not be set.",
cf.bold("--head"), cf.bold("--address"))
raise Exception("If --head is passed in, a Redis server will be "
"started, so a Redis address should not be "
"provided.")
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address())
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(logger, "Using IP address {} for this node.",
ray_params.node_ip_address)
ray_params.update_if_absent(
redis_port=port or redis_port,
redis_shard_ports=redis_shard_ports,
redis_max_memory=redis_max_memory,
num_redis_shards=num_redis_shards,
redis_max_clients=redis_max_clients,
autoscaling_config=autoscaling_config,
)
node = ray.node.Node(
ray_params, head=True, shutdown_at_exit=block, spawn_reaper=block)
redis_address = node.redis_address
# this is a noop if new-style is not set, so the old logger calls
# are still in place
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
with cli_logger.group("Next steps"):
cli_logger.print(
"To connect to this Ray runtime from another node, run")
cli_logger.print(
cf.bold(" ray start --address='{}'{}"), redis_address,
f" --redis-password='{redis_password}'"
if redis_password else "")
cli_logger.newline()
cli_logger.print("Alternatively, use the following Python code:")
with cli_logger.indented():
with cf.with_style("monokai") as c:
cli_logger.print("{} ray", c.magenta("import"))
cli_logger.print(
"ray{}init(address{}{}{})", c.magenta("."),
c.magenta("="), c.yellow("'auto'"),
", redis_password{}{}".format(
c.magenta("="),
c.yellow("'" + redis_password + "'"))
if redis_password else "")
cli_logger.newline()
cli_logger.print(
cf.underlined("If connection fails, check your "
"firewall settings other "
"network configuration."))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger,
"\\nStarted Ray on this node. You can add additional nodes to "
"the cluster by calling\\n\\n"
" ray start --address='{}'{}\\n\\n"
"from the node you wish to add. You can connect a driver to the "
"cluster from Python by running\\n\\n"
" import ray\\n"
" ray.init(address='auto'{})\\n\\n"
"If you have trouble connecting from a different machine, check "
"that your firewall is configured properly. If you wish to "
"terminate the processes that have been started, run\\n\\n"
" ray stop".format(
redis_address, " --redis-password='" + redis_password + "'"
if redis_password else "",
", redis_password='" + redis_password + "'"
if redis_password else ""))
else:
# Start Ray on a non-head node.
if not (redis_port is None and port is None):
cli_logger.abort("`{}/{}` should not be specified without `{}`.",
cf.bold("--port"), cf.bold("--redis-port"),
cf.bold("--head"))
raise Exception(
"If --head is not passed in, --port and --redis-port are not "
"allowed.")
if redis_shard_ports is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--redis-shard-ports"), cf.bold("--head"))
raise Exception("If --head is not passed in, --redis-shard-ports "
"is not allowed.")
if redis_address is None:
cli_logger.abort("`{}` is required unless starting with `{}`.",
cf.bold("--address"), cf.bold("--head"))
raise Exception("If --head is not passed in, --address must "
"be provided.")
if num_redis_shards is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--num-redis-shards"), cf.bold("--head"))
raise Exception("If --head is not passed in, --num-redis-shards "
"must not be provided.")
if redis_max_clients is not None:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--redis-max-clients"), cf.bold("--head"))
raise Exception("If --head is not passed in, --redis-max-clients "
"must not be provided.")
if include_webui:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--include-web-ui"), cf.bold("--head"))
raise Exception("If --head is not passed in, the --include-webui"
"flag is not relevant.")
if include_dashboard:
cli_logger.abort("`{}` should not be specified without `{}`.",
cf.bold("--include-dashboard"), cf.bold("--head"))
raise ValueError(
"If --head is not passed in, the --include-dashboard"
"flag is not relevant.")
# Wait for the Redis server to be started. And throw an exception if we
# can't connect to it.
services.wait_for_redis_to_start(
redis_address_ip, redis_address_port, password=redis_password)
# Create a Redis client.
redis_client = services.create_redis_client(
redis_address, password=redis_password)
# Check that the version information on this node matches the version
# information that the cluster was started with.
services.check_version_info(redis_client)
# Get the node IP address if one is not provided.
ray_params.update_if_absent(
node_ip_address=services.get_node_ip_address(redis_address))
cli_logger.labeled_value("Local node IP", ray_params.node_ip_address)
cli_logger.old_info(logger, "Using IP address {} for this node.",
ray_params.node_ip_address)
# Check that there aren't already Redis clients with the same IP
# address connected with this Redis instance. This raises an exception
# if the Redis server already has clients on this node.
check_no_existing_redis_clients(ray_params.node_ip_address,
redis_client)
ray_params.update(redis_address=redis_address)
node = ray.node.Node(
ray_params, head=False, shutdown_at_exit=block, spawn_reaper=block)
cli_logger.newline()
startup_msg = "Ray runtime started."
cli_logger.success("-" * len(startup_msg))
cli_logger.success(startup_msg)
cli_logger.success("-" * len(startup_msg))
cli_logger.newline()
cli_logger.print("To terminate the Ray runtime, run")
cli_logger.print(cf.bold(" ray stop"))
cli_logger.old_info(
logger, "\\nStarted Ray on this node. If you wish to terminate the "
"processes that have been started, run\\n\\n"
" ray stop")
if block:
cli_logger.newline()
with cli_logger.group(cf.bold("--block")):
cli_logger.print(
"This command will now block until terminated by a signal.")
cli_logger.print(
"Runing subprocesses are monitored and a message will be "
"printed if any of them terminate unexpectedly.")
while True:
time.sleep(1)
deceased = node.dead_processes()
if len(deceased) > 0:
cli_logger.newline()
cli_logger.error("Some Ray subprcesses exited unexpectedly:")
cli_logger.old_error(logger,
"Ray processes died unexpectedly:")
with cli_logger.indented():
for process_type, process in deceased:
cli_logger.error(
"{}",
cf.bold(str(process_type)),
_tags={"exit code": str(process.returncode)})
cli_logger.old_error(
logger, "\\t{} died with exit code {}".format(
process_type, process.returncode))
# shutdown_at_exit will handle cleanup.
cli_logger.newline()
cli_logger.error("Remaining processes will be killed.")
cli_logger.old_error(
logger, "Killing remaining processes and exiting...")
sys.exit(1)
|
[{'piece_type': 'error message', 'piece_content': '$ ray memory\\n2020-09-09 05:24:50,248 INFO scripts.py:1474 -- Connecting to Ray instance at 172.31.56.46:6379.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/bin/ray", line 8, in <module>\\nsys.exit(main())\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1602, in main\\nreturn cli()\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 829, in __call__\\nreturn self.main(*args, **kwargs)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 782, in main\\nrv = self.invoke(ctx)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1259, in invoke\\nreturn _process_result(sub_ctx.command.invoke(sub_ctx))\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1066, in invoke\\nreturn ctx.invoke(self.callback, **ctx.params)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 610, in invoke\\nreturn callback(*args, **kwargs)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1475, in memory\\nray.init(address=address, redis_password=redis_password)\\nTypeError: init() got an unexpected keyword argument \\'redis_password\\''}]
|
$ ray memory
2020-09-09 05:24:50,248 INFO scripts.py:1474 -- Connecting to Ray instance at 172.31.56.46:6379.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1602, in main
return cli()
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1475, in memory
ray.init(address=address, redis_password=redis_password)
TypeError: init() got an unexpected keyword argument 'redis_password'
|
TypeError
|
def memory(address, redis_password):
    """Print object references held in a Ray cluster.

    Discovers the Redis address when none is given, connects with the
    supplied password, and prints the cluster-wide memory summary.
    """
    addr = address or services.find_redis_address_or_die()
    logger.info(f"Connecting to Ray instance at {addr}.")
    # ray.init() takes the password via the private "_redis_password" kwarg.
    ray.init(address=addr, _redis_password=redis_password)
    print(ray.internal.internal_api.memory_summary())
|
def memory(address, redis_password):
    """Print object references held in a Ray cluster.

    Args:
        address: Redis address of the cluster; auto-discovered when falsy.
        redis_password: Password for the cluster's Redis instance.
    """
    if not address:
        address = services.find_redis_address_or_die()
    logger.info(f"Connecting to Ray instance at {address}.")
    # ray.init() accepts the password only through the private
    # "_redis_password" keyword; passing "redis_password" raises
    # TypeError: init() got an unexpected keyword argument.
    ray.init(address=address, _redis_password=redis_password)
    print(ray.internal.internal_api.memory_summary())
|
[{'piece_type': 'error message', 'piece_content': '$ ray memory\\n2020-09-09 05:24:50,248 INFO scripts.py:1474 -- Connecting to Ray instance at 172.31.56.46:6379.\\nTraceback (most recent call last):\\nFile "/home/ubuntu/anaconda3/bin/ray", line 8, in <module>\\nsys.exit(main())\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1602, in main\\nreturn cli()\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 829, in __call__\\nreturn self.main(*args, **kwargs)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 782, in main\\nrv = self.invoke(ctx)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1259, in invoke\\nreturn _process_result(sub_ctx.command.invoke(sub_ctx))\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1066, in invoke\\nreturn ctx.invoke(self.callback, **ctx.params)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 610, in invoke\\nreturn callback(*args, **kwargs)\\nFile "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1475, in memory\\nray.init(address=address, redis_password=redis_password)\\nTypeError: init() got an unexpected keyword argument \\'redis_password\\''}]
|
$ ray memory
2020-09-09 05:24:50,248 INFO scripts.py:1474 -- Connecting to Ray instance at 172.31.56.46:6379.
Traceback (most recent call last):
File "/home/ubuntu/anaconda3/bin/ray", line 8, in <module>
sys.exit(main())
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1602, in main
return cli()
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/ubuntu/anaconda3/lib/python3.6/site-packages/ray/scripts/scripts.py", line 1475, in memory
ray.init(address=address, redis_password=redis_password)
TypeError: init() got an unexpected keyword argument 'redis_password'
|
TypeError
|
def choose_trial_to_run(self, trial_runner, allow_recurse=True):
    """Fair scheduling within iteration by completion percentage.

    List of trials not used since all trials are tracked as state
    of scheduler. If iteration is occupied (ie, no trials to run),
    then look into next iteration.

    Args:
        trial_runner: Runner consulted for resource availability and the
            global trial list.
        allow_recurse: Permit one level of re-entry after a bracket has
            been processed, so trials that just transitioned from PAUSED
            to PENDING can be suggested immediately.

    Returns:
        A runnable trial, or None if nothing can be scheduled now.
    """
    for hyperband in self._hyperbands:
        # band will have None entries if no resources
        # are to be allocated to that bracket.
        scrubbed = [b for b in hyperband if b is not None]
        for bracket in scrubbed:
            for trial in bracket.current_trials():
                if (trial.status == Trial.PENDING
                        and trial_runner.has_resources(trial.resources)):
                    return trial
    # No pending trial fit anywhere. If nothing is RUNNING either, try to
    # unblock progress by processing brackets that still hold PAUSED trials.
    if not any(t.status == Trial.RUNNING
               for t in trial_runner.get_trials()):
        for hyperband in self._hyperbands:
            for bracket in hyperband:
                if bracket and any(trial.status == Trial.PAUSED
                                   for trial in bracket.current_trials()):
                    # This will change the trial state
                    self._process_bracket(trial_runner, bracket)
                    # If there are pending trials now, suggest one.
                    # This is because there might be both PENDING and
                    # PAUSED trials now, and PAUSED trials will raise
                    # an error before the trial runner tries again.
                    # allow_recurse=False bounds this to one re-entry.
                    if allow_recurse and any(
                            trial.status == Trial.PENDING
                            for trial in bracket.current_trials()):
                        return self.choose_trial_to_run(
                            trial_runner, allow_recurse=False)
    return None
|
def choose_trial_to_run(self, trial_runner, allow_recurse=True):
    """Fair scheduling within iteration by completion percentage.

    List of trials not used since all trials are tracked as state
    of scheduler. If iteration is occupied (ie, no trials to run),
    then look into next iteration.

    Args:
        trial_runner: Runner consulted for resource availability and the
            global trial list.
        allow_recurse: Permit one level of re-entry after a bracket has
            been processed, so trials that just transitioned from PAUSED
            to PENDING can be suggested immediately. Defaults to True,
            keeping the original single-argument call signature valid.

    Returns:
        A runnable trial, or None if nothing can be scheduled now.
    """
    for hyperband in self._hyperbands:
        # band will have None entries if no resources
        # are to be allocated to that bracket.
        scrubbed = [b for b in hyperband if b is not None]
        for bracket in scrubbed:
            for trial in bracket.current_trials():
                if (trial.status == Trial.PENDING
                        and trial_runner.has_resources(trial.resources)):
                    return trial
    if not any(t.status == Trial.RUNNING
               for t in trial_runner.get_trials()):
        for hyperband in self._hyperbands:
            for bracket in hyperband:
                if bracket and any(trial.status == Trial.PAUSED
                                   for trial in bracket.current_trials()):
                    # This will change the trial state.
                    self._process_bracket(trial_runner, bracket)
                    # Processing may have turned PAUSED trials into
                    # PENDING ones. Returning None here while PAUSED
                    # trials remain makes the runner raise
                    # "There are paused trials, but no more pending
                    # trials with sufficient resources" — so re-enter
                    # once (bounded by allow_recurse=False) to suggest
                    # a freshly pending trial.
                    if allow_recurse and any(
                            trial.status == Trial.PENDING
                            for trial in bracket.current_trials()):
                        return self.choose_trial_to_run(
                            trial_runner, allow_recurse=False)
    return None
|
[{'piece_type': 'error message', 'piece_content': '== Status ==\\nMemory usage on this node: 7.0/15.6 GiB\\nUsing HyperBand: num_stopped=832 total_brackets=3\\nRound #0:\\nNone\\nBracket(Max Size (n)=2, Milestone (r)=1458, completed=100.0%): {RUNNING: 1, TERMINATED: 833}\\nBracket(Max Size (n)=324, Milestone (r)=8, completed=47.3%): {PAUSED: 166}\\nResources requested: 4/32 CPUs, 0/0 GPUs, 0.0/8.69 GiB heap, 0.0/2.98 GiB objects\\nResult logdir: /home/dl-user/ray_results/MCv0_DQN_BOHB\\nNumber of trials: 1000 (166 PAUSED, 1 RUNNING, 833 TERMINATED)\\n+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+\\n| Trial name | status | loc | batch_mode | lr | train_batch_size | iter | total time (s) | ts | reward |\\n|-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------|\\n| DQN_MountainCar-v0_0428be42 | PAUSED | | truncate_episodes | 1.99095e-05 | 408 | 2 | 25.6885 | 4032 | -200 |\\n| DQN_MountainCar-v0_0428be45 | PAUSED | | truncate_episodes | 0.000382289 | 211 | 2 | 24.7536 | 5040 | -200 |\\n| DQN_MountainCar-v0_0428be48 | PAUSED | | truncate_episodes | 0.000324929 | 233 | 2 | 25.5532 | 5040 | -200 |\\n| DQN_MountainCar-v0_0747e5f2 | PAUSED | | truncate_episodes | 0.000114766 | 38 | 2 | 23.8492 | 7056 | -200 |\\n| DQN_MountainCar-v0_0747e5f5 | PAUSED | | truncate_episodes | 9.1226e-05 | 200 | 2 | 24.2349 | 5040 | -200 |\\n| DQN_MountainCar-v0_08218bf0 | PAUSED | | truncate_episodes | 0.000284028 | 69 | 2 | 25.3671 | 7056 | -200 |\\n| DQN_MountainCar-v0_093c0b8c | PAUSED | | truncate_episodes | 0.00237606 | 114 | 2 | 23.3935 | 6048 | -200 |\\n| DQN_MountainCar-v0_0a55eae6 | PAUSED | | truncate_episodes | 0.000417829 | 111 | 2 | 23.4849 | 6048 | -200 |\\n| DQN_MountainCar-v0_0b307d56 | PAUSED | | truncate_episodes | 0.000196047 | 59 | 2 | 
23.1338 | 7056 | -200 |\\n| DQN_MountainCar-v0_0eedea91 | PAUSED | | truncate_episodes | 6.58278e-05 | 59 | 2 | 24.0254 | 7056 | -200 |\\n| DQN_MountainCar-v0_1fcd888b | RUNNING | 172.16.160.219:47910 | truncate_episodes | 0.000237864 | 751 | 88 | 1638.34 | 199584 | -122.05 |\\n| DQN_MountainCar-v0_0023f4f6 | TERMINATED | | truncate_episodes | 0.000255833 | 158 | 1 | 5.56779 | 1008 | -200 |\\n| DQN_MountainCar-v0_0023f4f9 | TERMINATED | | complete_episodes | 0.000262904 | 156 | 1 | 5.43817 | 1200 | -200 |\\n| DQN_MountainCar-v0_0023f4fc | TERMINATED | | complete_episodes | 0.0002605 | 260 | 1 | 5.33452 | 1200 | -200 |\\n| DQN_MountainCar-v0_0108428e | TERMINATED | | truncate_episodes | 3.89327e-05 | 732 | 4 | 36.2218 | 5040 | -200 |\\n| DQN_MountainCar-v0_01084291 | TERMINATED | | truncate_episodes | 2.39745e-05 | 714 | 4 | 36.2585 | 5040 | -200 |\\n| DQN_MountainCar-v0_01084294 | TERMINATED | | truncate_episodes | 4.9252e-05 | 808 | 4 | 38.4182 | 5040 | -200 |\\n| DQN_MountainCar-v0_01084297 | TERMINATED | | truncate_episodes | 7.42384e-05 | 804 | 4 | 38.0425 | 5040 | -200 |\\n| DQN_MountainCar-v0_014223c0 | TERMINATED | | truncate_episodes | 0.0520328 | 71 | 1 | 6.21906 | 1008 | -200 |\\n| DQN_MountainCar-v0_01939ac4 | TERMINATED | | complete_episodes | 8.34678e-05 | 124 | 1 | 5.37302 | 1200 | -200 |\\n| DQN_MountainCar-v0_01a4cc45 | TERMINATED | | complete_episodes | 0.00973094 | 373 | 3 | 27.2147 | 24000 | -200 |\\n+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+\\n... 
980 more trials not shown (156 PAUSED, 823 TERMINATED)\\n\\n\\nTraceback (most recent call last):\\nFile "/home/dl-user/python-code/modularized_version_ray/ray_BOHB.py", line 123, in <module>\\nverbose=1,\\nFile "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/tune.py", line 327, in run\\nrunner.step()\\nFile "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 342, in step\\nself.trial_executor.on_no_available_trials(self)\\nFile "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_executor.py", line 177, in on_no_available_trials\\nraise TuneError("There are paused trials, but no more pending "\\nray.tune.error.TuneError: There are paused trials, but no more pending trials with sufficient resources.\\n\\nProcess finished with exit code 1'}, {'piece_type': 'reproducing source code', 'piece_content': 'import ray\\nfrom ray import tune\\nfrom ray.tune.suggest.bohb import TuneBOHB\\nfrom ray.tune.schedulers.hb_bohb import HyperBandForBOHB\\nimport ConfigSpace as CS\\n\\nray.init()\\n\\nconfig_space = CS.ConfigurationSpace()\\nconfig_space.add_hyperparameter(CS.UniformFloatHyperparameter("lr", lower=0.00001, upper=0.1, log=True))\\nconfig_space.add_hyperparameter(CS.UniformIntegerHyperparameter("train_batch_size", lower=32, upper=1024, log=False))\\nconfig_space.add_hyperparameter(CS.CategoricalHyperparameter("batch_mode", choices=[\\'truncate_episodes\\', \\'complete_episodes\\']))\\n\\nbohb_search = TuneBOHB(\\nspace=config_space,\\nbohb_config=None,\\nmax_concurrent=32,\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\'\\n)\\n\\nbohb_hyperband = HyperBandForBOHB(\\ntime_attr=\\'episodes_total\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\nreduction_factor=3,\\n)\\n\\nconfig = {\\n"env": "MountainCar-v0",\\n"min_iter_time_s": 15,\\n"num_gpus": 0,\\n"num_workers": 1,\\n"double_q": True,\\n"n_step": 3,\\n"target_network_update_freq": 1000,\\n"buffer_size": 20000,\\n# "prioritzed_replay": 
True,\\n"learning_starts": 1000,\\n"log_level": "ERROR"\\n}\\n\\nanalysis = tune.run(\\nrun_or_experiment="DQN",\\nname=\\'MCv0_DQN_BOHB\\',\\nconfig=config,\\nnum_samples=1000,\\ncheckpoint_at_end=True,\\nsearch_alg=bohb_search,\\nscheduler=bohb_hyperband,\\nverbose=1,\\n)'}]
|
== Status ==
Memory usage on this node: 7.0/15.6 GiB
Using HyperBand: num_stopped=832 total_brackets=3
Round #0:
None
Bracket(Max Size (n)=2, Milestone (r)=1458, completed=100.0%): {RUNNING: 1, TERMINATED: 833}
Bracket(Max Size (n)=324, Milestone (r)=8, completed=47.3%): {PAUSED: 166}
Resources requested: 4/32 CPUs, 0/0 GPUs, 0.0/8.69 GiB heap, 0.0/2.98 GiB objects
Result logdir: /home/dl-user/ray_results/MCv0_DQN_BOHB
Number of trials: 1000 (166 PAUSED, 1 RUNNING, 833 TERMINATED)
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
| Trial name | status | loc | batch_mode | lr | train_batch_size | iter | total time (s) | ts | reward |
|-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------|
| DQN_MountainCar-v0_0428be42 | PAUSED | | truncate_episodes | 1.99095e-05 | 408 | 2 | 25.6885 | 4032 | -200 |
| DQN_MountainCar-v0_0428be45 | PAUSED | | truncate_episodes | 0.000382289 | 211 | 2 | 24.7536 | 5040 | -200 |
| DQN_MountainCar-v0_0428be48 | PAUSED | | truncate_episodes | 0.000324929 | 233 | 2 | 25.5532 | 5040 | -200 |
| DQN_MountainCar-v0_0747e5f2 | PAUSED | | truncate_episodes | 0.000114766 | 38 | 2 | 23.8492 | 7056 | -200 |
| DQN_MountainCar-v0_0747e5f5 | PAUSED | | truncate_episodes | 9.1226e-05 | 200 | 2 | 24.2349 | 5040 | -200 |
| DQN_MountainCar-v0_08218bf0 | PAUSED | | truncate_episodes | 0.000284028 | 69 | 2 | 25.3671 | 7056 | -200 |
| DQN_MountainCar-v0_093c0b8c | PAUSED | | truncate_episodes | 0.00237606 | 114 | 2 | 23.3935 | 6048 | -200 |
| DQN_MountainCar-v0_0a55eae6 | PAUSED | | truncate_episodes | 0.000417829 | 111 | 2 | 23.4849 | 6048 | -200 |
| DQN_MountainCar-v0_0b307d56 | PAUSED | | truncate_episodes | 0.000196047 | 59 | 2 | 23.1338 | 7056 | -200 |
| DQN_MountainCar-v0_0eedea91 | PAUSED | | truncate_episodes | 6.58278e-05 | 59 | 2 | 24.0254 | 7056 | -200 |
| DQN_MountainCar-v0_1fcd888b | RUNNING | 172.16.160.219:47910 | truncate_episodes | 0.000237864 | 751 | 88 | 1638.34 | 199584 | -122.05 |
| DQN_MountainCar-v0_0023f4f6 | TERMINATED | | truncate_episodes | 0.000255833 | 158 | 1 | 5.56779 | 1008 | -200 |
| DQN_MountainCar-v0_0023f4f9 | TERMINATED | | complete_episodes | 0.000262904 | 156 | 1 | 5.43817 | 1200 | -200 |
| DQN_MountainCar-v0_0023f4fc | TERMINATED | | complete_episodes | 0.0002605 | 260 | 1 | 5.33452 | 1200 | -200 |
| DQN_MountainCar-v0_0108428e | TERMINATED | | truncate_episodes | 3.89327e-05 | 732 | 4 | 36.2218 | 5040 | -200 |
| DQN_MountainCar-v0_01084291 | TERMINATED | | truncate_episodes | 2.39745e-05 | 714 | 4 | 36.2585 | 5040 | -200 |
| DQN_MountainCar-v0_01084294 | TERMINATED | | truncate_episodes | 4.9252e-05 | 808 | 4 | 38.4182 | 5040 | -200 |
| DQN_MountainCar-v0_01084297 | TERMINATED | | truncate_episodes | 7.42384e-05 | 804 | 4 | 38.0425 | 5040 | -200 |
| DQN_MountainCar-v0_014223c0 | TERMINATED | | truncate_episodes | 0.0520328 | 71 | 1 | 6.21906 | 1008 | -200 |
| DQN_MountainCar-v0_01939ac4 | TERMINATED | | complete_episodes | 8.34678e-05 | 124 | 1 | 5.37302 | 1200 | -200 |
| DQN_MountainCar-v0_01a4cc45 | TERMINATED | | complete_episodes | 0.00973094 | 373 | 3 | 27.2147 | 24000 | -200 |
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
... 980 more trials not shown (156 PAUSED, 823 TERMINATED)
Traceback (most recent call last):
File "/home/dl-user/python-code/modularized_version_ray/ray_BOHB.py", line 123, in <module>
verbose=1,
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/tune.py", line 327, in run
runner.step()
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 342, in step
self.trial_executor.on_no_available_trials(self)
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_executor.py", line 177, in on_no_available_trials
raise TuneError("There are paused trials, but no more pending "
ray.tune.error.TuneError: There are paused trials, but no more pending trials with sufficient resources.
Process finished with exit code 1
|
ray.tune.error.TuneError
|
def debug_string(self):
    """Render a progress summary for the HyperBand algorithm.

    Each non-empty bracket is printed on its own line, e.g.:
    Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
    {PENDING: 2, RUNNING: 3, TERMINATED: 2}
    "Max Size" is the pending/running cap chosen by HyperBand,
    "Milestone" is the iteration count before the next halving, and
    "completed" is an approximate progress metric (unfilled brackets
    never reach 100%).
    """
    total = sum(len(band) for band in self._hyperbands)
    pieces = [
        "Using HyperBand: " + "num_stopped={} total_brackets={}".format(
            self._num_stopped, total)
    ]
    for round_idx, band in enumerate(self._hyperbands):
        pieces.append("\\nRound #{}:".format(round_idx))
        # Slots holding None (brackets with no resources) are skipped.
        pieces.extend("\\n  {}".format(b) for b in band if b)
    return "".join(pieces)
|
def debug_string(self):
    """This provides a progress notification for the algorithm.
    For each bracket, the algorithm will output a string as follows:
    Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
    {PENDING: 2, RUNNING: 3, TERMINATED: 2}
    "Max Size" indicates the max number of pending/running experiments
    set according to the Hyperband algorithm.
    "Milestone" indicates the iterations a trial will run for before
    the next halving will occur.
    "Completed" indicates an approximate progress metric. Some brackets,
    like ones that are unfilled, will not reach 100%.
    """
    out = "Using HyperBand: "
    out += "num_stopped={} total_brackets={}".format(
        self._num_stopped, sum(len(band) for band in self._hyperbands))
    for i, band in enumerate(self._hyperbands):
        out += "\\nRound #{}:".format(i)
        for bracket in band:
            # Bands are padded with None for bracket slots that got no
            # resources; skip them instead of printing a literal "None"
            # line in the status output.
            if bracket:
                out += "\\n  {}".format(bracket)
    return out
|
[{'piece_type': 'error message', 'piece_content': '== Status ==\\nMemory usage on this node: 7.0/15.6 GiB\\nUsing HyperBand: num_stopped=832 total_brackets=3\\nRound #0:\\nNone\\nBracket(Max Size (n)=2, Milestone (r)=1458, completed=100.0%): {RUNNING: 1, TERMINATED: 833}\\nBracket(Max Size (n)=324, Milestone (r)=8, completed=47.3%): {PAUSED: 166}\\nResources requested: 4/32 CPUs, 0/0 GPUs, 0.0/8.69 GiB heap, 0.0/2.98 GiB objects\\nResult logdir: /home/dl-user/ray_results/MCv0_DQN_BOHB\\nNumber of trials: 1000 (166 PAUSED, 1 RUNNING, 833 TERMINATED)\\n+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+\\n| Trial name | status | loc | batch_mode | lr | train_batch_size | iter | total time (s) | ts | reward |\\n|-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------|\\n| DQN_MountainCar-v0_0428be42 | PAUSED | | truncate_episodes | 1.99095e-05 | 408 | 2 | 25.6885 | 4032 | -200 |\\n| DQN_MountainCar-v0_0428be45 | PAUSED | | truncate_episodes | 0.000382289 | 211 | 2 | 24.7536 | 5040 | -200 |\\n| DQN_MountainCar-v0_0428be48 | PAUSED | | truncate_episodes | 0.000324929 | 233 | 2 | 25.5532 | 5040 | -200 |\\n| DQN_MountainCar-v0_0747e5f2 | PAUSED | | truncate_episodes | 0.000114766 | 38 | 2 | 23.8492 | 7056 | -200 |\\n| DQN_MountainCar-v0_0747e5f5 | PAUSED | | truncate_episodes | 9.1226e-05 | 200 | 2 | 24.2349 | 5040 | -200 |\\n| DQN_MountainCar-v0_08218bf0 | PAUSED | | truncate_episodes | 0.000284028 | 69 | 2 | 25.3671 | 7056 | -200 |\\n| DQN_MountainCar-v0_093c0b8c | PAUSED | | truncate_episodes | 0.00237606 | 114 | 2 | 23.3935 | 6048 | -200 |\\n| DQN_MountainCar-v0_0a55eae6 | PAUSED | | truncate_episodes | 0.000417829 | 111 | 2 | 23.4849 | 6048 | -200 |\\n| DQN_MountainCar-v0_0b307d56 | PAUSED | | truncate_episodes | 0.000196047 | 59 | 2 | 
23.1338 | 7056 | -200 |\\n| DQN_MountainCar-v0_0eedea91 | PAUSED | | truncate_episodes | 6.58278e-05 | 59 | 2 | 24.0254 | 7056 | -200 |\\n| DQN_MountainCar-v0_1fcd888b | RUNNING | 172.16.160.219:47910 | truncate_episodes | 0.000237864 | 751 | 88 | 1638.34 | 199584 | -122.05 |\\n| DQN_MountainCar-v0_0023f4f6 | TERMINATED | | truncate_episodes | 0.000255833 | 158 | 1 | 5.56779 | 1008 | -200 |\\n| DQN_MountainCar-v0_0023f4f9 | TERMINATED | | complete_episodes | 0.000262904 | 156 | 1 | 5.43817 | 1200 | -200 |\\n| DQN_MountainCar-v0_0023f4fc | TERMINATED | | complete_episodes | 0.0002605 | 260 | 1 | 5.33452 | 1200 | -200 |\\n| DQN_MountainCar-v0_0108428e | TERMINATED | | truncate_episodes | 3.89327e-05 | 732 | 4 | 36.2218 | 5040 | -200 |\\n| DQN_MountainCar-v0_01084291 | TERMINATED | | truncate_episodes | 2.39745e-05 | 714 | 4 | 36.2585 | 5040 | -200 |\\n| DQN_MountainCar-v0_01084294 | TERMINATED | | truncate_episodes | 4.9252e-05 | 808 | 4 | 38.4182 | 5040 | -200 |\\n| DQN_MountainCar-v0_01084297 | TERMINATED | | truncate_episodes | 7.42384e-05 | 804 | 4 | 38.0425 | 5040 | -200 |\\n| DQN_MountainCar-v0_014223c0 | TERMINATED | | truncate_episodes | 0.0520328 | 71 | 1 | 6.21906 | 1008 | -200 |\\n| DQN_MountainCar-v0_01939ac4 | TERMINATED | | complete_episodes | 8.34678e-05 | 124 | 1 | 5.37302 | 1200 | -200 |\\n| DQN_MountainCar-v0_01a4cc45 | TERMINATED | | complete_episodes | 0.00973094 | 373 | 3 | 27.2147 | 24000 | -200 |\\n+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+\\n... 
980 more trials not shown (156 PAUSED, 823 TERMINATED)\\n\\n\\nTraceback (most recent call last):\\nFile "/home/dl-user/python-code/modularized_version_ray/ray_BOHB.py", line 123, in <module>\\nverbose=1,\\nFile "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/tune.py", line 327, in run\\nrunner.step()\\nFile "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 342, in step\\nself.trial_executor.on_no_available_trials(self)\\nFile "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_executor.py", line 177, in on_no_available_trials\\nraise TuneError("There are paused trials, but no more pending "\\nray.tune.error.TuneError: There are paused trials, but no more pending trials with sufficient resources.\\n\\nProcess finished with exit code 1'}, {'piece_type': 'reproducing source code', 'piece_content': 'import ray\\nfrom ray import tune\\nfrom ray.tune.suggest.bohb import TuneBOHB\\nfrom ray.tune.schedulers.hb_bohb import HyperBandForBOHB\\nimport ConfigSpace as CS\\n\\nray.init()\\n\\nconfig_space = CS.ConfigurationSpace()\\nconfig_space.add_hyperparameter(CS.UniformFloatHyperparameter("lr", lower=0.00001, upper=0.1, log=True))\\nconfig_space.add_hyperparameter(CS.UniformIntegerHyperparameter("train_batch_size", lower=32, upper=1024, log=False))\\nconfig_space.add_hyperparameter(CS.CategoricalHyperparameter("batch_mode", choices=[\\'truncate_episodes\\', \\'complete_episodes\\']))\\n\\nbohb_search = TuneBOHB(\\nspace=config_space,\\nbohb_config=None,\\nmax_concurrent=32,\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\'\\n)\\n\\nbohb_hyperband = HyperBandForBOHB(\\ntime_attr=\\'episodes_total\\',\\nmetric=\\'episode_reward_mean\\',\\nmode=\\'max\\',\\nmax_t=2000,\\nreduction_factor=3,\\n)\\n\\nconfig = {\\n"env": "MountainCar-v0",\\n"min_iter_time_s": 15,\\n"num_gpus": 0,\\n"num_workers": 1,\\n"double_q": True,\\n"n_step": 3,\\n"target_network_update_freq": 1000,\\n"buffer_size": 20000,\\n# "prioritzed_replay": 
True,\\n"learning_starts": 1000,\\n"log_level": "ERROR"\\n}\\n\\nanalysis = tune.run(\\nrun_or_experiment="DQN",\\nname=\\'MCv0_DQN_BOHB\\',\\nconfig=config,\\nnum_samples=1000,\\ncheckpoint_at_end=True,\\nsearch_alg=bohb_search,\\nscheduler=bohb_hyperband,\\nverbose=1,\\n)'}]
|
== Status ==
Memory usage on this node: 7.0/15.6 GiB
Using HyperBand: num_stopped=832 total_brackets=3
Round #0:
None
Bracket(Max Size (n)=2, Milestone (r)=1458, completed=100.0%): {RUNNING: 1, TERMINATED: 833}
Bracket(Max Size (n)=324, Milestone (r)=8, completed=47.3%): {PAUSED: 166}
Resources requested: 4/32 CPUs, 0/0 GPUs, 0.0/8.69 GiB heap, 0.0/2.98 GiB objects
Result logdir: /home/dl-user/ray_results/MCv0_DQN_BOHB
Number of trials: 1000 (166 PAUSED, 1 RUNNING, 833 TERMINATED)
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
| Trial name | status | loc | batch_mode | lr | train_batch_size | iter | total time (s) | ts | reward |
|-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------|
| DQN_MountainCar-v0_0428be42 | PAUSED | | truncate_episodes | 1.99095e-05 | 408 | 2 | 25.6885 | 4032 | -200 |
| DQN_MountainCar-v0_0428be45 | PAUSED | | truncate_episodes | 0.000382289 | 211 | 2 | 24.7536 | 5040 | -200 |
| DQN_MountainCar-v0_0428be48 | PAUSED | | truncate_episodes | 0.000324929 | 233 | 2 | 25.5532 | 5040 | -200 |
| DQN_MountainCar-v0_0747e5f2 | PAUSED | | truncate_episodes | 0.000114766 | 38 | 2 | 23.8492 | 7056 | -200 |
| DQN_MountainCar-v0_0747e5f5 | PAUSED | | truncate_episodes | 9.1226e-05 | 200 | 2 | 24.2349 | 5040 | -200 |
| DQN_MountainCar-v0_08218bf0 | PAUSED | | truncate_episodes | 0.000284028 | 69 | 2 | 25.3671 | 7056 | -200 |
| DQN_MountainCar-v0_093c0b8c | PAUSED | | truncate_episodes | 0.00237606 | 114 | 2 | 23.3935 | 6048 | -200 |
| DQN_MountainCar-v0_0a55eae6 | PAUSED | | truncate_episodes | 0.000417829 | 111 | 2 | 23.4849 | 6048 | -200 |
| DQN_MountainCar-v0_0b307d56 | PAUSED | | truncate_episodes | 0.000196047 | 59 | 2 | 23.1338 | 7056 | -200 |
| DQN_MountainCar-v0_0eedea91 | PAUSED | | truncate_episodes | 6.58278e-05 | 59 | 2 | 24.0254 | 7056 | -200 |
| DQN_MountainCar-v0_1fcd888b | RUNNING | 172.16.160.219:47910 | truncate_episodes | 0.000237864 | 751 | 88 | 1638.34 | 199584 | -122.05 |
| DQN_MountainCar-v0_0023f4f6 | TERMINATED | | truncate_episodes | 0.000255833 | 158 | 1 | 5.56779 | 1008 | -200 |
| DQN_MountainCar-v0_0023f4f9 | TERMINATED | | complete_episodes | 0.000262904 | 156 | 1 | 5.43817 | 1200 | -200 |
| DQN_MountainCar-v0_0023f4fc | TERMINATED | | complete_episodes | 0.0002605 | 260 | 1 | 5.33452 | 1200 | -200 |
| DQN_MountainCar-v0_0108428e | TERMINATED | | truncate_episodes | 3.89327e-05 | 732 | 4 | 36.2218 | 5040 | -200 |
| DQN_MountainCar-v0_01084291 | TERMINATED | | truncate_episodes | 2.39745e-05 | 714 | 4 | 36.2585 | 5040 | -200 |
| DQN_MountainCar-v0_01084294 | TERMINATED | | truncate_episodes | 4.9252e-05 | 808 | 4 | 38.4182 | 5040 | -200 |
| DQN_MountainCar-v0_01084297 | TERMINATED | | truncate_episodes | 7.42384e-05 | 804 | 4 | 38.0425 | 5040 | -200 |
| DQN_MountainCar-v0_014223c0 | TERMINATED | | truncate_episodes | 0.0520328 | 71 | 1 | 6.21906 | 1008 | -200 |
| DQN_MountainCar-v0_01939ac4 | TERMINATED | | complete_episodes | 8.34678e-05 | 124 | 1 | 5.37302 | 1200 | -200 |
| DQN_MountainCar-v0_01a4cc45 | TERMINATED | | complete_episodes | 0.00973094 | 373 | 3 | 27.2147 | 24000 | -200 |
+-----------------------------+------------+----------------------+-------------------+-------------+--------------------+--------+------------------+--------+----------+
... 980 more trials not shown (156 PAUSED, 823 TERMINATED)
Traceback (most recent call last):
File "/home/dl-user/python-code/modularized_version_ray/ray_BOHB.py", line 123, in <module>
verbose=1,
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/tune.py", line 327, in run
runner.step()
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_runner.py", line 342, in step
self.trial_executor.on_no_available_trials(self)
File "/home/dl-user/.local/lib/python3.7/site-packages/ray/tune/trial_executor.py", line 177, in on_no_available_trials
raise TuneError("There are paused trials, but no more pending "
ray.tune.error.TuneError: There are paused trials, but no more pending trials with sufficient resources.
Process finished with exit code 1
|
ray.tune.error.TuneError
|
def run_rsync_up(self, source, target):
    """Upload *source* to *target* on the pod, preferring rsync.

    Rewrites a leading "~" in the target to "/root" (pods run as root),
    then tries the kubectl-rsync helper; on any failure it falls back to
    the slower ``kubectl cp`` path via run_cp_up().
    """
    if target.startswith("~"):
        target = "/root" + target[1:]
    remote = "{}@{}:{}".format(self.node_id, self.namespace, target)
    try:
        self.process_runner.check_call(
            [KUBECTL_RSYNC, "-avz", source, remote])
    except Exception as exc:
        message = "rsync failed: '{}'. Falling back to 'kubectl cp'".format(
            exc)
        logger.warning(self.log_prefix + message)
        self.run_cp_up(source, target)
|
def run_rsync_up(self, source, target):
    """Upload *source* to *target* on the pod, preferring rsync.

    Rewrites a leading "~" in the target to "/root" (pods run as root),
    then tries the kubectl-rsync helper; on any failure it falls back to
    an inline ``kubectl cp`` invocation.
    """
    if target.startswith("~"):
        target = "/root" + target[1:]
    remote = "{}@{}:{}".format(self.node_id, self.namespace, target)
    try:
        self.process_runner.check_call(
            [KUBECTL_RSYNC, "-avz", source, remote])
    except Exception as exc:
        message = "rsync failed: '{}'. Falling back to 'kubectl cp'".format(
            exc)
        logger.warning(self.log_prefix + message)
        cp_dest = "{}/{}:{}".format(self.namespace, self.node_id, target)
        self.process_runner.check_call(self.kubectl + ["cp", source, cp_dest])
|
[{'piece_type': 'error message', 'piece_content': '2020-07-17 21:53:48,101\\tERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "code/checkpoint_validation_failed.py", line 115, in <module>\\n_main()\\nFile "code/checkpoint_validation_failed.py", line 94, in _main\\nprint(f"VALIDATE TRAINABLE CLASS: {validate_save_restore(TrainExample)}")\\nFile "code/checkpoint_validation_failed.py", line 67, in validate_save_restore\\nrestore_check = ray.get(trainable_2.restore.remote(old_trainable))\\nFile "/opt/conda/lib/python3.6/site-packages/ray/worker.py", line 1526, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(FileNotFoundError): ray::TrainExample.restore() (pid=2704, ip=172.17.0.8)\\nFile "python/ray/_raylet.pyx", line 471, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 424, in ray._raylet.execute_task.function_executor\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trainable.py", line 444, in restore\\nwith open(checkpoint_path + ".tune_metadata", "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'/root/ray_results/2020-07-17_21-43-17dbvo01tp/checkpoint_3/.tune_metadata\\'\\ncommand terminated with exit 
code 1'}]
|
2020-07-17 21:53:48,101 ERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.
|
ray.tune.error.TuneError
|
def run_rsync_down(self, source, target):
    """Download *source* from the pod into local *target*, preferring rsync.

    Rewrites a leading "~" in the target to "/root", then tries the
    kubectl-rsync helper; on any failure it falls back to the slower
    ``kubectl cp`` path via run_cp_down().
    """
    if target.startswith("~"):
        target = "/root" + target[1:]
    remote = "{}@{}:{}".format(self.node_id, self.namespace, source)
    try:
        self.process_runner.check_call(
            [KUBECTL_RSYNC, "-avz", remote, target])
    except Exception as exc:
        message = "rsync failed: '{}'. Falling back to 'kubectl cp'".format(
            exc)
        logger.warning(self.log_prefix + message)
        self.run_cp_down(source, target)
|
def run_rsync_down(self, source, target):
    """Sync files down from the pod, preferring rsync over kubectl cp."""
    # "~" is not expanded inside the pod; remap it to root's home.
    if target.startswith("~"):
        target = "/root" + target[1:]
    rsync_source = "{}@{}:{}".format(self.node_id, self.namespace, source)
    try:
        self.process_runner.check_call(
            [KUBECTL_RSYNC, "-avz", rsync_source, target])
    except Exception as e:
        logger.warning(
            self.log_prefix +
            "rsync failed: '{}'. Falling back to 'kubectl cp'".format(e))
        # kubectl cp uses namespace/pod:path addressing.
        cp_source = "{}/{}:{}".format(self.namespace, self.node_id, source)
        self.process_runner.check_call(
            self.kubectl + ["cp", cp_source, target])
|
[{'piece_type': 'error message', 'piece_content': '2020-07-17 21:53:48,101\\tERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "code/checkpoint_validation_failed.py", line 115, in <module>\\n_main()\\nFile "code/checkpoint_validation_failed.py", line 94, in _main\\nprint(f"VALIDATE TRAINABLE CLASS: {validate_save_restore(TrainExample)}")\\nFile "code/checkpoint_validation_failed.py", line 67, in validate_save_restore\\nrestore_check = ray.get(trainable_2.restore.remote(old_trainable))\\nFile "/opt/conda/lib/python3.6/site-packages/ray/worker.py", line 1526, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(FileNotFoundError): ray::TrainExample.restore() (pid=2704, ip=172.17.0.8)\\nFile "python/ray/_raylet.pyx", line 471, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 424, in ray._raylet.execute_task.function_executor\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trainable.py", line 444, in restore\\nwith open(checkpoint_path + ".tune_metadata", "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'/root/ray_results/2020-07-17_21-43-17dbvo01tp/checkpoint_3/.tune_metadata\\'\\ncommand terminated with exit 
code 1'}]
|
2020-07-17 21:53:48,101 ERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.
|
ray.tune.error.TuneError
|
def get_node_syncer(local_dir, remote_dir=None, sync_function=None):
    """Returns a NodeSyncer.

    Args:
        local_dir (str): Source directory for syncing.
        remote_dir (str): Target directory for syncing. If not provided, a
            noop Syncer is returned.
        sync_function (func|str|bool): Function for syncing the local_dir to
            remote_dir. If string, then it must be a string template for
            syncer to run. If True or not provided, it defaults rsync. If
            False, a noop Syncer is returned.
    """
    key = (local_dir, remote_dir)
    # Syncers are cached per (local, remote) pair.
    if key not in _syncers:
        if isclass(sync_function) and issubclass(sync_function, Syncer):
            # A custom Syncer subclass is instantiated directly.
            _syncers[key] = sync_function(local_dir, remote_dir, None)
            return _syncers[key]
        if not remote_dir or sync_function is False:
            client = NOOP
        elif sync_function and sync_function is not True:
            client = get_sync_client(sync_function)
        else:
            # Default: derive an rsync command template, if available.
            template = log_sync_template()
            if template:
                client = CommandBasedClient(template, template)
                client.set_logdir(local_dir)
            else:
                client = NOOP
        _syncers[key] = NodeSyncer(local_dir, remote_dir, client)
    return _syncers[key]
|
def get_node_syncer(local_dir, remote_dir=None, sync_function=None):
    """Returns a NodeSyncer.

    Args:
        local_dir (str): Source directory for syncing.
        remote_dir (str): Target directory for syncing. If not provided, a
            noop Syncer is returned.
        sync_function (func|str|bool): Function for syncing the local_dir to
            remote_dir. If string, then it must be a string template for
            syncer to run. If True or not provided, it defaults rsync. If
            False, a noop Syncer is returned.
    """
    key = (local_dir, remote_dir)
    # Syncers are cached per (local, remote) pair.
    if key not in _syncers:
        if not remote_dir or sync_function is False:
            client = NOOP
        elif sync_function and sync_function is not True:
            client = get_sync_client(sync_function)
        else:
            # Default: derive an rsync command template, if available.
            template = log_sync_template()
            client = NOOP
            if template:
                client = CommandBasedClient(template, template)
                client.set_logdir(local_dir)
        _syncers[key] = NodeSyncer(local_dir, remote_dir, client)
    return _syncers[key]
|
[{'piece_type': 'error message', 'piece_content': '2020-07-17 21:53:48,101\\tERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/\\nTraceback (most recent call last):\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save\\ntrial.on_checkpoint(trial.saving_to)\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint\\nself, checkpoint.value))\\nray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.'}, {'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "code/checkpoint_validation_failed.py", line 115, in <module>\\n_main()\\nFile "code/checkpoint_validation_failed.py", line 94, in _main\\nprint(f"VALIDATE TRAINABLE CLASS: {validate_save_restore(TrainExample)}")\\nFile "code/checkpoint_validation_failed.py", line 67, in validate_save_restore\\nrestore_check = ray.get(trainable_2.restore.remote(old_trainable))\\nFile "/opt/conda/lib/python3.6/site-packages/ray/worker.py", line 1526, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(FileNotFoundError): ray::TrainExample.restore() (pid=2704, ip=172.17.0.8)\\nFile "python/ray/_raylet.pyx", line 471, in ray._raylet.execute_task\\nFile "python/ray/_raylet.pyx", line 424, in ray._raylet.execute_task.function_executor\\nFile "/opt/conda/lib/python3.6/site-packages/ray/tune/trainable.py", line 444, in restore\\nwith open(checkpoint_path + ".tune_metadata", "rb") as f:\\nFileNotFoundError: [Errno 2] No such file or directory: \\'/root/ray_results/2020-07-17_21-43-17dbvo01tp/checkpoint_3/.tune_metadata\\'\\ncommand terminated with exit 
code 1'}]
|
2020-07-17 21:53:48,101 ERROR trial_runner.py:550 -- Trial TrainExample_fd24b_00001: Error handling checkpoint /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial_runner.py", line 546, in _process_trial_save
trial.on_checkpoint(trial.saving_to)
File "/opt/conda/lib/python3.6/site-packages/ray/tune/trial.py", line 448, in on_checkpoint
self, checkpoint.value))
ray.tune.error.TuneError: Trial TrainExample_fd24b_00001: Checkpoint path /root/ray_results/TrainExample/TrainExample_1_randomforestclassifier__n_estimators=5_2020-07-17_21-53-462l3hkjfs/checkpoint_1/ not found after successful sync down.
|
ray.tune.error.TuneError
|
def action_prob(self, batch: SampleBatchType) -> np.ndarray:
    """Returns the probs for the batch actions for the current policy."""
    # Count recurrent-state columns so they can be forwarded in index
    # order (state_in_0, state_in_1, ...).
    num_state_inputs = sum(
        1 for k in batch.keys() if k.startswith("state_in_"))
    state_batches = [
        batch["state_in_{}".format(i)] for i in range(num_state_inputs)
    ]
    log_likelihoods: TensorType = self.policy.compute_log_likelihoods(
        actions=batch[SampleBatch.ACTIONS],
        obs_batch=batch[SampleBatch.CUR_OBS],
        state_batches=state_batches,
        prev_action_batch=batch.data.get(SampleBatch.PREV_ACTIONS),
        prev_reward_batch=batch.data.get(SampleBatch.PREV_REWARDS))
    return convert_to_numpy(log_likelihoods)
|
def action_prob(self, batch: SampleBatchType) -> TensorType:
    """Returns the probs for the batch actions for the current policy."""
    # Count recurrent-state columns so they can be forwarded in index
    # order (state_in_0, state_in_1, ...).
    num_state_inputs = sum(
        1 for k in batch.keys() if k.startswith("state_in_"))
    state_batches = [
        batch["state_in_{}".format(i)] for i in range(num_state_inputs)
    ]
    return self.policy.compute_log_likelihoods(
        actions=batch[SampleBatch.ACTIONS],
        obs_batch=batch[SampleBatch.CUR_OBS],
        state_batches=state_batches,
        prev_action_batch=batch.data.get(SampleBatch.PREV_ACTIONS),
        prev_reward_batch=batch.data.get(SampleBatch.PREV_REWARDS))
|
[{'piece_type': 'error message', 'piece_content': 'Traceback (most recent call last):\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\tune\\\\trial_runner.py", line 497, in _process_trial\\nresult = self.trial_executor.fetch_result(trial)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\tune\\\\ray_trial_executor.py", line 434, in fetch_result\\nresult = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\worker.py", line 1553, in get\\nraise value.as_instanceof_cause()\\nray.exceptions.RayTaskError(AttributeError): ray::MARWIL.train() (pid=9136, ip=10.0.0.18)\\nFile "python\\\\ray\\\\_raylet.pyx", line 474, in ray._raylet.execute_task\\nFile "python\\\\ray\\\\_raylet.pyx", line 427, in ray._raylet.execute_task.function_executor\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\function_manager.py", line 567, in actor_method_executor\\nraise e\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\function_manager.py", line 559, in actor_method_executor\\nmethod_returns = method(actor, *args, **kwargs)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\rllib\\\\agents\\\\trainer.py", line 522, in train\\nraise e\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\rllib\\\\agents\\\\trainer.py", line 508, in train\\nresult = Trainable.train(self)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\tune\\\\trainable.py", line 337, in train\\nresult = self.step()\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\rllib\\\\agents\\\\trainer_template.py", line 110, in step\\nres = next(self.train_exec_impl)\\nFile 
"C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\util\\\\iter.py", line 758, in __next__\\nreturn next(self.built_iterator)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\util\\\\iter.py", line 793, in apply_foreach\\nresult = fn(item)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\rllib\\\\execution\\\\metric_ops.py", line 87, in __call__\\nres = summarize_episodes(episodes, orig_episodes)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\ray\\\\rllib\\\\evaluation\\\\metrics.py", line 173, in summarize_episodes\\nmetrics[k] = np.mean(v_list)\\nFile "<__array_function__ internals>", line 6, in mean\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\numpy\\\\core\\\\fromnumeric.py", line 3335, in mean\\nout=out, **kwargs)\\nFile "C:\\\\Users\\\\Julius\\\\Anaconda3\\\\envs\\\\ray\\\\lib\\\\site-packages\\\\numpy\\\\core\\\\_methods.py", line 161, in _mean\\nret = ret.dtype.type(ret / rcount)\\nAttributeError: \\'torch.dtype\\' object has no attribute \\'type\\''}]
|
Traceback (most recent call last):
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\tune\\trial_runner.py", line 497, in _process_trial
result = self.trial_executor.fetch_result(trial)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\tune\\ray_trial_executor.py", line 434, in fetch_result
result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\worker.py", line 1553, in get
raise value.as_instanceof_cause()
ray.exceptions.RayTaskError(AttributeError): ray::MARWIL.train() (pid=9136, ip=10.0.0.18)
File "python\\ray\\_raylet.pyx", line 474, in ray._raylet.execute_task
File "python\\ray\\_raylet.pyx", line 427, in ray._raylet.execute_task.function_executor
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\function_manager.py", line 567, in actor_method_executor
raise e
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\function_manager.py", line 559, in actor_method_executor
method_returns = method(actor, *args, **kwargs)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\rllib\\agents\\trainer.py", line 522, in train
raise e
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\rllib\\agents\\trainer.py", line 508, in train
result = Trainable.train(self)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\tune\\trainable.py", line 337, in train
result = self.step()
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\rllib\\agents\\trainer_template.py", line 110, in step
res = next(self.train_exec_impl)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\util\\iter.py", line 758, in __next__
return next(self.built_iterator)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\util\\iter.py", line 793, in apply_foreach
result = fn(item)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\rllib\\execution\\metric_ops.py", line 87, in __call__
res = summarize_episodes(episodes, orig_episodes)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\ray\\rllib\\evaluation\\metrics.py", line 173, in summarize_episodes
metrics[k] = np.mean(v_list)
File "<__array_function__ internals>", line 6, in mean
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\numpy\\core\\fromnumeric.py", line 3335, in mean
out=out, **kwargs)
File "C:\\Users\\Julius\\Anaconda3\\envs\\ray\\lib\\site-packages\\numpy\\core\\_methods.py", line 161, in _mean
ret = ret.dtype.type(ret / rcount)
AttributeError: 'torch.dtype' object has no attribute 'type'
|
AttributeError
|
def run_rsync_up(self, source, target):
    """Rsync to the host, then copy into the container if it is running."""
    # TODO(ilr) Expose this to before NodeUpdater::sync_file_mounts
    container_target = target
    # /root is not writable over plain SSH; stage under /tmp/root first.
    if target.find("/root") == 0:
        target = target.replace("/root", "/tmp/root")
    parent_dir = os.path.dirname(target.rstrip("/"))
    self.ssh_command_runner.run(f"mkdir -p {parent_dir}")
    self.ssh_command_runner.run_rsync_up(source, target)
    if self._check_container_status():
        self.ssh_command_runner.run("docker cp {} {}:{}".format(
            target, self.docker_name,
            self._docker_expand_user(container_target)))
|
def run_rsync_up(self, source, target):
    """Rsync ``source`` to the node, then mirror it into the container.

    A ``/root``-prefixed target is rewritten to ``/tmp/root`` for the host
    rsync step; the staged file is afterwards copied (``docker cp``) to the
    original path inside the container when one is running.
    """
    protected_path = target
    if target.startswith("/root"):
        # Host-side staging path; the in-container destination stays the
        # original (protected) path.
        target = target.replace("/root", "/tmp/root")
    parent_dir = os.path.dirname(target.rstrip('/'))
    self.ssh_command_runner.run(f"mkdir -p {parent_dir}")
    self.ssh_command_runner.run_rsync_up(source, target)
    if self._check_container_status():
        copy_cmd = "docker cp {} {}:{}".format(
            target, self.docker_name,
            self._docker_expand_user(protected_path))
        self.ssh_command_runner.run(copy_cmd)
|
[{'piece_type': 'error message', 'piece_content': '(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml\\n2020-08-12 20:12:39,383\\tINFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1\\n2020-08-12 20:12:39,612\\tINFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1\\n2020-08-12 20:12:39,745\\tINFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:39,746\\tINFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:40,358\\tINFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\nThis will create a new cluster [y/N]: y\\n2020-08-12 20:12:42,619\\tINFO commands.py:531 -- get_or_create_head_node: Launching new head node...\\n2020-08-12 20:12:42,620\\tINFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).\\n2020-08-12 20:12:44,032\\tINFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]\\n2020-08-12 20:12:44,223\\tINFO commands.py:570 -- get_or_create_head_node: Updating files on head node...\\n2020-08-12 20:12:44,320\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 
20:12:54,409\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 20:12:54,534\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]\\n2020-08-12 20:12:54,534\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker\\'\\nWarning: Permanently added \\'3.226.253.119\\' (ECDSA) to the list of known hosts.\\n/usr/bin/docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:04,587\\tINFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9\\n2020-08-12 20:14:04,587\\tINFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...\\n2020-08-12 20:14:04,587\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' 
\\'\\n2020-08-12 20:14:04,950\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=361ms]\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:21,222\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:26,417\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared 
connection to 3.226.253.119 closed.\\n2020-08-12 20:14:31,610\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:36,798\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:41,986\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o 
StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:47,170\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:52,358\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o 
ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:57,554\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:02,750\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:07,938\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:13,126\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:18,307\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:23,494\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 
20:19:01,502\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:19:06,689\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]\\n2020-08-12 20:19:06,690\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]\\n2020-08-12 20:19:06,690\\tERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node\\n\\nException in thread Thread-2:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run\\nself.do_update()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update\\nself.wait_ready(deadline)\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready\\nassert False, "Unable to connect to node"\\nAssertionError: Unable to connect to node\\n\\n2020-08-12 20:19:06,962\\tERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 
failed\\n2020-08-12 20:19:07,002\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=312ms]'}, {'piece_type': 'other', 'piece_content': '# An unique identifier for the head node and workers of this cluster.\\ncluster_name: richard_cluster_gpu_dummy\\n\\n# The minimum number of workers nodes to launch in addition to the head\\n# node. This number should be >= 0.\\nmin_workers: 1\\n\\n# The maximum number of workers nodes to launch in addition to the head\\n# node. This takes precedence over min_workers.\\nmax_workers: 5\\n\\n# The initial number of worker nodes to launch in addition to the head\\n# node. When the cluster is first brought up (or when it is refreshed with a\\n# subsequent `ray up`) this number of nodes will be started.\\ninitial_workers: 1\\n\\n# Whether or not to autoscale aggressively. If this is enabled, if at any point\\n# we would start more workers, we start at least enough to bring us to\\n# initial_workers.\\nautoscaling_mode: default\\n\\n# This executes all commands on all nodes in the docker efcontainer,\\n# and opens all the necessary ports to support the Ray cluster.\\n# Empty string means disabled.\\ndocker:\\nimage: "pytorch/pytorch:latest" # e.g., tensorflow/tensorflow:1.5.0-py3\\ncontainer_name: "pytorch_docker" # e.g. ray_docker\\n# If true, pulls latest version of image. 
Otherwise, `docker run` will only pull the image\\n# if no cached version is present.\\npull_before_run: True\\nrun_options: []\\n# - $([ -d /proc/driver ] && echo -n --runtime-nvidia) # Use the nvidia runtime only if nvidia gpu\\'s are installed\\nworker_run_options:\\n- --runtime=nvidia # Extra options to pass into "docker run"\\n\\n# Example of running a GPU head with CPU workers\\n# head_image: "tensorflow/tensorflow:1.13.1-py3"\\n# head_run_options:\\n# - --runtime=nvidia\\n\\n# worker_image: "ubuntu:18.04"\\n# worker_run_options: []\\n\\n# The autoscaler will scale up the cluster to this target fraction of resource\\n# usage. For example, if a cluster of 10 nodes is 100% busy and\\n# target_utilization is 0.8, it would resize the cluster to 13. This fraction\\n# can be decreased to increase the aggressiveness of upscaling.\\n# This value must be less than 1.0 for scaling to happen.\\ntarget_utilization_fraction: 0.8\\n\\n# If a node is idle for this many minutes, it will be removed.\\nidle_timeout_minutes: 5\\n\\n# Cloud-provider specific configuration.\\nprovider:\\ntype: aws\\nregion: us-east-1\\n# Availability zone(s), comma-separated, that nodes may be launched in.\\n# Nodes are currently spread between zones by a round-robin approach,\\n# however this implementation detail should not be relied upon.\\navailability_zone: us-east-1a, us-east-1b\\ncache_stopped_nodes: False\\n\\n# How Ray will authenticate with newly launched nodes.\\nauth:\\nssh_user: ubuntu\\n# By default Ray creates a new private keypair, but you can also use your own.\\n# If you do so, make sure to also set "KeyName" in the head and worker node\\n# configurations below.\\n# ssh_private_key: /path/to/your/key.pem\\n\\n# Provider-specific config for the head node, e.g. instance type. 
By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nhead_node:\\nInstanceType: c4.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n# You can provision additional disk space with a conf as follows\\nBlockDeviceMappings:\\n- DeviceName: /dev/sda1\\nEbs:\\nVolumeSize: 100\\n\\n# Additional options in the boto docs.\\n\\n# Provider-specific config for worker nodes, e.g. instance type. By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nworker_nodes:\\nInstanceType: p3.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n\\n# Run workers on spot by default. Comment this out to use on-demand.\\nInstanceMarketOptions:\\nMarketType: spot\\n# Additional options can be found in the boto docs, e.g.\\n# SpotOptions:\\n# MaxPrice: MAX_HOURLY_PRICE\\n\\n# Additional options in the boto docs.\\n\\n# Files or directories to copy to the head and worker nodes. The format is a\\n# dictionary from REMOTE_PATH: LOCAL_PATH, e.g.\\nfile_mounts: {\\n\\n}\\n\\n\\n\\n# List of commands that will be run before `setup_commands`. If docker is\\n# enabled, these commands will run outside the container and before docker\\n# is setup.\\ninitialization_commands: []\\n\\n# List of shell commands to run to set up nodes.\\nsetup_commands:\\n# Note: if you\\'re developing Ray, you probably want to create an AMI that\\n# has your Ray repo pre-cloned. 
Then, you can replace the pip installs\\n# below with a git checkout <your_sha> (and possibly a recompile).\\n- pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# Consider uncommenting these if you also want to run apt-get commands during setup\\n# - sudo pkill -9 apt-get || true\\n# - sudo pkill -9 dpkg || true\\n# - sudo dpkg --configure -a\\n\\n# Custom commands that will be run on the head node after common setup.\\nhead_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Custom commands that will be run on worker nodes after common setup.\\nworker_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Command to start ray on the head node. You don\\'t need to change this.\\nhead_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --num-cpus=0 --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml\\n\\n# Command to start ray on worker nodes. You don\\'t need to change this.\\nworker_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076'}]
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def sync_file_mounts(self, sync_cmd, step_numbers=(0, 2)):
    """Copy the configured file mounts (and worker synced files) to the node.

    Args:
        sync_cmd: Callable(local_path, remote_path) performing the copy.
        step_numbers: Tuple of (# of previous steps, total steps); used
            only to number the progress output.
    """
    done_steps, all_steps = step_numbers

    # Bootstrap internals are not echoed at the lowest verbosity.
    quiet_paths = []
    if cli_logger.verbosity == 0:
        quiet_paths = [
            "~/ray_bootstrap_key.pem", "~/ray_bootstrap_config.yaml"
        ]

    def do_sync(remote_path, local_path, allow_non_existing_paths=False):
        if allow_non_existing_paths and not os.path.exists(local_path):
            # Ignore missing source files. In the future we should support
            # the --delete-missing-args command to delete files that have
            # been removed
            return

        assert os.path.exists(local_path), local_path

        # Normalize directories to a trailing "/" so the sync copies the
        # directory *contents* instead of nesting the directory itself.
        if os.path.isdir(local_path):
            local_path = (local_path
                          if local_path.endswith("/") else local_path + "/")
            remote_path = (remote_path
                           if remote_path.endswith("/") else remote_path + "/")

        with LogTimer(self.log_prefix +
                      "Synced {} to {}".format(local_path, remote_path)):
            # Create the destination directory on the host machine (not in
            # a container) before copying.
            self.cmd_runner.run(
                "mkdir -p {}".format(os.path.dirname(remote_path)),
                run_env="host")
            sync_cmd(local_path, remote_path)

            if remote_path not in quiet_paths:
                # todo: timed here?
                cli_logger.print("{} from {}", cf.bold(remote_path),
                                 cf.bold(local_path))

    # Rsync file mounts
    with cli_logger.group(
            "Processing file mounts",
            _numbered=("[]", done_steps + 1, all_steps)):
        for remote_path, local_path in self.file_mounts.items():
            do_sync(remote_path, local_path)

    if self.cluster_synced_files:
        with cli_logger.group(
                "Processing worker file mounts",
                _numbered=("[]", done_steps + 2, all_steps)):
            for path in self.cluster_synced_files:
                do_sync(path, path, allow_non_existing_paths=True)
    else:
        cli_logger.print(
            "No worker file mounts to sync",
            _numbered=("[]", done_steps + 2, all_steps))
|
def sync_file_mounts(self, sync_cmd, step_numbers=(0, 2)):
    """Copy the configured file mounts (and worker synced files) to the node.

    Args:
        sync_cmd: Callable(local_path, remote_path) performing the copy.
        step_numbers: Tuple of (# of previous steps, total steps); used
            only to number the progress output.
    """
    # step_numbers is (# of previous steps, total steps)
    previous_steps, total_steps = step_numbers

    # Bootstrap internals are not echoed at the lowest verbosity.
    nolog_paths = []
    if cli_logger.verbosity == 0:
        nolog_paths = [
            "~/ray_bootstrap_key.pem", "~/ray_bootstrap_config.yaml"
        ]

    def do_sync(remote_path, local_path, allow_non_existing_paths=False):
        if allow_non_existing_paths and not os.path.exists(local_path):
            # Ignore missing source files. In the future we should support
            # the --delete-missing-args command to delete files that have
            # been removed
            return

        assert os.path.exists(local_path), local_path

        # Normalize directories to a trailing "/" so the sync copies the
        # directory *contents* instead of nesting the directory itself.
        if os.path.isdir(local_path):
            if not local_path.endswith("/"):
                local_path += "/"
            if not remote_path.endswith("/"):
                remote_path += "/"

        with LogTimer(self.log_prefix +
                      "Synced {} to {}".format(local_path, remote_path)):
            # FIX: run mkdir on the host, not through the default command
            # environment. Syncing happens before the docker container is
            # up; running this inside the container fails with
            # "Error: No such container" (see the attached update logs).
            self.cmd_runner.run(
                "mkdir -p {}".format(os.path.dirname(remote_path)),
                run_env="host")
            sync_cmd(local_path, remote_path)

            if remote_path not in nolog_paths:
                # todo: timed here?
                cli_logger.print("{} from {}", cf.bold(remote_path),
                                 cf.bold(local_path))

    # Rsync file mounts
    with cli_logger.group(
            "Processing file mounts",
            _numbered=("[]", previous_steps + 1, total_steps)):
        for remote_path, local_path in self.file_mounts.items():
            do_sync(remote_path, local_path)

    if self.cluster_synced_files:
        with cli_logger.group(
                "Processing worker file mounts",
                _numbered=("[]", previous_steps + 2, total_steps)):
            for path in self.cluster_synced_files:
                do_sync(path, path, allow_non_existing_paths=True)
    else:
        cli_logger.print(
            "No worker file mounts to sync",
            _numbered=("[]", previous_steps + 2, total_steps))
|
[{'piece_type': 'error message', 'piece_content': '(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml\\n2020-08-12 20:12:39,383\\tINFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1\\n2020-08-12 20:12:39,612\\tINFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1\\n2020-08-12 20:12:39,745\\tINFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:39,746\\tINFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:40,358\\tINFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\nThis will create a new cluster [y/N]: y\\n2020-08-12 20:12:42,619\\tINFO commands.py:531 -- get_or_create_head_node: Launching new head node...\\n2020-08-12 20:12:42,620\\tINFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).\\n2020-08-12 20:12:44,032\\tINFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]\\n2020-08-12 20:12:44,223\\tINFO commands.py:570 -- get_or_create_head_node: Updating files on head node...\\n2020-08-12 20:12:44,320\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 
20:12:54,409\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 20:12:54,534\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]\\n2020-08-12 20:12:54,534\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker\\'\\nWarning: Permanently added \\'3.226.253.119\\' (ECDSA) to the list of known hosts.\\n/usr/bin/docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:04,587\\tINFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9\\n2020-08-12 20:14:04,587\\tINFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...\\n2020-08-12 20:14:04,587\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' 
\\'\\n2020-08-12 20:14:04,950\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=361ms]\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:21,222\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:26,417\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared 
connection to 3.226.253.119 closed.\\n2020-08-12 20:14:31,610\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:36,798\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:41,986\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o 
StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:47,170\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:52,358\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o 
ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:57,554\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:02,750\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:07,938\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:13,126\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:18,307\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:23,494\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 
20:19:01,502\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:19:06,689\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]\\n2020-08-12 20:19:06,690\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]\\n2020-08-12 20:19:06,690\\tERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node\\n\\nException in thread Thread-2:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run\\nself.do_update()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update\\nself.wait_ready(deadline)\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready\\nassert False, "Unable to connect to node"\\nAssertionError: Unable to connect to node\\n\\n2020-08-12 20:19:06,962\\tERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 
failed\\n2020-08-12 20:19:07,002\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=312ms]'}, {'piece_type': 'other', 'piece_content': '# An unique identifier for the head node and workers of this cluster.\\ncluster_name: richard_cluster_gpu_dummy\\n\\n# The minimum number of workers nodes to launch in addition to the head\\n# node. This number should be >= 0.\\nmin_workers: 1\\n\\n# The maximum number of workers nodes to launch in addition to the head\\n# node. This takes precedence over min_workers.\\nmax_workers: 5\\n\\n# The initial number of worker nodes to launch in addition to the head\\n# node. When the cluster is first brought up (or when it is refreshed with a\\n# subsequent `ray up`) this number of nodes will be started.\\ninitial_workers: 1\\n\\n# Whether or not to autoscale aggressively. If this is enabled, if at any point\\n# we would start more workers, we start at least enough to bring us to\\n# initial_workers.\\nautoscaling_mode: default\\n\\n# This executes all commands on all nodes in the docker efcontainer,\\n# and opens all the necessary ports to support the Ray cluster.\\n# Empty string means disabled.\\ndocker:\\nimage: "pytorch/pytorch:latest" # e.g., tensorflow/tensorflow:1.5.0-py3\\ncontainer_name: "pytorch_docker" # e.g. ray_docker\\n# If true, pulls latest version of image. 
Otherwise, `docker run` will only pull the image\\n# if no cached version is present.\\npull_before_run: True\\nrun_options: []\\n# - $([ -d /proc/driver ] && echo -n --runtime-nvidia) # Use the nvidia runtime only if nvidia gpu\\'s are installed\\nworker_run_options:\\n- --runtime=nvidia # Extra options to pass into "docker run"\\n\\n# Example of running a GPU head with CPU workers\\n# head_image: "tensorflow/tensorflow:1.13.1-py3"\\n# head_run_options:\\n# - --runtime=nvidia\\n\\n# worker_image: "ubuntu:18.04"\\n# worker_run_options: []\\n\\n# The autoscaler will scale up the cluster to this target fraction of resource\\n# usage. For example, if a cluster of 10 nodes is 100% busy and\\n# target_utilization is 0.8, it would resize the cluster to 13. This fraction\\n# can be decreased to increase the aggressiveness of upscaling.\\n# This value must be less than 1.0 for scaling to happen.\\ntarget_utilization_fraction: 0.8\\n\\n# If a node is idle for this many minutes, it will be removed.\\nidle_timeout_minutes: 5\\n\\n# Cloud-provider specific configuration.\\nprovider:\\ntype: aws\\nregion: us-east-1\\n# Availability zone(s), comma-separated, that nodes may be launched in.\\n# Nodes are currently spread between zones by a round-robin approach,\\n# however this implementation detail should not be relied upon.\\navailability_zone: us-east-1a, us-east-1b\\ncache_stopped_nodes: False\\n\\n# How Ray will authenticate with newly launched nodes.\\nauth:\\nssh_user: ubuntu\\n# By default Ray creates a new private keypair, but you can also use your own.\\n# If you do so, make sure to also set "KeyName" in the head and worker node\\n# configurations below.\\n# ssh_private_key: /path/to/your/key.pem\\n\\n# Provider-specific config for the head node, e.g. instance type. 
By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nhead_node:\\nInstanceType: c4.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n# You can provision additional disk space with a conf as follows\\nBlockDeviceMappings:\\n- DeviceName: /dev/sda1\\nEbs:\\nVolumeSize: 100\\n\\n# Additional options in the boto docs.\\n\\n# Provider-specific config for worker nodes, e.g. instance type. By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nworker_nodes:\\nInstanceType: p3.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n\\n# Run workers on spot by default. Comment this out to use on-demand.\\nInstanceMarketOptions:\\nMarketType: spot\\n# Additional options can be found in the boto docs, e.g.\\n# SpotOptions:\\n# MaxPrice: MAX_HOURLY_PRICE\\n\\n# Additional options in the boto docs.\\n\\n# Files or directories to copy to the head and worker nodes. The format is a\\n# dictionary from REMOTE_PATH: LOCAL_PATH, e.g.\\nfile_mounts: {\\n\\n}\\n\\n\\n\\n# List of commands that will be run before `setup_commands`. If docker is\\n# enabled, these commands will run outside the container and before docker\\n# is setup.\\ninitialization_commands: []\\n\\n# List of shell commands to run to set up nodes.\\nsetup_commands:\\n# Note: if you\\'re developing Ray, you probably want to create an AMI that\\n# has your Ray repo pre-cloned. 
Then, you can replace the pip installs\\n# below with a git checkout <your_sha> (and possibly a recompile).\\n- pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# Consider uncommenting these if you also want to run apt-get commands during setup\\n# - sudo pkill -9 apt-get || true\\n# - sudo pkill -9 dpkg || true\\n# - sudo dpkg --configure -a\\n\\n# Custom commands that will be run on the head node after common setup.\\nhead_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Custom commands that will be run on worker nodes after common setup.\\nworker_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Command to start ray on the head node. You don\\'t need to change this.\\nhead_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --num-cpus=0 --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml\\n\\n# Command to start ray on worker nodes. You don\\'t need to change this.\\nworker_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076'}]
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def do_sync(remote_path, local_path, allow_non_existing_paths=False):
    """Sync ``local_path`` on this machine to ``remote_path`` on the node.

    Args:
        remote_path: Destination path on the remote node.
        local_path: Source path on the local machine; must exist unless
            ``allow_non_existing_paths`` is True.
        allow_non_existing_paths: When True, silently skip missing sources.
    """
    if allow_non_existing_paths and not os.path.exists(local_path):
        # Ignore missing source files. In the future we should support
        # the --delete-missing-args command to delete files that have
        # been removed
        return
    assert os.path.exists(local_path), local_path

    if os.path.isdir(local_path):
        # rsync semantics: a trailing slash syncs directory *contents*,
        # so normalize both ends to carry one.
        if not local_path.endswith("/"):
            local_path += "/"
        if not remote_path.endswith("/"):
            remote_path += "/"

    timer_label = self.log_prefix + "Synced {} to {}".format(
        local_path, remote_path)
    with LogTimer(timer_label):
        # Create the destination directory first. NOTE(review): this runs
        # on the host (not inside the container) — presumably because the
        # container may not be up yet at sync time.
        mkdir_cmd = "mkdir -p {}".format(os.path.dirname(remote_path))
        self.cmd_runner.run(mkdir_cmd, run_env="host")
        sync_cmd(local_path, remote_path)
        if remote_path not in nolog_paths:
            # todo: timed here?
            cli_logger.print("{} from {}", cf.bold(remote_path),
                             cf.bold(local_path))
def do_sync(remote_path, local_path, allow_non_existing_paths=False):
    """Sync ``local_path`` to ``remote_path`` on the node.

    Makes the remote parent directory, then delegates to the enclosing
    scope's ``sync_cmd``. Directory sources are given rsync-style trailing
    slashes so their contents (not the directory node) are transferred.
    """
    if allow_non_existing_paths and not os.path.exists(local_path):
        # Ignore missing source files. In the future we should support
        # the --delete-missing-args command to delete files that have
        # been removed
        return
    assert os.path.exists(local_path), local_path

    if os.path.isdir(local_path):
        # Append trailing slashes for directory syncs (rsync semantics).
        for suffixed in (local_path, remote_path):
            pass  # placeholder removed below; explicit checks instead
        if not local_path.endswith("/"):
            local_path = local_path + "/"
        if not remote_path.endswith("/"):
            remote_path = remote_path + "/"

    timer_msg = "Synced {} to {}".format(local_path, remote_path)
    with LogTimer(self.log_prefix + timer_msg):
        # Create the destination's parent directory before syncing.
        self.cmd_runner.run("mkdir -p {}".format(
            os.path.dirname(remote_path)))
        sync_cmd(local_path, remote_path)
        if remote_path not in nolog_paths:
            # todo: timed here?
            cli_logger.print("{} from {}", cf.bold(remote_path),
                             cf.bold(local_path))
|
[{'piece_type': 'error message', 'piece_content': '(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml\\n2020-08-12 20:12:39,383\\tINFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1\\n2020-08-12 20:12:39,612\\tINFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1\\n2020-08-12 20:12:39,745\\tINFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:39,746\\tINFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:40,358\\tINFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\nThis will create a new cluster [y/N]: y\\n2020-08-12 20:12:42,619\\tINFO commands.py:531 -- get_or_create_head_node: Launching new head node...\\n2020-08-12 20:12:42,620\\tINFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).\\n2020-08-12 20:12:44,032\\tINFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]\\n2020-08-12 20:12:44,223\\tINFO commands.py:570 -- get_or_create_head_node: Updating files on head node...\\n2020-08-12 20:12:44,320\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 
20:12:54,409\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 20:12:54,534\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]\\n2020-08-12 20:12:54,534\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker\\'\\nWarning: Permanently added \\'3.226.253.119\\' (ECDSA) to the list of known hosts.\\n/usr/bin/docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:04,587\\tINFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9\\n2020-08-12 20:14:04,587\\tINFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...\\n2020-08-12 20:14:04,587\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' 
\\'\\n2020-08-12 20:14:04,950\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=361ms]\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:21,222\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:26,417\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared 
connection to 3.226.253.119 closed.\\n2020-08-12 20:14:31,610\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:36,798\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:41,986\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o 
StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:47,170\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:52,358\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o 
ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:57,554\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:02,750\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:07,938\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:13,126\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:18,307\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:23,494\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 
20:19:01,502\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:19:06,689\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]\\n2020-08-12 20:19:06,690\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]\\n2020-08-12 20:19:06,690\\tERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node\\n\\nException in thread Thread-2:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run\\nself.do_update()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update\\nself.wait_ready(deadline)\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready\\nassert False, "Unable to connect to node"\\nAssertionError: Unable to connect to node\\n\\n2020-08-12 20:19:06,962\\tERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 
failed\\n2020-08-12 20:19:07,002\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=312ms]'}, {'piece_type': 'other', 'piece_content': '# An unique identifier for the head node and workers of this cluster.\\ncluster_name: richard_cluster_gpu_dummy\\n\\n# The minimum number of workers nodes to launch in addition to the head\\n# node. This number should be >= 0.\\nmin_workers: 1\\n\\n# The maximum number of workers nodes to launch in addition to the head\\n# node. This takes precedence over min_workers.\\nmax_workers: 5\\n\\n# The initial number of worker nodes to launch in addition to the head\\n# node. When the cluster is first brought up (or when it is refreshed with a\\n# subsequent `ray up`) this number of nodes will be started.\\ninitial_workers: 1\\n\\n# Whether or not to autoscale aggressively. If this is enabled, if at any point\\n# we would start more workers, we start at least enough to bring us to\\n# initial_workers.\\nautoscaling_mode: default\\n\\n# This executes all commands on all nodes in the docker efcontainer,\\n# and opens all the necessary ports to support the Ray cluster.\\n# Empty string means disabled.\\ndocker:\\nimage: "pytorch/pytorch:latest" # e.g., tensorflow/tensorflow:1.5.0-py3\\ncontainer_name: "pytorch_docker" # e.g. ray_docker\\n# If true, pulls latest version of image. 
Otherwise, `docker run` will only pull the image\\n# if no cached version is present.\\npull_before_run: True\\nrun_options: []\\n# - $([ -d /proc/driver ] && echo -n --runtime-nvidia) # Use the nvidia runtime only if nvidia gpu\\'s are installed\\nworker_run_options:\\n- --runtime=nvidia # Extra options to pass into "docker run"\\n\\n# Example of running a GPU head with CPU workers\\n# head_image: "tensorflow/tensorflow:1.13.1-py3"\\n# head_run_options:\\n# - --runtime=nvidia\\n\\n# worker_image: "ubuntu:18.04"\\n# worker_run_options: []\\n\\n# The autoscaler will scale up the cluster to this target fraction of resource\\n# usage. For example, if a cluster of 10 nodes is 100% busy and\\n# target_utilization is 0.8, it would resize the cluster to 13. This fraction\\n# can be decreased to increase the aggressiveness of upscaling.\\n# This value must be less than 1.0 for scaling to happen.\\ntarget_utilization_fraction: 0.8\\n\\n# If a node is idle for this many minutes, it will be removed.\\nidle_timeout_minutes: 5\\n\\n# Cloud-provider specific configuration.\\nprovider:\\ntype: aws\\nregion: us-east-1\\n# Availability zone(s), comma-separated, that nodes may be launched in.\\n# Nodes are currently spread between zones by a round-robin approach,\\n# however this implementation detail should not be relied upon.\\navailability_zone: us-east-1a, us-east-1b\\ncache_stopped_nodes: False\\n\\n# How Ray will authenticate with newly launched nodes.\\nauth:\\nssh_user: ubuntu\\n# By default Ray creates a new private keypair, but you can also use your own.\\n# If you do so, make sure to also set "KeyName" in the head and worker node\\n# configurations below.\\n# ssh_private_key: /path/to/your/key.pem\\n\\n# Provider-specific config for the head node, e.g. instance type. 
By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nhead_node:\\nInstanceType: c4.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n# You can provision additional disk space with a conf as follows\\nBlockDeviceMappings:\\n- DeviceName: /dev/sda1\\nEbs:\\nVolumeSize: 100\\n\\n# Additional options in the boto docs.\\n\\n# Provider-specific config for worker nodes, e.g. instance type. By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nworker_nodes:\\nInstanceType: p3.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n\\n# Run workers on spot by default. Comment this out to use on-demand.\\nInstanceMarketOptions:\\nMarketType: spot\\n# Additional options can be found in the boto docs, e.g.\\n# SpotOptions:\\n# MaxPrice: MAX_HOURLY_PRICE\\n\\n# Additional options in the boto docs.\\n\\n# Files or directories to copy to the head and worker nodes. The format is a\\n# dictionary from REMOTE_PATH: LOCAL_PATH, e.g.\\nfile_mounts: {\\n\\n}\\n\\n\\n\\n# List of commands that will be run before `setup_commands`. If docker is\\n# enabled, these commands will run outside the container and before docker\\n# is setup.\\ninitialization_commands: []\\n\\n# List of shell commands to run to set up nodes.\\nsetup_commands:\\n# Note: if you\\'re developing Ray, you probably want to create an AMI that\\n# has your Ray repo pre-cloned. 
Then, you can replace the pip installs\\n# below with a git checkout <your_sha> (and possibly a recompile).\\n- pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# Consider uncommenting these if you also want to run apt-get commands during setup\\n# - sudo pkill -9 apt-get || true\\n# - sudo pkill -9 dpkg || true\\n# - sudo dpkg --configure -a\\n\\n# Custom commands that will be run on the head node after common setup.\\nhead_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Custom commands that will be run on worker nodes after common setup.\\nworker_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Command to start ray on the head node. You don\\'t need to change this.\\nhead_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --num-cpus=0 --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml\\n\\n# Command to start ray on worker nodes. You don\\'t need to change this.\\nworker_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076'}]
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def wait_ready(self, deadline):
with cli_logger.group(
"Waiting for SSH to become available", _numbered=("[]", 1, 6)):
with LogTimer(self.log_prefix + "Got remote shell"):
cli_logger.old_info(logger, "{}Waiting for remote shell...",
self.log_prefix)
cli_logger.print("Running `{}` as a test.", cf.bold("uptime"))
first_conn_refused_time = None
while time.time() < deadline and \\
not self.provider.is_terminated(self.node_id):
try:
cli_logger.old_debug(logger,
"{}Waiting for remote shell...",
self.log_prefix)
self.cmd_runner.run("uptime", run_env="host")
cli_logger.old_debug(logger, "Uptime succeeded.")
cli_logger.success("Success.")
return True
except ProcessRunnerError as e:
first_conn_refused_time = \\
cmd_output_util.handle_ssh_fails(
e, first_conn_refused_time,
retry_interval=READY_CHECK_INTERVAL)
time.sleep(READY_CHECK_INTERVAL)
except Exception as e:
# TODO(maximsmol): we should not be ignoring
# exceptions if they get filtered properly
# (new style log + non-interactive shells)
#
# however threading this configuration state
# is a pain and I'm leaving it for later
retry_str = str(e)
if hasattr(e, "cmd"):
retry_str = "(Exit Status {}): {}".format(
e.returncode, " ".join(e.cmd))
cli_logger.print(
"SSH still not available {}, "
"retrying in {} seconds.", cf.gray(retry_str),
cf.bold(str(READY_CHECK_INTERVAL)))
cli_logger.old_debug(logger,
"{}Node not up, retrying: {}",
self.log_prefix, retry_str)
time.sleep(READY_CHECK_INTERVAL)
assert False, "Unable to connect to node"
|
def wait_ready(self, deadline):
    """Block until the node accepts a remote shell command.

    Repeatedly runs ``uptime`` on the node via ``self.cmd_runner`` until
    it succeeds, the deadline passes, or the provider reports the node
    terminated.

    Args:
        deadline: Absolute ``time.time()`` value after which to give up.

    Returns:
        True once a command executes successfully on the node.

    Raises:
        AssertionError: If the node never became reachable before the
            deadline (or was terminated while waiting).
    """
    with cli_logger.group(
            "Waiting for SSH to become available", _numbered=("[]", 1, 6)):
        with LogTimer(self.log_prefix + "Got remote shell"):
            cli_logger.old_info(logger, "{}Waiting for remote shell...",
                                self.log_prefix)

            cli_logger.print("Running `{}` as a test.", cf.bold("uptime"))
            first_conn_refused_time = None

            while time.time() < deadline and \
                    not self.provider.is_terminated(self.node_id):
                try:
                    cli_logger.old_debug(logger,
                                         "{}Waiting for remote shell...",
                                         self.log_prefix)

                    self.cmd_runner.run("uptime")
                    cli_logger.old_debug(logger, "Uptime succeeded.")
                    cli_logger.success("Success.")
                    return True
                except ProcessRunnerError as e:
                    first_conn_refused_time = \
                        cmd_output_util.handle_ssh_fails(
                            e, first_conn_refused_time,
                            retry_interval=READY_CHECK_INTERVAL)
                    time.sleep(READY_CHECK_INTERVAL)
                except Exception as e:
                    # TODO(maximsmol): we should not be ignoring
                    # exceptions if they get filtered properly
                    # (new style log + non-interactive shells)
                    #
                    # however threading this configuration state
                    # is a pain and I'm leaving it for later
                    retry_str = str(e)
                    if hasattr(e, "cmd"):
                        retry_str = "(Exit Status {}): {}".format(
                            e.returncode, " ".join(e.cmd))

                    cli_logger.print(
                        "SSH still not available {}, "
                        "retrying in {} seconds.", cf.gray(retry_str),
                        cf.bold(str(READY_CHECK_INTERVAL)))
                    cli_logger.old_debug(logger,
                                         "{}Node not up, retrying: {}",
                                         self.log_prefix, retry_str)

                    time.sleep(READY_CHECK_INTERVAL)

    # Raise explicitly instead of `assert False`: assert statements are
    # stripped under `python -O`, which would make this method silently
    # return None on timeout. AssertionError is kept so existing callers
    # that catch it remain compatible.
    raise AssertionError("Unable to connect to node")
|
[{'piece_type': 'error message', 'piece_content': '(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml\\n2020-08-12 20:12:39,383\\tINFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1\\n2020-08-12 20:12:39,612\\tINFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1\\n2020-08-12 20:12:39,745\\tINFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:39,746\\tINFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [(\\'subnet-f737f791\\', \\'us-east-1a\\')]\\n2020-08-12 20:12:40,358\\tINFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\n2020-08-12 20:12:40,739\\tINFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)\\nThis will create a new cluster [y/N]: y\\n2020-08-12 20:12:42,619\\tINFO commands.py:531 -- get_or_create_head_node: Launching new head node...\\n2020-08-12 20:12:42,620\\tINFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).\\n2020-08-12 20:12:44,032\\tINFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]\\n2020-08-12 20:12:44,223\\tINFO commands.py:570 -- get_or_create_head_node: Updating files on head node...\\n2020-08-12 20:12:44,320\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 
20:12:54,409\\tINFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...\\n2020-08-12 20:12:54,534\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]\\n2020-08-12 20:12:54,534\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker\\'\\nWarning: Permanently added \\'3.226.253.119\\' (ECDSA) to the list of known hosts.\\n/usr/bin/docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:04,587\\tINFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9\\n2020-08-12 20:14:04,587\\tINFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...\\n2020-08-12 20:14:04,587\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' 
\\'\\n2020-08-12 20:14:04,950\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=361ms]\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:21,222\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:26,417\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared 
connection to 3.226.253.119 closed.\\n2020-08-12 20:14:31,610\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:36,798\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:41,986\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o 
StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:47,170\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:52,358\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o 
ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:14:57,554\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:02,750\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:07,938\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:13,126\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && 
uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:18,307\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:15:23,494\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 
20:19:01,502\\tINFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i \\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c \\'"\\'"\\'bash --login -c -i \\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'"\\'\\'"\\'"\\' \\'\\nError: No such container: pytorch_docker\\nShared connection to 3.226.253.119 closed.\\n2020-08-12 20:19:06,689\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]\\n2020-08-12 20:19:06,690\\tINFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]\\n2020-08-12 20:19:06,690\\tERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node\\n\\nException in thread Thread-2:\\nTraceback (most recent call last):\\nFile "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner\\nself.run()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run\\nself.do_update()\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update\\nself.wait_ready(deadline)\\nFile "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready\\nassert False, "Unable to connect to node"\\nAssertionError: Unable to connect to node\\n\\n2020-08-12 20:19:06,962\\tERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 
failed\\n2020-08-12 20:19:07,002\\tINFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on [\\'i-0729c7a86355d5ff8\\'] [LogTimer=312ms]'}, {'piece_type': 'other', 'piece_content': '# An unique identifier for the head node and workers of this cluster.\\ncluster_name: richard_cluster_gpu_dummy\\n\\n# The minimum number of workers nodes to launch in addition to the head\\n# node. This number should be >= 0.\\nmin_workers: 1\\n\\n# The maximum number of workers nodes to launch in addition to the head\\n# node. This takes precedence over min_workers.\\nmax_workers: 5\\n\\n# The initial number of worker nodes to launch in addition to the head\\n# node. When the cluster is first brought up (or when it is refreshed with a\\n# subsequent `ray up`) this number of nodes will be started.\\ninitial_workers: 1\\n\\n# Whether or not to autoscale aggressively. If this is enabled, if at any point\\n# we would start more workers, we start at least enough to bring us to\\n# initial_workers.\\nautoscaling_mode: default\\n\\n# This executes all commands on all nodes in the docker efcontainer,\\n# and opens all the necessary ports to support the Ray cluster.\\n# Empty string means disabled.\\ndocker:\\nimage: "pytorch/pytorch:latest" # e.g., tensorflow/tensorflow:1.5.0-py3\\ncontainer_name: "pytorch_docker" # e.g. ray_docker\\n# If true, pulls latest version of image. 
Otherwise, `docker run` will only pull the image\\n# if no cached version is present.\\npull_before_run: True\\nrun_options: []\\n# - $([ -d /proc/driver ] && echo -n --runtime-nvidia) # Use the nvidia runtime only if nvidia gpu\\'s are installed\\nworker_run_options:\\n- --runtime=nvidia # Extra options to pass into "docker run"\\n\\n# Example of running a GPU head with CPU workers\\n# head_image: "tensorflow/tensorflow:1.13.1-py3"\\n# head_run_options:\\n# - --runtime=nvidia\\n\\n# worker_image: "ubuntu:18.04"\\n# worker_run_options: []\\n\\n# The autoscaler will scale up the cluster to this target fraction of resource\\n# usage. For example, if a cluster of 10 nodes is 100% busy and\\n# target_utilization is 0.8, it would resize the cluster to 13. This fraction\\n# can be decreased to increase the aggressiveness of upscaling.\\n# This value must be less than 1.0 for scaling to happen.\\ntarget_utilization_fraction: 0.8\\n\\n# If a node is idle for this many minutes, it will be removed.\\nidle_timeout_minutes: 5\\n\\n# Cloud-provider specific configuration.\\nprovider:\\ntype: aws\\nregion: us-east-1\\n# Availability zone(s), comma-separated, that nodes may be launched in.\\n# Nodes are currently spread between zones by a round-robin approach,\\n# however this implementation detail should not be relied upon.\\navailability_zone: us-east-1a, us-east-1b\\ncache_stopped_nodes: False\\n\\n# How Ray will authenticate with newly launched nodes.\\nauth:\\nssh_user: ubuntu\\n# By default Ray creates a new private keypair, but you can also use your own.\\n# If you do so, make sure to also set "KeyName" in the head and worker node\\n# configurations below.\\n# ssh_private_key: /path/to/your/key.pem\\n\\n# Provider-specific config for the head node, e.g. instance type. 
By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nhead_node:\\nInstanceType: c4.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n# You can provision additional disk space with a conf as follows\\nBlockDeviceMappings:\\n- DeviceName: /dev/sda1\\nEbs:\\nVolumeSize: 100\\n\\n# Additional options in the boto docs.\\n\\n# Provider-specific config for worker nodes, e.g. instance type. By default\\n# Ray will auto-configure unspecified fields such as SubnetId and KeyName.\\n# For more documentation on available fields, see:\\n# http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances\\nworker_nodes:\\nInstanceType: p3.2xlarge\\nImageId: ami-043f9aeaf108ebc37 # Deep Learning AMI (Ubuntu) Version 24.3\\n\\n# Run workers on spot by default. Comment this out to use on-demand.\\nInstanceMarketOptions:\\nMarketType: spot\\n# Additional options can be found in the boto docs, e.g.\\n# SpotOptions:\\n# MaxPrice: MAX_HOURLY_PRICE\\n\\n# Additional options in the boto docs.\\n\\n# Files or directories to copy to the head and worker nodes. The format is a\\n# dictionary from REMOTE_PATH: LOCAL_PATH, e.g.\\nfile_mounts: {\\n\\n}\\n\\n\\n\\n# List of commands that will be run before `setup_commands`. If docker is\\n# enabled, these commands will run outside the container and before docker\\n# is setup.\\ninitialization_commands: []\\n\\n# List of shell commands to run to set up nodes.\\nsetup_commands:\\n# Note: if you\\'re developing Ray, you probably want to create an AMI that\\n# has your Ray repo pre-cloned. 
Then, you can replace the pip installs\\n# below with a git checkout <your_sha> (and possibly a recompile).\\n- pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# - pip install -U https://s3-us-west-2.amazonaws.com/ray-wheels/latest/ray-0.9.0.dev0-cp37-cp37m-manylinux1_x86_64.whl\\n# Consider uncommenting these if you also want to run apt-get commands during setup\\n# - sudo pkill -9 apt-get || true\\n# - sudo pkill -9 dpkg || true\\n# - sudo dpkg --configure -a\\n\\n# Custom commands that will be run on the head node after common setup.\\nhead_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Custom commands that will be run on worker nodes after common setup.\\nworker_setup_commands:\\n- pip install boto3 # 1.4.8 adds InstanceMarketOptions\\n\\n# Command to start ray on the head node. You don\\'t need to change this.\\nhead_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --num-cpus=0 --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml\\n\\n# Command to start ray on worker nodes. You don\\'t need to change this.\\nworker_start_ray_commands:\\n- ray stop\\n- ulimit -n 65536; ray start --address=$RAY_HEAD_IP:6379 --object-manager-port=8076'}]
|
(vanilla_ray_venv) richard@richard-desktop:~/improbable/vanillas/ray/python/ray/autoscaler/aws$ ray up aws_gpu_dummy.yaml
2020-08-12 20:12:39,383 INFO config.py:268 -- _configure_iam_role: Role not specified for head node, using arn:aws:iam::179622923911:instance-profile/ray-autoscaler-v1
2020-08-12 20:12:39,612 INFO config.py:346 -- _configure_key_pair: KeyName not specified for nodes, using ray-autoscaler_us-east-1
2020-08-12 20:12:39,745 INFO config.py:407 -- _configure_subnet: SubnetIds not specified for head node, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:39,746 INFO config.py:417 -- _configure_subnet: SubnetId not specified for workers, using [('subnet-f737f791', 'us-east-1a')]
2020-08-12 20:12:40,358 INFO config.py:590 -- _create_security_group: Created new security group ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:444 -- _configure_security_group: SecurityGroupIds not specified for head node, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
2020-08-12 20:12:40,739 INFO config.py:454 -- _configure_security_group: SecurityGroupIds not specified for workers, using ray-autoscaler-richard_cluster_gpu_dummy (sg-0061ca6aff182c1bf)
This will create a new cluster [y/N]: y
2020-08-12 20:12:42,619 INFO commands.py:531 -- get_or_create_head_node: Launching new head node...
2020-08-12 20:12:42,620 INFO node_provider.py:326 -- NodeProvider: calling create_instances with subnet-f737f791 (count=1).
2020-08-12 20:12:44,032 INFO node_provider.py:354 -- NodeProvider: Created instance [id=i-0729c7a86355d5ff8, name=pending, info=pending]
2020-08-12 20:12:44,223 INFO commands.py:570 -- get_or_create_head_node: Updating files on head node...
2020-08-12 20:12:44,320 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,409 INFO command_runner.py:331 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for IP...
2020-08-12 20:12:54,534 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got IP [LogTimer=10310ms]
2020-08-12 20:12:54,534 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && command -v docker'
Warning: Permanently added '3.226.253.119' (ECDSA) to the list of known hosts.
/usr/bin/docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:04,587 INFO updater.py:71 -- NodeUpdater: i-0729c7a86355d5ff8: Updating to 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9
2020-08-12 20:14:04,587 INFO updater.py:180 -- NodeUpdater: i-0729c7a86355d5ff8: Waiting for remote shell...
2020-08-12 20:14:04,587 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
2020-08-12 20:14:04,950 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=waiting-for-ssh on ['i-0729c7a86355d5ff8'] [LogTimer=361ms]
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:21,222 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:26,417 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:31,610 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:36,798 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:41,986 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:47,170 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:52,358 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:14:57,554 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:02,750 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:07,938 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:13,126 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:18,307 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:15:23,494 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:01,502 INFO command_runner.py:468 -- NodeUpdater: i-0729c7a86355d5ff8: Running ssh -tt -i /home/richard/.ssh/ray-autoscaler_us-east-1.pem -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o ExitOnForwardFailure=yes -o ServerAliveInterval=5 -o ServerAliveCountMax=3 -o ControlMaster=auto -o ControlPath=/tmp/ray_ssh_6ae199a93c/cfde1c79f1/%C -o ControlPersist=10s -o ConnectTimeout=120s ubuntu@3.226.253.119 bash --login -c -i 'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && docker exec -it pytorch_docker /bin/bash -c '"'"'bash --login -c -i '"'"'"'"'"'"'"'"'true && source ~/.bashrc && export OMP_NUM_THREADS=1 PYTHONWARNINGS=ignore && uptime'"'"'"'"'"'"'"'"''"'"' '
Error: No such container: pytorch_docker
Shared connection to 3.226.253.119 closed.
2020-08-12 20:19:06,689 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Got remote shell [LogTimer=302102ms]
2020-08-12 20:19:06,690 INFO log_timer.py:27 -- NodeUpdater: i-0729c7a86355d5ff8: Applied config 6b5fc8ee8c5dcdf3cfabe0bf90ba4e844f65a7c9 [LogTimer=302103ms]
2020-08-12 20:19:06,690 ERROR updater.py:88 -- NodeUpdater: i-0729c7a86355d5ff8: Error executing: Unable to connect to node
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 76, in run
self.do_update()
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 232, in do_update
self.wait_ready(deadline)
File "/home/richard/improbable/vanillas/ray/python/ray/autoscaler/updater.py", line 224, in wait_ready
assert False, "Unable to connect to node"
AssertionError: Unable to connect to node
2020-08-12 20:19:06,962 ERROR commands.py:650 -- get_or_create_head_node: Updating 3.226.253.119 failed
2020-08-12 20:19:07,002 INFO log_timer.py:27 -- AWSNodeProvider: Set tag ray-node-status=update-failed on ['i-0729c7a86355d5ff8'] [LogTimer=312ms]
|
AssertionError
|
def create_or_update_cluster(config_file: str,
                             override_min_workers: Optional[int],
                             override_max_workers: Optional[int],
                             no_restart: bool,
                             restart_only: bool,
                             yes: bool,
                             override_cluster_name: Optional[str],
                             no_config_cache: bool,
                             dump_command_output: bool = True,
                             use_login_shells: bool = True) -> None:
    """Create or updates an autoscaling Ray cluster from a config json.

    Args:
        config_file: Path to the cluster YAML configuration file.
        override_min_workers: If not None, replaces ``min_workers`` from the
            config file (a warning is printed when it differs).
        override_max_workers: If not None, replaces ``max_workers``.
        no_restart: Forwarded to ``get_or_create_head_node``.
        restart_only: Forwarded to ``get_or_create_head_node``.
        yes: Forwarded to ``get_or_create_head_node``; presumably skips
            interactive confirmation -- confirm against its definition.
        override_cluster_name: If not None, replaces ``cluster_name``.
        no_config_cache: Passed through to ``_bootstrap_config``.
        dump_command_output: When True, subcommand output is NOT redirected
            (note the negation below).
        use_login_shells: When True, remote commands run in login shells;
            a warning about unfiltered output is printed.
    """
    set_using_login_shells(use_login_shells)
    # Redirection is the inverse of dumping: dumping means "log output as is".
    cmd_output_util.set_output_redirected(not dump_command_output)

    if use_login_shells:
        cli_logger.warning(
            "Commands running under a login shell can produce more "
            "output than special processing can handle.")
        cli_logger.warning(
            "Thus, the output from subcommands will be logged as is.")
        cli_logger.warning(
            "Consider using {}, {}.", cf.bold("--use-normal-shells"),
            cf.underlined("if you tested your workflow and it is compatible"))
        cli_logger.newline()

    cli_logger.detect_colors()

    def handle_yaml_error(e):
        # Shared error path for both PyYAML parser and scanner failures:
        # print the offending file and the raw PyYAML message, then abort.
        cli_logger.error("Cluster config invalid\n")
        cli_logger.error("Failed to load YAML file " + cf.bold("{}"),
                         config_file)
        cli_logger.newline()
        with cli_logger.verbatim_error_ctx("PyYAML error:"):
            cli_logger.error(e)
        cli_logger.abort()

    try:
        config = yaml.safe_load(open(config_file).read())
    except FileNotFoundError:
        cli_logger.abort(
            "Provided cluster configuration file ({}) does not exist",
            cf.bold(config_file))
    except yaml.parser.ParserError as e:
        handle_yaml_error(e)
    except yaml.scanner.ScannerError as e:
        handle_yaml_error(e)

    # todo: validate file_mounts, ssh keys, etc.

    importer = NODE_PROVIDERS.get(config["provider"]["type"])
    if not importer:
        cli_logger.abort(
            "Unknown provider type " + cf.bold("{}") + "\n"
            "Available providers are: {}", config["provider"]["type"],
            cli_logger.render_list([
                k for k in NODE_PROVIDERS.keys()
                if NODE_PROVIDERS[k] is not None
            ]))
        # NOTE(review): reached only if cli_logger.abort() returns; acts as
        # a hard stop for the old-style logger path.
        raise NotImplementedError("Unsupported provider {}".format(
            config["provider"]))

    cli_logger.success("Cluster configuration valid\n")

    printed_overrides = False

    def handle_cli_override(key, override):
        # Apply one CLI override on top of the YAML config; warn only when
        # the config file already had a (different) value for the key.
        if override is not None:
            if key in config:
                nonlocal printed_overrides
                printed_overrides = True
                cli_logger.warning(
                    "`{}` override provided on the command line.\n"
                    " Using " + cf.bold("{}") + cf.dimmed(
                        " [configuration file has " + cf.bold("{}") + "]"),
                    key, override, config[key])
            config[key] = override

    handle_cli_override("min_workers", override_min_workers)
    handle_cli_override("max_workers", override_max_workers)
    handle_cli_override("cluster_name", override_cluster_name)

    if printed_overrides:
        cli_logger.newline()

    cli_logger.labeled_value("Cluster", config["cluster_name"])

    # disable the cli_logger here if needed
    # because it only supports aws
    if config["provider"]["type"] != "aws":
        cli_logger.old_style = True
    cli_logger.newline()
    config = _bootstrap_config(config, no_config_cache)
    if config["provider"]["type"] != "aws":
        cli_logger.old_style = False

    try_logging_config(config)
    get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
                            override_cluster_name)
|
def create_or_update_cluster(
        config_file: str, override_min_workers: Optional[int],
        override_max_workers: Optional[int], no_restart: bool,
        restart_only: bool, yes: bool, override_cluster_name: Optional[str],
        no_config_cache: bool, dump_command_output: bool = True,
        use_login_shells: bool = True) -> None:
    """Create or updates an autoscaling Ray cluster from a config json.

    FIX: ``dump_command_output`` and ``use_login_shells`` were required
    positional parameters, which broke existing callers that pass only the
    first eight arguments (``TypeError: create_or_update_cluster() missing
    2 required positional arguments``, see the ``ray submit`` traceback in
    this file).  Both now default to ``True``, which is backward-compatible:
    callers that already pass them are unaffected.

    Args:
        config_file: Path to the cluster YAML configuration file.
        override_min_workers: If not None, replaces ``min_workers``.
        override_max_workers: If not None, replaces ``max_workers``.
        no_restart: Forwarded to ``get_or_create_head_node``.
        restart_only: Forwarded to ``get_or_create_head_node``.
        yes: Forwarded to ``get_or_create_head_node``.
        override_cluster_name: If not None, replaces ``cluster_name``.
        no_config_cache: Passed through to ``_bootstrap_config``.
        dump_command_output: When True, subcommand output is NOT redirected.
        use_login_shells: When True, remote commands run in login shells.
    """
    set_using_login_shells(use_login_shells)
    # Redirection is the inverse of dumping: dumping means "log output as is".
    cmd_output_util.set_output_redirected(not dump_command_output)

    if use_login_shells:
        cli_logger.warning(
            "Commands running under a login shell can produce more "
            "output than special processing can handle.")
        cli_logger.warning(
            "Thus, the output from subcommands will be logged as is.")
        cli_logger.warning(
            "Consider using {}, {}.", cf.bold("--use-normal-shells"),
            cf.underlined("if you tested your workflow and it is compatible"))
        cli_logger.newline()

    cli_logger.detect_colors()

    def handle_yaml_error(e):
        # Shared error path for PyYAML parser and scanner failures.
        cli_logger.error("Cluster config invalid\n")
        cli_logger.error("Failed to load YAML file " + cf.bold("{}"),
                         config_file)
        cli_logger.newline()
        with cli_logger.verbatim_error_ctx("PyYAML error:"):
            cli_logger.error(e)
        cli_logger.abort()

    try:
        config = yaml.safe_load(open(config_file).read())
    except FileNotFoundError:
        cli_logger.abort(
            "Provided cluster configuration file ({}) does not exist",
            cf.bold(config_file))
    except yaml.parser.ParserError as e:
        handle_yaml_error(e)
    except yaml.scanner.ScannerError as e:
        handle_yaml_error(e)

    # todo: validate file_mounts, ssh keys, etc.

    importer = NODE_PROVIDERS.get(config["provider"]["type"])
    if not importer:
        cli_logger.abort(
            "Unknown provider type " + cf.bold("{}") + "\n"
            "Available providers are: {}", config["provider"]["type"],
            cli_logger.render_list([
                k for k in NODE_PROVIDERS.keys()
                if NODE_PROVIDERS[k] is not None
            ]))
        raise NotImplementedError("Unsupported provider {}".format(
            config["provider"]))

    cli_logger.success("Cluster configuration valid\n")

    printed_overrides = False

    def handle_cli_override(key, override):
        # Apply one CLI override on top of the YAML config; warn only when
        # the config file already had a value for the key.
        if override is not None:
            if key in config:
                nonlocal printed_overrides
                printed_overrides = True
                cli_logger.warning(
                    "`{}` override provided on the command line.\n"
                    " Using " + cf.bold("{}") + cf.dimmed(
                        " [configuration file has " + cf.bold("{}") + "]"),
                    key, override, config[key])
            config[key] = override

    handle_cli_override("min_workers", override_min_workers)
    handle_cli_override("max_workers", override_max_workers)
    handle_cli_override("cluster_name", override_cluster_name)

    if printed_overrides:
        cli_logger.newline()

    cli_logger.labeled_value("Cluster", config["cluster_name"])

    # disable the cli_logger here if needed
    # because it only supports aws
    if config["provider"]["type"] != "aws":
        cli_logger.old_style = True
    cli_logger.newline()
    config = _bootstrap_config(config, no_config_cache)
    if config["provider"]["type"] != "aws":
        cli_logger.old_style = False

    try_logging_config(config)
    get_or_create_head_node(config, config_file, no_restart, restart_only, yes,
                            override_cluster_name)
|
[{'piece_type': 'error message', 'piece_content': '(base) Alexs-MacBook-Pro-2:ray alex$ ray submit --start multi.yaml test.py\\nTraceback (most recent call last):\\nFile "/Users/alex/miniconda3/bin/ray", line 11, in <module>\\nload_entry_point(\\'ray\\', \\'console_scripts\\', \\'ray\\')()\\nFile "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1587, in main\\nreturn cli()\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__\\nreturn self.main(*args, **kwargs)\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main\\nrv = self.invoke(ctx)\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke\\nreturn _process_result(sub_ctx.command.invoke(sub_ctx))\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke\\nreturn ctx.invoke(self.callback, **ctx.params)\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke\\nreturn callback(*args, **kwargs)\\nFile "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1237, in submit\\nTrue, cluster_name, False)\\nTypeError: create_or_update_cluster() missing 2 required positional arguments: \\'dump_command_output\\' and \\'use_login_shells\\''}]
|
(base) Alexs-MacBook-Pro-2:ray alex$ ray submit --start multi.yaml test.py
Traceback (most recent call last):
File "/Users/alex/miniconda3/bin/ray", line 11, in <module>
load_entry_point('ray', 'console_scripts', 'ray')()
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1587, in main
return cli()
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1237, in submit
True, cluster_name, False)
TypeError: create_or_update_cluster() missing 2 required positional arguments: 'dump_command_output' and 'use_login_shells'
|
TypeError
|
def submit(cluster_config_file, screen, tmux, stop, start, cluster_name,
           port_forward, script, args, script_args, log_new_style, log_color,
           verbose):
    """Uploads and runs a script on the specified cluster.

    The script is automatically synced to the following location:

        os.path.join("~", os.path.basename(script))

    Example:
        >>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test
    """
    # Configure the pretty CLI logger from the command-line flags.
    cli_logger.old_style = not log_new_style
    cli_logger.color_mode = log_color
    cli_logger.verbosity = verbose

    set_output_redirected(False)

    # Pretty-printed assertions, followed by hard asserts that actually
    # enforce the same constraints for the old-style path.
    cli_logger.doassert(not (screen and tmux),
                        "`{}` and `{}` are incompatible.", cf.bold("--screen"),
                        cf.bold("--tmux"))
    cli_logger.doassert(
        not (script_args and args),
        "`{0}` and `{1}` are incompatible. Use only `{1}`.\n"
        "Example: `{2}`", cf.bold("--args"), cf.bold("-- <args ...>"),
        cf.bold("ray submit script.py -- --arg=123 --flag"))

    assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
    assert not (script_args and args), "Use -- --arg1 --arg2 for script args."

    if args:
        # --args is deprecated in favor of trailing "-- <args ...>".
        cli_logger.warning(
            "`{}` is deprecated and will be removed in the future.",
            cf.bold("--args"))
        cli_logger.warning("Use `{}` instead. Example: `{}`.",
                           cf.bold("-- <args ...>"),
                           cf.bold("ray submit script.py -- --arg=123 --flag"))
        cli_logger.newline()
        cli_logger.old_warning(
            logger,
            "ray submit [yaml] [script.py] --args=... is deprecated and "
            "will be removed in a future version of Ray. Use "
            "`ray submit [yaml] script.py -- --arg1 --arg2` instead.")

    if start:
        # Keyword-only call so new parameters of create_or_update_cluster
        # with defaults cannot silently shift positional arguments.
        create_or_update_cluster(
            config_file=cluster_config_file,
            override_min_workers=None,
            override_max_workers=None,
            no_restart=False,
            restart_only=False,
            yes=True,
            override_cluster_name=cluster_name,
            no_config_cache=False,
            dump_command_output=True,
            use_login_shells=True)
    # Sync the script to the head node's home directory.
    target = os.path.basename(script)
    target = os.path.join("~", target)
    rsync(cluster_config_file, script, target, cluster_name, down=False)

    command_parts = ["python", target]
    if script_args:
        command_parts += list(script_args)
    elif args is not None:
        # Legacy --args path: a single pre-joined argument string.
        command_parts += [args]

    # Forward each requested port to itself on the head node.
    port_forward = [(port, port) for port in list(port_forward)]
    cmd = " ".join(command_parts)
    exec_cluster(
        cluster_config_file,
        cmd=cmd,
        run_env="docker",
        screen=screen,
        tmux=tmux,
        stop=stop,
        start=False,
        override_cluster_name=cluster_name,
        port_forward=port_forward)
|
def submit(cluster_config_file, screen, tmux, stop, start, cluster_name,
           port_forward, script, args, script_args, log_new_style, log_color,
           verbose):
    """Uploads and runs a script on the specified cluster.

    The script is automatically synced to the following location:

        os.path.join("~", os.path.basename(script))

    Example:
        >>> ray submit [CLUSTER.YAML] experiment.py -- --smoke-test

    FIX: the ``--start`` path called ``create_or_update_cluster`` with only
    eight positional arguments while that function requires ten, raising
    ``TypeError: create_or_update_cluster() missing 2 required positional
    arguments: 'dump_command_output' and 'use_login_shells'`` (traceback
    reproduced in this file).  The call now passes every argument by
    keyword, including the two missing ones with their intended values.
    """
    # Configure the pretty CLI logger from the command-line flags.
    cli_logger.old_style = not log_new_style
    cli_logger.color_mode = log_color
    cli_logger.verbosity = verbose

    set_output_redirected(False)

    # Pretty-printed assertions, followed by hard asserts that actually
    # enforce the same constraints for the old-style path.
    cli_logger.doassert(not (screen and tmux),
                        "`{}` and `{}` are incompatible.", cf.bold("--screen"),
                        cf.bold("--tmux"))
    cli_logger.doassert(
        not (script_args and args),
        "`{0}` and `{1}` are incompatible. Use only `{1}`.\n"
        "Example: `{2}`", cf.bold("--args"), cf.bold("-- <args ...>"),
        cf.bold("ray submit script.py -- --arg=123 --flag"))

    assert not (screen and tmux), "Can specify only one of `screen` or `tmux`."
    assert not (script_args and args), "Use -- --arg1 --arg2 for script args."

    if args:
        # --args is deprecated in favor of trailing "-- <args ...>".
        cli_logger.warning(
            "`{}` is deprecated and will be removed in the future.",
            cf.bold("--args"))
        cli_logger.warning("Use `{}` instead. Example: `{}`.",
                           cf.bold("-- <args ...>"),
                           cf.bold("ray submit script.py -- --arg=123 --flag"))
        cli_logger.newline()
        cli_logger.old_warning(
            logger,
            "ray submit [yaml] [script.py] --args=... is deprecated and "
            "will be removed in a future version of Ray. Use "
            "`ray submit [yaml] script.py -- --arg1 --arg2` instead.")

    if start:
        # Keyword arguments make the call robust against future parameter
        # additions and supply the two previously-missing arguments.
        create_or_update_cluster(
            config_file=cluster_config_file,
            override_min_workers=None,
            override_max_workers=None,
            no_restart=False,
            restart_only=False,
            yes=True,
            override_cluster_name=cluster_name,
            no_config_cache=False,
            dump_command_output=True,
            use_login_shells=True)
    # Sync the script to the head node's home directory.
    target = os.path.basename(script)
    target = os.path.join("~", target)
    rsync(cluster_config_file, script, target, cluster_name, down=False)

    command_parts = ["python", target]
    if script_args:
        command_parts += list(script_args)
    elif args is not None:
        # Legacy --args path: a single pre-joined argument string.
        command_parts += [args]

    # Forward each requested port to itself on the head node.
    port_forward = [(port, port) for port in list(port_forward)]
    cmd = " ".join(command_parts)
    exec_cluster(
        cluster_config_file,
        cmd=cmd,
        run_env="docker",
        screen=screen,
        tmux=tmux,
        stop=stop,
        start=False,
        override_cluster_name=cluster_name,
        port_forward=port_forward)
|
[{'piece_type': 'error message', 'piece_content': '(base) Alexs-MacBook-Pro-2:ray alex$ ray submit --start multi.yaml test.py\\nTraceback (most recent call last):\\nFile "/Users/alex/miniconda3/bin/ray", line 11, in <module>\\nload_entry_point(\\'ray\\', \\'console_scripts\\', \\'ray\\')()\\nFile "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1587, in main\\nreturn cli()\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__\\nreturn self.main(*args, **kwargs)\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main\\nrv = self.invoke(ctx)\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke\\nreturn _process_result(sub_ctx.command.invoke(sub_ctx))\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke\\nreturn ctx.invoke(self.callback, **ctx.params)\\nFile "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke\\nreturn callback(*args, **kwargs)\\nFile "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1237, in submit\\nTrue, cluster_name, False)\\nTypeError: create_or_update_cluster() missing 2 required positional arguments: \\'dump_command_output\\' and \\'use_login_shells\\''}]
|
(base) Alexs-MacBook-Pro-2:ray alex$ ray submit --start multi.yaml test.py
Traceback (most recent call last):
File "/Users/alex/miniconda3/bin/ray", line 11, in <module>
load_entry_point('ray', 'console_scripts', 'ray')()
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1587, in main
return cli()
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/alex/miniconda3/lib/python3.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/Users/alex/anyscale/ray/python/ray/scripts/scripts.py", line 1237, in submit
True, cluster_name, False)
TypeError: create_or_update_cluster() missing 2 required positional arguments: 'dump_command_output' and 'use_login_shells'
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.