after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def get_jinja_env():
from datetime import datetime
from ..utils import readable_size
_jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), "templates")
),
)
def format_ts(value):
if value is None or np.isnan(value):
return None
return datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
_jinja_env.filters["format_ts"] = format_ts
_jinja_env.filters["readable_size"] = readable_size
return _jinja_env
|
def get_jinja_env():
from datetime import datetime
from ..utils import readable_size
_jinja_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(
os.path.join(os.path.dirname(__file__), "templates")
),
)
def format_ts(value):
if value is None:
return None
return datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
_jinja_env.filters["format_ts"] = format_ts
_jinja_env.filters["readable_size"] = readable_size
return _jinja_env
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def format_ts(value):
if value is None or np.isnan(value):
return None
return datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
|
def format_ts(value):
if value is None:
return None
return datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def start_execution(self, session_id, graph_key, send_addresses=None, callback=None):
"""
Submit graph to the worker and control the execution
:param session_id: session id
:param graph_key: key of the execution graph
:param send_addresses: targets to send results after execution
:param callback: promise callback
"""
graph_record = self._graph_records[(session_id, graph_key)]
if send_addresses:
graph_record.send_addresses = send_addresses
# add callbacks to callback store
if callback is None:
callback = []
elif not isinstance(callback, list):
callback = [callback]
graph_record.finish_callbacks.extend(callback)
try:
del self._result_cache[(session_id, graph_key)]
except KeyError:
pass
@log_unhandled
def _wait_free_slot(*_):
return self._dispatch_ref.get_free_slot("cpu", _promise=True)
@log_unhandled
def _handle_success(*_):
self._invoke_finish_callbacks(session_id, graph_key)
@log_unhandled
def _handle_rejection(*exc):
# some error occurred...
logger.debug("Entering _handle_rejection() for graph %s", graph_key)
self._dump_execution_states()
if graph_record.stop_requested:
graph_record.stop_requested = False
if not isinstance(exc[1], ExecutionInterrupted):
exc = build_exc_info(ExecutionInterrupted)
if isinstance(exc[1], ExecutionInterrupted):
logger.warning("Execution of graph %s interrupted.", graph_key)
else:
logger.exception(
"Unexpected error occurred in executing graph %s",
graph_key,
exc_info=exc,
)
self._result_cache[(session_id, graph_key)] = GraphResultRecord(
*exc, **dict(succeeded=False)
)
self._invoke_finish_callbacks(session_id, graph_key)
# collect target data already computed
save_sizes = dict()
for target_key in graph_record.data_targets:
if self._chunk_store.contains(session_id, target_key):
save_sizes[target_key] = self._chunk_store.get_actual_size(
session_id, target_key
)
elif spill_exists(target_key):
save_sizes[target_key] = get_spill_data_size(target_key)
# when all target data are computed, report success directly
if all(k in save_sizes for k in graph_record.data_targets):
logger.debug(
"All predecessors of graph %s already computed, call finish directly.",
graph_key,
)
self._result_cache[(session_id, graph_key)] = GraphResultRecord(save_sizes)
_handle_success()
else:
promise.finished().then(
lambda: self._prepare_graph_inputs(session_id, graph_key)
).then(_wait_free_slot).then(
lambda uid: self._send_calc_request(session_id, graph_key, uid)
).then(
lambda uid, sizes: self._dump_cache(session_id, graph_key, uid, sizes)
).then(_handle_success, _handle_rejection)
|
def start_execution(self, session_id, graph_key, send_addresses=None, callback=None):
"""
Submit graph to the worker and control the execution
:param session_id: session id
:param graph_key: key of the execution graph
:param send_addresses: targets to send results after execution
:param callback: promise callback
"""
graph_record = self._graph_records[(session_id, graph_key)]
if send_addresses:
graph_record.send_addresses = send_addresses
# add callbacks to callback store
if callback is None:
callback = []
elif not isinstance(callback, list):
callback = [callback]
graph_record.finish_callbacks.extend(callback)
try:
del self._result_cache[(session_id, graph_key)]
except KeyError:
pass
@log_unhandled
def _wait_free_slot(*_):
return self._dispatch_ref.get_free_slot("cpu", _promise=True)
@log_unhandled
def _handle_success(*_):
self._notify_successors(session_id, graph_key)
self._invoke_finish_callbacks(session_id, graph_key)
@log_unhandled
def _handle_rejection(*exc):
# some error occurred...
logger.debug("Entering _handle_rejection() for graph %s", graph_key)
self._dump_execution_states()
if graph_record.stop_requested:
graph_record.stop_requested = False
if not isinstance(exc[1], ExecutionInterrupted):
exc = build_exc_info(ExecutionInterrupted)
if isinstance(exc[1], ExecutionInterrupted):
logger.warning("Execution of graph %s interrupted.", graph_key)
else:
logger.exception(
"Unexpected error occurred in executing graph %s",
graph_key,
exc_info=exc,
)
self._result_cache[(session_id, graph_key)] = GraphResultRecord(
*exc, **dict(succeeded=False)
)
self._invoke_finish_callbacks(session_id, graph_key)
# collect target data already computed
save_sizes = dict()
for target_key in graph_record.data_targets:
if self._chunk_store.contains(session_id, target_key):
save_sizes[target_key] = self._chunk_store.get_actual_size(
session_id, target_key
)
elif spill_exists(target_key):
save_sizes[target_key] = get_spill_data_size(target_key)
# when all target data are computed, report success directly
if all(k in save_sizes for k in graph_record.data_targets):
logger.debug(
"All predecessors of graph %s already computed, call finish directly.",
graph_key,
)
self._result_cache[(session_id, graph_key)] = GraphResultRecord(save_sizes)
_handle_success()
else:
promise.finished().then(
lambda: self._prepare_graph_inputs(session_id, graph_key)
).then(_wait_free_slot).then(
lambda uid: self._send_calc_request(session_id, graph_key, uid)
).then(
lambda uid, sizes: self._dump_cache(session_id, graph_key, uid, sizes)
).then(_handle_success, _handle_rejection)
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def _handle_success(*_):
self._invoke_finish_callbacks(session_id, graph_key)
|
def _handle_success(*_):
self._notify_successors(session_id, graph_key)
self._invoke_finish_callbacks(session_id, graph_key)
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def _dump_cache(self, session_id, graph_key, inproc_uid, save_sizes):
"""
Dump calc results into shared cache or spill
:param session_id: session id
:param graph_key: key of the execution graph
:param inproc_uid: uid of the InProcessCacheActor
:param save_sizes: sizes of data
"""
graph_record = self._graph_records[session_id, graph_key]
chunk_keys = graph_record.chunk_targets
calc_keys = list(save_sizes.keys())
send_addresses = graph_record.send_addresses
logger.debug(
"Graph %s: Start putting %r into shared cache. Target actor uid %s.",
graph_key,
chunk_keys,
inproc_uid,
)
self._update_state(session_id, graph_key, ExecutionState.STORING)
raw_inproc_ref = self.ctx.actor_ref(inproc_uid)
inproc_ref = self.promise_ref(raw_inproc_ref)
if graph_record.stop_requested:
logger.debug("Graph %s already marked for stop, quit.", graph_key)
if (
self._daemon_ref is None
or self._daemon_ref.is_actor_process_alive(raw_inproc_ref)
) and self.ctx.has_actor(raw_inproc_ref):
logger.debug("Try remove keys for graph %s.", graph_key)
raw_inproc_ref.remove_cache(session_id, list(calc_keys), _tell=True)
logger.debug("Graph %s already marked for stop, quit.", graph_key)
raise ExecutionInterrupted
self._chunk_holder_ref.unpin_chunks(
graph_key, list(set(c.key for c in graph_record.graph)), _tell=True
)
self._dump_execution_states()
if self._daemon_ref is not None and not self._daemon_ref.is_actor_process_alive(
raw_inproc_ref
):
raise WorkerProcessStopped
def _cache_result(result_sizes):
save_sizes.update(result_sizes)
self._result_cache[(session_id, graph_key)] = GraphResultRecord(save_sizes)
if not send_addresses:
# no endpoints to send, dump keys into shared memory and return
logger.debug(
"Worker graph %s(%s) finished execution. Dumping results into plasma...",
graph_key,
graph_record.op_string,
)
return (
inproc_ref.dump_cache(session_id, calc_keys, _promise=True)
.then(_cache_result)
.then(lambda *_: self._notify_successors(session_id, graph_key))
)
else:
# dump keys into shared memory and send
all_addresses = [
{v} if isinstance(v, six.string_types) else set(v)
for v in send_addresses.values()
]
all_addresses = list(reduce(lambda a, b: a | b, all_addresses, set()))
logger.debug(
"Worker graph %s(%s) finished execution. Dumping results into plasma "
"while actively transferring into %r...",
graph_key,
graph_record.op_string,
all_addresses,
)
data_to_addresses = dict(
(k, v) for k, v in send_addresses.items() if k in save_sizes
)
return (
inproc_ref.dump_cache(session_id, calc_keys, _promise=True)
.then(_cache_result)
.then(lambda *_: self._notify_successors(session_id, graph_key))
.then(
lambda *_: functools.partial(
self._do_active_transfer, session_id, graph_key, data_to_addresses
)
)
)
|
def _dump_cache(self, session_id, graph_key, inproc_uid, save_sizes):
"""
Dump calc results into shared cache or spill
:param session_id: session id
:param graph_key: key of the execution graph
:param inproc_uid: uid of the InProcessCacheActor
:param save_sizes: sizes of data
"""
graph_record = self._graph_records[session_id, graph_key]
chunk_keys = graph_record.chunk_targets
calc_keys = list(save_sizes.keys())
send_addresses = graph_record.send_addresses
logger.debug(
"Graph %s: Start putting %r into shared cache. Target actor uid %s.",
graph_key,
chunk_keys,
inproc_uid,
)
self._update_state(session_id, graph_key, ExecutionState.STORING)
raw_inproc_ref = self.ctx.actor_ref(inproc_uid)
inproc_ref = self.promise_ref(raw_inproc_ref)
if graph_record.stop_requested:
logger.debug("Graph %s already marked for stop, quit.", graph_key)
if (
self._daemon_ref is None
or self._daemon_ref.is_actor_process_alive(raw_inproc_ref)
) and self.ctx.has_actor(raw_inproc_ref):
logger.debug("Try remove keys for graph %s.", graph_key)
raw_inproc_ref.remove_cache(session_id, list(calc_keys), _tell=True)
logger.debug("Graph %s already marked for stop, quit.", graph_key)
raise ExecutionInterrupted
self._chunk_holder_ref.unpin_chunks(
graph_key, list(set(c.key for c in graph_record.graph)), _tell=True
)
self._dump_execution_states()
if self._daemon_ref is not None and not self._daemon_ref.is_actor_process_alive(
raw_inproc_ref
):
raise WorkerProcessStopped
def _cache_result(*_):
self._result_cache[(session_id, graph_key)] = GraphResultRecord(save_sizes)
if not send_addresses:
# no endpoints to send, dump keys into shared memory and return
logger.debug(
"Worker graph %s(%s) finished execution. Dumping results into plasma...",
graph_key,
graph_record.op_string,
)
return inproc_ref.dump_cache(session_id, calc_keys, _promise=True).then(
_cache_result
)
else:
# dump keys into shared memory and send
all_addresses = [
{v} if isinstance(v, six.string_types) else set(v)
for v in send_addresses.values()
]
all_addresses = list(reduce(lambda a, b: a | b, all_addresses, set()))
logger.debug(
"Worker graph %s(%s) finished execution. Dumping results into plasma "
"while actively transferring into %r...",
graph_key,
graph_record.op_string,
all_addresses,
)
data_to_addresses = dict(
(k, v) for k, v in send_addresses.items() if k in save_sizes
)
return (
inproc_ref.dump_cache(session_id, calc_keys, _promise=True)
.then(save_sizes.update)
.then(
lambda *_: functools.partial(
self._do_active_transfer, session_id, graph_key, data_to_addresses
)
)
.then(_cache_result)
)
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def _cache_result(result_sizes):
save_sizes.update(result_sizes)
self._result_cache[(session_id, graph_key)] = GraphResultRecord(save_sizes)
|
def _cache_result(*_):
self._result_cache[(session_id, graph_key)] = GraphResultRecord(save_sizes)
|
https://github.com/mars-project/mars/issues/496
|
500 GET /worker?endpoint
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/tornado/web.py", line 1592, in _execute
result = yield result
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1133, in run
value = future.result()
File "/opt/conda/lib/python3.6/site-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/opt/conda/lib/python3.6/site-packages/bokeh/server/views/doc_handler.py", line 27, in get
template_variables=session.document.template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/server.py", line 230, in server_html_page_for_session
template=template, template_variables=template_variables)
File "/opt/conda/lib/python3.6/site-packages/bokeh/embed/elements.py", line 133, in html_page_for_render_items
html = template.render(context)
File "/opt/conda/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 76, in render
return original_render(self, *args, **kwargs)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 1008, in render
return self.environment.handle_exception(exc_info, True)
File "/opt/conda/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/conda/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 1, in top-level template code
{% extends "base.html" %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/base.html", line 65, in top-level template code
{% block body %}{% endblock %}
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/templates/worker_detail.html", line 252, in block "body"
<td>{{ stats['min_est_finish_time'] | format_ts }}</td>
File "/home/admin/work/public-mars-0.2.0b1-cupid.zip/mars/web/server.py", line 49, in format_ts
return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
ValueError: Invalid value NaN (not a number)
500 GET /worker?endpoint=10.101.216.148:53630 (10.101.202.142) 137.45ms
|
ValueError
|
def _get_chunk_index_min_max(cls, df, index_type, axis):
index = getattr(df, index_type)
chunk_index_min_max = []
for i in range(df.chunk_shape[axis]):
chunk_idx = [0, 0]
chunk_idx[axis] = i
chunk = df.cix[tuple(chunk_idx)]
chunk_index = getattr(chunk, index_type)
min_val = chunk_index.min_val
min_val_close = chunk_index.min_val_close
max_val = chunk_index.max_val
max_val_close = chunk_index.max_val_close
if min_val is None or max_val is None:
return
chunk_index_min_max.append((min_val, min_val_close, max_val, max_val_close))
if index.is_monotonic_decreasing:
return list(reversed(chunk_index_min_max)), False
if cls._check_overlap(chunk_index_min_max):
return
return chunk_index_min_max, True
|
def _get_chunk_index_min_max(cls, df, index_type, axis):
index = getattr(df, index_type)
if not index.is_monotonic_increasing_or_decreasing and df.chunk_shape[axis] > 1:
return
chunk_index_min_max = []
for i in range(df.chunk_shape[axis]):
chunk_idx = [0, 0]
chunk_idx[axis] = i
chunk = df.cix[tuple(chunk_idx)]
chunk_index = getattr(chunk, index_type)
min_val = chunk_index.min_val
min_val_close = chunk_index.min_val_close
max_val = chunk_index.max_val
max_val_close = chunk_index.max_val_close
if min_val is None or max_val is None:
return
chunk_index_min_max.append((min_val, min_val_close, max_val, max_val_close))
if index.is_monotonic_decreasing:
return list(reversed(chunk_index_min_max)), False
if cls._check_overlap(chunk_index_min_max):
return
return chunk_index_min_max, True
|
https://github.com/mars-project/mars/issues/428
|
Traceback (most recent call last):
File "/Users/hetao/mars/mars/tiles.py", line 111, in _dispatch
handler = self._handlers[op_cls]
KeyError: <class 'mars.dataframe.expressions.arithmetic.add.DataFrameAdd'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/hetao/mars/mars/core.py", line 554, in execute
return session.run(self, **kw)
File "/Users/hetao/mars/mars/session.py", line 115, in run
result = self._sess.run(*tileables, **kw)
File "/Users/hetao/mars/mars/session.py", line 56, in run
res = self._executor.execute_tileables(tileables, **kw)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/executor.py", line 499, in execute_tileables
tileable.tiles()
File "/Users/hetao/mars/mars/core.py", line 474, in tiles
return handler.tiles(self)
File "/Users/hetao/mars/mars/tiles.py", line 174, in tiles
tiled = self._dispatch(node.op)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/tiles.py", line 116, in _dispatch
return op_cls.tile(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 527, in tile
return cls._tile_both_dataframes(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 516, in _tile_both_dataframes
out_chunks = cls._gen_out_chunks_with_all_shuffle(op, out_shape, left, right)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 461, in _gen_out_chunks_with_all_shuffle
reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=out_idx))
File "/Users/hetao/mars/mars/core.py", line 650, in new_chunk
return self.new_chunks(inputs, kws=kws, **kw)[0]
File "/Users/hetao/mars/mars/core.py", line 644, in new_chunks
return self._new_chunks(inputs, kws=kws, **kwargs)
File "/Users/hetao/mars/mars/core.py", line 618, in _new_chunks
chunk = self._create_chunk(j, index, **create_chunk_kw)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 188, in _create_chunk
all_dtypes = [c.op.column_shuffle_segments[index[1]] for c in index_align_map_chunks
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 189, in <listcomp>
if c.index[0] == index_align_map_chunks[0].index[0]]
IndexError: list index out of range
|
KeyError
|
def _tile_both_dataframes(cls, op):
# if both of the inputs are DataFrames, axis is just ignored
left, right = op.inputs
df = op.outputs[0]
nsplits = [[], []]
splits = _MinMaxSplitInfo()
# first, we decide the chunk size on each axis
# we perform the same logic for both index and columns
for axis, index_type in enumerate(["index_value", "columns"]):
if not cls._need_shuffle_on_axis(left, right, index_type, axis):
left_chunk_index_min_max = cls._get_chunk_index_min_max(
left, index_type, axis
)
right_chunk_index_min_max = cls._get_chunk_index_min_max(
right, index_type, axis
)
# no need to do shuffle on this axis
if (
len(left_chunk_index_min_max[0]) == 1
and len(right_chunk_index_min_max[0]) == 1
):
# both left and right has only 1 chunk
left_splits, right_splits = (
[left_chunk_index_min_max[0]],
[right_chunk_index_min_max[0]],
)
else:
left_splits, right_splits = split_monotonic_index_min_max(
*(left_chunk_index_min_max + right_chunk_index_min_max)
)
left_increase = left_chunk_index_min_max[1]
right_increase = right_chunk_index_min_max[1]
splits[axis] = _AxisMinMaxSplitInfo(
left_splits, left_increase, right_splits, right_increase
)
nsplits[axis].extend(np.nan for _ in itertools.chain(*left_splits))
else:
# do shuffle
left_chunk_size = left.chunk_shape[axis]
right_chunk_size = right.chunk_shape[axis]
out_chunk_size = max(left_chunk_size, right_chunk_size)
nsplits[axis].extend(np.nan for _ in range(out_chunk_size))
out_shape = tuple(len(ns) for ns in nsplits)
if splits.all_axes_can_split():
# no shuffle for all axes
out_chunks = cls._gen_out_chunks_without_shuffle(
op, splits, out_shape, left, right
)
elif splits.one_axis_can_split():
# one axis needs shuffle
out_chunks = cls._gen_out_chunks_with_one_shuffle(
op, splits, out_shape, left, right
)
else:
# all axes need shuffle
assert splits.no_axis_can_split()
out_chunks = cls._gen_out_chunks_with_all_shuffle(op, out_shape, left, right)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
def _tile_both_dataframes(cls, op):
# if both of the inputs are DataFrames, axis is just ignored
left, right = op.inputs
df = op.outputs[0]
nsplits = [[], []]
splits = _MinMaxSplitInfo()
# first, we decide the chunk size on each axis
# we perform the same logic for both index and columns
for axis, index_type in enumerate(["index_value", "columns"]):
# if both of the indexes are monotonic increasing or decreasing
left_chunk_index_min_max = cls._get_chunk_index_min_max(left, index_type, axis)
right_chunk_index_min_max = cls._get_chunk_index_min_max(
right, index_type, axis
)
if (
left_chunk_index_min_max is not None
and right_chunk_index_min_max is not None
):
# no need to do shuffle on this axis
if (
len(left_chunk_index_min_max[0]) == 1
and len(right_chunk_index_min_max[0]) == 1
):
# both left and right has only 1 chunk
left_splits, right_splits = (
[left_chunk_index_min_max[0]],
[right_chunk_index_min_max[0]],
)
else:
left_splits, right_splits = split_monotonic_index_min_max(
*(left_chunk_index_min_max + right_chunk_index_min_max)
)
left_increase = left_chunk_index_min_max[1]
right_increase = right_chunk_index_min_max[1]
splits[axis] = _AxisMinMaxSplitInfo(
left_splits, left_increase, right_splits, right_increase
)
nsplits[axis].extend(np.nan for _ in itertools.chain(*left_splits))
else:
# do shuffle
left_chunk_size = left.chunk_shape[axis]
right_chunk_size = right.chunk_shape[axis]
out_chunk_size = max(left_chunk_size, right_chunk_size)
nsplits[axis].extend(np.nan for _ in range(out_chunk_size))
out_shape = tuple(len(ns) for ns in nsplits)
if splits.all_axes_can_split():
# no shuffle for all axes
out_chunks = cls._gen_out_chunks_without_shuffle(
op, splits, out_shape, left, right
)
elif splits.one_axis_can_split():
# one axis needs shuffle
out_chunks = cls._gen_out_chunks_with_one_shuffle(
op, splits, out_shape, left, right
)
else:
# all axes need shuffle
assert splits.no_axis_can_split()
out_chunks = cls._gen_out_chunks_with_all_shuffle(op, out_shape, left, right)
new_op = op.copy()
return new_op.new_dataframes(
op.inputs,
df.shape,
nsplits=tuple(tuple(ns) for ns in nsplits),
chunks=out_chunks,
dtypes=df.dtypes,
index_value=df.index_value,
columns_value=df.columns,
)
|
https://github.com/mars-project/mars/issues/428
|
Traceback (most recent call last):
File "/Users/hetao/mars/mars/tiles.py", line 111, in _dispatch
handler = self._handlers[op_cls]
KeyError: <class 'mars.dataframe.expressions.arithmetic.add.DataFrameAdd'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/hetao/mars/mars/core.py", line 554, in execute
return session.run(self, **kw)
File "/Users/hetao/mars/mars/session.py", line 115, in run
result = self._sess.run(*tileables, **kw)
File "/Users/hetao/mars/mars/session.py", line 56, in run
res = self._executor.execute_tileables(tileables, **kw)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/executor.py", line 499, in execute_tileables
tileable.tiles()
File "/Users/hetao/mars/mars/core.py", line 474, in tiles
return handler.tiles(self)
File "/Users/hetao/mars/mars/tiles.py", line 174, in tiles
tiled = self._dispatch(node.op)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/tiles.py", line 116, in _dispatch
return op_cls.tile(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 527, in tile
return cls._tile_both_dataframes(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 516, in _tile_both_dataframes
out_chunks = cls._gen_out_chunks_with_all_shuffle(op, out_shape, left, right)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 461, in _gen_out_chunks_with_all_shuffle
reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=out_idx))
File "/Users/hetao/mars/mars/core.py", line 650, in new_chunk
return self.new_chunks(inputs, kws=kws, **kw)[0]
File "/Users/hetao/mars/mars/core.py", line 644, in new_chunks
return self._new_chunks(inputs, kws=kws, **kwargs)
File "/Users/hetao/mars/mars/core.py", line 618, in _new_chunks
chunk = self._create_chunk(j, index, **create_chunk_kw)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 188, in _create_chunk
all_dtypes = [c.op.column_shuffle_segments[index[1]] for c in index_align_map_chunks
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 189, in <listcomp>
if c.index[0] == index_align_map_chunks[0].index[0]]
IndexError: list index out of range
|
KeyError
|
def _calc_properties(cls, x1, x2):
dtypes = columns = index = None
index_shape = column_shape = np.nan
if x1.columns.key == x2.columns.key:
dtypes = x1.dtypes
column_shape = len(dtypes)
columns = copy.copy(x1.columns)
columns.value.should_be_monotonic = True
elif x1.dtypes is not None and x2.dtypes is not None:
dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator)
column_shape = len(dtypes)
columns = parse_index(dtypes.index, store_data=True)
columns.value.should_be_monotonic = True
if x1.index_value.key == x2.index_value.key:
index = copy.copy(x1.index_value)
index.value.should_be_monotonic = True
index_shape = x1.shape[0]
elif x1.index_value is not None and x2.index_value is not None:
index = infer_index_value(x1.index_value, x2.index_value, cls._operator)
index.value.should_be_monotonic = True
if index.key == x1.index_value.key == x2.index_value.key and (
not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0])
):
index_shape = x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0]
return {
"shape": (index_shape, column_shape),
"dtypes": dtypes,
"columns_value": columns,
"index_value": index,
}
|
def _calc_properties(cls, x1, x2):
dtypes = columns = index = None
index_shape = column_shape = np.nan
if x1.dtypes is not None and x2.dtypes is not None:
dtypes = infer_dtypes(x1.dtypes, x2.dtypes, cls._operator)
column_shape = len(dtypes)
columns = parse_index(dtypes.index, store_data=True)
columns.value.should_be_monotonic = True
if x1.index_value is not None and x2.index_value is not None:
index = infer_index_value(x1.index_value, x2.index_value, cls._operator)
index.value.should_be_monotonic = True
if index.key == x1.index_value.key == x2.index_value.key and (
not np.isnan(x1.shape[0]) or not np.isnan(x2.shape[0])
):
index_shape = x1.shape[0] if not np.isnan(x1.shape[0]) else x2.shape[0]
return {
"shape": (index_shape, column_shape),
"dtypes": dtypes,
"columns_value": columns,
"index_value": index,
}
|
https://github.com/mars-project/mars/issues/428
|
Traceback (most recent call last):
File "/Users/hetao/mars/mars/tiles.py", line 111, in _dispatch
handler = self._handlers[op_cls]
KeyError: <class 'mars.dataframe.expressions.arithmetic.add.DataFrameAdd'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/hetao/mars/mars/core.py", line 554, in execute
return session.run(self, **kw)
File "/Users/hetao/mars/mars/session.py", line 115, in run
result = self._sess.run(*tileables, **kw)
File "/Users/hetao/mars/mars/session.py", line 56, in run
res = self._executor.execute_tileables(tileables, **kw)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/executor.py", line 499, in execute_tileables
tileable.tiles()
File "/Users/hetao/mars/mars/core.py", line 474, in tiles
return handler.tiles(self)
File "/Users/hetao/mars/mars/tiles.py", line 174, in tiles
tiled = self._dispatch(node.op)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/tiles.py", line 116, in _dispatch
return op_cls.tile(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 527, in tile
return cls._tile_both_dataframes(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 516, in _tile_both_dataframes
out_chunks = cls._gen_out_chunks_with_all_shuffle(op, out_shape, left, right)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 461, in _gen_out_chunks_with_all_shuffle
reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=out_idx))
File "/Users/hetao/mars/mars/core.py", line 650, in new_chunk
return self.new_chunks(inputs, kws=kws, **kw)[0]
File "/Users/hetao/mars/mars/core.py", line 644, in new_chunks
return self._new_chunks(inputs, kws=kws, **kwargs)
File "/Users/hetao/mars/mars/core.py", line 618, in _new_chunks
chunk = self._create_chunk(j, index, **create_chunk_kw)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 188, in _create_chunk
all_dtypes = [c.op.column_shuffle_segments[index[1]] for c in index_align_map_chunks
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 189, in <listcomp>
if c.index[0] == index_align_map_chunks[0].index[0]]
IndexError: list index out of range
|
KeyError
|
def hash_index(index, size):
def func(x, size):
return mmh_hash(bytes(x)) % size
f = functools.partial(func, size=size)
idx_to_grouped = dict(index.groupby(index.map(f)).items())
return [idx_to_grouped.get(i, list()) for i in range(size)]
|
def hash_index(index, size):
def func(x, size):
return mmh_hash(bytes(x)) % size
f = functools.partial(func, size=size)
grouped = sorted(index.groupby(index.map(f)).items(), key=operator.itemgetter(0))
return [g[1] for g in grouped]
|
https://github.com/mars-project/mars/issues/428
|
Traceback (most recent call last):
File "/Users/hetao/mars/mars/tiles.py", line 111, in _dispatch
handler = self._handlers[op_cls]
KeyError: <class 'mars.dataframe.expressions.arithmetic.add.DataFrameAdd'>
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/hetao/mars/mars/core.py", line 554, in execute
return session.run(self, **kw)
File "/Users/hetao/mars/mars/session.py", line 115, in run
result = self._sess.run(*tileables, **kw)
File "/Users/hetao/mars/mars/session.py", line 56, in run
res = self._executor.execute_tileables(tileables, **kw)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/executor.py", line 499, in execute_tileables
tileable.tiles()
File "/Users/hetao/mars/mars/core.py", line 474, in tiles
return handler.tiles(self)
File "/Users/hetao/mars/mars/tiles.py", line 174, in tiles
tiled = self._dispatch(node.op)
File "/Users/hetao/mars/mars/utils.py", line 364, in _wrapped
return func(*args, **kwargs)
File "/Users/hetao/mars/mars/tiles.py", line 116, in _dispatch
return op_cls.tile(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 527, in tile
return cls._tile_both_dataframes(op)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 516, in _tile_both_dataframes
out_chunks = cls._gen_out_chunks_with_all_shuffle(op, out_shape, left, right)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 461, in _gen_out_chunks_with_all_shuffle
reduce_op.new_chunk([proxy_chunk], shape=(np.nan, np.nan), index=out_idx))
File "/Users/hetao/mars/mars/core.py", line 650, in new_chunk
return self.new_chunks(inputs, kws=kws, **kw)[0]
File "/Users/hetao/mars/mars/core.py", line 644, in new_chunks
return self._new_chunks(inputs, kws=kws, **kwargs)
File "/Users/hetao/mars/mars/core.py", line 618, in _new_chunks
chunk = self._create_chunk(j, index, **create_chunk_kw)
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 188, in _create_chunk
all_dtypes = [c.op.column_shuffle_segments[index[1]] for c in index_align_map_chunks
File "/Users/hetao/mars/mars/dataframe/expressions/arithmetic/core.py", line 189, in <listcomp>
if c.index[0] == index_align_map_chunks[0].index[0]]
IndexError: list index out of range
|
KeyError
|
def prepare_graph(self, compose=True):
"""
Tile and compose tileable graph into chunk graph
:param compose: if True, do compose after tiling
"""
tileable_graph = deserialize_graph(self._serialized_tileable_graph)
self._tileable_graph_cache = tileable_graph
logger.debug(
"Begin preparing graph %s with %d tileables to chunk graph.",
self._graph_key,
len(tileable_graph),
)
if not self._target_tileable_chunk_ops:
for tn in tileable_graph:
if not tileable_graph.count_successors(tn):
self._target_tileable_chunk_ops[tn.key] = set()
self._target_tileable_finished[tn.key] = set()
if self._serialized_chunk_graph:
serialized_chunk_graph = self._serialized_chunk_graph
chunk_graph = DAG.from_pb(serialized_chunk_graph)
else:
chunk_graph = DAG()
key_to_chunk = {c.key: c for c in chunk_graph}
tileable_key_opid_to_tiled = self._tileable_key_opid_to_tiled
for t in tileable_graph:
self._tileable_key_to_opid[t.key] = t.op.id
if (t.key, t.op.id) not in tileable_key_opid_to_tiled:
continue
t._chunks = [
key_to_chunk[k] for k in [tileable_key_opid_to_tiled[(t.key, t.op.id)][-1]]
]
tq = deque()
for t in tileable_graph:
if t.inputs and not all(
(ti.key, ti.op.id) in tileable_key_opid_to_tiled for ti in t.inputs
):
continue
tq.append(t)
while tq:
tileable = tq.popleft()
if (
not tileable.is_coarse()
or (tileable.key, tileable.op.id) in tileable_key_opid_to_tiled
):
continue
inputs = [
tileable_key_opid_to_tiled[(it.key, it.op.id)][-1]
for it in tileable.inputs or ()
]
op = tileable.op.copy()
_ = op.new_tileables(
inputs, # noqa: F841
kws=[o.params for o in tileable.op.outputs],
output_limit=len(tileable.op.outputs),
**tileable.extra_params,
)
total_tiled = []
for j, t, to_tile in zip(itertools.count(0), tileable.op.outputs, op.outputs):
# replace inputs with tiled ones
if not total_tiled:
try:
if isinstance(to_tile.op, Fetch):
td = self.tile_fetch_tileable(tileable)
else:
td = self._graph_analyze_pool.submit(
handler.dispatch, to_tile
).result()
except DataNotReady:
continue
if isinstance(td, (tuple, list)):
total_tiled.extend(td)
else:
total_tiled.append(td)
tiled = total_tiled[j]
tileable_key_opid_to_tiled[(t.key, t.op.id)].append(tiled)
# add chunks to fine grained graph
q = deque(
[
tiled_c if isinstance(tiled_c, ChunkData) else tiled_c.data
for tiled_c in tiled.chunks
]
)
input_chunk_keys = set(
itertools.chain(
*(
[(it.key, it.id) for it in input.chunks]
for input in to_tile.inputs
)
)
)
while len(q) > 0:
c = q.popleft()
if (c.key, c.id) in input_chunk_keys:
continue
if c not in chunk_graph:
chunk_graph.add_node(c)
for ic in c.inputs or []:
if ic not in chunk_graph:
chunk_graph.add_node(ic)
q.append(ic)
chunk_graph.add_edge(ic, c)
for succ in tileable_graph.successors(t):
if any(
(t.key, t.op.id) not in tileable_key_opid_to_tiled
for t in succ.inputs
):
continue
tq.append(succ)
# record the chunk nodes in graph
reserve_chunk = set()
result_chunk_keys = list()
for tk, topid in tileable_key_opid_to_tiled:
if tk not in self._target_tileable_chunk_ops:
continue
for n in [
c.data for t in tileable_key_opid_to_tiled[(tk, topid)] for c in t.chunks
]:
result_chunk_keys.append(n.key)
dq_predecessors = deque([n])
while dq_predecessors:
current = dq_predecessors.popleft()
reserve_chunk.update(n.op.outputs)
predecessors = chunk_graph.predecessors(current)
dq_predecessors.extend(
[p for p in predecessors if p not in reserve_chunk]
)
reserve_chunk.update(predecessors)
# delete redundant chunk
for n in list(chunk_graph.iter_nodes()):
if n not in reserve_chunk:
chunk_graph.remove_node(n)
elif isinstance(n.op, Fetch):
chunk_graph.remove_node(n)
if compose:
chunk_graph.compose(keys=result_chunk_keys)
for tk, topid in tileable_key_opid_to_tiled:
if tk not in self._target_tileable_chunk_ops:
continue
for n in tileable_key_opid_to_tiled[(tk, topid)][-1].chunks:
self._terminal_chunk_op_tileable[n.op.key].add(tk)
self._target_tileable_chunk_ops[tk].add(n.op.key)
# sync chunk graph to kv store
if self._kv_store_ref is not None:
graph_path = "/sessions/%s/graphs/%s" % (self._session_id, self._graph_key)
self._kv_store_ref.write(
"%s/chunk_graph" % graph_path,
serialize_graph(chunk_graph, compress=True),
_tell=True,
_wait=False,
)
self._chunk_graph_cache = chunk_graph
for n in self._chunk_graph_cache:
self._op_key_to_chunk[n.op.key].append(n)
|
def prepare_graph(self, compose=True):
"""
Tile and compose tensor graph into chunk graph
:param compose: if True, do compose after tiling
"""
tileable_graph = deserialize_graph(self._serialized_tileable_graph)
self._tileable_graph_cache = tileable_graph
logger.debug(
"Begin preparing graph %s with %d tensors to chunk graph.",
self._graph_key,
len(tileable_graph),
)
# mark target tensor steps
if not self._target_tileable_chunk_ops:
for tn in tileable_graph:
if not tileable_graph.count_successors(tn):
self._target_tileable_chunk_ops[tn.key] = set()
self._target_tileable_finished[tn.key] = set()
if self._serialized_chunk_graph:
serialized_chunk_graph = self._serialized_chunk_graph
chunk_graph = DAG.from_pb(serialized_chunk_graph)
else:
chunk_graph = DAG()
key_to_chunk = {c.key: c for c in chunk_graph}
tileable_key_opid_to_tiled = self._tileable_key_opid_to_tiled
for t in tileable_graph:
self._tileable_key_to_opid[t.key] = t.op.id
if (t.key, t.op.id) not in tileable_key_opid_to_tiled:
continue
t._chunks = [
key_to_chunk[k] for k in [tileable_key_opid_to_tiled[(t.key, t.op.id)][-1]]
]
tq = deque()
for t in tileable_graph:
if t.inputs and not all(
(ti.key, ti.op.id) in tileable_key_opid_to_tiled for ti in t.inputs
):
continue
tq.append(t)
while tq:
tileable = tq.popleft()
if (
not tileable.is_coarse()
or (tileable.key, tileable.op.id) in tileable_key_opid_to_tiled
):
continue
inputs = [
tileable_key_opid_to_tiled[(it.key, it.op.id)][-1]
for it in tileable.inputs or ()
]
op = tileable.op.copy()
_ = op.new_tileables(
inputs, # noqa: F841
kws=[o.params for o in tileable.op.outputs],
output_limit=len(tileable.op.outputs),
**tileable.extra_params,
)
total_tiled = []
for j, t, to_tile in zip(itertools.count(0), tileable.op.outputs, op.outputs):
# replace inputs with tiled ones
if not total_tiled:
try:
if isinstance(to_tile.op, Fetch):
td = self.tile_fetch_tileable(tileable)
else:
td = self._graph_analyze_pool.submit(
handler.dispatch, to_tile
).result()
except DataNotReady:
continue
if isinstance(td, (tuple, list)):
total_tiled.extend(td)
else:
total_tiled.append(td)
tiled = total_tiled[j]
tileable_key_opid_to_tiled[(t.key, t.op.id)].append(tiled)
# add chunks to fine grained graph
q = deque(
[
tiled_c if isinstance(tiled_c, ChunkData) else tiled_c.data
for tiled_c in tiled.chunks
]
)
input_chunk_keys = set(
itertools.chain(
*(
[(it.key, it.id) for it in input.chunks]
for input in to_tile.inputs
)
)
)
while len(q) > 0:
c = q.popleft()
if (c.key, c.id) in input_chunk_keys:
continue
if c not in chunk_graph:
chunk_graph.add_node(c)
for ic in c.inputs or []:
if ic not in chunk_graph:
chunk_graph.add_node(ic)
q.append(ic)
chunk_graph.add_edge(ic, c)
for succ in tileable_graph.successors(t):
if any(
(t.key, t.op.id) not in tileable_key_opid_to_tiled
for t in succ.inputs
):
continue
tq.append(succ)
# record the chunk nodes in graph
reserve_chunk = set()
result_chunk_keys = list()
for tk, topid in tileable_key_opid_to_tiled:
if tk not in self._target_tileable_chunk_ops:
continue
for n in [
c.data for t in tileable_key_opid_to_tiled[(tk, topid)] for c in t.chunks
]:
result_chunk_keys.append(n.key)
dq_predecessors = deque([n])
while dq_predecessors:
current = dq_predecessors.popleft()
reserve_chunk.update(n.op.outputs)
predecessors = chunk_graph.predecessors(current)
dq_predecessors.extend(
[p for p in predecessors if p not in reserve_chunk]
)
reserve_chunk.update(predecessors)
# delete redundant chunk
for n in list(chunk_graph.iter_nodes()):
if n not in reserve_chunk:
chunk_graph.remove_node(n)
elif isinstance(n.op, Fetch):
chunk_graph.remove_node(n)
if compose:
chunk_graph.compose(keys=result_chunk_keys)
for tk, topid in tileable_key_opid_to_tiled:
if tk not in self._target_tileable_chunk_ops:
continue
for n in tileable_key_opid_to_tiled[(tk, topid)][-1].chunks:
self._terminal_chunk_op_tileable[n.op.key].add(tk)
self._target_tileable_chunk_ops[tk].add(n.op.key)
# sync chunk graph to kv store
if self._kv_store_ref is not None:
graph_path = "/sessions/%s/graphs/%s" % (self._session_id, self._graph_key)
self._kv_store_ref.write(
"%s/chunk_graph" % graph_path,
serialize_graph(chunk_graph, compress=True),
_tell=True,
_wait=False,
)
self._chunk_graph_cache = chunk_graph
for n in self._chunk_graph_cache:
self._op_key_to_chunk[n.op.key].append(n)
|
https://github.com/mars-project/mars/issues/406
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 335, in execute_graph
self.prepare_graph(compose=compose)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 475, in prepare_graph
td = self.tile_fetch_tileable(tileable)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 936, in tile_fetch_tileable
fetch_graph = deserialize_graph(graph_ref.build_fetch_graph(tileable_key))
File "mars/actors/core.pyx", line 65, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 37, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 657, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 682, in mars.actors.pool.gevent_pool.Communicator._send_local
File "src/gevent/greenlet.py", line 709, in gevent._greenlet.Greenlet.get
File "src/gevent/greenlet.py", line 317, in gevent._greenlet.Greenlet._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 72, in mars.actors.pool.gevent_pool.MessageContext.result
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 928, in build_fetch_graph
return serialize_graph(graph)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 217, in serialize_graph
ser_graph = graph.to_pb().SerializeToString()
File "mars/graph.pyx", line 431, in mars.graph.DirectedGraph.to_pb
File "mars/serialize/core.pyx", line 563, in mars.serialize.core.Serializable.to_pb
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 476, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 447, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 467, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 485, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 338, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
TypeError: nan has type float, but expected one of: int, long
2019-05-21T10:36:49Z <Greenlet at 0xc20a0d598: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc20d32138>> failed with TypeError
|
TypeError
|
def build_fetch_graph(self, tileable_key):
"""
Convert single tileable node to tiled fetch tileable node and
put into a graph which only contains one tileable node
:param tileable_key: the key of tileable node
"""
tileable = self._get_tileable_by_key(tileable_key)
graph = DAG()
new_tileable = build_fetch_tileable(tileable)
graph.add_node(new_tileable)
return serialize_graph(graph)
|
def build_fetch_graph(self, tileable_key):
"""
Convert single tensor to tiled fetch tensor and put into a graph which only contains one tensor
:param tileable_key: the key of tensor
"""
tileable = self._get_tileable_by_key(tileable_key)
graph = DAG()
new_tileable = build_fetch_tileable(tileable)
graph.add_node(new_tileable)
return serialize_graph(graph)
|
https://github.com/mars-project/mars/issues/406
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 335, in execute_graph
self.prepare_graph(compose=compose)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 475, in prepare_graph
td = self.tile_fetch_tileable(tileable)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 936, in tile_fetch_tileable
fetch_graph = deserialize_graph(graph_ref.build_fetch_graph(tileable_key))
File "mars/actors/core.pyx", line 65, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 37, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 657, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 682, in mars.actors.pool.gevent_pool.Communicator._send_local
File "src/gevent/greenlet.py", line 709, in gevent._greenlet.Greenlet.get
File "src/gevent/greenlet.py", line 317, in gevent._greenlet.Greenlet._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 72, in mars.actors.pool.gevent_pool.MessageContext.result
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 928, in build_fetch_graph
return serialize_graph(graph)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 217, in serialize_graph
ser_graph = graph.to_pb().SerializeToString()
File "mars/graph.pyx", line 431, in mars.graph.DirectedGraph.to_pb
File "mars/serialize/core.pyx", line 563, in mars.serialize.core.Serializable.to_pb
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 476, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 447, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 467, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 485, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 338, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
TypeError: nan has type float, but expected one of: int, long
2019-05-21T10:36:49Z <Greenlet at 0xc20a0d598: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc20d32138>> failed with TypeError
|
TypeError
|
def tile_fetch_tileable(self, tileable):
"""
Find the owner of the input tileable node and ask for tiling
"""
tileable_key = tileable.key
graph_ref = self.ctx.actor_ref(
self._session_ref.get_graph_ref_by_tleable_key(tileable_key)
)
fetch_graph = deserialize_graph(graph_ref.build_fetch_graph(tileable_key))
return list(fetch_graph)[0]
|
def tile_fetch_tileable(self, tileable):
"""
Find the owner of the input tensor and ask for tiling.
"""
tileable_key = tileable.key
graph_ref = self.ctx.actor_ref(
self._session_ref.get_graph_ref_by_tleable_key(tileable_key)
)
fetch_graph = deserialize_graph(graph_ref.build_fetch_graph(tileable_key))
return list(fetch_graph)[0]
|
https://github.com/mars-project/mars/issues/406
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 335, in execute_graph
self.prepare_graph(compose=compose)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 475, in prepare_graph
td = self.tile_fetch_tileable(tileable)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 936, in tile_fetch_tileable
fetch_graph = deserialize_graph(graph_ref.build_fetch_graph(tileable_key))
File "mars/actors/core.pyx", line 65, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 37, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 657, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 682, in mars.actors.pool.gevent_pool.Communicator._send_local
File "src/gevent/greenlet.py", line 709, in gevent._greenlet.Greenlet.get
File "src/gevent/greenlet.py", line 317, in gevent._greenlet.Greenlet._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 72, in mars.actors.pool.gevent_pool.MessageContext.result
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 928, in build_fetch_graph
return serialize_graph(graph)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 217, in serialize_graph
ser_graph = graph.to_pb().SerializeToString()
File "mars/graph.pyx", line 431, in mars.graph.DirectedGraph.to_pb
File "mars/serialize/core.pyx", line 563, in mars.serialize.core.Serializable.to_pb
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 476, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 447, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 467, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 485, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 338, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
TypeError: nan has type float, but expected one of: int, long
2019-05-21T10:36:49Z <Greenlet at 0xc20a0d598: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc20d32138>> failed with TypeError
|
TypeError
|
def build_fetch_chunk(chunk, input_chunk_keys=None, **kwargs):
from .operands import ShuffleProxy
chunk_op = chunk.op
params = chunk.params.copy()
if isinstance(chunk_op, ShuffleProxy):
# for shuffle nodes, we build FetchShuffle chunks
# to replace ShuffleProxy
to_fetch_keys = [
pinp.key
for pinp in chunk.inputs
if input_chunk_keys is None or pinp.key in input_chunk_keys
]
op = get_fetch_op_cls(chunk_op)(to_fetch_keys=to_fetch_keys)
else:
# for non-shuffle nodes, we build Fetch chunks
# to replace original chunk
op = get_fetch_op_cls(chunk_op)(sparse=chunk.op.sparse)
return op.new_chunk(None, kws=[params], _key=chunk.key, _id=chunk.id, **kwargs)
|
def build_fetch_chunk(chunk, input_chunk_keys=None, **kwargs):
from .operands import ShuffleProxy
chunk_op = chunk.op
params = chunk.params.copy()
params.pop("index", None)
if isinstance(chunk_op, ShuffleProxy):
# for shuffle nodes, we build FetchShuffle chunks
# to replace ShuffleProxy
to_fetch_keys = [
pinp.key
for pinp in chunk.inputs
if input_chunk_keys is None or pinp.key in input_chunk_keys
]
op = get_fetch_op_cls(chunk_op)(to_fetch_keys=to_fetch_keys)
else:
# for non-shuffle nodes, we build Fetch chunks
# to replace original chunk
op = get_fetch_op_cls(chunk_op)(sparse=chunk.op.sparse)
return op.new_chunk(None, kws=[params], _key=chunk.key, _id=chunk.id, **kwargs)
|
https://github.com/mars-project/mars/issues/406
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 335, in execute_graph
self.prepare_graph(compose=compose)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 475, in prepare_graph
td = self.tile_fetch_tileable(tileable)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 936, in tile_fetch_tileable
fetch_graph = deserialize_graph(graph_ref.build_fetch_graph(tileable_key))
File "mars/actors/core.pyx", line 65, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 37, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 657, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 682, in mars.actors.pool.gevent_pool.Communicator._send_local
File "src/gevent/greenlet.py", line 709, in gevent._greenlet.Greenlet.get
File "src/gevent/greenlet.py", line 317, in gevent._greenlet.Greenlet._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 72, in mars.actors.pool.gevent_pool.MessageContext.result
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 928, in build_fetch_graph
return serialize_graph(graph)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 217, in serialize_graph
ser_graph = graph.to_pb().SerializeToString()
File "mars/graph.pyx", line 431, in mars.graph.DirectedGraph.to_pb
File "mars/serialize/core.pyx", line 563, in mars.serialize.core.Serializable.to_pb
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 476, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 447, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 467, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 485, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 338, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
TypeError: nan has type float, but expected one of: int, long
2019-05-21T10:36:49Z <Greenlet at 0xc20a0d598: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc20d32138>> failed with TypeError
|
TypeError
|
def build_fetch_tileable(tileable, coarse=False):
if coarse or tileable.is_coarse():
chunks = None
else:
chunks = []
for c in tileable.chunks:
fetch_chunk = build_fetch_chunk(c, index=c.index)
chunks.append(fetch_chunk)
tileable_op = tileable.op
params = tileable.params.copy()
new_op = get_fetch_op_cls(tileable_op)()
return new_op.new_tileables(
None,
chunks=chunks,
nsplits=tileable.nsplits,
_key=tileable.key,
_id=tileable.id,
**params,
)[0]
|
def build_fetch_tileable(tileable, coarse=False):
if coarse or tileable.is_coarse():
chunks = None
else:
chunks = []
for c in tileable.chunks:
fetch_chunk = build_fetch_chunk(c, index=c.index)
chunks.append(fetch_chunk)
tileable_op = tileable.op
params = tileable.params.copy()
new_op = get_fetch_op_cls(tileable_op)(**params)
return new_op.new_tileables(
None,
chunks=chunks,
nsplits=tileable.nsplits,
_key=tileable.key,
_id=tileable.id,
**params,
)[0]
|
https://github.com/mars-project/mars/issues/406
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 335, in execute_graph
self.prepare_graph(compose=compose)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 475, in prepare_graph
td = self.tile_fetch_tileable(tileable)
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 936, in tile_fetch_tileable
fetch_graph = deserialize_graph(graph_ref.build_fetch_graph(tileable_key))
File "mars/actors/core.pyx", line 65, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 37, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 657, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 682, in mars.actors.pool.gevent_pool.Communicator._send_local
File "src/gevent/greenlet.py", line 709, in gevent._greenlet.Greenlet.get
File "src/gevent/greenlet.py", line 317, in gevent._greenlet.Greenlet._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 72, in mars.actors.pool.gevent_pool.MessageContext.result
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 112, in mars.actors.core._FunctionActor.on_receive
File "mars/actors/core.pyx", line 114, in mars.actors.core._FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 928, in build_fetch_graph
return serialize_graph(graph)
File "/Users/wenjun.swj/Code/mars/mars/utils.py", line 217, in serialize_graph
ser_graph = graph.to_pb().SerializeToString()
File "mars/graph.pyx", line 431, in mars.graph.DirectedGraph.to_pb
File "mars/serialize/core.pyx", line 563, in mars.serialize.core.Serializable.to_pb
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 476, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 447, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 467, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/core.pyx", line 547, in mars.serialize.core.Serializable.serialize
File "mars/serialize/core.pyx", line 665, in mars.serialize.core.Provider.serialize_model
File "mars/serialize/core.pyx", line 141, in mars.serialize.core.Field.serialize
File "mars/serialize/core.pyx", line 142, in mars.serialize.core.Field.serialize
File "mars/serialize/pbserializer.pyx", line 485, in mars.serialize.pbserializer.ProtobufSerializeProvider.serialize_field
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 365, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
File "mars/serialize/pbserializer.pyx", line 246, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_tuple
File "mars/serialize/pbserializer.pyx", line 428, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_value
File "mars/serialize/pbserializer.pyx", line 338, in mars.serialize.pbserializer.ProtobufSerializeProvider._set_typed_value
TypeError: nan has type float, but expected one of: int, long
2019-05-21T10:36:49Z <Greenlet at 0xc20a0d598: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc20d32138>> failed with TypeError
|
TypeError
|
def mars_serialize_context():
global _serialize_context
if _serialize_context is None:
ctx = pyarrow.default_serialization_context()
ctx.register_type(
SparseNDArray,
"mars.SparseNDArray",
custom_serializer=_serialize_sparse_csr_list,
custom_deserializer=_deserialize_sparse_csr_list,
)
_serialize_context = ctx
return _serialize_context
|
def mars_serialize_context():
global _serialize_context
if _serialize_context is None:
ctx = pyarrow.default_serialization_context()
ctx.register_type(
SparseNDArray,
"mars.SparseNDArray",
custom_serializer=_serialize_sparse_csr_list,
custom_deserializer=_deserialize_sparse_csr_list,
)
ctx.register_type(
DataTuple,
"mars.DataTuple",
custom_serializer=_serialize_data_tuple,
custom_deserializer=_deserialize_data_tuple,
)
_serialize_context = ctx
return _serialize_context
|
https://github.com/mars-project/mars/issues/370
|
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.1.zip/mars/scheduler/operand.py", line 461, in _rejecter
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/promise.py", line 92, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/calc.py", line 72, in _try_put_chunk
ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/chunkstore.py", line 184, in put
value = DataTuple(*value)
TypeError: tuple() takes at most 1 argument (2 given)
|
TypeError
|
def create(self, session_id, chunk_key, size):
from pyarrow.lib import PlasmaStoreFull
obj_id = self._new_object_id(session_id, chunk_key)
try:
self._plasma_client.evict(size)
buffer = self._plasma_client.create(obj_id, size)
return buffer
except PlasmaStoreFull:
exc_type = PlasmaStoreFull
self._mapper_ref.delete(session_id, chunk_key)
logger.warning(
"Chunk %s(%d) failed to store to plasma due to StoreFullError",
chunk_key,
size,
)
except: # noqa: E722 # pragma: no cover
self._mapper_ref.delete(session_id, chunk_key)
raise
if exc_type is PlasmaStoreFull:
raise StoreFull
|
def create(self, session_id, chunk_key, size):
from pyarrow.lib import PlasmaStoreFull
obj_id = self._new_object_id(session_id, chunk_key)
try:
buffer = self._plasma_client.create(obj_id, size)
return buffer
except PlasmaStoreFull:
exc_type = PlasmaStoreFull
logger.warning(
"Chunk %s(%d) failed to store to plasma due to StoreFullError",
chunk_key,
size,
)
if exc_type is PlasmaStoreFull:
raise StoreFull
|
https://github.com/mars-project/mars/issues/370
|
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.1.zip/mars/scheduler/operand.py", line 461, in _rejecter
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/promise.py", line 92, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/calc.py", line 72, in _try_put_chunk
ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/chunkstore.py", line 184, in put
value = DataTuple(*value)
TypeError: tuple() takes at most 1 argument (2 given)
|
TypeError
|
def seal(self, session_id, chunk_key):
from pyarrow.lib import PlasmaObjectNonexistent
obj_id = self._get_object_id(session_id, chunk_key)
try:
self._plasma_client.seal(obj_id)
except PlasmaObjectNonexistent:
raise KeyError((session_id, chunk_key))
|
def seal(self, session_id, chunk_key):
from pyarrow.lib import PlasmaObjectNonexistent
obj_id = self._get_object_id(session_id, chunk_key)
try:
self._plasma_client.seal(obj_id)
except PlasmaObjectNonexistent:
raise KeyError("(%r, %r)" % (session_id, chunk_key))
|
https://github.com/mars-project/mars/issues/370
|
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.1.zip/mars/scheduler/operand.py", line 461, in _rejecter
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/promise.py", line 92, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/calc.py", line 72, in _try_put_chunk
ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/chunkstore.py", line 184, in put
value = DataTuple(*value)
TypeError: tuple() takes at most 1 argument (2 given)
|
TypeError
|
def get(self, session_id, chunk_key):
"""
Get deserialized Mars object from plasma store
"""
from pyarrow.plasma import ObjectNotAvailable
obj_id = self._get_object_id(session_id, chunk_key)
obj = self._plasma_client.get(
obj_id, serialization_context=self._serialize_context, timeout_ms=10
)
if obj is ObjectNotAvailable:
raise KeyError((session_id, chunk_key))
return obj
|
def get(self, session_id, chunk_key):
"""
Get deserialized Mars object from plasma store
"""
from pyarrow.plasma import ObjectNotAvailable
obj_id = self._get_object_id(session_id, chunk_key)
obj = self._plasma_client.get(
obj_id, serialization_context=self._serialize_context, timeout_ms=10
)
if obj is ObjectNotAvailable:
raise KeyError("(%r, %r)" % (session_id, chunk_key))
return obj
|
https://github.com/mars-project/mars/issues/370
|
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.1.zip/mars/scheduler/operand.py", line 461, in _rejecter
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/promise.py", line 92, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/calc.py", line 72, in _try_put_chunk
ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/chunkstore.py", line 184, in put
value = DataTuple(*value)
TypeError: tuple() takes at most 1 argument (2 given)
|
TypeError
|
def get_buffer(self, session_id, chunk_key):
"""
Get raw buffer from plasma store
"""
obj_id = self._get_object_id(session_id, chunk_key)
[buf] = self._plasma_client.get_buffers([obj_id], timeout_ms=10)
if buf is None:
raise KeyError((session_id, chunk_key))
return buf
|
def get_buffer(self, session_id, chunk_key):
"""
Get raw buffer from plasma store
"""
obj_id = self._get_object_id(session_id, chunk_key)
[buf] = self._plasma_client.get_buffers([obj_id], timeout_ms=10)
if buf is None:
raise KeyError("(%r, %r)" % (session_id, chunk_key))
return buf
|
https://github.com/mars-project/mars/issues/370
|
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.1.zip/mars/scheduler/operand.py", line 461, in _rejecter
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/promise.py", line 92, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/calc.py", line 72, in _try_put_chunk
ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/chunkstore.py", line 184, in put
value = DataTuple(*value)
TypeError: tuple() takes at most 1 argument (2 given)
|
TypeError
|
def get_actual_size(self, session_id, chunk_key):
"""
Get actual size of Mars object from plasma store
"""
buf = None
try:
obj_id = self._get_object_id(session_id, chunk_key)
[buf] = self._plasma_client.get_buffers([obj_id], timeout_ms=10)
if buf is None:
raise KeyError((session_id, chunk_key))
size = buf.size
finally:
del buf
return size
|
def get_actual_size(self, session_id, chunk_key):
"""
Get actual size of Mars object from plasma store
"""
buf = None
try:
obj_id = self._get_object_id(session_id, chunk_key)
[buf] = self._plasma_client.get_buffers([obj_id], timeout_ms=10)
size = buf.size
finally:
del buf
return size
|
https://github.com/mars-project/mars/issues/370
|
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.1.zip/mars/scheduler/operand.py", line 461, in _rejecter
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/promise.py", line 92, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/calc.py", line 72, in _try_put_chunk
ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/chunkstore.py", line 184, in put
value = DataTuple(*value)
TypeError: tuple() takes at most 1 argument (2 given)
|
TypeError
|
def put(self, session_id, chunk_key, value):
"""
Put a Mars object into plasma store
:param session_id: session id
:param chunk_key: chunk key
:param value: Mars object to be put
"""
import pyarrow
from pyarrow.lib import PlasmaStoreFull
data_size = calc_data_size(value)
try:
obj_id = self._new_object_id(session_id, chunk_key)
except StoreKeyExists:
obj_id = self._get_object_id(session_id, chunk_key)
if self._plasma_client.contains(obj_id):
logger.debug("Chunk %s already exists, returning existing", chunk_key)
[buffer] = self._plasma_client.get_buffers([obj_id], timeout_ms=10)
return buffer
else:
logger.warning(
"Chunk %s registered but no data found, reconstructed", chunk_key
)
self.delete(session_id, chunk_key)
obj_id = self._new_object_id(session_id, chunk_key)
try:
serialized = pyarrow.serialize(value, self._serialize_context)
try:
buffer = self._plasma_client.create(obj_id, serialized.total_bytes)
stream = pyarrow.FixedSizeBufferWriter(buffer)
stream.set_memcopy_threads(6)
serialized.write_to(stream)
self._plasma_client.seal(obj_id)
finally:
del serialized
return buffer
except PlasmaStoreFull:
self._mapper_ref.delete(session_id, chunk_key)
logger.warning(
"Chunk %s(%d) failed to store to plasma due to StoreFullError",
chunk_key,
data_size,
)
exc = PlasmaStoreFull
except: # noqa: E722 # pragma: no cover
self._mapper_ref.delete(session_id, chunk_key)
raise
if exc is PlasmaStoreFull:
raise StoreFull
|
def put(self, session_id, chunk_key, value):
"""
Put a Mars object into plasma store
:param session_id: session id
:param chunk_key: chunk key
:param value: Mars object to be put
"""
import pyarrow
from pyarrow.lib import PlasmaStoreFull
from ..serialize.dataserializer import DataTuple
data_size = calc_data_size(value)
if isinstance(value, tuple):
value = DataTuple(*value)
try:
obj_id = self._new_object_id(session_id, chunk_key)
except StoreKeyExists:
obj_id = self._get_object_id(session_id, chunk_key)
if self._plasma_client.contains(obj_id):
logger.debug("Chunk %s already exists, returning existing", chunk_key)
[buffer] = self._plasma_client.get_buffers([obj_id], timeout_ms=10)
return buffer
else:
logger.warning(
"Chunk %s registered but no data found, reconstructed", chunk_key
)
self.delete(session_id, chunk_key)
obj_id = self._new_object_id(session_id, chunk_key)
try:
serialized = pyarrow.serialize(value, self._serialize_context)
try:
buffer = self._plasma_client.create(obj_id, serialized.total_bytes)
stream = pyarrow.FixedSizeBufferWriter(buffer)
stream.set_memcopy_threads(6)
serialized.write_to(stream)
self._plasma_client.seal(obj_id)
finally:
del serialized
return buffer
except PlasmaStoreFull:
self._mapper_ref.delete(session_id, chunk_key)
logger.warning(
"Chunk %s(%d) failed to store to plasma due to StoreFullError",
chunk_key,
data_size,
)
exc = PlasmaStoreFull
if exc is PlasmaStoreFull:
raise StoreFull
|
https://github.com/mars-project/mars/issues/370
|
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.1.zip/mars/scheduler/operand.py", line 461, in _rejecter
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/promise.py", line 92, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/utils.py", line 288, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/calc.py", line 72, in _try_put_chunk
ref = self._chunk_store.put(session_id, chunk_key, _calc_result_cache[chunk_key][1])
File "/home/admin/work/_public-mars-0.1.1.zip/mars/worker/chunkstore.py", line 184, in put
value = DataTuple(*value)
TypeError: tuple() takes at most 1 argument (2 given)
|
TypeError
|
def run(self, *tensors, **kw):
from . import tensor as mt
fetch = kw.get("fetch", True)
ret_list = False
if len(tensors) == 1 and isinstance(tensors[0], (tuple, list)):
ret_list = True
tensors = tensors[0]
elif len(tensors) > 1:
ret_list = True
tensors = tuple(mt.tensor(t) for t in tensors)
result = self._sess.run(*tensors, **kw)
for t in tensors:
t._execute_session = self
for t in tensors:
if np.nan in t.shape:
self._sess._update_tensor_shape(t)
if fetch:
ret = []
for r, t in zip(result, tensors):
if t.isscalar() and hasattr(r, "item"):
ret.append(r.item())
else:
ret.append(r)
if ret_list:
return ret
return ret[0]
|
def run(self, *tensors, **kw):
from . import tensor as mt
fetch = kw.get("fetch", True)
ret_list = False
if len(tensors) == 1 and isinstance(tensors[0], (tuple, list)):
ret_list = True
tensors = tensors[0]
elif len(tensors) > 1:
ret_list = True
tensors = tuple(mt.tensor(t) for t in tensors)
result = self._sess.run(*tensors, **kw)
for t in tensors:
t._execute_session = self
for t in tensors:
if np.nan in t.shape:
self._sess._update_tensor_shape(t)
if fetch:
ret = []
for r, t in zip(result, tensors):
if t.isscalar() and hasattr(r, "item"):
ret.append(np.asscalar(r))
else:
ret.append(r)
if ret_list:
return ret
return ret[0]
|
https://github.com/mars-project/mars/issues/334
|
In [1]: import mars.tensor as mt
In [2]: a = mt.arange(12)
In [3]: a.totiledb('test_tiledb').execute()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-5174935f197d> in <module>
----> 1 a.totiledb('test_tiledb').execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
350 try:
351 _kernel_mode.eager = False
--> 352 return func(*args, **kwargs)
353 finally:
354 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, compose)
530
531 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 532 print_progress=print_progress, mock=mock)
533
534 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, retval)
464 print_progress=print_progress, mock=mock,
465 mock_max_memory=self._mock_max_memory)
--> 466 res = graph_execution.execute(retval)
467 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
468 if mock:
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
377 # wait until all the futures completed
378 for future in executed_futures:
--> 379 future.result()
380
381 # update with the maximal memory cost during the whole execution
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263
264 # update maximal memory usage during execution
~/Documents/mars_dev/mars/mars/executor.py in handle(cls, chunk, results, mock)
450 def handle(cls, chunk, results, mock=False):
451 if not mock:
--> 452 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
453 else:
454 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Documents/mars_dev/mars/mars/tensor/execution/datastore.py in _store_tiledb(ctx, chunk)
44 with tiledb.DenseArray(uri=uri, ctx=tiledb_ctx, mode='w',
45 key=key, timestamp=timestamp) as arr:
---> 46 arr[tuple(slcs)] = to_store
47 ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
48 else:
tiledb/libtiledb.pyx in tiledb.libtiledb.DenseArray.__setitem__()
tiledb/libtiledb.pyx in tiledb.libtiledb.index_domain_subarray()
IndexError: too many indices for array
|
IndexError
|
def fetch(self, *tensors, **kw):
ret_list = False
if len(tensors) == 1 and isinstance(tensors[0], (tuple, list)):
ret_list = True
tensors = tensors[0]
elif len(tensors) > 1:
ret_list = True
result = self._sess.fetch(*tensors, **kw)
ret = []
for r, t in zip(result, tensors):
if t.isscalar() and hasattr(r, "item"):
ret.append(r.item())
else:
ret.append(r)
if ret_list:
return ret
return ret[0]
|
def fetch(self, *tensors, **kw):
ret_list = False
if len(tensors) == 1 and isinstance(tensors[0], (tuple, list)):
ret_list = True
tensors = tensors[0]
elif len(tensors) > 1:
ret_list = True
result = self._sess.fetch(*tensors, **kw)
ret = []
for r, t in zip(result, tensors):
if t.isscalar() and hasattr(r, "item"):
ret.append(np.asscalar(r))
else:
ret.append(r)
if ret_list:
return ret
return ret[0]
|
https://github.com/mars-project/mars/issues/334
|
In [1]: import mars.tensor as mt
In [2]: a = mt.arange(12)
In [3]: a.totiledb('test_tiledb').execute()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-5174935f197d> in <module>
----> 1 a.totiledb('test_tiledb').execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
350 try:
351 _kernel_mode.eager = False
--> 352 return func(*args, **kwargs)
353 finally:
354 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, compose)
530
531 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 532 print_progress=print_progress, mock=mock)
533
534 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, retval)
464 print_progress=print_progress, mock=mock,
465 mock_max_memory=self._mock_max_memory)
--> 466 res = graph_execution.execute(retval)
467 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
468 if mock:
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
377 # wait until all the futures completed
378 for future in executed_futures:
--> 379 future.result()
380
381 # update with the maximal memory cost during the whole execution
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263
264 # update maximal memory usage during execution
~/Documents/mars_dev/mars/mars/executor.py in handle(cls, chunk, results, mock)
450 def handle(cls, chunk, results, mock=False):
451 if not mock:
--> 452 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
453 else:
454 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Documents/mars_dev/mars/mars/tensor/execution/datastore.py in _store_tiledb(ctx, chunk)
44 with tiledb.DenseArray(uri=uri, ctx=tiledb_ctx, mode='w',
45 key=key, timestamp=timestamp) as arr:
---> 46 arr[tuple(slcs)] = to_store
47 ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
48 else:
tiledb/libtiledb.pyx in tiledb.libtiledb.DenseArray.__setitem__()
tiledb/libtiledb.pyx in tiledb.libtiledb.index_domain_subarray()
IndexError: too many indices for array
|
IndexError
|
def _store_tiledb(ctx, chunk):
tiledb_ctx = get_tiledb_ctx(chunk.op.tiledb_config)
uri = chunk.op.tiledb_uri
key = chunk.op.tiledb_key
timestamp = chunk.op.tiledb_timestamp
axis_offsets = chunk.op.axis_offsets
if not chunk.issparse():
# dense
to_store = np.ascontiguousarray(ctx[chunk.op.input.key])
slcs = []
for axis in range(chunk.ndim):
axis_offset = int(axis_offsets[axis])
axis_length = int(chunk.op.input.shape[axis])
slcs.append(slice(axis_offset, axis_offset + axis_length))
with tiledb.DenseArray(
uri=uri, ctx=tiledb_ctx, mode="w", key=key, timestamp=timestamp
) as arr:
arr[tuple(slcs)] = to_store
ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
else:
# sparse
to_store = ctx[chunk.op.input.key].spmatrix.tocoo()
if to_store.nnz > 0:
with tiledb.SparseArray(
uri=uri, ctx=tiledb_ctx, mode="w", key=key, timestamp=timestamp
) as arr:
if chunk.ndim == 1:
vec = to_store.col if to_store.shape[0] == 1 else to_store.row
vec += axis_offsets[0]
arr[vec] = to_store.data
else:
i, j = (
to_store.row + axis_offsets[0],
to_store.col + axis_offsets[1],
)
arr[i, j] = to_store.data
ctx[chunk.key] = SparseNDArray(
sps.csr_matrix((0, 0), dtype=chunk.dtype), shape=chunk.shape
)
|
def _store_tiledb(ctx, chunk):
tiledb_ctx = get_tiledb_ctx(chunk.op.tiledb_config)
uri = chunk.op.tiledb_uri
key = chunk.op.tiledb_key
timestamp = chunk.op.tiledb_timestamp
axis_offsets = chunk.op.axis_offsets
if not chunk.issparse():
# dense
to_store = np.ascontiguousarray(ctx[chunk.op.input.key])
slcs = []
for axis in range(chunk.ndim):
axis_offset = axis_offsets[axis]
axis_length = chunk.op.input.shape[axis]
slcs.append(slice(axis_offset, axis_offset + axis_length))
with tiledb.DenseArray(
uri=uri, ctx=tiledb_ctx, mode="w", key=key, timestamp=timestamp
) as arr:
arr[tuple(slcs)] = to_store
ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
else:
# sparse
to_store = ctx[chunk.op.input.key].spmatrix.tocoo()
if to_store.nnz > 0:
with tiledb.SparseArray(
uri=uri, ctx=tiledb_ctx, mode="w", key=key, timestamp=timestamp
) as arr:
if chunk.ndim == 1:
vec = to_store.col if to_store.shape[0] == 1 else to_store.row
vec += axis_offsets[0]
arr[vec] = to_store.data
else:
i, j = (
to_store.row + axis_offsets[0],
to_store.col + axis_offsets[1],
)
arr[i, j] = to_store.data
ctx[chunk.key] = SparseNDArray(
sps.csr_matrix((0, 0), dtype=chunk.dtype), shape=chunk.shape
)
|
https://github.com/mars-project/mars/issues/334
|
In [1]: import mars.tensor as mt
In [2]: a = mt.arange(12)
In [3]: a.totiledb('test_tiledb').execute()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-5174935f197d> in <module>
----> 1 a.totiledb('test_tiledb').execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
350 try:
351 _kernel_mode.eager = False
--> 352 return func(*args, **kwargs)
353 finally:
354 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, compose)
530
531 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 532 print_progress=print_progress, mock=mock)
533
534 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, retval)
464 print_progress=print_progress, mock=mock,
465 mock_max_memory=self._mock_max_memory)
--> 466 res = graph_execution.execute(retval)
467 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
468 if mock:
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
377 # wait until all the futures completed
378 for future in executed_futures:
--> 379 future.result()
380
381 # update with the maximal memory cost during the whole execution
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263
264 # update maximal memory usage during execution
~/Documents/mars_dev/mars/mars/executor.py in handle(cls, chunk, results, mock)
450 def handle(cls, chunk, results, mock=False):
451 if not mock:
--> 452 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
453 else:
454 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Documents/mars_dev/mars/mars/tensor/execution/datastore.py in _store_tiledb(ctx, chunk)
44 with tiledb.DenseArray(uri=uri, ctx=tiledb_ctx, mode='w',
45 key=key, timestamp=timestamp) as arr:
---> 46 arr[tuple(slcs)] = to_store
47 ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
48 else:
tiledb/libtiledb.pyx in tiledb.libtiledb.DenseArray.__setitem__()
tiledb/libtiledb.pyx in tiledb.libtiledb.index_domain_subarray()
IndexError: too many indices for array
|
IndexError
|
def scalar(data, dtype=None, gpu=False):
try:
arr = np.array(data, dtype=dtype)
op = Scalar(arr.item(), dtype=arr.dtype, gpu=gpu)
shape = ()
return op(shape)
except ValueError:
raise TypeError("Expect scalar, got: {0}".format(data))
|
def scalar(data, dtype=None, gpu=False):
try:
arr = np.array(data, dtype=dtype)
op = Scalar(np.asscalar(arr), dtype=arr.dtype, gpu=gpu)
shape = ()
return op(shape)
except ValueError:
raise TypeError("Expect scalar, got: {0}".format(data))
|
https://github.com/mars-project/mars/issues/334
|
In [1]: import mars.tensor as mt
In [2]: a = mt.arange(12)
In [3]: a.totiledb('test_tiledb').execute()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-5174935f197d> in <module>
----> 1 a.totiledb('test_tiledb').execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
350 try:
351 _kernel_mode.eager = False
--> 352 return func(*args, **kwargs)
353 finally:
354 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, compose)
530
531 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 532 print_progress=print_progress, mock=mock)
533
534 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, retval)
464 print_progress=print_progress, mock=mock,
465 mock_max_memory=self._mock_max_memory)
--> 466 res = graph_execution.execute(retval)
467 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
468 if mock:
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
377 # wait until all the futures completed
378 for future in executed_futures:
--> 379 future.result()
380
381 # update with the maximal memory cost during the whole execution
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263
264 # update maximal memory usage during execution
~/Documents/mars_dev/mars/mars/executor.py in handle(cls, chunk, results, mock)
450 def handle(cls, chunk, results, mock=False):
451 if not mock:
--> 452 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
453 else:
454 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Documents/mars_dev/mars/mars/tensor/execution/datastore.py in _store_tiledb(ctx, chunk)
44 with tiledb.DenseArray(uri=uri, ctx=tiledb_ctx, mode='w',
45 key=key, timestamp=timestamp) as arr:
---> 46 arr[tuple(slcs)] = to_store
47 ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
48 else:
tiledb/libtiledb.pyx in tiledb.libtiledb.DenseArray.__setitem__()
tiledb/libtiledb.pyx in tiledb.libtiledb.index_domain_subarray()
IndexError: too many indices for array
|
IndexError
|
def _partial_reduction(
cls, agg_op_type, tensor, axis, dtype, keepdims, combine_size, kw=None
):
from ..merge.concatenate import TensorConcatenate
kw = kw or {}
axes = sorted(combine_size.keys())
combine_blocks = [
cls._combine_split(i, combine_size, tensor.chunk_shape)
for i in range(tensor.ndim)
]
combine_blocks_idxes = [range(len(blocks)) for blocks in combine_blocks]
chunks = []
for combine_block_idx, combine_block in izip(
itertools.product(*combine_blocks_idxes), itertools.product(*combine_blocks)
):
chks = [tensor.cix[idx] for idx in itertools.product(*combine_block)]
if len(chks) > 1:
op = TensorConcatenate(axis=axes, dtype=chks[0].dtype)
chk = op.new_chunk(
chks, shape=cls._concatenate_shape(tensor, combine_block)
)
else:
chk = chks[0]
shape = tuple(
s if i not in combine_size else 1
for i, s in enumerate(chk.shape)
if keepdims or i not in combine_size
)
agg_op = agg_op_type(axis=axis, dtype=dtype, keepdims=keepdims, **kw)
chunk = agg_op.new_chunk(
[chk],
shape=shape,
index=tuple(
idx
for i, idx in enumerate(combine_block_idx)
if keepdims or i not in combine_size
),
)
chunks.append(chunk)
nsplits = [
tuple(
c.shape[i]
for c in chunks
if builtins.all(idx == 0 for j, idx in enumerate(c.index) if j != i)
)
for i in range(len(chunks[0].shape))
]
shape = tuple(builtins.sum(nsplit) for nsplit in nsplits)
agg_op = agg_op_type(
axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size, **kw
)
return agg_op.new_tensors([tensor], shape, chunks=chunks, nsplits=nsplits)
|
def _partial_reduction(
cls, agg_op_type, tensor, axis, dtype, keepdims, combine_size, kw=None
):
from ..merge.concatenate import TensorConcatenate
kw = kw or {}
axes = sorted(combine_size.keys())
combine_blocks = [
cls._combine_split(i, combine_size, tensor.chunk_shape)
for i in range(tensor.ndim)
]
combine_blocks_idxes = [range(len(blocks)) for blocks in combine_blocks]
chunks = []
for combine_block_idx, combine_block in izip(
itertools.product(*combine_blocks_idxes), itertools.product(*combine_blocks)
):
chks = [tensor.cix[idx] for idx in itertools.product(*combine_block)]
op = TensorConcatenate(axis=axes, dtype=chks[0].dtype)
chk = op.new_chunk(chks, shape=cls._concatenate_shape(tensor, combine_block))
shape = tuple(
s if i not in combine_size else 1
for i, s in enumerate(chk.shape)
if keepdims or i not in combine_size
)
agg_op = agg_op_type(axis=axis, dtype=dtype, keepdims=keepdims, **kw)
chunk = agg_op.new_chunk(
[chk],
shape=shape,
index=tuple(
idx
for i, idx in enumerate(combine_block_idx)
if keepdims or i not in combine_size
),
)
chunks.append(chunk)
nsplits = [
tuple(
c.shape[i]
for c in chunks
if builtins.all(idx == 0 for j, idx in enumerate(c.index) if j != i)
)
for i in range(len(chunks[0].shape))
]
shape = tuple(builtins.sum(nsplit) for nsplit in nsplits)
agg_op = agg_op_type(
axis=axis, dtype=dtype, keepdims=keepdims, combine_size=combine_size, **kw
)
return agg_op.new_tensors([tensor], shape, chunks=chunks, nsplits=nsplits)
|
https://github.com/mars-project/mars/issues/334
|
In [1]: import mars.tensor as mt
In [2]: a = mt.arange(12)
In [3]: a.totiledb('test_tiledb').execute()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-5174935f197d> in <module>
----> 1 a.totiledb('test_tiledb').execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
350 try:
351 _kernel_mode.eager = False
--> 352 return func(*args, **kwargs)
353 finally:
354 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, compose)
530
531 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 532 print_progress=print_progress, mock=mock)
533
534 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, retval)
464 print_progress=print_progress, mock=mock,
465 mock_max_memory=self._mock_max_memory)
--> 466 res = graph_execution.execute(retval)
467 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
468 if mock:
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
377 # wait until all the futures completed
378 for future in executed_futures:
--> 379 future.result()
380
381 # update with the maximal memory cost during the whole execution
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263
264 # update maximal memory usage during execution
~/Documents/mars_dev/mars/mars/executor.py in handle(cls, chunk, results, mock)
450 def handle(cls, chunk, results, mock=False):
451 if not mock:
--> 452 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
453 else:
454 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Documents/mars_dev/mars/mars/tensor/execution/datastore.py in _store_tiledb(ctx, chunk)
44 with tiledb.DenseArray(uri=uri, ctx=tiledb_ctx, mode='w',
45 key=key, timestamp=timestamp) as arr:
---> 46 arr[tuple(slcs)] = to_store
47 ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
48 else:
tiledb/libtiledb.pyx in tiledb.libtiledb.DenseArray.__setitem__()
tiledb/libtiledb.pyx in tiledb.libtiledb.index_domain_subarray()
IndexError: too many indices for array
|
IndexError
|
def decide_chunk_sizes(shape, chunk_size, itemsize):
"""
Decide how a given tensor can be split into chunk.
:param shape: tensor's shape
:param chunk_size: if dict provided, it's dimension id to chunk size;
if provided, it's the chunk size for each dimension.
:param itemsize: element size
:return: the calculated chunk size for each dimension
:rtype: tuple
"""
from ...config import options
chunk_size = dictify_chunk_size(shape, chunk_size)
nleft = len(shape) - len(chunk_size)
if nleft < 0:
raise ValueError("chunks have more dimensions than input tensor")
if nleft == 0:
return normalize_chunk_sizes(
shape, tuple(chunk_size[j] for j in range(len(shape)))
)
max_chunk_size = options.tensor.chunk_store_limit
# normalize the dimension which specified first
dim_to_normalized = {
i: normalize_chunk_sizes((shape[i],), (c,))[0]
for i, c in six.iteritems(chunk_size)
}
left = {j: [] for j in range(len(shape)) if j not in dim_to_normalized}
left_unsplit = {j: shape[j] for j in left}
while True:
nbytes_occupied = (
np.prod([max(c) for c in six.itervalues(dim_to_normalized)]) * itemsize
)
dim_size = np.maximum(
int(np.power(max_chunk_size / nbytes_occupied, 1 / float(len(left)))), 1
)
for j, ns in six.iteritems(left.copy()):
unsplit = left_unsplit[j]
ns.append(int(np.minimum(unsplit, dim_size)))
left_unsplit[j] -= ns[-1]
if left_unsplit[j] <= 0:
dim_to_normalized[j] = tuple(ns)
del left[j]
if len(left) == 0:
break
return tuple(dim_to_normalized[i] for i in range(len(dim_to_normalized)))
|
def decide_chunk_sizes(shape, chunk_size, itemsize):
"""
Decide how a given tensor can be split into chunk.
:param shape: tensor's shape
:param chunk_size: if dict provided, it's dimension id to chunk size;
if provided, it's the chunk size for each dimension.
:param itemsize: element size
:return: the calculated chunk size for each dimension
:rtype: tuple
"""
from ...config import options
chunk_size = dictify_chunk_size(shape, chunk_size)
nleft = len(shape) - len(chunk_size)
if nleft < 0:
raise ValueError("chunks have more dimensions than input tensor")
if nleft == 0:
return normalize_chunk_sizes(
shape, tuple(chunk_size[j] for j in range(len(shape)))
)
max_chunk_size = options.tensor.chunk_store_limit
# normalize the dimension which specified first
dim_to_normalized = {
i: normalize_chunk_sizes((shape[i],), (c,))[0]
for i, c in six.iteritems(chunk_size)
}
left = {j: [] for j in range(len(shape)) if j not in dim_to_normalized}
left_unsplit = {j: shape[j] for j in left}
while True:
nbytes_occupied = (
np.prod([max(c) for c in six.itervalues(dim_to_normalized)]) * itemsize
)
dim_size = np.maximum(
int(np.power(max_chunk_size / nbytes_occupied, 1 / float(len(left)))), 1
)
for j, ns in six.iteritems(left.copy()):
unsplit = left_unsplit[j]
ns.append(np.minimum(unsplit, dim_size))
left_unsplit[j] -= ns[-1]
if left_unsplit[j] <= 0:
dim_to_normalized[j] = tuple(ns)
del left[j]
if len(left) == 0:
break
return tuple(dim_to_normalized[i] for i in range(len(dim_to_normalized)))
|
https://github.com/mars-project/mars/issues/334
|
In [1]: import mars.tensor as mt
In [2]: a = mt.arange(12)
In [3]: a.totiledb('test_tiledb').execute()
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-3-5174935f197d> in <module>
----> 1 a.totiledb('test_tiledb').execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
350 try:
351 _kernel_mode.eager = False
--> 352 return func(*args, **kwargs)
353 finally:
354 _kernel_mode.eager = None
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, compose)
530
531 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
--> 532 print_progress=print_progress, mock=mock)
533
534 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, retval)
464 print_progress=print_progress, mock=mock,
465 mock_max_memory=self._mock_max_memory)
--> 466 res = graph_execution.execute(retval)
467 self._mock_max_memory = max(self._mock_max_memory, graph_execution._mock_max_memory)
468 if mock:
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
377 # wait until all the futures completed
378 for future in executed_futures:
--> 379 future.result()
380
381 # update with the maximal memory cost during the whole execution
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433 else:
434 raise TimeoutError()
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263
264 # update maximal memory usage during execution
~/Documents/mars_dev/mars/mars/executor.py in handle(cls, chunk, results, mock)
450 def handle(cls, chunk, results, mock=False):
451 if not mock:
--> 452 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
453 else:
454 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Documents/mars_dev/mars/mars/tensor/execution/datastore.py in _store_tiledb(ctx, chunk)
44 with tiledb.DenseArray(uri=uri, ctx=tiledb_ctx, mode='w',
45 key=key, timestamp=timestamp) as arr:
---> 46 arr[tuple(slcs)] = to_store
47 ctx[chunk.key] = np.empty((0,) * chunk.ndim, dtype=chunk.dtype)
48 else:
tiledb/libtiledb.pyx in tiledb.libtiledb.DenseArray.__setitem__()
tiledb/libtiledb.pyx in tiledb.libtiledb.index_domain_subarray()
IndexError: too many indices for array
|
IndexError
|
def __init__(self, *args, **kwargs):
super(BaseWithKey, self).__init__(*args, **kwargs)
if self._init_update_key_ and (not hasattr(self, "_key") or not self._key):
self._update_key()
if not hasattr(self, "_id") or not self._id:
self._id = str(id(self))
|
def __init__(self, *args, **kwargs):
super(BaseWithKey, self).__init__(*args, **kwargs)
if self._init_update_key_ and (not hasattr(self, "_key") or not self._key):
self.update_key()
if not hasattr(self, "_id") or not self._id:
self._id = str(id(self))
|
https://github.com/mars-project/mars/issues/297
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='100M')
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 0.104858GB of memory.
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
In [3]: import mars.tensor as mt
In [4]: a = mt.random.rand(10, 10, chunk_size=3)
In [5]: a.tiles()
Out[5]: Tensor <op=TensorRand, shape=(10, 10), key=2d1101e0bd65a1ca3134ca872c6072b5>
In [6]: cluster.session.run(a)
Unexpected exception occurred in GraphActor.build_tensor_merge_graph. tensor_key='2d1101e0bd65a1ca3134ca872c6072b5'
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17966268: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc1796f818>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc179668c8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc17968408>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 446, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 450, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 457, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
return self.send((item,) + args + (kwargs,), wait=wait)
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
return self._ctx.send(self, message, wait=wait, callback=callback)
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17962378: <built-in method _send_remote of mars.actors.pool.gevent_pool.ActorRemoteHelper object at 0xc17860e58>('0.0.0.0:29790', [bytearray(b'\x05\x01 \x00\x00\x00\x00\x00\x00\x00)> failed with IndexError
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-6-06ec9ebba039> in <module>
----> 1 cluster.session.run(a)
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
103 return
104 else:
--> 105 return self.fetch(*tensors)
106
107 def fetch(self, *tensors):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in fetch(self, *tensors)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in <listcomp>(.0)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise(t, value, tb)
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._async_run.on_failure()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.run()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/lib/six.py in reraise()
700 value = tp()
701 if value.__traceback__ is not tb:
--> 702 raise value.with_traceback(tb)
703 raise value
704 finally:
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/scheduler/graph.py in fetch_tensor()
64
65 graph_actor = self.ctx.actor_ref(GraphActor.gen_name(session_id, graph_key))
---> 66 fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
67
68 if len(fetch_graph) == 1 and isinstance(next(fetch_graph.iter_nodes()).op, TensorFetch):
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.__getattr__._mt_call()
62 return self.tell((item,) + args + (kwargs,), delay=delay, wait=wait)
63 else:
---> 64 return self.send((item,) + args + (kwargs,), wait=wait)
65
66 return _mt_call
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.send()
34
35 cpdef object send(self, object message, bint wait=True, object callback=None):
---> 36 return self._ctx.send(self, message, wait=wait, callback=callback)
37
38 cpdef object tell(self, object message, object delay=None, bint wait=True,
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorContext.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._dispatch()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send_process()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.AsyncHandler.submit()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/utils.py in _wrapped()
296 def _wrapped(*args, **kwargs):
297 try:
--> 298 return func(*args, **kwargs)
299 except: # noqa: E722
300 kwcopy = kwargs.copy()
~/Documents/mars_dev/mars/mars/scheduler/graph.py in build_tensor_merge_graph()
817 from ..tensor.expressions.merge.concatenate import TensorConcatenate
818
--> 819 tiled_tensor = self._get_tensor_by_key(tensor_key)
820 graph = DAG()
821 if len(tiled_tensor.chunks) == 1:
~/Documents/mars_dev/mars/mars/scheduler/graph.py in _get_tensor_by_key()
802 def _get_tensor_by_key(self, key):
803 tid = self._tensor_key_to_opid[key]
--> 804 return self._tensor_key_opid_to_tiled[(key, tid)][-1]
805
806 @log_unhandled
IndexError: list index out of range
|
IndexError
|
def build_graph(
self, graph=None, cls=DAG, tiled=False, compose=True, executed_keys=None
):
from .tensor.expressions.utils import convert_to_fetch
executed_keys = executed_keys or []
if tiled and self.is_coarse():
self.tiles()
graph = graph if graph is not None else cls()
keys = None
if tiled:
nodes = list(c.data for c in self.chunks)
keys = list(c.key for c in self.chunks)
else:
nodes = list(self.op.outputs)
visited = set()
while len(nodes) > 0:
node = nodes.pop()
# replace executed tensor/chunk by tensor/chunk with fetch op
if node.key in executed_keys:
node = convert_to_fetch(node).data
visited.add(node)
if not graph.contains(node):
graph.add_node(node)
children = node.inputs or []
for c in children:
if c.key in executed_keys:
continue
if not graph.contains(c):
graph.add_node(c)
if not graph.has_successor(c, node):
graph.add_edge(c, node)
nodes.extend(
[
c
for c in itertools.chain(*[inp.op.outputs for inp in node.inputs or []])
if c not in visited
]
)
if tiled and compose:
graph.compose(keys=keys)
if not tiled and any(not n.is_coarse() for n in graph):
return self._to_coarse_graph(graph)
return graph
|
def build_graph(
self, graph=None, cls=DAG, tiled=False, compose=True, executed_keys=None
):
from .tensor.expressions.utils import convert_to_fetch
executed_keys = executed_keys or []
if tiled and self.is_coarse():
self.tiles()
graph = graph if graph is not None else cls()
keys = None
if tiled:
nodes = list(c.data for c in self.chunks)
keys = list(c.key for c in self.chunks)
else:
nodes = list(self.op.outputs)
visited = set()
while len(nodes) > 0:
node = nodes.pop()
# replace executed tensor/chunk by tensor/chunk with fetch op
if node.key in executed_keys:
node = convert_to_fetch(node).data
visited.add(node)
if not graph.contains(node):
graph.add_node(node)
children = node.inputs or []
for c in children:
if c.key in executed_keys:
continue
if not graph.contains(c):
graph.add_node(c)
if not graph.has_successor(c, node):
graph.add_edge(c, node)
nodes.extend(
[
c
for c in itertools.chain(*[inp.op.outputs for inp in node.inputs or []])
if c not in visited
]
)
if tiled and compose:
graph.compose(keys=keys)
return graph
|
https://github.com/mars-project/mars/issues/297
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='100M')
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 0.104858GB of memory.
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
In [3]: import mars.tensor as mt
In [4]: a = mt.random.rand(10, 10, chunk_size=3)
In [5]: a.tiles()
Out[5]: Tensor <op=TensorRand, shape=(10, 10), key=2d1101e0bd65a1ca3134ca872c6072b5>
In [6]: cluster.session.run(a)
Unexpected exception occurred in GraphActor.build_tensor_merge_graph. tensor_key='2d1101e0bd65a1ca3134ca872c6072b5'
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17966268: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc1796f818>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc179668c8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc17968408>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 446, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 450, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 457, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
return self.send((item,) + args + (kwargs,), wait=wait)
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
return self._ctx.send(self, message, wait=wait, callback=callback)
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17962378: <built-in method _send_remote of mars.actors.pool.gevent_pool.ActorRemoteHelper object at 0xc17860e58>('0.0.0.0:29790', [bytearray(b'\x05\x01 \x00\x00\x00\x00\x00\x00\x00)> failed with IndexError
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-6-06ec9ebba039> in <module>
----> 1 cluster.session.run(a)
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
103 return
104 else:
--> 105 return self.fetch(*tensors)
106
107 def fetch(self, *tensors):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in fetch(self, *tensors)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in <listcomp>(.0)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise(t, value, tb)
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._async_run.on_failure()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.run()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/lib/six.py in reraise()
700 value = tp()
701 if value.__traceback__ is not tb:
--> 702 raise value.with_traceback(tb)
703 raise value
704 finally:
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/scheduler/graph.py in fetch_tensor()
64
65 graph_actor = self.ctx.actor_ref(GraphActor.gen_name(session_id, graph_key))
---> 66 fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
67
68 if len(fetch_graph) == 1 and isinstance(next(fetch_graph.iter_nodes()).op, TensorFetch):
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.__getattr__._mt_call()
62 return self.tell((item,) + args + (kwargs,), delay=delay, wait=wait)
63 else:
---> 64 return self.send((item,) + args + (kwargs,), wait=wait)
65
66 return _mt_call
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.send()
34
35 cpdef object send(self, object message, bint wait=True, object callback=None):
---> 36 return self._ctx.send(self, message, wait=wait, callback=callback)
37
38 cpdef object tell(self, object message, object delay=None, bint wait=True,
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorContext.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._dispatch()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send_process()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.AsyncHandler.submit()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/utils.py in _wrapped()
296 def _wrapped(*args, **kwargs):
297 try:
--> 298 return func(*args, **kwargs)
299 except: # noqa: E722
300 kwcopy = kwargs.copy()
~/Documents/mars_dev/mars/mars/scheduler/graph.py in build_tensor_merge_graph()
817 from ..tensor.expressions.merge.concatenate import TensorConcatenate
818
--> 819 tiled_tensor = self._get_tensor_by_key(tensor_key)
820 graph = DAG()
821 if len(tiled_tensor.chunks) == 1:
~/Documents/mars_dev/mars/mars/scheduler/graph.py in _get_tensor_by_key()
802 def _get_tensor_by_key(self, key):
803 tid = self._tensor_key_to_opid[key]
--> 804 return self._tensor_key_opid_to_tiled[(key, tid)][-1]
805
806 @log_unhandled
IndexError: list index out of range
|
IndexError
|
def _new_chunks(self, inputs, shape, index=None, output_limit=None, kws=None, **kw):
output_limit = (
getattr(self, "output_limit") if output_limit is None else output_limit
)
self.check_inputs(inputs)
getattr(self, "_set_inputs")(inputs)
if getattr(self, "_key", None) is None:
getattr(self, "_update_key")() # update key when inputs are set
if (
isinstance(shape, (list, tuple))
and len(shape) > 0
and isinstance(shape[0], (list, tuple))
):
if len(shape) != output_limit:
raise ValueError(
"shape size must be equal to output limit, expect {0}, got {1}".format(
output_limit, len(shape)
)
)
else:
shape = [shape] * output_limit
chunks = []
raw_index = index
for j, s in enumerate(shape):
create_chunk_kw = kw.copy()
if kws:
create_chunk_kw.update(kws[j])
index = create_chunk_kw.pop("index", raw_index)
chunk = self._create_chunk(j, index, s, **create_chunk_kw)
chunks.append(chunk)
setattr(self, "outputs", chunks)
return chunks
|
def _new_chunks(self, inputs, shape, index=None, output_limit=None, kws=None, **kw):
output_limit = (
getattr(self, "output_limit") if output_limit is None else output_limit
)
self.check_inputs(inputs)
getattr(self, "_set_inputs")(inputs)
if getattr(self, "_key", None) is None:
getattr(self, "update_key")() # update key when inputs are set
if (
isinstance(shape, (list, tuple))
and len(shape) > 0
and isinstance(shape[0], (list, tuple))
):
if len(shape) != output_limit:
raise ValueError(
"shape size must be equal to output limit, expect {0}, got {1}".format(
output_limit, len(shape)
)
)
else:
shape = [shape] * output_limit
chunks = []
raw_index = index
for j, s in enumerate(shape):
create_chunk_kw = kw.copy()
if kws:
create_chunk_kw.update(kws[j])
index = create_chunk_kw.pop("index", raw_index)
chunk = self._create_chunk(j, index, s, **create_chunk_kw)
chunks.append(chunk)
setattr(self, "outputs", chunks)
return chunks
|
https://github.com/mars-project/mars/issues/297
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='100M')
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 0.104858GB of memory.
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
In [3]: import mars.tensor as mt
In [4]: a = mt.random.rand(10, 10, chunk_size=3)
In [5]: a.tiles()
Out[5]: Tensor <op=TensorRand, shape=(10, 10), key=2d1101e0bd65a1ca3134ca872c6072b5>
In [6]: cluster.session.run(a)
Unexpected exception occurred in GraphActor.build_tensor_merge_graph. tensor_key='2d1101e0bd65a1ca3134ca872c6072b5'
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17966268: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc1796f818>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc179668c8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc17968408>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 446, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 450, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 457, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
return self.send((item,) + args + (kwargs,), wait=wait)
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
return self._ctx.send(self, message, wait=wait, callback=callback)
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17962378: <built-in method _send_remote of mars.actors.pool.gevent_pool.ActorRemoteHelper object at 0xc17860e58>('0.0.0.0:29790', [bytearray(b'\x05\x01 \x00\x00\x00\x00\x00\x00\x00)> failed with IndexError
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-6-06ec9ebba039> in <module>
----> 1 cluster.session.run(a)
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
103 return
104 else:
--> 105 return self.fetch(*tensors)
106
107 def fetch(self, *tensors):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in fetch(self, *tensors)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in <listcomp>(.0)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise(t, value, tb)
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._async_run.on_failure()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.run()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/lib/six.py in reraise()
700 value = tp()
701 if value.__traceback__ is not tb:
--> 702 raise value.with_traceback(tb)
703 raise value
704 finally:
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/scheduler/graph.py in fetch_tensor()
64
65 graph_actor = self.ctx.actor_ref(GraphActor.gen_name(session_id, graph_key))
---> 66 fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
67
68 if len(fetch_graph) == 1 and isinstance(next(fetch_graph.iter_nodes()).op, TensorFetch):
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.__getattr__._mt_call()
62 return self.tell((item,) + args + (kwargs,), delay=delay, wait=wait)
63 else:
---> 64 return self.send((item,) + args + (kwargs,), wait=wait)
65
66 return _mt_call
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.send()
34
35 cpdef object send(self, object message, bint wait=True, object callback=None):
---> 36 return self._ctx.send(self, message, wait=wait, callback=callback)
37
38 cpdef object tell(self, object message, object delay=None, bint wait=True,
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorContext.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._dispatch()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send_process()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.AsyncHandler.submit()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/utils.py in _wrapped()
296 def _wrapped(*args, **kwargs):
297 try:
--> 298 return func(*args, **kwargs)
299 except: # noqa: E722
300 kwcopy = kwargs.copy()
~/Documents/mars_dev/mars/mars/scheduler/graph.py in build_tensor_merge_graph()
817 from ..tensor.expressions.merge.concatenate import TensorConcatenate
818
--> 819 tiled_tensor = self._get_tensor_by_key(tensor_key)
820 graph = DAG()
821 if len(tiled_tensor.chunks) == 1:
~/Documents/mars_dev/mars/mars/scheduler/graph.py in _get_tensor_by_key()
802 def _get_tensor_by_key(self, key):
803 tid = self._tensor_key_to_opid[key]
--> 804 return self._tensor_key_opid_to_tiled[(key, tid)][-1]
805
806 @log_unhandled
IndexError: list index out of range
|
IndexError
|
def _new_entities(
self, inputs, shape, chunks=None, nsplits=None, output_limit=None, kws=None, **kw
):
output_limit = (
getattr(self, "output_limit") if output_limit is None else output_limit
)
self.check_inputs(inputs)
getattr(self, "_set_inputs")(inputs)
if getattr(self, "_key", None) is None:
getattr(self, "_update_key")() # update key when inputs are set
if (
isinstance(shape, (list, tuple))
and len(shape) > 0
and isinstance(shape[0], (list, tuple))
):
if not np.isinf(output_limit) and len(shape) != output_limit:
raise ValueError(
"shape size must be equal to output limit, expect {0}, got {1}".format(
output_limit, len(shape)
)
)
else:
shape = [shape] * output_limit
entities = []
raw_chunks = chunks
raw_nsplits = nsplits
for j, s in enumerate(shape):
create_tensor_kw = kw.copy()
if kws:
create_tensor_kw.update(kws[j])
chunks = create_tensor_kw.pop("chunks", raw_chunks)
nsplits = create_tensor_kw.pop("nsplits", raw_nsplits)
entity = self._create_entity(j, s, nsplits, chunks, **create_tensor_kw)
entities.append(entity)
setattr(self, "outputs", entities)
if len(entities) > 1:
# for each output tensor, hold the reference to the other outputs
# so that either no one or everyone are gc collected
for j, t in enumerate(entities):
t.data._siblings = [
tensor.data for tensor in entities[:j] + entities[j + 1 :]
]
return entities
|
def _new_entities(
self, inputs, shape, chunks=None, nsplits=None, output_limit=None, kws=None, **kw
):
output_limit = (
getattr(self, "output_limit") if output_limit is None else output_limit
)
self.check_inputs(inputs)
getattr(self, "_set_inputs")(inputs)
if getattr(self, "_key", None) is None:
getattr(self, "update_key")() # update key when inputs are set
if (
isinstance(shape, (list, tuple))
and len(shape) > 0
and isinstance(shape[0], (list, tuple))
):
if not np.isinf(output_limit) and len(shape) != output_limit:
raise ValueError(
"shape size must be equal to output limit, expect {0}, got {1}".format(
output_limit, len(shape)
)
)
else:
shape = [shape] * output_limit
entities = []
raw_chunks = chunks
raw_nsplits = nsplits
for j, s in enumerate(shape):
create_tensor_kw = kw.copy()
if kws:
create_tensor_kw.update(kws[j])
chunks = create_tensor_kw.pop("chunks", raw_chunks)
nsplits = create_tensor_kw.pop("nsplits", raw_nsplits)
entity = self._create_entity(j, s, nsplits, chunks, **create_tensor_kw)
entities.append(entity)
setattr(self, "outputs", entities)
if len(entities) > 1:
# for each output tensor, hold the reference to the other outputs
# so that either no one or everyone are gc collected
for j, t in enumerate(entities):
t.data._siblings = [
tensor.data for tensor in entities[:j] + entities[j + 1 :]
]
return entities
|
https://github.com/mars-project/mars/issues/297
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='100M')
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 0.104858GB of memory.
/Users/travis/build/wesm/crossbow/arrow/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
In [3]: import mars.tensor as mt
In [4]: a = mt.random.rand(10, 10, chunk_size=3)
In [5]: a.tiles()
Out[5]: Tensor <op=TensorRand, shape=(10, 10), key=2d1101e0bd65a1ca3134ca872c6072b5>
In [6]: cluster.session.run(a)
Unexpected exception occurred in GraphActor.build_tensor_merge_graph. tensor_key='2d1101e0bd65a1ca3134ca872c6072b5'
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17966268: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc1796f818>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc179668c8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0xc17968408>> failed with IndexError
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 446, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 450, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 457, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/lib/six.py", line 702, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 66, in fetch_tensor
fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
File "mars/actors/core.pyx", line 64, in mars.actors.core.ActorRef.__getattr__._mt_call
return self.send((item,) + args + (kwargs,), wait=wait)
File "mars/actors/core.pyx", line 36, in mars.actors.core.ActorRef.send
return self._ctx.send(self, message, wait=wait, callback=callback)
File "mars/actors/pool/gevent_pool.pyx", line 179, in mars.actors.pool.gevent_pool.ActorContext.send
File "mars/actors/pool/gevent_pool.pyx", line 754, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 755, in mars.actors.pool.gevent_pool.Communicator.send
File "mars/actors/pool/gevent_pool.pyx", line 750, in mars.actors.pool.gevent_pool.Communicator._send
File "mars/actors/pool/gevent_pool.pyx", line 659, in mars.actors.pool.gevent_pool.Communicator._dispatch
File "mars/actors/pool/gevent_pool.pyx", line 730, in mars.actors.pool.gevent_pool.Communicator._send_process
File "mars/actors/pool/gevent_pool.pyx", line 278, in mars.actors.pool.gevent_pool.AsyncHandler.submit
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 942, in mars.actors.pool.gevent_pool.Communicator._on_receive_send
File "mars/actors/pool/gevent_pool.pyx", line 73, in mars.actors.pool.gevent_pool.MessageContext.result
File "src/gevent/event.py", line 344, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 268, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 296, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 286, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 266, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.7/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
cpdef on_receive(self, message):
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
return getattr(self, method)(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 819, in build_tensor_merge_graph
tiled_tensor = self._get_tensor_by_key(tensor_key)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 804, in _get_tensor_by_key
return self._tensor_key_opid_to_tiled[(key, tid)][-1]
IndexError: list index out of range
2019-03-18T03:29:21Z <Greenlet at 0xc17962378: <built-in method _send_remote of mars.actors.pool.gevent_pool.ActorRemoteHelper object at 0xc17860e58>('0.0.0.0:29790', [bytearray(b'\x05\x01 \x00\x00\x00\x00\x00\x00\x00)> failed with IndexError
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-6-06ec9ebba039> in <module>
----> 1 cluster.session.run(a)
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
103 return
104 else:
--> 105 return self.fetch(*tensors)
106
107 def fetch(self, *tensors):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in fetch(self, *tensors)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/Documents/mars_dev/mars/mars/deploy/local/session.py in <listcomp>(.0)
116 future = self._api.fetch_data(self._session_id, graph_key, key, wait=False)
117 futures.append(future)
--> 118 return [dataserializer.loads(f.result()) for f in futures]
119
120 def decref(self, *keys):
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise(t, value, tb)
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._async_run.on_failure()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/miniconda3/lib/python3.7/site-packages/gevent/_greenlet.cpython-37m-darwin.so in gevent._greenlet.Greenlet.run()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote()
~/Documents/mars_dev/mars/mars/lib/six.py in reraise()
700 value = tp()
701 if value.__traceback__ is not tb:
--> 702 raise value.with_traceback(tb)
703 raise value
704 finally:
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/scheduler/graph.py in fetch_tensor()
64
65 graph_actor = self.ctx.actor_ref(GraphActor.gen_name(session_id, graph_key))
---> 66 fetch_graph = deserialize_graph(graph_actor.build_tensor_merge_graph(tensor_key))
67
68 if len(fetch_graph) == 1 and isinstance(next(fetch_graph.iter_nodes()).op, TensorFetch):
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.__getattr__._mt_call()
62 return self.tell((item,) + args + (kwargs,), delay=delay, wait=wait)
63 else:
---> 64 return self.send((item,) + args + (kwargs,), wait=wait)
65
66 return _mt_call
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.ActorRef.send()
34
35 cpdef object send(self, object message, bint wait=True, object callback=None):
---> 36 return self._ctx.send(self, message, wait=wait, callback=callback)
37
38 cpdef object tell(self, object message, object delay=None, bint wait=True,
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorContext.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator.send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._dispatch()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._send_process()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.AsyncHandler.submit()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.Communicator._on_receive_send()
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.MessageContext.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.result()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult.get()
~/miniconda3/lib/python3.7/site-packages/gevent/_event.cpython-37m-darwin.so in gevent._event.AsyncResult._raise_exception()
~/miniconda3/lib/python3.7/site-packages/gevent/_compat.py in reraise()
45 def reraise(t, value, tb=None): # pylint:disable=unused-argument
46 if value.__traceback__ is not tb and tb is not None:
---> 47 raise value.with_traceback(tb)
48 raise value
49 def exc_clear():
~/Documents/mars_dev/mars/mars/actors/pool/gevent_pool.pyx in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run()
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
106
107 cdef class FunctionActor(Actor):
--> 108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
110 return getattr(self, method)(*args, **kwargs)
~/Documents/mars_dev/mars/mars/actors/core.pyx in mars.actors.core.FunctionActor.on_receive()
108 cpdef on_receive(self, message):
109 method, args, kwargs = message[0], message[1:-1], message[-1]
--> 110 return getattr(self, method)(*args, **kwargs)
111
112
~/Documents/mars_dev/mars/mars/utils.py in _wrapped()
296 def _wrapped(*args, **kwargs):
297 try:
--> 298 return func(*args, **kwargs)
299 except: # noqa: E722
300 kwcopy = kwargs.copy()
~/Documents/mars_dev/mars/mars/scheduler/graph.py in build_tensor_merge_graph()
817 from ..tensor.expressions.merge.concatenate import TensorConcatenate
818
--> 819 tiled_tensor = self._get_tensor_by_key(tensor_key)
820 graph = DAG()
821 if len(tiled_tensor.chunks) == 1:
~/Documents/mars_dev/mars/mars/scheduler/graph.py in _get_tensor_by_key()
802 def _get_tensor_by_key(self, key):
803 tid = self._tensor_key_to_opid[key]
--> 804 return self._tensor_key_opid_to_tiled[(key, tid)][-1]
805
806 @log_unhandled
IndexError: list index out of range
|
IndexError
|
def _build_elementwise(op):
def _handle(ctx, chunk):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True
)
if isinstance(op, six.string_types):
func = getattr(xp, op)
else:
func = op
with device(device_id):
kw = {"casting": chunk.op.casting} if chunk.op.out else {}
if chunk.op.out and chunk.op.where:
inputs, kw["out"], kw["where"] = (
inputs[:-2],
inputs[-2].copy(),
inputs[-1],
)
elif chunk.op.out:
inputs, kw["out"] = inputs[:-1], inputs[-1].copy()
elif chunk.op.where:
inputs, kw["where"] = inputs[:-1], inputs[-1]
with np.errstate(**chunk.op.err):
if len(inputs) == 1:
try:
ctx[chunk.key] = _handle_out_dtype(
func(inputs[0], **kw), chunk.op.dtype
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(where, func(inputs[0]), out), chunk.op.dtype
)
else:
try:
if is_sparse_module(xp):
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
else:
if "out" not in kw:
dest_value = xp.empty(chunk.shape, chunk.dtype)
kw["out"] = dest_value
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(
where, reduce(lambda a, b: func(a, b), inputs), out
),
chunk.op.dtype,
)
return _handle
|
def _build_elementwise(op):
def _handle(ctx, chunk):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True
)
if isinstance(op, six.string_types):
func = getattr(xp, op)
else:
func = op
with device(device_id):
kw = {"casting": chunk.op.casting} if chunk.op.out else {}
if chunk.op.out and chunk.op.where:
inputs, kw["out"], kw["where"] = (
inputs[:-2],
inputs[-2].copy(),
inputs[-1],
)
elif chunk.op.out:
inputs, kw["out"] = inputs[:-1], inputs[-1].copy()
with np.errstate(**chunk.op.err):
if len(inputs) == 1:
try:
ctx[chunk.key] = _handle_out_dtype(
func(inputs[0], **kw), chunk.op.dtype
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(where, func(inputs[0]), out), chunk.op.dtype
)
else:
try:
if is_sparse_module(xp):
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
else:
if "out" not in kw:
dest_value = xp.empty(chunk.shape, chunk.dtype)
kw["out"] = dest_value
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(
where, reduce(lambda a, b: func(a, b), inputs), out
),
chunk.op.dtype,
)
return _handle
|
https://github.com/mars-project/mars/issues/282
|
In [1]: import numpy as np
In [2]: import mars.tensor as mt
In [3]: a = np.array([[0, -2, -1], [-3, 0, 0]])
In [4]: mt.absolute(a, where=a > -2).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-f56bac3cbb7e> in <module>
----> 1 mt.absolute(a, where=a > -2).execute()
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
362
363 def execute(self, session=None, **kw):
--> 364 return self._data.execute(session, **kw)
365
366
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
513 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
514 print_progress=print_progress, mock=mock,
--> 515 sparse_mock_percent=sparse_mock_percent)
516
517 results = self._chunk_result
~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, sparse_mock_percent)
447 print_progress=print_progress, mock=mock,
448 sparse_mock_percent=sparse_mock_percent)
--> 449 res = graph_execution.execute(True)
450 if mock:
451 self._chunk_result.clear()
~/Workspace/mars/mars/executor.py in execute(self, retval)
369 # wait until all the futures completed
370 for future in executed_futures:
--> 371 future.result()
372
373 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Workspace/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263 executed_chunk_keys.update([c.key for c in first_op.outputs])
264 op_keys.add(first_op.key)
~/Workspace/mars/mars/executor.py in handle(cls, chunk, results, mock)
433 def handle(cls, chunk, results, mock=False):
434 if not mock:
--> 435 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
436 else:
437 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Workspace/mars/mars/tensor/execution/arithmetic.py in _handle(ctx, chunk)
169 dest_value = xp.empty(chunk.shape, chunk.dtype)
170 kw['out'] = dest_value
--> 171 ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),
172 chunk.op.dtype)
173 except TypeError:
~/Workspace/mars/mars/tensor/execution/arithmetic.py in <lambda>(a, b)
169 dest_value = xp.empty(chunk.shape, chunk.dtype)
170 kw['out'] = dest_value
--> 171 ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),
172 chunk.op.dtype)
173 except TypeError:
ValueError: cannot specify 'out' as both a positional and keyword argument
|
ValueError
|
def _handle(ctx, chunk):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True
)
if isinstance(op, six.string_types):
func = getattr(xp, op)
else:
func = op
with device(device_id):
kw = {"casting": chunk.op.casting} if chunk.op.out else {}
if chunk.op.out and chunk.op.where:
inputs, kw["out"], kw["where"] = inputs[:-2], inputs[-2].copy(), inputs[-1]
elif chunk.op.out:
inputs, kw["out"] = inputs[:-1], inputs[-1].copy()
elif chunk.op.where:
inputs, kw["where"] = inputs[:-1], inputs[-1]
with np.errstate(**chunk.op.err):
if len(inputs) == 1:
try:
ctx[chunk.key] = _handle_out_dtype(
func(inputs[0], **kw), chunk.op.dtype
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(where, func(inputs[0]), out), chunk.op.dtype
)
else:
try:
if is_sparse_module(xp):
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
else:
if "out" not in kw:
dest_value = xp.empty(chunk.shape, chunk.dtype)
kw["out"] = dest_value
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(where, reduce(lambda a, b: func(a, b), inputs), out),
chunk.op.dtype,
)
|
def _handle(ctx, chunk):
inputs, device_id, xp = as_same_device(
[ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True
)
if isinstance(op, six.string_types):
func = getattr(xp, op)
else:
func = op
with device(device_id):
kw = {"casting": chunk.op.casting} if chunk.op.out else {}
if chunk.op.out and chunk.op.where:
inputs, kw["out"], kw["where"] = inputs[:-2], inputs[-2].copy(), inputs[-1]
elif chunk.op.out:
inputs, kw["out"] = inputs[:-1], inputs[-1].copy()
with np.errstate(**chunk.op.err):
if len(inputs) == 1:
try:
ctx[chunk.key] = _handle_out_dtype(
func(inputs[0], **kw), chunk.op.dtype
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(where, func(inputs[0]), out), chunk.op.dtype
)
else:
try:
if is_sparse_module(xp):
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
else:
if "out" not in kw:
dest_value = xp.empty(chunk.shape, chunk.dtype)
kw["out"] = dest_value
ctx[chunk.key] = _handle_out_dtype(
reduce(lambda a, b: func(a, b, **kw), inputs),
chunk.op.dtype,
)
except TypeError:
if kw.get("where") is None:
raise
out, where = kw.pop("out"), kw.pop("where")
ctx[chunk.key] = _handle_out_dtype(
xp.where(where, reduce(lambda a, b: func(a, b), inputs), out),
chunk.op.dtype,
)
|
https://github.com/mars-project/mars/issues/282
|
In [1]: import numpy as np
In [2]: import mars.tensor as mt
In [3]: a = np.array([[0, -2, -1], [-3, 0, 0]])
In [4]: mt.absolute(a, where=a > -2).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-f56bac3cbb7e> in <module>
----> 1 mt.absolute(a, where=a > -2).execute()
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
362
363 def execute(self, session=None, **kw):
--> 364 return self._data.execute(session, **kw)
365
366
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
513 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
514 print_progress=print_progress, mock=mock,
--> 515 sparse_mock_percent=sparse_mock_percent)
516
517 results = self._chunk_result
~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, sparse_mock_percent)
447 print_progress=print_progress, mock=mock,
448 sparse_mock_percent=sparse_mock_percent)
--> 449 res = graph_execution.execute(True)
450 if mock:
451 self._chunk_result.clear()
~/Workspace/mars/mars/executor.py in execute(self, retval)
369 # wait until all the futures completed
370 for future in executed_futures:
--> 371 future.result()
372
373 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Workspace/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263 executed_chunk_keys.update([c.key for c in first_op.outputs])
264 op_keys.add(first_op.key)
~/Workspace/mars/mars/executor.py in handle(cls, chunk, results, mock)
433 def handle(cls, chunk, results, mock=False):
434 if not mock:
--> 435 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
436 else:
437 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Workspace/mars/mars/tensor/execution/arithmetic.py in _handle(ctx, chunk)
169 dest_value = xp.empty(chunk.shape, chunk.dtype)
170 kw['out'] = dest_value
--> 171 ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),
172 chunk.op.dtype)
173 except TypeError:
~/Workspace/mars/mars/tensor/execution/arithmetic.py in <lambda>(a, b)
169 dest_value = xp.empty(chunk.shape, chunk.dtype)
170 kw['out'] = dest_value
--> 171 ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),
172 chunk.op.dtype)
173 except TypeError:
ValueError: cannot specify 'out' as both a positional and keyword argument
|
ValueError
|
def _call(self, x1, x2, out=None, where=None):
# if x1 or x2 is scalar, and out is none, to constant
if (np.isscalar(x1) or np.isscalar(x2)) and not out and not where:
return self.to_constant(x1, x2)
x1, x2, out, where = self._process_inputs(x1, x2, out, where)
# check broadcast
shape = broadcast_shape(x1.shape, x2.shape)
t = self.new_tensor([x1, x2, out, where], shape)
if out is None:
return t
check_out_param(out, t, getattr(self, "_casting"))
out_shape, out_dtype = out.shape, out.dtype
# if `out` is specified, use out's dtype and shape
if t.shape != out_shape:
t = self.new_tensor([x1, x2, out, where], out_shape)
setattr(self, "_dtype", out_dtype)
out.data = t.data
return out
|
def _call(self, x1, x2, out=None, where=None):
# if x1 or x2 is scalar, and out is none, to constant
if (np.isscalar(x1) or np.isscalar(x2)) and not out:
return self.to_constant(x1, x2)
x1, x2, out, where = self._process_inputs(x1, x2, out, where)
# check broadcast
shape = broadcast_shape(x1.shape, x2.shape)
t = self.new_tensor([x1, x2, out, where], shape)
if out is None:
return t
check_out_param(out, t, getattr(self, "_casting"))
out_shape, out_dtype = out.shape, out.dtype
# if `out` is specified, use out's dtype and shape
if t.shape != out_shape:
t = self.new_tensor([x1, x2, out, where], out_shape)
setattr(self, "_dtype", out_dtype)
out.data = t.data
return out
|
https://github.com/mars-project/mars/issues/282
|
In [1]: import numpy as np
In [2]: import mars.tensor as mt
In [3]: a = np.array([[0, -2, -1], [-3, 0, 0]])
In [4]: mt.absolute(a, where=a > -2).execute()
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-f56bac3cbb7e> in <module>
----> 1 mt.absolute(a, where=a > -2).execute()
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
362
363 def execute(self, session=None, **kw):
--> 364 return self._data.execute(session, **kw)
365
366
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
168 if session is None:
169 session = Session.default_or_local()
--> 170 return session.run(self, **kw)
171
172 def fetch(self, session=None, **kw):
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
513 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
514 print_progress=print_progress, mock=mock,
--> 515 sparse_mock_percent=sparse_mock_percent)
516
517 results = self._chunk_result
~/Workspace/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, sparse_mock_percent)
447 print_progress=print_progress, mock=mock,
448 sparse_mock_percent=sparse_mock_percent)
--> 449 res = graph_execution.execute(True)
450 if mock:
451 self._chunk_result.clear()
~/Workspace/mars/mars/executor.py in execute(self, retval)
369 # wait until all the futures completed
370 for future in executed_futures:
--> 371 future.result()
372
373 if retval:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Workspace/mars/mars/executor.py in _execute_operand(self, op)
260 # so we pass the first operand's first output to Executor.handle
261 first_op = ops[0]
--> 262 Executor.handle(first_op.outputs[0], results, self._mock)
263 executed_chunk_keys.update([c.key for c in first_op.outputs])
264 op_keys.add(first_op.key)
~/Workspace/mars/mars/executor.py in handle(cls, chunk, results, mock)
433 def handle(cls, chunk, results, mock=False):
434 if not mock:
--> 435 return cls._get_op_runner(chunk, cls._op_runners)(results, chunk)
436 else:
437 return cls._get_op_runner(chunk, cls._op_size_estimators)(results, chunk)
~/Workspace/mars/mars/tensor/execution/arithmetic.py in _handle(ctx, chunk)
169 dest_value = xp.empty(chunk.shape, chunk.dtype)
170 kw['out'] = dest_value
--> 171 ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),
172 chunk.op.dtype)
173 except TypeError:
~/Workspace/mars/mars/tensor/execution/arithmetic.py in <lambda>(a, b)
169 dest_value = xp.empty(chunk.shape, chunk.dtype)
170 kw['out'] = dest_value
--> 171 ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),
172 chunk.op.dtype)
173 except TypeError:
ValueError: cannot specify 'out' as both a positional and keyword argument
|
ValueError
|
def _collect_operand_io_meta(graph, chunks):
# collect operand i/o information
predecessor_keys = set()
successor_keys = set()
input_chunk_keys = set()
shared_input_chunk_keys = set()
chunk_keys = set()
for c in chunks:
# handling predecessor args
for pn in graph.iter_predecessors(c):
predecessor_keys.add(pn.op.key)
input_chunk_keys.add(pn.key)
if graph.count_successors(pn) > 1:
shared_input_chunk_keys.add(pn.key)
# handling successor args
for sn in graph.iter_successors(c):
successor_keys.add(sn.op.key)
chunk_keys.update(co.key for co in c.op.outputs)
io_meta = dict(
predecessors=set(predecessor_keys),
successors=set(successor_keys),
input_chunks=set(input_chunk_keys),
shared_input_chunks=set(shared_input_chunk_keys),
chunks=set(chunk_keys),
)
return io_meta
|
def _collect_operand_io_meta(graph, chunks):
# collect operand i/o information
predecessor_keys = set()
successor_keys = set()
input_chunk_keys = set()
shared_input_chunk_keys = set()
chunk_keys = set()
for c in chunks:
# handling predecessor args
for pn in graph.iter_predecessors(c):
predecessor_keys.add(pn.op.key)
input_chunk_keys.add(pn.key)
if graph.count_successors(pn) > 1:
shared_input_chunk_keys.add(pn.key)
# handling successor args
for sn in graph.iter_successors(c):
successor_keys.add(sn.op.key)
chunk_keys.update(co.key for co in c.op.outputs)
io_meta = dict(
predecessors=list(predecessor_keys),
successors=list(successor_keys),
input_chunks=list(input_chunk_keys),
shared_input_chunks=list(shared_input_chunk_keys),
chunks=list(chunk_keys),
)
return io_meta
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def create_operand_actors(self, _clean_io_meta=True, _start=True):
"""
Create operand actors for all operands
"""
logger.debug("Creating operand actors for graph %s", self._graph_key)
chunk_graph = self.get_chunk_graph()
operand_infos = self._operand_infos
op_refs = dict()
initial_keys = []
for op_key in self._op_key_to_chunk:
chunks = self._op_key_to_chunk[op_key]
op = chunks[0].op
op_name = type(op).__name__
op_info = operand_infos[op_key]
io_meta = self._collect_operand_io_meta(chunk_graph, chunks)
op_info["op_name"] = op_name
op_info["io_meta"] = io_meta
if io_meta["predecessors"]:
state = "UNSCHEDULED"
else:
initial_keys.append(op_key)
state = "READY"
op_info["retries"] = 0
op_info["state"] = state
position = None
if op_key in self._terminal_chunk_op_tensor:
position = OperandPosition.TERMINAL
elif not io_meta["predecessors"]:
position = OperandPosition.INITIAL
op_cls = get_operand_actor_class(type(op))
op_uid = op_cls.gen_uid(self._session_id, op_key)
scheduler_addr = self.get_scheduler(op_uid)
# if operand actor exists, the behavior depends on the existing operand state.
op_ref = self.ctx.actor_ref(op_uid, address=scheduler_addr)
if self.ctx.has_actor(op_ref):
op_ref.append_graph(self._graph_key, op_info, position=position)
op_refs[op_key] = op_ref
else:
op_refs[op_key] = self.ctx.create_actor(
op_cls,
self._session_id,
self._graph_key,
op_key,
op_info,
position=position,
uid=op_uid,
address=scheduler_addr,
wait=False,
)
op_info["state"] = getattr(OperandState, state.upper())
if _clean_io_meta:
del op_info["io_meta"]
self.state = GraphState.RUNNING
if _start:
op_refs = dict(
(k, v) if isinstance(v, ActorRef) else (k, v.result())
for k, v in op_refs.items()
)
start_futures = [
op_refs[op_key].start_operand(_tell=True, _wait=False)
for op_key in initial_keys
]
[future.result() for future in start_futures]
|
def create_operand_actors(self, _clean_io_meta=True, _start=True):
"""
Create operand actors for all operands
"""
logger.debug("Creating operand actors for graph %s", self._graph_key)
chunk_graph = self.get_chunk_graph()
operand_infos = self._operand_infos
op_refs = dict()
initial_keys = []
for op_key in self._op_key_to_chunk:
chunks = self._op_key_to_chunk[op_key]
op = chunks[0].op
op_name = type(op).__name__
op_info = operand_infos[op_key]
io_meta = self._collect_operand_io_meta(chunk_graph, chunks)
op_info["op_name"] = op_name
op_info["io_meta"] = io_meta
if io_meta["predecessors"]:
state = "UNSCHEDULED"
else:
initial_keys.append(op_key)
state = "READY"
op_info["retries"] = 0
op_info["state"] = state
position = None
if op_key in self._terminal_chunk_op_tensor:
position = OperandPosition.TERMINAL
elif not io_meta["predecessors"]:
position = OperandPosition.INITIAL
op_cls = get_operand_actor_class(type(op))
op_uid = op_cls.gen_uid(self._session_id, op_key)
scheduler_addr = self.get_scheduler(op_uid)
op_refs[op_key] = self.ctx.create_actor(
op_cls,
self._session_id,
self._graph_key,
op_key,
op_info,
position=position,
uid=op_uid,
address=scheduler_addr,
wait=False,
)
op_info["state"] = getattr(OperandState, state.upper())
if _clean_io_meta:
del op_info["io_meta"]
self.state = GraphState.RUNNING
if _start:
op_refs = dict((k, v.result()) for k, v in op_refs.items())
start_futures = [
op_refs[op_key].start_operand(_tell=True, _wait=False)
for op_key in initial_keys
]
[future.result() for future in start_futures]
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def get_operand_states(self, op_keys):
return [
self._operand_infos[k]["state"] for k in op_keys if k in self._operand_infos
]
|
def get_operand_states(self, op_keys):
return [self._operand_infos[k]["state"] for k in op_keys]
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def check_operand_can_be_freed(self, succ_op_keys):
"""
Check if the data of an operand can be freed.
:param succ_op_keys: keys of successor operands
:return: True if can be freed, False if cannot. None when the result
is not determinant and we need to test later.
"""
operand_infos = self._operand_infos
for k in succ_op_keys:
if k not in operand_infos:
continue
op_info = operand_infos[k]
op_state = op_info.get("state")
if op_state not in OperandState.SUCCESSFUL_STATES:
return False
failover_state = op_info.get("failover_state")
if failover_state and failover_state not in OperandState.SUCCESSFUL_STATES:
return False
# if can be freed but blocked by an ongoing fail-over step,
# we try later.
if self._operand_free_paused:
return None
return True
|
def check_operand_can_be_freed(self, succ_op_keys):
"""
Check if the data of an operand can be freed.
:param succ_op_keys: keys of successor operands
:return: True if can be freed, False if cannot. None when the result
is not determinant and we need to test later.
"""
operand_infos = self._operand_infos
for k in succ_op_keys:
op_info = operand_infos[k]
op_state = op_info.get("state")
if op_state not in OperandState.SUCCESSFUL_STATES:
return False
failover_state = op_info.get("failover_state")
if failover_state and failover_state not in OperandState.SUCCESSFUL_STATES:
return False
# if can be freed but blocked by an ongoing fail-over step,
# we try later.
if self._operand_free_paused:
return None
return True
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def __init__(self, session_id, graph_id, op_key, op_info, worker=None, position=None):
super(BaseOperandActor, self).__init__()
self._session_id = session_id
self._graph_ids = [graph_id]
self._info = copy.deepcopy(op_info)
self._op_key = op_key
self._op_path = "/sessions/%s/operands/%s" % (self._session_id, self._op_key)
self._position = position
# worker actually assigned
self._worker = worker
self._op_name = op_info["op_name"]
self._state = self._last_state = OperandState(op_info["state"].lower())
io_meta = self._io_meta = op_info["io_meta"]
self._pred_keys = io_meta["predecessors"]
self._succ_keys = io_meta["successors"]
# set of finished predecessors, used to decide whether we should move the operand to ready
self._finish_preds = set()
# set of finished successors, used to detect whether we can do clean up
self._finish_succs = set()
# handlers of states. will be called when the state of the operand switches
# from one to another
self._state_handlers = {
OperandState.UNSCHEDULED: self._on_unscheduled,
OperandState.READY: self._on_ready,
OperandState.RUNNING: self._on_running,
OperandState.FINISHED: self._on_finished,
OperandState.FREED: self._on_freed,
OperandState.FATAL: self._on_fatal,
OperandState.CANCELLING: self._on_cancelling,
OperandState.CANCELLED: self._on_cancelled,
}
self._graph_refs = []
self._cluster_info_ref = None
self._assigner_ref = None
self._resource_ref = None
self._kv_store_ref = None
self._chunk_meta_ref = None
|
def __init__(self, session_id, graph_id, op_key, op_info, worker=None, position=None):
super(BaseOperandActor, self).__init__()
self._session_id = session_id
self._graph_id = graph_id
self._info = copy.deepcopy(op_info)
self._op_key = op_key
self._op_path = "/sessions/%s/operands/%s" % (self._session_id, self._op_key)
self._position = position
# worker actually assigned
self._worker = worker
self._op_name = op_info["op_name"]
self._state = self._last_state = OperandState(op_info["state"].lower())
io_meta = self._io_meta = op_info["io_meta"]
self._pred_keys = io_meta["predecessors"]
self._succ_keys = io_meta["successors"]
# set of finished predecessors, used to decide whether we should move the operand to ready
self._finish_preds = set()
# set of finished successors, used to detect whether we can do clean up
self._finish_succs = set()
# handlers of states. will be called when the state of the operand switches
# from one to another
self._state_handlers = {
OperandState.UNSCHEDULED: self._on_unscheduled,
OperandState.READY: self._on_ready,
OperandState.RUNNING: self._on_running,
OperandState.FINISHED: self._on_finished,
OperandState.FREED: self._on_freed,
OperandState.FATAL: self._on_fatal,
OperandState.CANCELLING: self._on_cancelling,
OperandState.CANCELLED: self._on_cancelled,
}
self._cluster_info_ref = None
self._assigner_ref = None
self._graph_ref = None
self._resource_ref = None
self._kv_store_ref = None
self._chunk_meta_ref = None
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def post_create(self):
from ..graph import GraphActor
from ..assigner import AssignerActor
from ..chunkmeta import ChunkMetaActor
from ..kvstore import KVStoreActor
from ..resource import ResourceActor
self.set_cluster_info_ref()
self._assigner_ref = self.ctx.actor_ref(AssignerActor.default_name())
self._chunk_meta_ref = self.ctx.actor_ref(ChunkMetaActor.default_name())
self._graph_refs.append(
self.get_actor_ref(GraphActor.gen_name(self._session_id, self._graph_ids[0]))
)
self._resource_ref = self.get_actor_ref(ResourceActor.default_name())
self._kv_store_ref = self.ctx.actor_ref(KVStoreActor.default_name())
if not self.ctx.has_actor(self._kv_store_ref):
self._kv_store_ref = None
|
def post_create(self):
from ..graph import GraphActor
from ..assigner import AssignerActor
from ..chunkmeta import ChunkMetaActor
from ..kvstore import KVStoreActor
from ..resource import ResourceActor
self.set_cluster_info_ref()
self._assigner_ref = self.ctx.actor_ref(AssignerActor.default_name())
self._chunk_meta_ref = self.ctx.actor_ref(ChunkMetaActor.default_name())
self._graph_ref = self.get_actor_ref(
GraphActor.gen_name(self._session_id, self._graph_id)
)
self._resource_ref = self.get_actor_ref(ResourceActor.default_name())
self._kv_store_ref = self.ctx.actor_ref(KVStoreActor.default_name())
if not self.ctx.has_actor(self._kv_store_ref):
self._kv_store_ref = None
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def state(self, value):
self._last_state = self._state
if value != self._last_state:
logger.debug(
"Operand %s(%s) state from %s to %s.",
self._op_key,
self._op_name,
self._last_state,
value,
)
self._state = value
self._info["state"] = value.name
futures = []
for graph_ref in self._graph_refs:
futures.append(
graph_ref.set_operand_state(
self._op_key, value.value, _tell=True, _wait=False
)
)
if self._kv_store_ref is not None:
futures.append(
self._kv_store_ref.write(
"%s/state" % self._op_path, value.name, _tell=True, _wait=False
)
)
[f.result() for f in futures]
|
def state(self, value):
self._last_state = self._state
if value != self._last_state:
logger.debug(
"Operand %s(%s) state from %s to %s.",
self._op_key,
self._op_name,
self._last_state,
value,
)
self._state = value
self._info["state"] = value.name
futures = [
self._graph_ref.set_operand_state(
self._op_key, value.value, _tell=True, _wait=False
),
]
if self._kv_store_ref is not None:
futures.append(
self._kv_store_ref.write(
"%s/state" % self._op_path, value.name, _tell=True, _wait=False
)
)
[f.result() for f in futures]
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def worker(self, value):
futures = []
for graph_ref in self._graph_refs:
futures.append(
graph_ref.set_operand_worker(self._op_key, value, _tell=True, _wait=False)
)
if self._kv_store_ref is not None:
if value:
futures.append(
self._kv_store_ref.write(
"%s/worker" % self._op_path, value, _tell=True, _wait=False
)
)
elif self._worker is not None:
futures.append(
self._kv_store_ref.delete(
"%s/worker" % self._op_path, silent=True, _tell=True, _wait=False
)
)
[f.result() for f in futures]
self._worker = value
|
def worker(self, value):
futures = [
self._graph_ref.set_operand_worker(self._op_key, value, _tell=True, _wait=False)
]
if self._kv_store_ref is not None:
if value:
futures.append(
self._kv_store_ref.write(
"%s/worker" % self._op_path, value, _tell=True, _wait=False
)
)
elif self._worker is not None:
futures.append(
self._kv_store_ref.delete(
"%s/worker" % self._op_path, silent=True, _tell=True, _wait=False
)
)
[f.result() for f in futures]
self._worker = value
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def add_running_predecessor(self, op_key, worker):
self._running_preds.add(op_key)
self._pred_workers.add(worker)
if len(self._pred_workers) > 1:
# we do not push when multiple workers in input
self._pred_workers = set()
self._running_preds = set()
return
if self.state != OperandState.UNSCHEDULED:
return
if all(k in self._running_preds for k in self._pred_keys):
try:
if worker in self._assigned_workers:
return
serialized_exec_graph = self._graph_refs[0].get_executable_operand_dag(
self._op_key
)
self._get_execution_ref(address=worker).enqueue_graph(
self._session_id,
self._op_key,
serialized_exec_graph,
self._io_meta,
dict(),
self._info["optimize"],
succ_keys=self._succ_keys,
pred_keys=self._pred_keys,
_promise=True,
).then(functools.partial(self._handle_worker_accept, worker))
self._assigned_workers.add(worker)
logger.debug("Pre-push operand %s into worker %s.", self._op_key, worker)
except: # noqa: E722
logger.exception("Failed to pre-push operand %s", self._op_key)
finally:
self._pred_workers = set()
self._running_preds = set()
|
def add_running_predecessor(self, op_key, worker):
self._running_preds.add(op_key)
self._pred_workers.add(worker)
if len(self._pred_workers) > 1:
# we do not push when multiple workers in input
self._pred_workers = set()
self._running_preds = set()
return
if self.state != OperandState.UNSCHEDULED:
return
if all(k in self._running_preds for k in self._pred_keys):
try:
if worker in self._assigned_workers:
return
serialized_exec_graph = self._graph_ref.get_executable_operand_dag(
self._op_key
)
self._get_execution_ref(address=worker).enqueue_graph(
self._session_id,
self._op_key,
serialized_exec_graph,
self._io_meta,
dict(),
self._info["optimize"],
succ_keys=self._succ_keys,
pred_keys=self._pred_keys,
_promise=True,
).then(functools.partial(self._handle_worker_accept, worker))
self._assigned_workers.add(worker)
logger.debug("Pre-push operand %s into worker %s.", self._op_key, worker)
except: # noqa: E722
logger.exception("Failed to pre-push operand %s", self._op_key)
finally:
self._pred_workers = set()
self._running_preds = set()
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def add_finished_successor(self, op_key):
super(OperandActor, self).add_finished_successor(op_key)
if self._position != OperandPosition.TERMINAL and all(
k in self._finish_succs for k in self._succ_keys
):
# make sure that all prior states are terminated (in case of failover)
states = []
for graph_ref in self._graph_refs:
states.extend(graph_ref.get_operand_states(self._succ_keys))
# non-terminal operand with all successors done, the data can be freed
if (
all(k in OperandState.TERMINATED_STATES for k in states)
and self._is_worker_alive()
):
self.ref().free_data(_tell=True)
|
def add_finished_successor(self, op_key):
super(OperandActor, self).add_finished_successor(op_key)
if self._position != OperandPosition.TERMINAL and all(
k in self._finish_succs for k in self._succ_keys
):
# make sure that all prior states are terminated (in case of failover)
states = self._graph_ref.get_operand_states(self._succ_keys)
# non-terminal operand with all successors done, the data can be freed
if (
all(k in OperandState.TERMINATED_STATES for k in states)
and self._is_worker_alive()
):
self.ref().free_data(_tell=True)
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def move_failover_state(self, from_states, state, new_target, dead_workers):
"""
Move the operand into new state when executing fail-over step
:param from_states: the source states the operand should be in, when not match, we stopped.
:param state: the target state to move
:param new_target: new target worker proposed for worker
:param dead_workers: list of dead workers
:return:
"""
dead_workers = set(dead_workers)
if self.state not in from_states:
logger.debug(
"From state not matching (%s not in %r), operand %s skips failover step",
self.state.name,
[s.name for s in from_states],
self._op_key,
)
return
if self.state in (OperandState.RUNNING, OperandState.FINISHED):
if state != OperandState.UNSCHEDULED and self.worker not in dead_workers:
logger.debug(
"Worker %s of operand %s still alive, skip failover step",
self.worker,
self._op_key,
)
return
elif state == OperandState.RUNNING:
# move running operand in dead worker to ready
state = OperandState.READY
if new_target and self._target_worker != new_target:
logger.debug("Target worker of %s reassigned to %s", self._op_key, new_target)
self._target_worker = new_target
self._info["target_worker"] = new_target
target_updated = True
else:
target_updated = False
if self.state == state == OperandState.READY:
if not self._target_worker:
if self._assigned_workers - dead_workers:
logger.debug(
"Operand %s still have alive workers assigned %r, skip failover step",
self._op_key,
list(self._assigned_workers - dead_workers),
)
return
else:
if not target_updated and self._target_worker not in dead_workers:
logger.debug(
"Target of operand %s (%s) not dead, skip failover step",
self._op_key,
self._target_worker,
)
return
if dead_workers:
futures = []
# remove executed traces in neighbor operands
for out_key in self._succ_keys:
futures.append(
self._get_operand_actor(out_key).remove_finished_predecessor(
self._op_key, _tell=True, _wait=False
)
)
for in_key in self._pred_keys:
futures.append(
self._get_operand_actor(in_key).remove_finished_successor(
self._op_key, _tell=True, _wait=False
)
)
if self._position == OperandPosition.TERMINAL:
for graph_ref in self._graph_refs:
futures.append(
graph_ref.remove_finished_terminal(
self._op_key, _tell=True, _wait=False
)
)
[f.result() for f in futures]
# actual start the new state
self.start_operand(state)
|
def move_failover_state(self, from_states, state, new_target, dead_workers):
"""
Move the operand into new state when executing fail-over step
:param from_states: the source states the operand should be in, when not match, we stopped.
:param state: the target state to move
:param new_target: new target worker proposed for worker
:param dead_workers: list of dead workers
:return:
"""
dead_workers = set(dead_workers)
if self.state not in from_states:
logger.debug(
"From state not matching (%s not in %r), operand %s skips failover step",
self.state.name,
[s.name for s in from_states],
self._op_key,
)
return
if self.state in (OperandState.RUNNING, OperandState.FINISHED):
if state != OperandState.UNSCHEDULED and self.worker not in dead_workers:
logger.debug(
"Worker %s of operand %s still alive, skip failover step",
self.worker,
self._op_key,
)
return
elif state == OperandState.RUNNING:
# move running operand in dead worker to ready
state = OperandState.READY
if new_target and self._target_worker != new_target:
logger.debug("Target worker of %s reassigned to %s", self._op_key, new_target)
self._target_worker = new_target
self._info["target_worker"] = new_target
target_updated = True
else:
target_updated = False
if self.state == state == OperandState.READY:
if not self._target_worker:
if self._assigned_workers - dead_workers:
logger.debug(
"Operand %s still have alive workers assigned %r, skip failover step",
self._op_key,
list(self._assigned_workers - dead_workers),
)
return
else:
if not target_updated and self._target_worker not in dead_workers:
logger.debug(
"Target of operand %s (%s) not dead, skip failover step",
self._op_key,
self._target_worker,
)
return
if dead_workers:
futures = []
# remove executed traces in neighbor operands
for out_key in self._succ_keys:
futures.append(
self._get_operand_actor(out_key).remove_finished_predecessor(
self._op_key, _tell=True, _wait=False
)
)
for in_key in self._pred_keys:
futures.append(
self._get_operand_actor(in_key).remove_finished_successor(
self._op_key, _tell=True, _wait=False
)
)
if self._position == OperandPosition.TERMINAL:
futures.append(
self._graph_ref.remove_finished_terminal(
self._op_key, _tell=True, _wait=False
)
)
[f.result() for f in futures]
# actual start the new state
self.start_operand(state)
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def free_data(self, state=OperandState.FREED):
"""
Free output data of current operand
:param state: target state
"""
if self.state == OperandState.FREED:
return
if state == OperandState.CANCELLED:
can_be_freed = True
else:
can_be_freed_states = [
graph_ref.check_operand_can_be_freed(self._succ_keys)
for graph_ref in self._graph_refs
]
if None in can_be_freed_states:
can_be_freed = None
else:
can_be_freed = all(can_be_freed_states)
if can_be_freed is None:
self.ref().free_data(state, _delay=1, _tell=True)
return
elif not can_be_freed:
return
self.start_operand(state)
endpoint_lists = self._chunk_meta_ref.batch_get_workers(
self._session_id, self._chunks
)
futures = []
for chunk_key, endpoints in zip(self._chunks, endpoint_lists):
if endpoints is None:
continue
for ep in endpoints:
futures.append((self._free_worker_data(ep, chunk_key), ep))
dead_workers = []
for f, ep in futures:
try:
with rewrite_worker_errors():
f.result()
except WorkerDead:
dead_workers.append(ep)
if dead_workers:
self._resource_ref.detach_dead_workers(list(dead_workers), _tell=True)
self._assigned_workers.difference_update(dead_workers)
self._chunk_meta_ref.batch_delete_meta(self._session_id, self._chunks, _tell=True)
|
def free_data(self, state=OperandState.FREED):
"""
Free output data of current operand
:param state: target state
"""
if self.state == OperandState.FREED:
return
if state == OperandState.CANCELLED:
can_be_freed = True
else:
can_be_freed = self._graph_ref.check_operand_can_be_freed(self._succ_keys)
if can_be_freed is None:
self.ref().free_data(state, _delay=1, _tell=True)
return
elif not can_be_freed:
return
self.start_operand(state)
endpoint_lists = self._chunk_meta_ref.batch_get_workers(
self._session_id, self._chunks
)
futures = []
for chunk_key, endpoints in zip(self._chunks, endpoint_lists):
if endpoints is None:
continue
for ep in endpoints:
futures.append((self._free_worker_data(ep, chunk_key), ep))
dead_workers = []
for f, ep in futures:
try:
with rewrite_worker_errors():
f.result()
except WorkerDead:
dead_workers.append(ep)
if dead_workers:
self._resource_ref.detach_dead_workers(list(dead_workers), _tell=True)
self._assigned_workers.difference_update(dead_workers)
self._chunk_meta_ref.batch_delete_meta(self._session_id, self._chunks, _tell=True)
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def _handle_worker_accept(self, worker):
def _dequeue_worker(endpoint, wait=True):
try:
with rewrite_worker_errors():
return self._get_execution_ref(address=endpoint).dequeue_graph(
self._session_id, self._op_key, _tell=True, _wait=wait
)
finally:
self._assigned_workers.difference_update((worker,))
if self._position == OperandPosition.INITIAL:
new_worker = self._graph_refs[0].get_operand_target_worker(self._op_key)
if new_worker and new_worker != self._target_worker:
logger.debug(
"Cancelling running operand %s on %s, new_target %s",
self._op_key,
worker,
new_worker,
)
_dequeue_worker(worker)
return
if (self.worker and self.worker != worker) or (
self._target_worker and worker != self._target_worker
):
logger.debug(
"Cancelling running operand %s on %s, op_worker %s, op_target %s",
self._op_key,
worker,
self.worker,
self._target_worker,
)
_dequeue_worker(worker)
return
elif self.worker is not None:
logger.debug("Worker for operand %s already assigned", self._op_key)
return
# worker assigned, submit job
if self.state in (OperandState.CANCELLED, OperandState.CANCELLING):
self.ref().start_operand(_tell=True)
return
if worker != self.worker:
self._execution_ref = None
self.worker = worker
cancel_futures = []
for w in list(self._assigned_workers):
if w != worker:
logger.debug(
"Cancelling running operand %s on %s, when deciding to run on %s",
self._op_key,
w,
worker,
)
cancel_futures.append(_dequeue_worker(w, wait=False))
for f in cancel_futures:
with rewrite_worker_errors(ignore_error=True):
f.result()
self._assigned_workers = set()
target_predicts = self._get_target_predicts(worker)
# prepare meta broadcasts
broadcast_eps = set()
for succ_key in self._succ_keys:
broadcast_eps.add(self.get_scheduler(self.gen_uid(self._session_id, succ_key)))
broadcast_eps.difference_update({self.address})
broadcast_eps = tuple(broadcast_eps)
chunk_keys, broadcast_ep_groups = [], []
for chunk_key in self._chunks:
chunk_keys.append(chunk_key)
broadcast_ep_groups.append(broadcast_eps)
self._chunk_meta_ref.batch_set_chunk_broadcasts(
self._session_id, chunk_keys, broadcast_ep_groups, _tell=True, _wait=False
)
# submit job
logger.debug("Start running operand %s on %s", self._op_key, worker)
self._execution_ref = self._get_execution_ref()
try:
with rewrite_worker_errors():
self._execution_ref.start_execution(
self._session_id,
self._op_key,
send_addresses=target_predicts,
_promise=True,
)
except WorkerDead:
self._resource_ref.detach_dead_workers([self.worker], _tell=True)
return
# here we start running immediately to avoid accidental state change
# and potential submission
self.start_operand(OperandState.RUNNING)
|
def _handle_worker_accept(self, worker):
def _dequeue_worker(endpoint, wait=True):
try:
with rewrite_worker_errors():
return self._get_execution_ref(address=endpoint).dequeue_graph(
self._session_id, self._op_key, _tell=True, _wait=wait
)
finally:
self._assigned_workers.difference_update((worker,))
if self._position == OperandPosition.INITIAL:
new_worker = self._graph_ref.get_operand_target_worker(self._op_key)
if new_worker and new_worker != self._target_worker:
logger.debug(
"Cancelling running operand %s on %s, new_target %s",
self._op_key,
worker,
new_worker,
)
_dequeue_worker(worker)
return
if (self.worker and self.worker != worker) or (
self._target_worker and worker != self._target_worker
):
logger.debug(
"Cancelling running operand %s on %s, op_worker %s, op_target %s",
self._op_key,
worker,
self.worker,
self._target_worker,
)
_dequeue_worker(worker)
return
elif self.worker is not None:
logger.debug("Worker for operand %s already assigned", self._op_key)
return
# worker assigned, submit job
if self.state in (OperandState.CANCELLED, OperandState.CANCELLING):
self.ref().start_operand(_tell=True)
return
if worker != self.worker:
self._execution_ref = None
self.worker = worker
cancel_futures = []
for w in list(self._assigned_workers):
if w != worker:
logger.debug(
"Cancelling running operand %s on %s, when deciding to run on %s",
self._op_key,
w,
worker,
)
cancel_futures.append(_dequeue_worker(w, wait=False))
for f in cancel_futures:
with rewrite_worker_errors(ignore_error=True):
f.result()
self._assigned_workers = set()
target_predicts = self._get_target_predicts(worker)
# prepare meta broadcasts
broadcast_eps = set()
for succ_key in self._succ_keys:
broadcast_eps.add(self.get_scheduler(self.gen_uid(self._session_id, succ_key)))
broadcast_eps.difference_update({self.address})
broadcast_eps = tuple(broadcast_eps)
chunk_keys, broadcast_ep_groups = [], []
for chunk_key in self._chunks:
chunk_keys.append(chunk_key)
broadcast_ep_groups.append(broadcast_eps)
self._chunk_meta_ref.batch_set_chunk_broadcasts(
self._session_id, chunk_keys, broadcast_ep_groups, _tell=True, _wait=False
)
# submit job
logger.debug("Start running operand %s on %s", self._op_key, worker)
self._execution_ref = self._get_execution_ref()
try:
with rewrite_worker_errors():
self._execution_ref.start_execution(
self._session_id,
self._op_key,
send_addresses=target_predicts,
_promise=True,
)
except WorkerDead:
self._resource_ref.detach_dead_workers([self.worker], _tell=True)
return
# here we start running immediately to avoid accidental state change
# and potential submission
self.start_operand(OperandState.RUNNING)
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def _on_ready(self):
self.worker = None
self._execution_ref = None
# if under retry, give application a delay
delay = options.scheduler.retry_delay if self.retries else 0
# Send resource application. Submit job when worker assigned
try:
new_assignment = self._assigner_ref.get_worker_assignments(
self._session_id, self._info
)
except DependencyMissing:
logger.warning(
"DependencyMissing met, operand %s will be back to UNSCHEDULED.",
self._op_key,
)
self._assigned_workers = set()
self.ref().start_operand(OperandState.UNSCHEDULED, _tell=True)
return
chunk_sizes = self._chunk_meta_ref.batch_get_chunk_size(
self._session_id, self._input_chunks
)
if any(v is None for v in chunk_sizes):
logger.warning(
"DependencyMissing met, operand %s will be back to UNSCHEDULED.",
self._op_key,
)
self._assigned_workers = set()
self.ref().start_operand(OperandState.UNSCHEDULED, _tell=True)
return
new_assignment = [a for a in new_assignment if a not in self._assigned_workers]
self._assigned_workers.update(new_assignment)
logger.debug(
"Operand %s assigned to run on workers %r, now it has %r",
self._op_key,
new_assignment,
self._assigned_workers,
)
data_sizes = dict(zip(self._input_chunks, chunk_sizes))
dead_workers = set()
serialized_exec_graph = self._graph_refs[0].get_executable_operand_dag(self._op_key)
for worker_ep in new_assignment:
try:
with rewrite_worker_errors():
self._get_execution_ref(address=worker_ep).enqueue_graph(
self._session_id,
self._op_key,
serialized_exec_graph,
self._io_meta,
data_sizes,
self._info["optimize"],
succ_keys=self._succ_keys,
_delay=delay,
_promise=True,
).then(functools.partial(self._handle_worker_accept, worker_ep))
except WorkerDead:
logger.debug(
"Worker %s dead when submitting operand %s into queue",
worker_ep,
self._op_key,
)
dead_workers.add(worker_ep)
self._assigned_workers.difference_update([worker_ep])
if dead_workers:
self._resource_ref.detach_dead_workers(list(dead_workers), _tell=True)
if not self._assigned_workers:
self.ref().start_operand(_tell=True)
|
def _on_ready(self):
self.worker = None
self._execution_ref = None
# if under retry, give application a delay
delay = options.scheduler.retry_delay if self.retries else 0
# Send resource application. Submit job when worker assigned
try:
new_assignment = self._assigner_ref.get_worker_assignments(
self._session_id, self._info
)
except DependencyMissing:
logger.warning(
"DependencyMissing met, operand %s will be back to UNSCHEDULED.",
self._op_key,
)
self._assigned_workers = set()
self.ref().start_operand(OperandState.UNSCHEDULED, _tell=True)
return
chunk_sizes = self._chunk_meta_ref.batch_get_chunk_size(
self._session_id, self._input_chunks
)
if any(v is None for v in chunk_sizes):
logger.warning(
"DependencyMissing met, operand %s will be back to UNSCHEDULED.",
self._op_key,
)
self._assigned_workers = set()
self.ref().start_operand(OperandState.UNSCHEDULED, _tell=True)
return
new_assignment = [a for a in new_assignment if a not in self._assigned_workers]
self._assigned_workers.update(new_assignment)
logger.debug(
"Operand %s assigned to run on workers %r, now it has %r",
self._op_key,
new_assignment,
self._assigned_workers,
)
data_sizes = dict(zip(self._input_chunks, chunk_sizes))
dead_workers = set()
serialized_exec_graph = self._graph_ref.get_executable_operand_dag(self._op_key)
for worker_ep in new_assignment:
try:
with rewrite_worker_errors():
self._get_execution_ref(address=worker_ep).enqueue_graph(
self._session_id,
self._op_key,
serialized_exec_graph,
self._io_meta,
data_sizes,
self._info["optimize"],
succ_keys=self._succ_keys,
_delay=delay,
_promise=True,
).then(functools.partial(self._handle_worker_accept, worker_ep))
except WorkerDead:
logger.debug(
"Worker %s dead when submitting operand %s into queue",
worker_ep,
self._op_key,
)
dead_workers.add(worker_ep)
self._assigned_workers.difference_update([worker_ep])
if dead_workers:
self._resource_ref.detach_dead_workers(list(dead_workers), _tell=True)
if not self._assigned_workers:
self.ref().start_operand(_tell=True)
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def _on_finished(self):
if self._last_state == OperandState.CANCELLING:
self.start_operand(OperandState.CANCELLING)
return
futures = []
# update pred & succ finish records to trigger further actions
# record if successors can be executed
for out_key in self._succ_keys:
futures.append(
self._get_operand_actor(out_key).add_finished_predecessor(
self._op_key, self.worker, _tell=True, _wait=False
)
)
for in_key in self._pred_keys:
futures.append(
self._get_operand_actor(in_key).add_finished_successor(
self._op_key, _tell=True, _wait=False
)
)
# require more chunks to execute if the completion caused no successors to run
if self._position == OperandPosition.TERMINAL:
# update records in GraphActor to help decide if the whole graph finished execution
futures.extend(self._add_finished_terminal())
[f.result() for f in futures]
|
def _on_finished(self):
if self._last_state == OperandState.CANCELLING:
self.start_operand(OperandState.CANCELLING)
return
futures = []
# update pred & succ finish records to trigger further actions
# record if successors can be executed
for out_key in self._succ_keys:
futures.append(
self._get_operand_actor(out_key).add_finished_predecessor(
self._op_key, self.worker, _tell=True, _wait=False
)
)
for in_key in self._pred_keys:
futures.append(
self._get_operand_actor(in_key).add_finished_successor(
self._op_key, _tell=True, _wait=False
)
)
# require more chunks to execute if the completion caused no successors to run
if self._position == OperandPosition.TERMINAL:
# update records in GraphActor to help decide if the whole graph finished execution
futures.append(
self._graph_ref.add_finished_terminal(self._op_key, _tell=True, _wait=False)
)
[f.result() for f in futures]
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def _on_fatal(self):
if self._last_state == OperandState.FATAL:
return
futures = []
if self._position == OperandPosition.TERMINAL:
# update records in GraphActor to help decide if the whole graph finished execution
futures.extend(self._add_finished_terminal(final_state=GraphState.FAILED))
# set successors to FATAL
for k in self._succ_keys:
futures.append(
self._get_operand_actor(k).stop_operand(
OperandState.FATAL, _tell=True, _wait=False
)
)
[f.result() for f in futures]
|
def _on_fatal(self):
if self._last_state == OperandState.FATAL:
return
futures = []
if self._position == OperandPosition.TERMINAL:
# update records in GraphActor to help decide if the whole graph finished execution
futures.append(
self._graph_ref.add_finished_terminal(
self._op_key, final_state=GraphState.FAILED, _tell=True, _wait=False
)
)
# set successors to FATAL
for k in self._succ_keys:
futures.append(
self._get_operand_actor(k).stop_operand(
OperandState.FATAL, _tell=True, _wait=False
)
)
[f.result() for f in futures]
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def _on_cancelled(self):
futures = []
if self._position == OperandPosition.TERMINAL:
futures.extend(self._add_finished_terminal(final_state=GraphState.CANCELLED))
for k in self._succ_keys:
futures.append(
self._get_operand_actor(k).stop_operand(
OperandState.CANCELLING, _tell=True, _wait=False
)
)
[f.result() for f in futures]
|
def _on_cancelled(self):
futures = []
if self._position == OperandPosition.TERMINAL:
futures.append(
self._graph_ref.add_finished_terminal(
self._op_key, final_state=GraphState.CANCELLED, _tell=True, _wait=False
)
)
for k in self._succ_keys:
futures.append(
self._get_operand_actor(k).stop_operand(
OperandState.CANCELLING, _tell=True, _wait=False
)
)
[f.result() for f in futures]
|
https://github.com/mars-project/mars/issues/179
|
In [1]: from mars.deploy.local import new_cluster
In [2]: import mars.tensor as mt
In [3]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=2)
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:971: Allowing the Plasma store to use up to 3.43597GB of memory.
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/store.cc:1001: Starting object store with directory /tmp and huge page support disabled
/Users/travis/miniconda3/conda-bld/arrow-cpp_1540410566532/work/cpp/src/plasma/eviction_policy.cc:85: There is not enough space to create this object, so evicting 0 objects to free up 0 bytes. The number of bytes in use (before this eviction) is 2130706432.
In [4]: a = mt.ones((3, 3), chunk_size=2)
In [5]: cluster.session.run(a)
Out[5]:
array([[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]])
In [6]: cluster.session.run(a.dot(a))
Failed to start graph execution.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
cpdef object fire_run(self):
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
with self.lock:
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
raise
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
res = actor.on_receive(message_ctx.message)
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 249, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in create_operand_actors
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 679, in <genexpr>
op_refs = dict((k, v.result()) for k, v in op_refs.items())
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 409, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 984, in mars.actors.pool.gevent_pool.Communicator._on_receive_create_actor
self.pool.create_actor(message.actor_cls, message.actor_ref.uid,
File "mars/actors/pool/gevent_pool.pyx", line 241, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
raise ActorAlreadyExist('Actor {0} already exist, cannot create'.format(actor.uid))
mars.actors.errors.ActorAlreadyExist: Actor s:operator$8a307641-78a8-43a9-a612-8edca02ca309$058707656a679b3a0ab03c10d40abb0e already exist, cannot create
2019-01-24T12:05:09Z <Greenlet "Greenlet-0" at 0x119218448: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x319839688>> failed with ActorAlreadyExist
---------------------------------------------------------------------------
SystemError Traceback (most recent call last)
<ipython-input-6-d3363e4685e3> in <module>()
----> 1 cluster.session.run(a.dot(a))
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
115 # execute the non-executed tensors
116 if idx_to_run_tensors:
--> 117 execute_result = self._sess.run(*idx_to_run_tensors.values(), **kw)
118 if execute_result:
119 # fetch is True
~/Documents/mars_dev/mars/mars/deploy/local/session.py in run(self, *tensors, **kw)
80 if graph_state == GraphState.FAILED:
81 # TODO(qin): add traceback
---> 82 raise SystemError('Graph execution failed with unknown reason')
83
84 if 0 < timeout < time.time() - exec_start_time:
SystemError: Graph execution failed with unknown reason
|
SystemError
|
def fetch_tensors(self, tensors, **kw):
from .tensor.expressions.fetch import TensorFetch
results = []
to_concat_tensors = OrderedDict()
for i, tensor in enumerate(tensors):
if tensor.key not in self.stored_tensors:
# check if the tensor is executed before
raise ValueError(
"Tensor to fetch must be executed before, got {0}".format(tensor)
)
if len(tensor.chunks) == 1:
result = self._chunk_result[tensor.chunks[0].key]
results.append(result)
continue
# generate TensorFetch op for each chunk
chunks = []
for c in tensor.chunks:
op = TensorFetch(dtype=c.dtype, sparse=c.op.sparse)
chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key)
chunks.append(chunk)
new_op = TensorFetch(dtype=tensor.dtype, sparse=tensor.op.sparse)
# copy key and id to ensure that fetch tensor won't add the count of executed tensor
tensor = new_op.new_tensor(
None,
tensor.shape,
chunks=chunks,
nsplits=tensor.nsplits,
_key=tensor.key,
_id=tensor.id,
)
# add this concat tensor into the list which shall be executed later
to_concat_tensors[i] = tensor
results.append(None)
# execute the concat tensors together
if to_concat_tensors:
concat_results = self.execute_tensors(list(to_concat_tensors.values()), **kw)
for j, concat_result in zip(to_concat_tensors, concat_results):
results[j] = concat_result
return results
|
def fetch_tensors(self, tensors, **kw):
from .tensor.expressions.fetch import TensorFetch
results = []
to_concat_tensors = OrderedDict()
for i, tensor in enumerate(tensors):
if tensor.key not in self.stored_tensors:
# check if the tensor is executed before
raise ValueError(
"Tensor to fetch must be executed before, got {0}".format(tensor)
)
if len(tensor.chunks) == 1:
result = self._chunk_result[tensor.chunks[0].key]
results.append(result)
continue
# generate TensorFetch op for each chunk
chunks = []
for c in tensor.chunks:
op = TensorFetch(dtype=c.dtype)
chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key)
chunks.append(chunk)
new_op = TensorFetch(dtype=tensor.dtype)
# copy key and id to ensure that fetch tensor won't add the count of executed tensor
tensor = new_op.new_tensor(
None,
tensor.shape,
chunks=chunks,
nsplits=tensor.nsplits,
_key=tensor.key,
_id=tensor.id,
)
# add this concat tensor into the list which shall be executed later
to_concat_tensors[i] = tensor
results.append(None)
# execute the concat tensors together
if to_concat_tensors:
concat_results = self.execute_tensors(list(to_concat_tensors.values()), **kw)
for j, concat_result in zip(to_concat_tensors, concat_results):
results[j] = concat_result
return results
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def default_size_estimator(ctx, chunk, multiplier=1):
exec_size = int(sum(ctx[inp.key][0] for inp in chunk.inputs or ()) * multiplier)
total_out_size = 0
chunk_sizes = dict()
outputs = chunk.op.outputs
for out in outputs:
try:
chunk_size = out.nbytes if not out.is_sparse() else exec_size
if np.isnan(chunk_size):
raise TypeError
chunk_sizes[out.key] = chunk_size
total_out_size += chunk_size
except (AttributeError, TypeError, ValueError):
pass
exec_size = max(exec_size, total_out_size)
for out in outputs:
if out.key in ctx:
continue
if out.key in chunk_sizes:
store_size = chunk_sizes[out.key]
else:
store_size = max(
exec_size // len(outputs), total_out_size // max(len(chunk_sizes), 1)
)
try:
max_sparse_size = (
out.nbytes + np.dtype(np.int64).itemsize * np.prod(out.shape) * out.ndim
)
except TypeError: # pragma: no cover
max_sparse_size = np.nan
if not np.isnan(max_sparse_size):
store_size = min(store_size, max_sparse_size)
ctx[out.key] = (store_size, exec_size // len(outputs))
|
def default_size_estimator(ctx, chunk, multiplier=1):
exec_size = int(sum(ctx[inp.key][0] for inp in chunk.inputs or ()) * multiplier)
total_out_size = 0
chunk_sizes = dict()
outputs = chunk.op.outputs
for out in outputs:
try:
chunk_size = out.nbytes if not out.is_sparse() else exec_size
if np.isnan(chunk_size):
raise TypeError
chunk_sizes[out.key] = chunk_size
total_out_size += chunk_size
except (AttributeError, TypeError, ValueError):
pass
exec_size = max(exec_size, total_out_size)
for out in outputs:
if out.key in chunk_sizes:
store_size = chunk_sizes[out.key]
else:
store_size = max(
exec_size // len(outputs), total_out_size // max(len(chunk_sizes), 1)
)
try:
max_sparse_size = (
out.nbytes + np.dtype(np.int64).itemsize * np.prod(out.shape) * out.ndim
)
except TypeError: # pragma: no cover
max_sparse_size = np.nan
if not np.isnan(max_sparse_size):
store_size = min(store_size, max_sparse_size)
ctx[out.key] = (store_size, exec_size // len(outputs))
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def get_executable_operand_dag(self, op_key, serialize=True):
"""
Make an operand into a worker-executable dag
:param op_key: operand key
:param serialize: whether to return serialized dag
"""
graph = DAG()
inputs_to_copied = dict()
for c in self._op_key_to_chunk[op_key]:
for inp in set(c.inputs or ()):
op = TensorFetch(dtype=inp.dtype, sparse=inp.op.sparse)
inp_chunk = op.new_chunk(None, inp.shape, _key=inp.key).data
inputs_to_copied[inp] = inp_chunk
graph.add_node(inp_chunk)
inputs = [inputs_to_copied[inp] for inp in (c.inputs or ())]
new_op = c.op.copy()
kws = []
for o in c.op.outputs:
kw = dict(_key=o.key, dtype=o.dtype, index=o.index)
composed = []
# copy composed
for j, com in enumerate(o.composed or []):
new_com_op = com.op.copy()
if j == 0:
inps = inputs
else:
# if more than 1 inputs, means they are exactly the same object
inps = [composed[j - 1]] * len(com.inputs)
new_com = new_com_op.new_chunk(
inps, com.shape, index=com.index, dtype=com.dtype, _key=com.key
)
composed.append(new_com)
kw["_composed"] = composed
kws.append(kw)
new_outputs = new_op.new_chunks(
inputs, [o.shape for o in c.op.outputs], kws=kws
)
for co in new_outputs:
exec_chunk = co.data
graph.add_node(exec_chunk)
for inp in inputs:
graph.add_edge(inp, exec_chunk)
if serialize:
return serialize_graph(graph)
else:
return graph
|
def get_executable_operand_dag(self, op_key, serialize=True):
"""
Make an operand into a worker-executable dag
:param op_key: operand key
:param serialize: whether to return serialized dag
"""
graph = DAG()
inputs_to_copied = dict()
for c in self._op_key_to_chunk[op_key]:
for inp in set(c.inputs or ()):
op = TensorFetch(dtype=inp.dtype)
inp_chunk = op.new_chunk(None, inp.shape, _key=inp.key).data
inputs_to_copied[inp] = inp_chunk
graph.add_node(inp_chunk)
inputs = [inputs_to_copied[inp] for inp in (c.inputs or ())]
new_op = c.op.copy()
kws = []
for o in c.op.outputs:
kw = dict(_key=o.key, dtype=o.dtype, index=o.index)
composed = []
# copy composed
for j, com in enumerate(o.composed or []):
new_com_op = com.op.copy()
if j == 0:
inps = inputs
else:
# if more than 1 inputs, means they are exactly the same object
inps = [composed[j - 1]] * len(com.inputs)
new_com = new_com_op.new_chunk(
inps, com.shape, index=com.index, dtype=com.dtype, _key=com.key
)
composed.append(new_com)
kw["_composed"] = composed
kws.append(kw)
new_outputs = new_op.new_chunks(
inputs, [o.shape for o in c.op.outputs], kws=kws
)
for co in new_outputs:
exec_chunk = co.data
graph.add_node(exec_chunk)
for inp in inputs:
graph.add_edge(inp, exec_chunk)
if serialize:
return serialize_graph(graph)
else:
return graph
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def build_tensor_merge_graph(self, tensor_key):
from ..tensor.expressions.merge.concatenate import TensorConcatenate
tiled_tensor = self._get_tensor_by_key(tensor_key)
graph = DAG()
if len(tiled_tensor.chunks) == 1:
# only one chunk, just trigger fetch
c = tiled_tensor.chunks[0]
op = TensorFetch(dtype=c.dtype, sparse=c.op.sparse)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
else:
fetch_chunks = []
for c in tiled_tensor.chunks:
op = TensorFetch(dtype=c.dtype, sparse=c.op.sparse)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
fetch_chunks.append(fetch_chunk)
chunk = (
TensorConcatenate(dtype=tiled_tensor.op.dtype)
.new_chunk(fetch_chunks, tiled_tensor.shape)
.data
)
graph.add_node(chunk)
[graph.add_edge(fetch_chunk, chunk) for fetch_chunk in fetch_chunks]
return serialize_graph(graph)
|
def build_tensor_merge_graph(self, tensor_key):
from ..tensor.expressions.merge.concatenate import TensorConcatenate
tiled_tensor = self._get_tensor_by_key(tensor_key)
graph = DAG()
if len(tiled_tensor.chunks) == 1:
# only one chunk, just trigger fetch
c = tiled_tensor.chunks[0]
op = TensorFetch(dtype=c.dtype)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
else:
fetch_chunks = []
for c in tiled_tensor.chunks:
op = TensorFetch(dtype=c.dtype)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
fetch_chunks.append(fetch_chunk)
chunk = (
TensorConcatenate(dtype=tiled_tensor.op.dtype)
.new_chunk(fetch_chunks, tiled_tensor.shape)
.data
)
graph.add_node(chunk)
[graph.add_edge(fetch_chunk, chunk) for fetch_chunk in fetch_chunks]
return serialize_graph(graph)
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def build_fetch_graph(self, tensor_key):
"""
Convert single tensor to tiled fetch tensor and put into a graph which only contains one tensor
:param tensor_key: the key of tensor
"""
tiled_tensor = self._get_tensor_by_key(tensor_key)
graph = DAG()
chunks = []
for c in tiled_tensor.chunks:
fetch_op = TensorFetch(dtype=c.dtype, sparse=c.op.sparse)
fetch_chunk = fetch_op.new_chunk(None, c.shape, c.index, _key=c.key)
chunks.append(fetch_chunk)
new_op = TensorFetch(dtype=tiled_tensor.dtype, sparse=tiled_tensor.op.sparse)
new_tensor = new_op.new_tensor(
None,
tiled_tensor.shape,
chunks=chunks,
nsplits=tiled_tensor.nsplits,
_key=tiled_tensor.key,
)
graph.add_node(new_tensor)
return serialize_graph(graph)
|
def build_fetch_graph(self, tensor_key):
"""
Convert single tensor to tiled fetch tensor and put into a graph which only contains one tensor
:param tensor_key: the key of tensor
"""
tiled_tensor = self._get_tensor_by_key(tensor_key)
graph = DAG()
chunks = []
for c in tiled_tensor.chunks:
fetch_op = TensorFetch(dtype=c.dtype)
fetch_chunk = fetch_op.new_chunk(None, c.shape, c.index, _key=c.key)
chunks.append(fetch_chunk)
new_op = TensorFetch(dtype=tiled_tensor.dtype)
new_tensor = new_op.new_tensor(
None,
tiled_tensor.shape,
chunks=chunks,
nsplits=tiled_tensor.nsplits,
_key=tiled_tensor.key,
)
graph.add_node(new_tensor)
return serialize_graph(graph)
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def __init__(self, dtype=None, to_fetch_key=None, sparse=False, **kw):
super(TensorFetch, self).__init__(
_dtype=dtype, _to_fetch_key=to_fetch_key, _sparse=sparse, **kw
)
|
def __init__(self, dtype=None, to_fetch_key=None, **kw):
super(TensorFetch, self).__init__(_dtype=dtype, _to_fetch_key=to_fetch_key, **kw)
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def convert_to_fetch(entity):
from ..core import CHUNK_TYPE, TENSOR_TYPE
from .fetch import TensorFetch
if isinstance(entity, CHUNK_TYPE):
new_op = TensorFetch(dtype=entity.dtype, sparse=entity.op.sparse)
return new_op.new_chunk(
None, entity.shape, index=entity.index, _key=entity.key, _id=entity.id
)
elif isinstance(entity, TENSOR_TYPE):
new_op = TensorFetch(dtype=entity.dtype, sparse=entity.op.sparse)
return new_op.new_tensor(None, entity.shape, _key=entity.key, _id=entity.id)
else:
raise ValueError("Now only support tensor or chunk type.")
|
def convert_to_fetch(entity):
from ..core import CHUNK_TYPE, TENSOR_TYPE
from .fetch import TensorFetch
if isinstance(entity, CHUNK_TYPE):
new_op = TensorFetch(dtype=entity.dtype)
return new_op.new_chunk(
None, entity.shape, index=entity.index, _key=entity.key, _id=entity.id
)
elif isinstance(entity, TENSOR_TYPE):
new_op = TensorFetch(dtype=entity.dtype)
return new_op.new_tensor(None, entity.shape, _key=entity.key, _id=entity.id)
else:
raise ValueError("Now only support tensor or chunk type.")
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def merge_tensor_chunks(input_tensor, ctx):
from .executor import Executor
from .tensor.expressions.fetch import TensorFetch
if len(input_tensor.chunks) == 1:
return ctx[input_tensor.chunks[0].key]
chunks = []
for c in input_tensor.chunks:
op = TensorFetch(dtype=c.dtype, sparse=c.op.sparse)
chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key)
chunks.append(chunk)
new_op = TensorFetch(dtype=input_tensor.dtype, sparse=input_tensor.op.sparse)
tensor = new_op.new_tensor(
None, input_tensor.shape, chunks=chunks, nsplits=input_tensor.nsplits
)
executor = Executor(storage=ctx)
concat_result = executor.execute_tensor(tensor, concat=True)
return concat_result[0]
|
def merge_tensor_chunks(input_tensor, ctx):
from .executor import Executor
from .tensor.expressions.fetch import TensorFetch
if len(input_tensor.chunks) == 1:
return ctx[input_tensor.chunks[0].key]
chunks = []
for c in input_tensor.chunks:
op = TensorFetch(dtype=c.dtype)
chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key)
chunks.append(chunk)
new_op = TensorFetch(dtype=input_tensor.dtype)
tensor = new_op.new_tensor(
None, input_tensor.shape, chunks=chunks, nsplits=input_tensor.nsplits
)
executor = Executor(storage=ctx)
concat_result = executor.execute_tensor(tensor, concat=True)
return concat_result[0]
|
https://github.com/mars-project/mars/issues/276
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, shared_memory='20M',)
In [3]: import mars.tensor as mt
In [4]: import scipy.sparse as sps
In [5]: a = sps.csr_matrix((10000, 10000))
In [6]: b = sps.csr_matrix((10000, 1))
In [7]: t1 = mt.tensor(a)
In [8]: t2 = mt.tensor(b)
In [9]: from mars.session import new_session
In [10]: cluster.session.run(t1 * t2)
Unexpected exception occurred in ChunkHolderActor.spill_size.
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
Unhandled exception in promise call
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/promise.py", line 304, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 298, in _wrapped
return func(*args, **kwargs)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/worker/chunkholder.py", line 123, in spill_size
raise SpillExhausted
mars.errors.SpillExhausted
....
|
mars.error
|
def kernel_mode(func):
"""
A decorator for kernel functions.
When eager mode is on, expressions will be executed after `new_entities`, however
`new_entities` is also called in `Executor` and `OperandTilesHandler`, this decorator
provides an options context for kernel functions to avoid execution.
"""
def _wrapped(*args, **kwargs):
try:
_kernel_mode.eager = False
return func(*args, **kwargs)
finally:
_kernel_mode.eager = None
return _wrapped
|
def kernel_mode(func):
"""
A decorator for kernel functions.
When eager mode is on, expressions will be executed after `new_entities`, however
`new_entities` is also called in `Executor` and `OperandTilesHandler`, this decorator
provides an options context for kernel functions to avoid execution.
"""
def _wrapped(*args, **kwargs):
_kernel_mode.eager = False
return_value = func(*args, **kwargs)
_kernel_mode.eager = None
return return_value
return _wrapped
|
https://github.com/mars-project/mars/issues/268
|
In [1]: import mars.tensor as mt
In [2]: from mars.config import options
In [3]: a = mt.array(
...: [[0.1, 0.2, 0.3],
...: [0.3, 0.4, 0.2]]
...: )
In [4]: (a[:, mt.newaxis, :] - a[mt.newaxis, ...]).execute()
Out[4]:
array([[[ 0. , 0. , 0. ],
[-0.2, -0.2, 0.1]],
[[ 0.2, 0.2, -0.1],
[ 0. , 0. , 0. ]]])
In [5]: options.eager_mode = True
In [6]: mt.sqrt(mt.sum((a[:, mt.newaxis, :] - a[mt.newaxis, ...]) ** 2, axis=-1))
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
110 try:
--> 111 handler = self._handlers[op_cls]
112 return handler(op)
KeyError: <class 'mars.tensor.expressions.arithmetic.sqrt.TensorSqrt'>
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-6-de975d9f2cf8> in <module>
----> 1 mt.sqrt(mt.sum((a[:, mt.newaxis, :] - a[mt.newaxis, ...]) ** 2, axis=-1))
~/Workspace/mars/mars/tensor/expressions/utils.py in h(*tensors, **kw)
157 kw['dtype'] = dtype
158
--> 159 ret = func(*tensors, **kw)
160 if ret is NotImplemented:
161 reverse_func = getattr(inspect.getmodule(func), 'r{0}'.format(func.__name__), None) \
~/Workspace/mars/mars/tensor/expressions/arithmetic/sqrt.py in sqrt(x, out, where, **kwargs)
78 """
79 op = TensorSqrt(**kwargs)
---> 80 return op(x, out=out, where=where)
81
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in __call__(self, x, out, where)
348
349 def __call__(self, x, out=None, where=None):
--> 350 return self._call(x, out=out, where=where)
351
352
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in _call(self, x, out, where)
331 shape = x.shape
332
--> 333 t = self.new_tensor([x, out, where], shape)
334
335 if out is None:
~/Workspace/mars/mars/tensor/expressions/core.py in new_tensor(self, inputs, shape, dtype, **kw)
54 raise TypeError('cannot new tensor with more than 1 outputs')
55
---> 56 return self.new_tensors(inputs, shape, dtype=dtype, **kw)[0]
57
58 def calc_shape(self, *inputs_shape):
~/Workspace/mars/mars/tensor/expressions/core.py in new_tensors(self, inputs, shape, dtype, chunks, nsplits, output_limit, kws, **kw)
48 output_limit=None, kws=None, **kw):
49 return self.new_entities(inputs, shape, chunks=chunks, nsplits=nsplits,
---> 50 output_limit=output_limit, kws=kws, dtype=dtype, **kw)
51
52 def new_tensor(self, inputs, shape, dtype=None, **kw):
~/Workspace/mars/mars/core.py in new_entities(self, inputs, shape, **kwargs)
578 entities = self._new_entities(inputs, shape, **kwargs)
579 if is_eager_mode():
--> 580 ExecutableTuple(entities).execute(fetch=False)
581 return entities
582
~/Workspace/mars/mars/core.py in execute(self, session, **kw)
594 if session is None:
595 session = Session.default_or_local()
--> 596 return session.run(*self, **kw)
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
451 concat_keys = []
452 for tensor in tensors:
--> 453 tensor.tiles()
454 chunk_keys = [c.key for c in tensor.chunks]
455 result_keys.extend(chunk_keys)
~/Workspace/mars/mars/tensor/core.py in tiles(self)
223
224 def tiles(self):
--> 225 return handler.tiles(self)
226
227 def single_tiles(self):
~/Workspace/mars/mars/tiles.py in tiles(self, tiles_obj)
172 if not preds or accessible:
173 if node.is_coarse() and node.op:
--> 174 tiled = self._dispatch(node.op)
175 self._assign_to([t.data for t in tiled], node.op.outputs)
176 visited.add(node)
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
114 if hasattr(op_cls, 'tile'):
115 # has tile implementation
--> 116 return op_cls.tile(op)
117 for op_clz in self._handlers.keys():
118 if issubclass(op_cls, op_clz):
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in tile(cls, op)
43 for out_index in itertools.product(*(map(range, out_chunk_shape))):
44 in_chunks = [t.cix[get_index(out_index[-t.ndim:], t)] if t.ndim != 0 else t.chunks[0]
---> 45 for t in inputs]
46 chunk_op = op.copy().reset_key()
47 chunk_shape = broadcast_shape(*(c.shape for c in in_chunks))
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in <listcomp>(.0)
43 for out_index in itertools.product(*(map(range, out_chunk_shape))):
44 in_chunks = [t.cix[get_index(out_index[-t.ndim:], t)] if t.ndim != 0 else t.chunks[0]
---> 45 for t in inputs]
46 chunk_op = op.copy().reset_key()
47 chunk_shape = broadcast_shape(*(c.shape for c in in_chunks))
~/Workspace/mars/mars/core.py in __getitem__(self, item)
457 if len(item) != self._tilesable.ndim:
458 raise ValueError('Cannot get tensor chunk by %s, expect length %d' % (
--> 459 item, self._tilesable.ndim))
460
461 s = self._tilesable.chunk_shape
ValueError: Cannot get tensor chunk by (0, 0), expect length 3
In [7]: a[:, mt.newaxis, :] - a[mt.newaxis, ...]
Out[7]: Tensor <op=TensorSubtract, shape=(2, 2, 3), key=fe6279614688ba864daa99368caf12af>
|
KeyError
|
def _wrapped(*args, **kwargs):
try:
_kernel_mode.eager = False
return func(*args, **kwargs)
finally:
_kernel_mode.eager = None
|
def _wrapped(*args, **kwargs):
_kernel_mode.eager = False
return_value = func(*args, **kwargs)
_kernel_mode.eager = None
return return_value
|
https://github.com/mars-project/mars/issues/268
|
In [1]: import mars.tensor as mt
In [2]: from mars.config import options
In [3]: a = mt.array(
...: [[0.1, 0.2, 0.3],
...: [0.3, 0.4, 0.2]]
...: )
In [4]: (a[:, mt.newaxis, :] - a[mt.newaxis, ...]).execute()
Out[4]:
array([[[ 0. , 0. , 0. ],
[-0.2, -0.2, 0.1]],
[[ 0.2, 0.2, -0.1],
[ 0. , 0. , 0. ]]])
In [5]: options.eager_mode = True
In [6]: mt.sqrt(mt.sum((a[:, mt.newaxis, :] - a[mt.newaxis, ...]) ** 2, axis=-1))
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
110 try:
--> 111 handler = self._handlers[op_cls]
112 return handler(op)
KeyError: <class 'mars.tensor.expressions.arithmetic.sqrt.TensorSqrt'>
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-6-de975d9f2cf8> in <module>
----> 1 mt.sqrt(mt.sum((a[:, mt.newaxis, :] - a[mt.newaxis, ...]) ** 2, axis=-1))
~/Workspace/mars/mars/tensor/expressions/utils.py in h(*tensors, **kw)
157 kw['dtype'] = dtype
158
--> 159 ret = func(*tensors, **kw)
160 if ret is NotImplemented:
161 reverse_func = getattr(inspect.getmodule(func), 'r{0}'.format(func.__name__), None) \
~/Workspace/mars/mars/tensor/expressions/arithmetic/sqrt.py in sqrt(x, out, where, **kwargs)
78 """
79 op = TensorSqrt(**kwargs)
---> 80 return op(x, out=out, where=where)
81
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in __call__(self, x, out, where)
348
349 def __call__(self, x, out=None, where=None):
--> 350 return self._call(x, out=out, where=where)
351
352
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in _call(self, x, out, where)
331 shape = x.shape
332
--> 333 t = self.new_tensor([x, out, where], shape)
334
335 if out is None:
~/Workspace/mars/mars/tensor/expressions/core.py in new_tensor(self, inputs, shape, dtype, **kw)
54 raise TypeError('cannot new tensor with more than 1 outputs')
55
---> 56 return self.new_tensors(inputs, shape, dtype=dtype, **kw)[0]
57
58 def calc_shape(self, *inputs_shape):
~/Workspace/mars/mars/tensor/expressions/core.py in new_tensors(self, inputs, shape, dtype, chunks, nsplits, output_limit, kws, **kw)
48 output_limit=None, kws=None, **kw):
49 return self.new_entities(inputs, shape, chunks=chunks, nsplits=nsplits,
---> 50 output_limit=output_limit, kws=kws, dtype=dtype, **kw)
51
52 def new_tensor(self, inputs, shape, dtype=None, **kw):
~/Workspace/mars/mars/core.py in new_entities(self, inputs, shape, **kwargs)
578 entities = self._new_entities(inputs, shape, **kwargs)
579 if is_eager_mode():
--> 580 ExecutableTuple(entities).execute(fetch=False)
581 return entities
582
~/Workspace/mars/mars/core.py in execute(self, session, **kw)
594 if session is None:
595 session = Session.default_or_local()
--> 596 return session.run(*self, **kw)
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
451 concat_keys = []
452 for tensor in tensors:
--> 453 tensor.tiles()
454 chunk_keys = [c.key for c in tensor.chunks]
455 result_keys.extend(chunk_keys)
~/Workspace/mars/mars/tensor/core.py in tiles(self)
223
224 def tiles(self):
--> 225 return handler.tiles(self)
226
227 def single_tiles(self):
~/Workspace/mars/mars/tiles.py in tiles(self, tiles_obj)
172 if not preds or accessible:
173 if node.is_coarse() and node.op:
--> 174 tiled = self._dispatch(node.op)
175 self._assign_to([t.data for t in tiled], node.op.outputs)
176 visited.add(node)
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
114 if hasattr(op_cls, 'tile'):
115 # has tile implementation
--> 116 return op_cls.tile(op)
117 for op_clz in self._handlers.keys():
118 if issubclass(op_cls, op_clz):
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in tile(cls, op)
43 for out_index in itertools.product(*(map(range, out_chunk_shape))):
44 in_chunks = [t.cix[get_index(out_index[-t.ndim:], t)] if t.ndim != 0 else t.chunks[0]
---> 45 for t in inputs]
46 chunk_op = op.copy().reset_key()
47 chunk_shape = broadcast_shape(*(c.shape for c in in_chunks))
~/Workspace/mars/mars/tensor/expressions/arithmetic/core.py in <listcomp>(.0)
43 for out_index in itertools.product(*(map(range, out_chunk_shape))):
44 in_chunks = [t.cix[get_index(out_index[-t.ndim:], t)] if t.ndim != 0 else t.chunks[0]
---> 45 for t in inputs]
46 chunk_op = op.copy().reset_key()
47 chunk_shape = broadcast_shape(*(c.shape for c in in_chunks))
~/Workspace/mars/mars/core.py in __getitem__(self, item)
457 if len(item) != self._tilesable.ndim:
458 raise ValueError('Cannot get tensor chunk by %s, expect length %d' % (
--> 459 item, self._tilesable.ndim))
460
461 s = self._tilesable.chunk_shape
ValueError: Cannot get tensor chunk by (0, 0), expect length 3
In [7]: a[:, mt.newaxis, :] - a[mt.newaxis, ...]
Out[7]: Tensor <op=TensorSubtract, shape=(2, 2, 3), key=fe6279614688ba864daa99368caf12af>
|
KeyError
|
def tile(cls, op):
"""
Use LU decomposition to compute inverse of matrix.
Given a square matrix A:
P, L, U = lu(A)
b_eye is an identity matrix with the same shape as matrix A, then,
(P * L * U) * A_inv = b_eye
L * (U * A_inv) = P.T * b_eye
use `solve_triangular` twice to compute the inverse of matrix A.
"""
from .lu import lu
from ..datasource import eye
from ..base.transpose import TensorTranspose
from .tensordot import tensordot
from .solve_triangular import solve_triangular
in_tensor = op.input
b_eye = eye(in_tensor.shape[0], chunk_size=in_tensor.nsplits)
b_eye.single_tiles()
p, l, u = lu(in_tensor)
p.single_tiles()
# transposed p equals to inverse of p
p_transpose = TensorTranspose(
dtype=p.dtype, sparse=p.op.sparse, axes=list(range(in_tensor.ndim))[::-1]
).new_tensor([p], p.shape)
p_transpose.single_tiles()
b = tensordot(p_transpose, b_eye, axes=((p_transpose.ndim - 1,), (b_eye.ndim - 2,)))
b.single_tiles()
# as `l` is a lower matrix, `lower=True` should be specified.
uy = solve_triangular(l, b, lower=True)
uy.single_tiles()
a_inv = solve_triangular(u, uy)
a_inv.single_tiles()
return [a_inv]
|
def tile(cls, op):
"""
Use LU decomposition to compute inverse of matrix.
Given a square matrix A:
P, L, U = lu(A)
b_eye is an identity matrix with the same shape as matrix A, then,
(P * L * U) * A_inv = b_eye
L * (U * A_inv) = P.T * b_eye
use `solve_triangular` twice to compute the inverse of matrix A.
"""
from .lu import lu
from ..datasource import eye
from ..base.transpose import TensorTranspose
from .tensordot import tensordot
from .solve_triangular import solve_triangular
in_tensor = op.input
b_eye = eye(in_tensor.shape[0], chunk_size=in_tensor.chunks[0].shape)
b_eye.single_tiles()
p, l, u = lu(in_tensor)
p.single_tiles()
# transposed p equals to inverse of p
p_transpose = TensorTranspose(
dtype=p.dtype, sparse=p.op.sparse, axes=list(range(in_tensor.ndim))[::-1]
).new_tensor([p], p.shape)
p_transpose.single_tiles()
b = tensordot(p_transpose, b_eye, axes=((p_transpose.ndim - 1,), (b_eye.ndim - 2,)))
b.single_tiles()
# as `l` is a lower matrix, `lower=True` should be specified.
uy = solve_triangular(l, b, lower=True)
uy.single_tiles()
a_inv = solve_triangular(u, uy)
a_inv.single_tiles()
return [a_inv]
|
https://github.com/mars-project/mars/issues/250
|
In [32]: a = mt.tensor(sps.csr_matrix([[0, 0], [1, 0]]))
In [33]: b = mt.linalg.inv(a)
In [34]: b.execute()
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
110 try:
--> 111 handler = self._handlers[op_cls]
112 return handler(op)
KeyError: <class 'mars.tensor.expressions.linalg.inv.TensorInv'>
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
110 try:
--> 111 handler = self._handlers[op_cls]
112 return handler(op)
KeyError: <class 'mars.tensor.expressions.datasource.eye.TensorEye'>
During handling of the above exception, another exception occurred:
AssertionError Traceback (most recent call last)
<ipython-input-34-be8c8c42b9b4> in <module>
----> 1 b.execute()
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Workspace/mars/mars/tensor/core.py in execute(self, session, **kw)
184 if session is None:
185 session = Session.default_or_local()
--> 186 return session.run(self, **kw)
187
188 def fetch(self, session=None, **kw):
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Workspace/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
451 concat_keys = []
452 for tensor in tensors:
--> 453 tensor.tiles()
454 chunk_keys = [c.key for c in tensor.chunks]
455 result_keys.extend(chunk_keys)
~/Workspace/mars/mars/core.py in tiles(self)
390
391 def tiles(self):
--> 392 return handler.tiles(self)
393
394 def single_tiles(self):
~/Workspace/mars/mars/tiles.py in tiles(self, tiles_obj)
172 if not preds or accessible:
173 if node.is_coarse() and node.op:
--> 174 tiled = self._dispatch(node.op)
175 self._assign_to([t.data for t in tiled], node.op.outputs)
176 visited.add(node)
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
114 if hasattr(op_cls, 'tile'):
115 # has tile implementation
--> 116 return op_cls.tile(op)
117 for op_clz in self._handlers.keys():
118 if issubclass(op_cls, op_clz):
~/Workspace/mars/mars/tensor/expressions/linalg/inv.py in tile(cls, op)
53
54 b_eye = eye(in_tensor.shape[0], chunk_size=in_tensor.chunks[0].shape)
---> 55 b_eye.single_tiles()
56
57 p, l, u = lu(in_tensor)
~/Workspace/mars/mars/tensor/core.py in single_tiles(self)
226
227 def single_tiles(self):
--> 228 return handler.single_tiles(self)
229
230 @property
~/Workspace/mars/mars/tiles.py in single_tiles(self, to_tiles)
127 def single_tiles(self, to_tiles):
128 if to_tiles.is_coarse() and to_tiles.op:
--> 129 dispatched = self._dispatch(to_tiles.op)
130 self._assign_to([d.data for d in dispatched], to_tiles.op.outputs)
131
~/Workspace/mars/mars/utils.py in _wrapped(*args, **kwargs)
352 def _wrapped(*args, **kwargs):
353 _kernel_mode.eager = False
--> 354 return_value = func(*args, **kwargs)
355 _kernel_mode.eager = None
356 return return_value
~/Workspace/mars/mars/tiles.py in _dispatch(self, op)
114 if hasattr(op_cls, 'tile'):
115 # has tile implementation
--> 116 return op_cls.tile(op)
117 for op_clz in self._handlers.keys():
118 if issubclass(op_cls, op_clz):
~/Workspace/mars/mars/tensor/expressions/datasource/eye.py in tile(cls, op)
51 @classmethod
52 def tile(cls, op):
---> 53 return TensorDiagBase.tile(op)
54
55
~/Workspace/mars/mars/tensor/expressions/datasource/diag.py in tile(cls, op)
61 # op can be TensorDiag or TensorEye
62 k = op.k
---> 63 nsplits = op._get_nsplits(op)
64
65 fx = lambda x, y: x - y + k
~/Workspace/mars/mars/tensor/expressions/datasource/eye.py in _get_nsplits(cls, op)
42 tensor = op.outputs[0]
43 chunk_size = tensor.params.raw_chunk_size or options.tensor.chunk_size
---> 44 return decide_chunk_sizes(tensor.shape, chunk_size, tensor.dtype.itemsize)
45
46 @classmethod
~/Workspace/mars/mars/tensor/expressions/utils.py in decide_chunk_sizes(shape, chunk_size, itemsize)
434 raise ValueError("chunks have more dimensions than input tensor")
435 if nleft == 0:
--> 436 return normalize_chunk_sizes(shape, tuple(chunk_size[j] for j in range(len(shape))))
437
438 max_chunk_size = options.tensor.chunk_store_limit
~/Workspace/mars/mars/tensor/expressions/utils.py in normalize_chunk_sizes(shape, chunk_size)
57 chunk_sizes.append(chunk)
58 else:
---> 59 assert isinstance(chunk, six.integer_types)
60
61 sizes = tuple(chunk for _ in range(int(size / chunk))) + \
AssertionError:
|
KeyError
|
def _execute_operand(self, op):
results = self._chunk_results
ref_counts = self._chunk_key_ref_counts
op_keys = self._executed_op_keys
executed_chunk_keys = set()
deleted_chunk_keys = set()
try:
ops = list(self._op_key_to_ops[op.key])
if not self._mock:
# do real execution
# note that currently execution is the chunk-level
# so we pass the first operand's first output to Executor.handle
first_op = ops[0]
Executor.handle(first_op.outputs[0], results)
executed_chunk_keys.update([c.key for c in first_op.outputs])
op_keys.add(first_op.key)
# handle other operands
for rest_op in ops[1:]:
for op_output, rest_op_output in zip(first_op.outputs, rest_op.outputs):
# if the op's outputs have been stored,
# other same key ops' results will be the same
if rest_op_output.key not in executed_chunk_keys:
results[rest_op_output.key] = results[op_output.key]
else:
sparse_percent = self._sparse_mock_percent if op.sparse else 1.0
for output in op.outputs:
results[output.key] = output.nbytes * sparse_percent
with self._lock:
for output in itertools.chain(*[op.outputs for op in ops]):
# the output not in the graph will be skipped
if output not in self._graph:
continue
# in case that operand has multiple outputs
# and some of the output not in result keys, delete them
if ref_counts.get(output.key) == 0:
# if the result has been deleted, it should be skipped
if output.key not in deleted_chunk_keys:
deleted_chunk_keys.add(output.key)
del results[output.key]
# clean the predecessors' results if ref counts equals 0
for pred_chunk in self._graph.iter_predecessors(output):
if pred_chunk.key in ref_counts:
ref_counts[pred_chunk.key] -= 1
if ref_counts[pred_chunk.key] == 0:
del results[pred_chunk.key]
# add successors' operands to queue
for succ_chunk in self._graph.iter_successors(output):
preds = self._graph.predecessors(succ_chunk)
if succ_chunk.op.key not in self._submitted_op_keys and (
len(preds) == 0 or all(pred.op.key in op_keys for pred in preds)
):
self._queue.insert(0, succ_chunk.op)
except Exception:
self._has_error.set()
raise
finally:
self._semaphore.release()
|
def _execute_operand(self, op):
results = self._chunk_results
ref_counts = self._chunk_key_ref_counts
op_keys = self._executed_op_keys
try:
ops = list(self._op_key_to_ops[op.key])
if not self._mock:
# do real execution
# note that currently execution is the chunk-level
# so we pass the first operand's first output to Executor.handle
first_op = ops[0]
Executor.handle(first_op.outputs[0], results)
op_keys.add(first_op.key)
# handle other operands
for rest_op in ops[1:]:
for op_output, rest_op_output in zip(first_op.outputs, rest_op.outputs):
results[rest_op_output.key] = results[op_output.key]
else:
sparse_percent = self._sparse_mock_percent if op.sparse else 1.0
for output in op.outputs:
results[output.key] = output.nbytes * sparse_percent
with self._lock:
for output in itertools.chain(*[op.outputs for op in ops]):
# the output not in the graph will be skipped
if output not in self._graph:
continue
# in case that operand has multiple outputs
# and some of the output not in result keys, delete them
if ref_counts.get(output.key) == 0:
del results[output.key]
# clean the predecessors' results if ref counts equals 0
for pred_chunk in self._graph.iter_predecessors(output):
if pred_chunk.key in ref_counts:
ref_counts[pred_chunk.key] -= 1
if ref_counts[pred_chunk.key] == 0:
del results[pred_chunk.key]
# add successors' operands to queue
for succ_chunk in self._graph.iter_successors(output):
preds = self._graph.predecessors(succ_chunk)
if succ_chunk.op.key not in self._submitted_op_keys and (
len(preds) == 0 or all(pred.op.key in op_keys for pred in preds)
):
self._queue.insert(0, succ_chunk.op)
except Exception:
self._has_error.set()
raise
finally:
self._semaphore.release()
|
https://github.com/mars-project/mars/issues/248
|
In [1]: import mars.tensor as mt
In [2]: a = mt.ones((10, 5), chunk_size=5)
In [3]: s = mt.linalg.svd(a)[1]
In [4]: s.execute()
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-4-4a5966ced510> in <module>
----> 1 s.execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
378
379 def execute(self, session=None, **kw):
--> 380 return self._data.execute(session, **kw)
381
382
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
184 if session is None:
185 session = Session.default_or_local()
--> 186 return session.run(self, **kw)
187
188 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
343 def _wrapped(*args, **kwargs):
344 _kernel_mode.eager = False
--> 345 return_value = func(*args, **kwargs)
346 _kernel_mode.eager = None
347 return return_value
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
473 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
474 print_progress=print_progress, mock=mock,
--> 475 sparse_mock_percent=sparse_mock_percent)
476
477 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, sparse_mock_percent)
410 print_progress=print_progress, mock=mock,
411 sparse_mock_percent=sparse_mock_percent)
--> 412 return graph_execution.execute(True)
413
414 @kernel_mode
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
339 # wait until all the futures completed
340 for future in executed_futures:
--> 341 future.result()
342
343 if self._mock:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
246 # and some of the output not in result keys, delete them
247 if ref_counts.get(output.key) == 0:
--> 248 del results[output.key]
249
250 # clean the predecessors' results if ref counts equals 0
KeyError: '4ec5b76155f38976c84e36d156349503'
|
KeyError
|
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Examples
--------
>>> import mars.tensor as mt
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = mt.linalg.inv(a)
>>> mt.allclose(mt.dot(a, ainv), mt.eye(2)).execute()
True
>>> mt.allclose(mt.dot(ainv, a), mt.eye(2)).execute()
True
>>> ainv.execute()
array([[ -2. , 1. ],
[ 1.5, -0.5]])
"""
# TODO: using some parallel algorithm for matrix inversion.
a = astensor(a)
if a.ndim != 2:
raise LinAlgError(
"{0}-dimensional array given. Tensor must be two-dimensional".format(a.ndim)
)
if a.shape[0] != a.shape[1]:
raise LinAlgError("Input must be square")
tiny_inv = np.linalg.inv(np.array([[1, 2], [2, 5]], dtype=a.dtype))
op = TensorInv(dtype=tiny_inv.dtype, sparse=a.is_sparse())
return op(a)
|
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : (..., M, M) array_like
Matrix to be inverted.
Returns
-------
ainv : (..., M, M) ndarray or matrix
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is not square or inversion fails.
Notes
-----
.. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
Examples
--------
>>> import mars.tensor as mt
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = mt.linalg.inv(a)
>>> mt.allclose(mt.dot(a, ainv), mt.eye(2)).execute()
True
>>> mt.allclose(mt.dot(ainv, a), mt.eye(2)).execute()
True
>>> ainv.execute()
array([[ -2. , 1. ],
[ 1.5, -0.5]])
"""
# TODO: using some parallel algorithm for matrix inversion.
from ..datasource import eye
a = astensor(a)
return solve(a, eye(a.shape[0], chunk_size=a.params.raw_chunk_size))
|
https://github.com/mars-project/mars/issues/230
|
In [1]: import mars.tensor as mt
In [2]: a = mt.random.randint(1, 10, (6, 6), chunk_size=3)
In [3]: b = a.dot(a)
In [4]: r = mt.linalg.inv(b)
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 65, in __getattr__
return self[item]
KeyError: 'raw_chunk_size'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3267, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-7-7491bd48bc7b>", line 1, in <module>
mt.linalg.inv(a.dot(a)).execute()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/linalg/inv.py", line 61, in inv
return solve(a, eye(a.shape[0], chunk_size=a.params.raw_chunk_size))
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 68, in __getattr__
"'AttributeDict' object has no attribute {0}".format(item))
AttributeError: 'AttributeDict' object has no attribute raw_chunk_size
|
KeyError
|
def tile(cls, op):
a, b = op.inputs
tensor = op.outputs[0]
# the axes to align on
a_axes = lrange(a.ndim - 2)[::-1] + [tensor.ndim - 2, tensor.ndim - 1]
b_axes = lrange(b.ndim - 2)[::-1] + [tensor.ndim - 1, tensor.ndim]
a, b = unify_chunks((a, a_axes), (b, b_axes))
get_nsplit = lambda i: a.nsplits[i] if a.nsplits[i] != (1,) else b.nsplits[i]
get_idx = lambda ch, idx: tuple(
0 if ch.nsplits[j] == (1,) else ix for j, ix in enumerate(idx)
)
prefix_idxes = [range(len(get_nsplit(i))) for i in range(a.ndim - 2)]
out_idxes = prefix_idxes + [range(len(a.nsplits[-2])), range(len(b.nsplits[-1]))]
out_chunks = []
for out_idx in itertools.product(*out_idxes):
chunks = []
get_s = lambda x, idx: x[idx] if x != (1,) else x[0]
shape = tuple(
max(get_s(a_s, j), get_s(b_s, j))
for a_s, b_s, j in zip(a.nsplits[:-2], b.nsplits[:-2], out_idx[:-2])
) + (get_s(a.nsplits[-2], out_idx[-2]), get_s(b.nsplits[-1], out_idx[-1]))
for contract_idx in range(len(a.nsplits[-1])):
a_idx = get_idx(a, out_idx[: a.ndim - 1] + (contract_idx,))
a_chunk = a.cix[a_idx]
b_idx = get_idx(b, out_idx[: b.ndim - 2] + (contract_idx,) + out_idx[-1:])
b_chunk = b.cix[b_idx]
chunk_op = op.copy().reset_key()
c = chunk_op.new_chunk([a_chunk, b_chunk], shape)
chunks.append(c)
if len(chunks) == 1:
c = chunks[0]
out_chunk_op = c.op.copy()
out_chunk = out_chunk_op.new_chunk(
out_chunk_op.inputs, c.shape, index=out_idx
)
else:
out_chunk = tree_add(
tensor.op.dtype, chunks, out_idx, shape, sparse=tensor.op.sparse
)
out_chunks.append(out_chunk)
nsplits = tuple(get_nsplit(i) for i in range(a.ndim - 2)) + (
a.nsplits[-2],
b.nsplits[-1],
)
new_op = op.copy()
return new_op.new_tensors([a, b], tensor.shape, chunks=out_chunks, nsplits=nsplits)
|
def tile(cls, op):
a, b = op.inputs
tensor = op.outputs[0]
# the axes to align on
a_axes = lrange(a.ndim - 2)[::-1] + [tensor.ndim - 2, tensor.ndim - 1]
b_axes = lrange(b.ndim - 2)[::-1] + [tensor.ndim - 1, tensor.ndim]
a, b = unify_chunks((a, a_axes), (b, b_axes))
get_nsplit = lambda i: a.nsplits[i] if a.nsplits[i] != (1,) else b.nsplits[i]
get_idx = lambda ch, idx: tuple(
0 if ch.nsplits[j] == (1,) else ix for j, ix in enumerate(idx)
)
prefix_idxes = [range(len(get_nsplit(i))) for i in range(a.ndim - 2)]
out_idxes = prefix_idxes + [range(len(a.nsplits[-2])), range(len(b.nsplits[-1]))]
out_chunks = []
for out_idx in itertools.product(*out_idxes):
chunks = []
get_s = lambda x, idx: x[idx] if x != (1,) else x[0]
shape = tuple(
max(get_s(a_s, j), get_s(b_s, j))
for a_s, b_s, j in zip(a.nsplits[:-2], b.nsplits[:-2], out_idx[:-2])
) + (get_s(a.nsplits[-2], out_idx[-2]), get_s(b.nsplits[-1], out_idx[-1]))
for contract_idx in range(len(a.nsplits[-1])):
a_idx = get_idx(a, out_idx[: a.ndim - 1] + (contract_idx,))
a_chunk = a.cix[a_idx]
b_idx = get_idx(b, out_idx[: b.ndim - 2] + (contract_idx,) + out_idx[-1:])
b_chunk = b.cix[b_idx]
chunk_op = op.copy().reset_key()
c = chunk_op.new_chunk([a_chunk, b_chunk], shape)
chunks.append(c)
if len(chunks) == 1:
c = chunks[0]
out_chunk_op = c.op.copy()
out_chunk = out_chunk_op.new_chunk(
out_chunk_op.inputs, c.shape, index=out_idx
)
else:
out_chunk = tree_add(tensor.op.dtype, chunks, out_idx, shape)
out_chunks.append(out_chunk)
nsplits = tuple(get_nsplit(i) for i in range(a.ndim - 2)) + (
a.nsplits[-2],
b.nsplits[-1],
)
new_op = op.copy()
return new_op.new_tensors([a, b], tensor.shape, chunks=out_chunks, nsplits=nsplits)
|
https://github.com/mars-project/mars/issues/230
|
In [1]: import mars.tensor as mt
In [2]: a = mt.random.randint(1, 10, (6, 6), chunk_size=3)
In [3]: b = a.dot(a)
In [4]: r = mt.linalg.inv(b)
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 65, in __getattr__
return self[item]
KeyError: 'raw_chunk_size'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3267, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-7-7491bd48bc7b>", line 1, in <module>
mt.linalg.inv(a.dot(a)).execute()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/linalg/inv.py", line 61, in inv
return solve(a, eye(a.shape[0], chunk_size=a.params.raw_chunk_size))
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 68, in __getattr__
"'AttributeDict' object has no attribute {0}".format(item))
AttributeError: 'AttributeDict' object has no attribute raw_chunk_size
|
KeyError
|
def tile(cls, op):
a, b, a_axes, b_axes = op.a, op.b, op.a_axes, op.b_axes
c = itertools.count(max(a.ndim, b.ndim))
a_ax = tuple(a_axes.index(i) if i in a_axes else next(c) for i in range(a.ndim))
b_ax = tuple(b_axes.index(i) if i in b_axes else next(c) for i in range(b.ndim))
a, b = unify_chunks((a, a_ax), (b, b_ax))
a_output_indexes = [
range(len(a.nsplits[i])) for i in range(a.ndim) if i not in a_axes
]
b_output_indexes = [
range(len(b.nsplits[i])) for i in range(b.ndim) if i not in b_axes
]
output_axes = [(0, i) for i in range(a.ndim) if i not in a_axes] + [
(1, i) for i in range(b.ndim) if i not in b_axes
]
out_chunks = []
for out_idx in itertools.product(
*itertools.chain(a_output_indexes, b_output_indexes)
):
a_indexes = [None] * a.ndim
b_indexes = [None] * b.ndim
tensor_shape = []
for i, idx in enumerate(out_idx):
t_idx, axis = output_axes[i]
t = (a, b)[t_idx]
(a_indexes if t_idx == 0 else b_indexes)[axis] = idx
tensor_shape.append(t.nsplits[axis][idx])
tensor_shape = tuple(tensor_shape)
tensordot_chunks = []
for contract_indexes in itertools.product(
*[range(len(a.nsplits[ax])) for ax in a_axes]
):
a_indices, b_indices = list(a_indexes), list(b_indexes)
for a_axis, contract_index in izip(a_axes, contract_indexes):
a_indices[a_axis] = contract_index
for b_axis, contract_index in izip(b_axes, contract_indexes):
b_indices[b_axis] = contract_index
tensordot_chunk_op = op.copy().reset_key()
tensordot_chunk = tensordot_chunk_op.new_chunk(
[a.cix[tuple(a_indices)], b.cix[tuple(b_indices)]], tensor_shape
)
tensordot_chunks.append(tensordot_chunk)
if len(tensordot_chunks) == 1:
c = tensordot_chunks[0]
chunk_op = c.op.copy()
chunk = chunk_op.new_chunk(c.inputs, c.shape, index=out_idx)
else:
chunk = tree_add(
op.dtype, tensordot_chunks, out_idx, tensor_shape, sparse=op.sparse
)
out_chunks.append(chunk)
get_nsplits = lambda t_idx, i: (a, b)[t_idx].nsplits[i]
nsplits = [get_nsplits(*it) for it in output_axes]
new_op = op.copy()
return new_op.new_tensors(
[a, b], op.outputs[0].shape, chunks=out_chunks, nsplits=nsplits
)
|
def tile(cls, op):
a, b, a_axes, b_axes = op.a, op.b, op.a_axes, op.b_axes
c = itertools.count(max(a.ndim, b.ndim))
a_ax = tuple(a_axes.index(i) if i in a_axes else next(c) for i in range(a.ndim))
b_ax = tuple(b_axes.index(i) if i in b_axes else next(c) for i in range(b.ndim))
a, b = unify_chunks((a, a_ax), (b, b_ax))
a_output_indexes = [
range(len(a.nsplits[i])) for i in range(a.ndim) if i not in a_axes
]
b_output_indexes = [
range(len(b.nsplits[i])) for i in range(b.ndim) if i not in b_axes
]
output_axes = [(0, i) for i in range(a.ndim) if i not in a_axes] + [
(1, i) for i in range(b.ndim) if i not in b_axes
]
out_chunks = []
for out_idx in itertools.product(
*itertools.chain(a_output_indexes, b_output_indexes)
):
a_indexes = [None] * a.ndim
b_indexes = [None] * b.ndim
tensor_shape = []
for i, idx in enumerate(out_idx):
t_idx, axis = output_axes[i]
t = (a, b)[t_idx]
(a_indexes if t_idx == 0 else b_indexes)[axis] = idx
tensor_shape.append(t.nsplits[axis][idx])
tensor_shape = tuple(tensor_shape)
tensordot_chunks = []
for contract_indexes in itertools.product(
*[range(len(a.nsplits[ax])) for ax in a_axes]
):
a_indices, b_indices = list(a_indexes), list(b_indexes)
for a_axis, contract_index in izip(a_axes, contract_indexes):
a_indices[a_axis] = contract_index
for b_axis, contract_index in izip(b_axes, contract_indexes):
b_indices[b_axis] = contract_index
tensordot_chunk_op = op.copy().reset_key()
tensordot_chunk = tensordot_chunk_op.new_chunk(
[a.cix[tuple(a_indices)], b.cix[tuple(b_indices)]], tensor_shape
)
tensordot_chunks.append(tensordot_chunk)
if len(tensordot_chunks) == 1:
c = tensordot_chunks[0]
chunk_op = c.op.copy()
chunk = chunk_op.new_chunk(c.inputs, c.shape, index=out_idx)
else:
chunk = tree_add(op.dtype, tensordot_chunks, out_idx, tensor_shape)
out_chunks.append(chunk)
get_nsplits = lambda t_idx, i: (a, b)[t_idx].nsplits[i]
nsplits = [get_nsplits(*it) for it in output_axes]
new_op = op.copy()
return new_op.new_tensors(
[a, b], op.outputs[0].shape, chunks=out_chunks, nsplits=nsplits
)
|
https://github.com/mars-project/mars/issues/230
|
In [1]: import mars.tensor as mt
In [2]: a = mt.random.randint(1, 10, (6, 6), chunk_size=3)
In [3]: b = a.dot(a)
In [4]: r = mt.linalg.inv(b)
Traceback (most recent call last):
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 65, in __getattr__
return self[item]
KeyError: 'raw_chunk_size'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.7/lib/python3.7/site-packages/IPython/core/interactiveshell.py", line 3267, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-7-7491bd48bc7b>", line 1, in <module>
mt.linalg.inv(a.dot(a)).execute()
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/linalg/inv.py", line 61, in inv
return solve(a, eye(a.shape[0], chunk_size=a.params.raw_chunk_size))
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/utils.py", line 68, in __getattr__
"'AttributeDict' object has no attribute {0}".format(item))
AttributeError: 'AttributeDict' object has no attribute raw_chunk_size
|
KeyError
|
def _execute_operand(self, op):
results = self._chunk_results
ref_counts = self._chunk_key_ref_counts
op_keys = self._executed_op_keys
try:
ops = list(self._op_key_to_ops[op.key])
if not self._mock:
# do real execution
# note that currently execution is the chunk-level
# so we pass the first operand's first output to Executor.handle
first_op = ops[0]
Executor.handle(first_op.outputs[0], results)
op_keys.add(first_op.key)
# handle other operands
for rest_op in ops[1:]:
for op_output, rest_op_output in zip(first_op.outputs, rest_op.outputs):
results[rest_op_output.key] = results[op_output.key]
else:
sparse_percent = self._sparse_mock_percent if op.sparse else 1.0
for output in op.outputs:
results[output.key] = output.nbytes * sparse_percent
with self._lock:
for output in itertools.chain(*[op.outputs for op in ops]):
# the output not in the graph will be skipped
if output not in self._graph:
continue
# in case that operand has multiple outputs
# and some of the output not in result keys, delete them
if ref_counts.get(output.key) == 0:
del results[output.key]
# clean the predecessors' results if ref counts equals 0
for pred_chunk in self._graph.iter_predecessors(output):
if pred_chunk.key in ref_counts:
ref_counts[pred_chunk.key] -= 1
if ref_counts[pred_chunk.key] == 0:
del results[pred_chunk.key]
# add successors' operands to queue
for succ_chunk in self._graph.iter_successors(output):
preds = self._graph.predecessors(succ_chunk)
if succ_chunk.op.key not in self._submitted_op_keys and (
len(preds) == 0 or all(pred.op.key in op_keys for pred in preds)
):
self._queue.insert(0, succ_chunk.op)
except Exception:
self._has_error.set()
raise
finally:
self._semaphore.release()
|
def _execute_operand(self, op):
results = self._chunk_results
ref_counts = self._chunk_key_ref_counts
op_keys = self._executed_op_keys
try:
ops = list(self._op_key_to_ops[op.key])
if not self._mock:
# do real execution
# note that currently execution is the chunk-level
# so we pass the first operand's first output to Executor.handle
first_op = ops[0]
Executor.handle(first_op.outputs[0], results)
op_keys.add(first_op.key)
# handle other operands
for rest_op in ops[1:]:
for op_output, rest_op_output in zip(first_op.outputs, rest_op.outputs):
results[rest_op_output.key] = results[op_output.key]
else:
sparse_percent = self._sparse_mock_percent if op.sparse else 1.0
for output in op.outputs:
results[output.key] = output.nbytes * sparse_percent
with self._lock:
for output in itertools.chain(*[op.outputs for op in ops]):
# in case that operand has multiple outputs
# and some of the output not in result keys, delete them
if ref_counts.get(output.key) == 0:
del results[output.key]
# clean the predecessors' results if ref counts equals 0
for pred_chunk in self._graph.iter_predecessors(output):
if pred_chunk.key in ref_counts:
ref_counts[pred_chunk.key] -= 1
if ref_counts[pred_chunk.key] == 0:
del results[pred_chunk.key]
# add successors' operands to queue
for succ_chunk in self._graph.iter_successors(output):
preds = self._graph.predecessors(succ_chunk)
if succ_chunk.op.key not in self._submitted_op_keys and (
len(preds) == 0 or all(pred.op.key in op_keys for pred in preds)
):
self._queue.insert(0, succ_chunk.op)
except Exception:
self._has_error.set()
raise
finally:
self._semaphore.release()
|
https://github.com/mars-project/mars/issues/220
|
In [1]: import mars.tensor as mt
In [2]: a = mt.random.randint(1, 10, (5, 5))
...: arrs = mt.linalg.qr(a)
...: arrs[0].execute()
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/Documents/mars_dev/mars/mars/graph.pyx in mars.graph.DirectedGraph.iter_predecessors()
KeyError: Chunk <op=TensorQR, key=fd777e263eae8d37f1d6057a36de12a4>
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-2-17af77a63ed7> in <module>
1 a = mt.random.randint(1, 10, (5, 5))
2 arrs = mt.linalg.qr(a)
----> 3 arrs[0].execute()
~/Documents/mars_dev/mars/mars/tensor/core.py in execute(self, session, **kw)
275 if session is None:
276 session = Session.default_or_local()
--> 277 return session.run(self, **kw)
278
279 def fetch(self, session=None, **kw):
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
107
108 tensors = tuple(mt.tensor(t) for t in tensors)
--> 109 result = self._sess.run(*tensors, **kw)
110
111 for t in tensors:
~/Documents/mars_dev/mars/mars/session.py in run(self, *tensors, **kw)
49 if 'n_parallel' not in kw:
50 kw['n_parallel'] = cpu_count()
---> 51 res = self._executor.execute_tensors(tensors, **kw)
52 return res
53
~/Documents/mars_dev/mars/mars/utils.py in _wrapped(*args, **kwargs)
343 def _wrapped(*args, **kwargs):
344 _kernel_mode.eager = False
--> 345 return_value = func(*args, **kwargs)
346 _kernel_mode.eager = None
347 return return_value
~/Documents/mars_dev/mars/mars/executor.py in execute_tensors(self, tensors, fetch, n_parallel, n_thread, print_progress, mock, sparse_mock_percent)
470 self.execute_graph(graph, result_keys, n_parallel=n_parallel or n_thread,
471 print_progress=print_progress, mock=mock,
--> 472 sparse_mock_percent=sparse_mock_percent)
473
474 results = self._chunk_result
~/Documents/mars_dev/mars/mars/executor.py in execute_graph(self, graph, keys, n_parallel, print_progress, mock, sparse_mock_percent)
407 print_progress=print_progress, mock=mock,
408 sparse_mock_percent=sparse_mock_percent)
--> 409 return graph_execution.execute(True)
410
411 @kernel_mode
~/Documents/mars_dev/mars/mars/executor.py in execute(self, retval)
336 # wait until all the futures completed
337 for future in executed_futures:
--> 338 future.result()
339
340 if self._mock:
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in result(self, timeout)
423 raise CancelledError()
424 elif self._state == FINISHED:
--> 425 return self.__get_result()
426
427 self._condition.wait(timeout)
~/miniconda3/lib/python3.7/concurrent/futures/_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~/miniconda3/lib/python3.7/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/Documents/mars_dev/mars/mars/executor.py in _execute_operand(self, op)
246
247 # clean the predecessors' results if ref counts equals 0
--> 248 for pred_chunk in self._graph.iter_predecessors(output):
249 if pred_chunk.key in ref_counts:
250 ref_counts[pred_chunk.key] -= 1
~/Documents/mars_dev/mars/mars/graph.pyx in mars.graph.DirectedGraph.iter_predecessors()
KeyError: 'Node Chunk <op=TensorQR, key=fd777e263eae8d37f1d6057a36de12a4> does not exist in the directed graph'
|
KeyError
|
def _main(self, argv=None):
parser = argparse.ArgumentParser(description=self.service_description)
parser.add_argument("-a", "--advertise", help="advertise ip")
parser.add_argument(
"-k",
"--kv-store",
help="address of kv store service, for instance, etcd://localhost:4001",
)
parser.add_argument("-e", "--endpoint", help="endpoint of the service")
parser.add_argument(
"-s",
"--schedulers",
help="endpoint of scheduler, when single scheduler and etcd is not available",
)
parser.add_argument(
"-H",
"--host",
help="host of the scheduler service, only available when `endpoint` is absent",
)
parser.add_argument(
"-p",
"--port",
help="port of the scheduler service, only available when `endpoint` is absent",
)
parser.add_argument("--level", help="log level")
parser.add_argument("--format", help="log format")
parser.add_argument("--log_conf", help="log config file")
parser.add_argument("--inspect", help="inspection endpoint")
parser.add_argument("--load-modules", nargs="*", help="modules to import")
self.config_args(parser)
args = parser.parse_args(argv)
self.args = args
endpoint = args.endpoint
host = args.host
port = args.port
options.kv_store = args.kv_store if args.kv_store else options.kv_store
load_modules = []
for mod in args.load_modules or ():
load_modules.extend(mod.split(","))
if not args.load_modules:
load_module_str = os.environ.get("MARS_LOAD_MODULES")
if load_module_str:
load_modules = load_module_str.split(",")
load_modules.append("mars.tensor")
[__import__(m, globals(), locals(), []) for m in load_modules]
self.service_logger.info("Modules %s loaded", ",".join(load_modules))
self.n_process = 1
self.config_service()
self.config_logging()
if not host:
host = args.advertise or "0.0.0.0"
if not endpoint and port:
endpoint = host + ":" + port
try:
self.validate_arguments()
except StartArgumentError as ex:
parser.error("Failed to start application: %s" % ex)
if getattr(self, "require_pool", True):
self.endpoint, self.pool = self._try_create_pool(
endpoint=endpoint, host=host, port=port
)
self.service_logger.info(
"%s started at %s.", self.service_description, self.endpoint
)
self.main_loop()
|
def _main(self, argv=None):
parser = argparse.ArgumentParser(description=self.service_description)
parser.add_argument("-a", "--advertise", help="advertise ip")
parser.add_argument(
"-k",
"--kv-store",
help="address of kv store service, for instance, etcd://localhost:4001",
)
parser.add_argument("-e", "--endpoint", help="endpoint of the service")
parser.add_argument(
"-s",
"--schedulers",
help="endpoint of scheduler, when single scheduler and etcd is not available",
)
parser.add_argument(
"-H",
"--host",
help="host of the scheduler service, only available when `endpoint` is absent",
)
parser.add_argument(
"-p",
"--port",
help="port of the scheduler service, only available when `endpoint` is absent",
)
parser.add_argument("--level", help="log level")
parser.add_argument("--format", help="log format")
parser.add_argument("--log_conf", help="log config file")
parser.add_argument("--inspect", help="inspection endpoint")
parser.add_argument("--load-modules", nargs="*", help="modules to import")
parser.add_argument("--profile", nargs="?", default=-1, help="profile application")
self.config_args(parser)
args = parser.parse_args(argv)
self.args = args
endpoint = args.endpoint
host = args.host
port = args.port
options.kv_store = args.kv_store if args.kv_store else options.kv_store
load_modules = []
for mod in args.load_modules or ():
load_modules.extend(mod.split(","))
if not args.load_modules:
load_module_str = os.environ.get("MARS_LOAD_MODULES")
if load_module_str:
load_modules = load_module_str.split(",")
load_modules.append("mars.tensor")
[__import__(m, globals(), locals(), []) for m in load_modules]
if load_modules:
self.service_logger.info("Modules %s loaded", ",".join(load_modules))
self.n_process = 1
self.config_service()
self.config_logging()
if not host:
host = args.advertise or "0.0.0.0"
if not endpoint and port:
endpoint = host + ":" + port
try:
self.validate_arguments()
except StartArgumentError as ex:
parser.error("Failed to start application: %s" % ex)
if getattr(self, "require_pool", True):
self.endpoint, self.pool = self._try_create_pool(
endpoint=endpoint, host=host, port=port
)
self.service_logger.info(
"%s started at %s.", self.service_description, self.endpoint
)
self.main_loop()
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def main_loop(self):
try:
with self.pool:
try:
self.start()
self._running = True
while True:
self.pool.join(1)
stopped = []
for idx, proc in enumerate(self.pool.processes):
if not proc.is_alive():
stopped.append(idx)
if stopped:
self.handle_process_down(stopped)
except:
self._running = False
self.stop()
finally:
self._running = False
|
def main_loop(self):
if self.args.profile == -1:
profile_file = None
else:
profile_file = self.args.profile or (
"mars_" + self.__class__.__name__ + ".prof"
)
try:
if profile_file:
import yappi
yappi.set_clock_type("wall")
yappi.start(builtins=False, profile_threads=False)
with self.pool:
try:
self.start()
self._running = True
while True:
self.pool.join(1)
stopped = []
for idx, proc in enumerate(self.pool.processes):
if not proc.is_alive():
stopped.append(idx)
if stopped:
self.handle_process_down(stopped)
except:
self._running = False
self.stop()
finally:
self._running = False
if profile_file:
import yappi
yappi.logger = logging.getLogger(__name__)
p = yappi.convert2pstats(yappi.get_func_stats())
p.strip_dirs()
p.sort_stats("time")
p.print_stats(40)
p.dump_stats(profile_file)
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def _get_schedulers(self):
schedulers = [
s.key.rsplit("/", 1)[1] for s in self._client.read(SCHEDULER_PATH).children
]
logger.debug("Schedulers obtained. Results: %r", schedulers)
return [to_str(s) for s in schedulers]
|
def _get_schedulers(self):
schedulers = [
s.key.rsplit("/", 1)[1] for s in self._client.read(SCHEDULER_PATH).children
]
logger.debug("Schedulers obtained. Results: %r", schedulers)
return schedulers
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def watch(self):
for new_schedulers in self._client.eternal_watch(SCHEDULER_PATH):
self._cluster_info_ref.set_schedulers([to_str(s) for s in new_schedulers])
|
def watch(self):
for new_schedulers in self._client.eternal_watch(SCHEDULER_PATH):
self._cluster_info_ref.set_schedulers(new_schedulers)
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def _values_(self):
return [
getattr(self, k, None) for k in self._keys_ if k not in self._no_copy_attrs_
]
|
def _values_(self):
return [
getattr(self, k, None) for k in self.__slots__ if k not in self._no_copy_attrs_
]
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def update_key(self):
object.__setattr__(
self,
"_key",
tokenize(
type(self), *(getattr(self, k, None) for k in self._keys_ if k != "_index")
),
)
|
def update_key(self):
object.__setattr__(
self,
"_key",
tokenize(
type(self),
*(getattr(self, k, None) for k in self.__slots__ if k != "_index"),
),
)
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def update_key(self):
args = tuple(getattr(self, k, None) for k in self._keys_)
if self.state is None:
args += (np.random.random(),)
self._key = tokenize(type(self), *args)
|
def update_key(self):
args = tuple(getattr(self, k, None) for k in self.__slots__)
if self.state is None:
args += (np.random.random(),)
self._key = tokenize(type(self), *args)
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def start(self, endpoint, schedulers, pool):
"""
there are two way to start a scheduler
1) if options.kv_store is specified as an etcd address, the endpoint will be written
into kv-storage to indicate that this scheduler is one the schedulers,
and the etcd is used as a service discover.
2) if options.kv_store is not an etcd address, there will be only one scheduler
"""
kv_store = kvstore.get(options.kv_store)
kv_store.write("/schedulers/%s" % endpoint, dir=True)
if not isinstance(kv_store, kvstore.LocalKVStore):
# set etcd as service discover
logger.info("Mars Scheduler started with kv store %s.", options.kv_store)
service_discover_addr = options.kv_store
all_schedulers = None
# create KVStoreActor when there is a distributed KV store
self._kv_store_ref = pool.create_actor(
KVStoreActor, uid=KVStoreActor.default_name()
)
else:
# single scheduler
logger.info("Mars Scheduler started in standalone mode.")
service_discover_addr = None
all_schedulers = {endpoint}
if isinstance(schedulers, six.string_types):
schedulers = schedulers.split(",")
if schedulers:
all_schedulers.update(schedulers)
all_schedulers = list(all_schedulers)
# create ClusterInfoActor
self._cluster_info_ref = pool.create_actor(
ClusterInfoActor,
all_schedulers,
service_discover_addr,
uid=ClusterInfoActor.default_name(),
)
# create ChunkMetaActor
self._chunk_meta_ref = pool.create_actor(
ChunkMetaActor, uid=ChunkMetaActor.default_name()
)
# create SessionManagerActor
self._session_manager_ref = pool.create_actor(
SessionManagerActor, uid=SessionManagerActor.default_name()
)
# create AssignerActor
self._assigner_ref = pool.create_actor(
AssignerActor, uid=AssignerActor.default_name()
)
# create ResourceActor
self._resource_ref = pool.create_actor(
ResourceActor, uid=ResourceActor.default_name()
)
# create NodeInfoActor
self._node_info_ref = pool.create_actor(
NodeInfoActor, uid=NodeInfoActor.default_name()
)
kv_store.write(
"/schedulers/%s/meta" % endpoint,
json.dumps(self._resource_ref.get_workers_meta()),
)
# create ResultReceiverActor
self._result_receiver_ref = pool.create_actor(
ResultReceiverActor, uid=ResultReceiverActor.default_name()
)
|
def start(self, endpoint, schedulers, pool):
"""
there are two way to start a scheduler
1) if options.kv_store is specified as an etcd address, the endpoint will be written
into kv-storage to indicate that this scheduler is one the schedulers,
and the etcd is used as a service discover.
2) if options.kv_store is not an etcd address, there will be only one scheduler
"""
kv_store = kvstore.get(options.kv_store)
kv_store.write("/schedulers/%s" % endpoint, dir=True)
if not isinstance(kv_store, kvstore.LocalKVStore):
# set etcd as service discover
logger.info("Mars Scheduler started with kv store %s.", options.kv_store)
service_discover_addr = options.kv_store
all_schedulers = None
# create KVStoreActor when there is a distributed KV store
self._kv_store_ref = pool.create_actor(
KVStoreActor, uid=KVStoreActor.default_name()
)
else:
# single scheduler
logger.info("Mars Scheduler started in standalone mode.")
service_discover_addr = None
all_schedulers = {endpoint}
if schedulers:
all_schedulers.update(schedulers)
all_schedulers = list(all_schedulers)
# create ClusterInfoActor
self._cluster_info_ref = pool.create_actor(
ClusterInfoActor,
all_schedulers,
service_discover_addr,
uid=ClusterInfoActor.default_name(),
)
# create ChunkMetaActor
self._chunk_meta_ref = pool.create_actor(
ChunkMetaActor, uid=ChunkMetaActor.default_name()
)
# create SessionManagerActor
self._session_manager_ref = pool.create_actor(
SessionManagerActor, uid=SessionManagerActor.default_name()
)
# create AssignerActor
self._assigner_ref = pool.create_actor(
AssignerActor, uid=AssignerActor.default_name()
)
# create ResourceActor
self._resource_ref = pool.create_actor(
ResourceActor, uid=ResourceActor.default_name()
)
# create NodeInfoActor
self._node_info_ref = pool.create_actor(
NodeInfoActor, uid=NodeInfoActor.default_name()
)
kv_store.write(
"/schedulers/%s/meta" % endpoint,
json.dumps(self._resource_ref.get_workers_meta()),
)
# create ResultReceiverActor
self._result_receiver_ref = pool.create_actor(
ResultReceiverActor, uid=ResultReceiverActor.default_name()
)
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def _prepare_graph_inputs(self, session_id, graph_key):
"""
Load input data from spilled storage and other workers
:param session_id: session id
:param graph_key: key of the execution graph
"""
graph_record = self._graph_records[(session_id, graph_key)]
if graph_record.stop_requested:
raise ExecutionInterrupted
unspill_keys = []
transfer_keys = []
logger.debug("Start preparing input data for graph %s", graph_key)
self._update_state(session_id, graph_key, ExecutionState.PREPARING_INPUTS)
prepare_promises = []
chunks_use_once = graph_record.chunks_use_once
handled_keys = set()
for chunk in graph_record.graph:
if not isinstance(chunk.op, TensorFetch):
continue
if chunk.key in handled_keys:
continue
handled_keys.add(chunk.key)
if self._chunk_holder_ref.is_stored(chunk.key):
# data already in plasma: we just pin it
pinned_keys = self._chunk_holder_ref.pin_chunks(graph_key, chunk.key)
if chunk.key in pinned_keys:
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
continue
if spill_exists(chunk.key):
if chunk.key in chunks_use_once:
# input only use in current operand, we only need to load it into process memory
continue
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
load_fun = partial(
lambda gk, ck, *_: self._chunk_holder_ref.pin_chunks(gk, ck),
graph_key,
chunk.key,
)
unspill_keys.append(chunk.key)
prepare_promises.append(
ensure_chunk(self, session_id, chunk.key, move_to_end=True).then(
load_fun
)
)
continue
# load data from another worker
chunk_key = to_str(chunk.key)
chunk_meta = self.get_meta_ref(session_id, chunk_key).get_chunk_meta(
session_id, chunk_key
)
if chunk_meta is None:
raise DependencyMissing("Dependency %s not met on sending." % chunk_key)
worker_results = chunk_meta.workers
worker_priorities = []
for worker_ip in worker_results:
# todo sort workers by speed of network and other possible factors
worker_priorities.append((worker_ip, (0,)))
transfer_keys.append(chunk.key)
# fetch data from other workers, if one fails, try another
sorted_workers = sorted(worker_priorities, key=lambda pr: pr[1])
p = self._fetch_remote_data(
session_id,
graph_key,
chunk.key,
sorted_workers[0][0],
ensure_cached=chunk.key not in chunks_use_once,
)
for wp in sorted_workers[1:]:
p = p.catch(
functools.partial(
self._fetch_remote_data,
session_id,
graph_key,
chunk.key,
wp[0],
ensure_cached=chunk.key not in chunks_use_once,
)
)
prepare_promises.append(p)
logger.debug(
"Graph key %s: Targets %r, unspill keys %r, transfer keys %r",
graph_key,
graph_record.targets,
unspill_keys,
transfer_keys,
)
return promise.all_(prepare_promises)
|
def _prepare_graph_inputs(self, session_id, graph_key):
"""
Load input data from spilled storage and other workers
:param session_id: session id
:param graph_key: key of the execution graph
"""
graph_record = self._graph_records[(session_id, graph_key)]
if graph_record.stop_requested:
raise ExecutionInterrupted
unspill_keys = []
transfer_keys = []
logger.debug("Start preparing input data for graph %s", graph_key)
self._update_state(session_id, graph_key, ExecutionState.PREPARING_INPUTS)
prepare_promises = []
chunks_use_once = graph_record.chunks_use_once
handled_keys = set()
for chunk in graph_record.graph:
if not isinstance(chunk.op, TensorFetch):
continue
if chunk.key in handled_keys:
continue
handled_keys.add(chunk.key)
if self._chunk_holder_ref.is_stored(chunk.key):
# data already in plasma: we just pin it
pinned_keys = self._chunk_holder_ref.pin_chunks(graph_key, chunk.key)
if chunk.key in pinned_keys:
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
continue
if spill_exists(chunk.key):
if chunk.key in chunks_use_once:
# input only use in current operand, we only need to load it into process memory
continue
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
load_fun = partial(
lambda gk, ck, *_: self._chunk_holder_ref.pin_chunks(gk, ck),
graph_key,
chunk.key,
)
unspill_keys.append(chunk.key)
prepare_promises.append(
ensure_chunk(self, session_id, chunk.key, move_to_end=True).then(
load_fun
)
)
continue
# load data from another worker
chunk_meta = self.get_meta_ref(session_id, chunk.key).get_chunk_meta(
session_id, chunk.key
)
if chunk_meta is None:
raise DependencyMissing("Dependency %s not met on sending." % chunk.key)
worker_results = chunk_meta.workers
worker_priorities = []
for worker_ip in worker_results:
# todo sort workers by speed of network and other possible factors
worker_priorities.append((worker_ip, (0,)))
transfer_keys.append(chunk.key)
# fetch data from other workers, if one fails, try another
sorted_workers = sorted(worker_priorities, key=lambda pr: pr[1])
p = self._fetch_remote_data(
session_id,
graph_key,
chunk.key,
sorted_workers[0][0],
ensure_cached=chunk.key not in chunks_use_once,
)
for wp in sorted_workers[1:]:
p = p.catch(
functools.partial(
self._fetch_remote_data,
session_id,
graph_key,
chunk.key,
wp[0],
ensure_cached=chunk.key not in chunks_use_once,
)
)
prepare_promises.append(p)
logger.debug(
"Graph key %s: Targets %r, unspill keys %r, transfer keys %r",
graph_key,
graph_record.targets,
unspill_keys,
transfer_keys,
)
return promise.all_(prepare_promises)
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def start(
self, endpoint, pool, distributed=True, schedulers=None, process_start_index=0
):
if schedulers:
if isinstance(schedulers, six.string_types):
schedulers = schedulers.split(",")
service_discover_addr = None
else:
schedulers = None
service_discover_addr = options.kv_store
if distributed:
# create ClusterInfoActor
self._cluster_info_ref = pool.create_actor(
ClusterInfoActor,
schedulers=schedulers,
service_discover_addr=service_discover_addr,
uid=ClusterInfoActor.default_name(),
)
# create process daemon
from .daemon import WorkerDaemonActor
actor_holder = self._daemon_ref = pool.create_actor(
WorkerDaemonActor, uid=WorkerDaemonActor.default_name()
)
# create StatusActor
port_str = endpoint.rsplit(":", 1)[-1]
self._status_ref = pool.create_actor(
StatusActor,
self._advertise_addr + ":" + port_str,
uid=StatusActor.default_name(),
)
else:
# create StatusActor
self._status_ref = pool.create_actor(
StatusActor, endpoint, uid=StatusActor.default_name()
)
actor_holder = pool
if self._ignore_avail_mem:
# start a QuotaActor instead of MemQuotaActor to avoid memory size detection
# for debug purpose only, DON'T USE IN PRODUCTION
self._mem_quota_ref = pool.create_actor(
QuotaActor, self._soft_mem_limit, uid=MemQuotaActor.default_name()
)
else:
self._mem_quota_ref = pool.create_actor(
MemQuotaActor,
self._soft_quota_limit,
self._hard_mem_limit,
uid=MemQuotaActor.default_name(),
)
# create ChunkHolderActor
self._chunk_holder_ref = pool.create_actor(
ChunkHolderActor, self._cache_mem_limit, uid=ChunkHolderActor.default_name()
)
# create TaskQueueActor
self._task_queue_ref = pool.create_actor(
TaskQueueActor, uid=TaskQueueActor.default_name()
)
# create DispatchActor
self._dispatch_ref = pool.create_actor(
DispatchActor, uid=DispatchActor.default_name()
)
# create ExecutionActor
self._execution_ref = pool.create_actor(
ExecutionActor, uid=ExecutionActor.default_name()
)
# create CpuCalcActor
if not distributed:
self._n_cpu_process = pool.cluster_info.n_process - 1 - process_start_index
for cpu_id in range(self._n_cpu_process):
uid = "w:%d:mars-calc-%d-%d" % (cpu_id + 1, os.getpid(), cpu_id)
actor = actor_holder.create_actor(CpuCalcActor, uid=uid)
self._cpu_calc_actors.append(actor)
if distributed:
# create SenderActor and ReceiverActor
start_pid = 1 + process_start_index + self._n_cpu_process
for sender_id in range(self._n_io_process):
uid = "w:%d:mars-sender-%d-%d" % (
start_pid + sender_id,
os.getpid(),
sender_id,
)
actor = actor_holder.create_actor(SenderActor, uid=uid)
self._sender_actors.append(actor)
for receiver_id in range(2 * self._n_io_process):
uid = "w:%d:mars-receiver-%d-%d" % (
start_pid + receiver_id // 2,
os.getpid(),
receiver_id,
)
actor = actor_holder.create_actor(ReceiverActor, uid=uid)
self._receiver_actors.append(actor)
# create ProcessHelperActor
for proc_id in range(pool.cluster_info.n_process - process_start_index):
uid = "w:%d:mars-process-helper-%d-%d" % (proc_id, os.getpid(), proc_id)
actor = actor_holder.create_actor(ProcessHelperActor, uid=uid)
self._process_helper_actors.append(actor)
# create ResultSenderActor
self._result_sender_ref = pool.create_actor(
ResultSenderActor, uid=ResultSenderActor.default_name()
)
# create SpillActor
start_pid = pool.cluster_info.n_process - 1
if options.worker.spill_directory:
for spill_id in range(len(options.worker.spill_directory) * 2):
uid = "w:%d:mars-spill-%d-%d" % (start_pid, os.getpid(), spill_id)
actor = actor_holder.create_actor(SpillActor, uid=uid)
self._spill_actors.append(actor)
|
def start(
self, endpoint, pool, distributed=True, schedulers=None, process_start_index=0
):
if schedulers:
if isinstance(schedulers, six.string_types):
schedulers = [schedulers]
service_discover_addr = None
else:
schedulers = None
service_discover_addr = options.kv_store
if distributed:
# create ClusterInfoActor
self._cluster_info_ref = pool.create_actor(
ClusterInfoActor,
schedulers=schedulers,
service_discover_addr=service_discover_addr,
uid=ClusterInfoActor.default_name(),
)
# create process daemon
from .daemon import WorkerDaemonActor
actor_holder = self._daemon_ref = pool.create_actor(
WorkerDaemonActor, uid=WorkerDaemonActor.default_name()
)
# create StatusActor
port_str = endpoint.rsplit(":", 1)[-1]
self._status_ref = pool.create_actor(
StatusActor,
self._advertise_addr + ":" + port_str,
uid=StatusActor.default_name(),
)
else:
# create StatusActor
self._status_ref = pool.create_actor(
StatusActor, endpoint, uid=StatusActor.default_name()
)
actor_holder = pool
if self._ignore_avail_mem:
# start a QuotaActor instead of MemQuotaActor to avoid memory size detection
# for debug purpose only, DON'T USE IN PRODUCTION
self._mem_quota_ref = pool.create_actor(
QuotaActor, self._soft_mem_limit, uid=MemQuotaActor.default_name()
)
else:
self._mem_quota_ref = pool.create_actor(
MemQuotaActor,
self._soft_quota_limit,
self._hard_mem_limit,
uid=MemQuotaActor.default_name(),
)
# create ChunkHolderActor
self._chunk_holder_ref = pool.create_actor(
ChunkHolderActor, self._cache_mem_limit, uid=ChunkHolderActor.default_name()
)
# create TaskQueueActor
self._task_queue_ref = pool.create_actor(
TaskQueueActor, uid=TaskQueueActor.default_name()
)
# create DispatchActor
self._dispatch_ref = pool.create_actor(
DispatchActor, uid=DispatchActor.default_name()
)
# create ExecutionActor
self._execution_ref = pool.create_actor(
ExecutionActor, uid=ExecutionActor.default_name()
)
# create CpuCalcActor
if not distributed:
self._n_cpu_process = pool.cluster_info.n_process - 1 - process_start_index
for cpu_id in range(self._n_cpu_process):
uid = "w:%d:mars-calc-%d-%d" % (cpu_id + 1, os.getpid(), cpu_id)
actor = actor_holder.create_actor(CpuCalcActor, uid=uid)
self._cpu_calc_actors.append(actor)
if distributed:
# create SenderActor and ReceiverActor
start_pid = 1 + process_start_index + self._n_cpu_process
for sender_id in range(self._n_io_process):
uid = "w:%d:mars-sender-%d-%d" % (
start_pid + sender_id,
os.getpid(),
sender_id,
)
actor = actor_holder.create_actor(SenderActor, uid=uid)
self._sender_actors.append(actor)
for receiver_id in range(2 * self._n_io_process):
uid = "w:%d:mars-receiver-%d-%d" % (
start_pid + receiver_id // 2,
os.getpid(),
receiver_id,
)
actor = actor_holder.create_actor(ReceiverActor, uid=uid)
self._receiver_actors.append(actor)
# create ProcessHelperActor
for proc_id in range(pool.cluster_info.n_process - process_start_index):
uid = "w:%d:mars-process-helper-%d-%d" % (proc_id, os.getpid(), proc_id)
actor = actor_holder.create_actor(ProcessHelperActor, uid=uid)
self._process_helper_actors.append(actor)
# create ResultSenderActor
self._result_sender_ref = pool.create_actor(
ResultSenderActor, uid=ResultSenderActor.default_name()
)
# create SpillActor
start_pid = pool.cluster_info.n_process - 1
if options.worker.spill_directory:
for spill_id in range(len(options.worker.spill_directory) * 2):
uid = "w:%d:mars-spill-%d-%d" % (start_pid, os.getpid(), spill_id)
actor = actor_holder.create_actor(SpillActor, uid=uid)
self._spill_actors.append(actor)
|
https://github.com/mars-project/mars/issues/201
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 444, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 448, in mars.actors.pool.gevent_pool.ActorRemoteHelper._send_remote
File "mars/actors/pool/gevent_pool.pyx", line 437, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 442, in mars.actors.pool.gevent_pool.ActorRemoteHelper._new_connection
File "mars/actors/pool/gevent_pool.pyx", line 365, in mars.actors.pool.gevent_pool.Connections.connect
File "mars/actors/pool/gevent_pool.pyx", line 380, in mars.actors.pool.gevent_pool.Connections.connect
File "/Users/wenjun.swj/miniconda3/lib/python3.7/site-packages/gevent/socket.py", line 80, in create_connection
host, port = address
ValueError: not enough values to unpack (expected 2, got 1)
|
ValueError
|
def execute_graph(
self,
session_id,
graph_key,
graph_ser,
io_meta,
data_sizes,
send_targets=None,
callback=None,
):
"""
Submit graph to the worker and control the execution
:param session_id: session id
:param graph_key: graph key
:param graph_ser: serialized executable graph
:param io_meta: io meta of the chunk
:param data_sizes: data size of each input chunk, as a dict
:param send_targets: targets to send results after execution
:param callback: promise callback
"""
from ..tensor.expressions.datasource import TensorFetchChunk
data_sizes = data_sizes or dict()
graph = deserialize_graph(graph_ser)
targets = io_meta["chunks"]
chunks_use_once = set(io_meta.get("input_chunks", [])) - set(
io_meta.get("shared_input_chunks", [])
)
graph_ops = ",".join(
type(c.op).__name__ for c in graph if not isinstance(c.op, TensorFetchChunk)
)
logger.debug(
"Worker graph %s(%s) targeting at %r accepted.", graph_key, graph_ops, targets
)
self._update_stage_info(session_id, graph_key, graph_ops, "allocate_resource")
# add callbacks to callback store
if callback is None:
callback = []
elif not isinstance(callback, list):
callback = [callback]
self._callbacks[graph_key].extend(callback)
if graph_key in self._callback_cache:
del self._callback_cache[graph_key]
unspill_keys = []
transfer_keys = []
calc_keys = set()
alloc_mem_batch = dict()
alloc_cache_batch = dict()
input_chunk_keys = dict()
if self._status_ref:
self.estimate_graph_finish_time(graph_key, graph)
# collect potential allocation sizes
for chunk in graph:
if not isinstance(chunk.op, TensorFetchChunk) and chunk.key in targets:
# use estimated size as potential allocation size
calc_keys.add(chunk.key)
alloc_mem_batch[chunk.key] = chunk.nbytes * 2
alloc_cache_batch[chunk.key] = chunk.nbytes
else:
# use actual size as potential allocation size
input_chunk_keys[chunk.key] = data_sizes.get(chunk.key, chunk.nbytes)
calc_keys = list(calc_keys)
keys_to_pin = list(input_chunk_keys.keys())
try:
self._pin_requests[graph_key] = set(
self._chunk_holder_ref.pin_chunks(graph_key, keys_to_pin)
)
except PinChunkFailed:
# cannot pin input chunks: retry later
callback = self._callbacks[graph_key]
self._cleanup_graph(session_id, graph_key)
retry_delay = self._retry_delays[graph_key] + 0.5 + random.random()
self._retry_delays[graph_key] = min(1 + self._retry_delays[graph_key], 30)
self.ref().execute_graph(
session_id,
graph_key,
graph_ser,
io_meta,
data_sizes,
send_targets,
callback,
_tell=True,
_delay=retry_delay,
)
return
load_chunk_sizes = dict(
(k, v)
for k, v in input_chunk_keys.items()
if k not in self._pin_requests[graph_key]
)
alloc_mem_batch.update(
(self._build_load_key(graph_key, k), v)
for k, v in load_chunk_sizes.items()
if k in chunks_use_once
)
self._chunk_holder_ref.spill_size(sum(alloc_cache_batch.values()), _tell=True)
# build allocation promises
batch_alloc_promises = []
if alloc_mem_batch:
self._mem_requests[graph_key] = list(alloc_mem_batch.keys())
batch_alloc_promises.append(
self._mem_quota_ref.request_batch_quota(alloc_mem_batch, _promise=True)
)
@log_unhandled
def _prepare_inputs(*_):
if graph_key in self._stop_requests:
raise ExecutionInterrupted
logger.debug("Start preparing input data for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "prepare_inputs")
prepare_promises = []
handled_keys = set()
for chunk in graph:
if chunk.key in handled_keys:
continue
if not isinstance(chunk.op, TensorFetchChunk):
continue
handled_keys.add(chunk.key)
if self._chunk_holder_ref.is_stored(chunk.key):
# data already in plasma: we just pin it
pinned_keys = self._chunk_holder_ref.pin_chunks(graph_key, chunk.key)
if chunk.key in pinned_keys:
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
continue
if spill_exists(chunk.key):
if chunk.key in chunks_use_once:
# input only use in current operand, we only need to load it into process memory
continue
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
load_fun = partial(
lambda gk, ck, *_: self._chunk_holder_ref.pin_chunks(gk, ck),
graph_key,
chunk.key,
)
unspill_keys.append(chunk.key)
prepare_promises.append(
ensure_chunk(self, session_id, chunk.key, move_to_end=True).then(
load_fun
)
)
continue
# load data from another worker
chunk_meta = self.get_meta_ref(session_id, chunk.key).get_chunk_meta(
session_id, chunk.key
)
if chunk_meta is None:
raise DependencyMissing("Dependency %s not met on sending." % chunk.key)
worker_priorities = []
for w in chunk_meta.workers:
# todo sort workers by speed of network and other possible factors
worker_priorities.append((w, (0,)))
transfer_keys.append(chunk.key)
# fetch data from other workers, if one fails, try another
sorted_workers = sorted(worker_priorities, key=lambda pr: pr[1])
p = self._fetch_remote_data(
session_id,
graph_key,
chunk.key,
sorted_workers[0][0],
ensure_cached=chunk.key not in chunks_use_once,
)
for wp in sorted_workers[1:]:
p = p.catch(
functools.partial(
self._fetch_remote_data,
session_id,
graph_key,
chunk.key,
wp[0],
ensure_cached=chunk.key not in chunks_use_once,
)
)
prepare_promises.append(p)
logger.debug(
"Graph key %s: Targets %r, unspill keys %r, transfer keys %r",
graph_key,
targets,
unspill_keys,
transfer_keys,
)
return promise.all_(prepare_promises)
@log_unhandled
def _wait_free_slot(*_):
logger.debug("Waiting for free CPU slot for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "fetch_free_slot")
return self._dispatch_ref.get_free_slot("cpu", _promise=True)
@log_unhandled
def _send_calc_request(calc_uid):
if graph_key in self._stop_requests:
raise ExecutionInterrupted
# get allocation for calc, in case that memory exhausts
target_allocs = dict()
for chunk in graph:
if isinstance(chunk.op, TensorFetchChunk):
if not self._chunk_holder_ref.is_stored(chunk.key):
alloc_key = self._build_load_key(graph_key, chunk.key)
if alloc_key in alloc_mem_batch:
target_allocs[alloc_key] = alloc_mem_batch[alloc_key]
elif chunk.key in targets:
target_allocs[chunk.key] = alloc_mem_batch[chunk.key]
logger.debug("Start calculation for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "calculate")
calc_ref = self.promise_ref(calc_uid)
self.estimate_graph_finish_time(graph_key, graph, calc_fetch=False)
# make sure that memory suffices before actually run execution
return (
self._mem_quota_ref.request_batch_quota(target_allocs, _promise=True)
.then(
lambda *_: self._deallocate_scheduler_resource(
session_id, graph_key, delay=2
)
)
.then(
lambda *_: calc_ref.calc(session_id, graph_ser, targets, _promise=True)
)
)
@log_unhandled
def _dump_cache(inproc_uid, save_sizes):
# do some clean up
self._deallocate_scheduler_resource(session_id, graph_key)
inproc_ref = self.promise_ref(inproc_uid)
if graph_key in self._stop_requests:
inproc_ref.remove_cache(calc_keys, _tell=True)
raise ExecutionInterrupted
self._update_stage_info(session_id, graph_key, graph_ops, "dump_cache")
logger.debug(
"Graph %s: Start putting %r into shared cache. Target actor uid %s.",
graph_key,
calc_keys,
inproc_uid,
)
self._chunk_holder_ref.unpin_chunks(
graph_key, list(set(c.key for c in graph)), _tell=True
)
if logger.getEffectiveLevel() <= logging.DEBUG:
self._dump_execution_stages()
# self._cache_ref.dump_cache_status(_tell=True)
self._size_cache[graph_key] = save_sizes
if not send_targets:
# no endpoints to send, dump keys into shared memory and return
logger.debug(
"Worker graph %s(%s) finished execution. Dumping %r into plasma...",
graph_key,
graph_ops,
calc_keys,
)
return inproc_ref.dump_cache(calc_keys, _promise=True)
else:
# dump keys into shared memory and send
logger.debug(
"Worker graph %s(%s) finished execution. Dumping %r into plasma "
"while actively transferring %r...",
graph_key,
graph_ops,
calc_keys,
send_targets,
)
return inproc_ref.dump_cache(calc_keys, _promise=True).then(
_do_active_transfer
)
@log_unhandled
def _do_active_transfer(*_):
# transfer the result chunk to expected endpoints
@log_unhandled
def _send_chunk(sender_uid, chunk_key, target_addrs):
sender_ref = self.promise_ref(sender_uid)
logger.debug("Request for chunk %s sent to %s", chunk_key, target_addrs)
return sender_ref.send_data(
session_id,
chunk_key,
target_addrs,
ensure_cached=False,
timeout=options.worker.prepare_data_timeout,
_promise=True,
)
if graph_key in self._mem_requests:
self._mem_quota_ref.release_quotas(
self._mem_requests[graph_key], _tell=True
)
del self._mem_requests[graph_key]
promises = []
for key, targets in send_targets.items():
promises.append(
self._dispatch_ref.get_free_slot("sender", _promise=True)
.then(partial(_send_chunk, chunk_key=key, target_addrs=targets))
.catch(lambda *_: None)
)
return promise.all_(promises)
@log_unhandled
def _handle_rejection(*exc):
# some error occurred...
logger.debug("Entering _handle_rejection() for graph %s", graph_key)
if logger.getEffectiveLevel() <= logging.DEBUG:
self._dump_execution_stages()
# self._cache_ref.dump_cache_status(_tell=True)
if graph_key in self._stop_requests:
self._stop_requests.remove(graph_key)
self._mem_quota_ref.cancel_requests(list(alloc_mem_batch.keys()), _tell=True)
if not issubclass(exc[0], _WORKER_RETRY_ERRORS):
# exception not retryable: call back to scheduler
if isinstance(exc[0], ExecutionInterrupted):
logger.warning("Execution of graph %s interrupted.", graph_key)
else:
try:
six.reraise(*exc)
except:
logger.exception(
"Unexpected error occurred in executing %s", graph_key
)
self._invoke_finish_callbacks(
session_id, graph_key, *exc, **dict(_accept=False)
)
return
logger.debug(
"Graph %s rejected from execution because of %s", graph_key, exc[0].__name__
)
cb = self._callbacks[graph_key]
self._cleanup_graph(session_id, graph_key)
if issubclass(exc[0], ObjectNotInPlasma):
retry_delay = 0
else:
retry_delay = self._retry_delays[graph_key] + 0.5 + random.random()
self._retry_delays[graph_key] = min(1 + self._retry_delays[graph_key], 30)
self.ref().execute_graph(
session_id,
graph_key,
graph_ser,
io_meta,
data_sizes,
send_targets,
cb,
_tell=True,
_delay=retry_delay,
)
promise.all_(batch_alloc_promises).then(_prepare_inputs).then(_wait_free_slot).then(
_send_calc_request
).then(_dump_cache).then(
lambda *_: self._invoke_finish_callbacks(
session_id, graph_key, self._size_cache.get(graph_key)
)
).catch(_handle_rejection)
|
def execute_graph(
self,
session_id,
graph_key,
graph_ser,
io_meta,
data_sizes,
send_targets=None,
callback=None,
):
"""
Submit graph to the worker and control the execution
:param session_id: session id
:param graph_key: graph key
:param graph_ser: serialized executable graph
:param io_meta: io meta of the chunk
:param data_sizes: data size of each input chunk, as a dict
:param send_targets: targets to send results after execution
:param callback: promise callback
"""
from ..tensor.expressions.datasource import TensorFetchChunk
data_sizes = data_sizes or dict()
graph = deserialize_graph(graph_ser)
targets = io_meta["chunks"]
chunks_use_once = set(io_meta.get("input_chunks", [])) - set(
io_meta.get("shared_input_chunks", [])
)
graph_ops = ",".join(
type(c.op).__name__ for c in graph if not isinstance(c.op, TensorFetchChunk)
)
logger.debug(
"Worker graph %s(%s) targeting at %r accepted.", graph_key, graph_ops, targets
)
self._update_stage_info(session_id, graph_key, graph_ops, "allocate_resource")
# add callbacks to callback store
if callback is None:
callback = []
elif not isinstance(callback, list):
callback = [callback]
self._callbacks[graph_key].extend(callback)
if graph_key in self._callback_cache:
del self._callback_cache[graph_key]
unspill_keys = []
transfer_keys = []
calc_keys = set()
alloc_mem_batch = dict()
alloc_cache_batch = dict()
input_chunk_keys = dict()
if self._status_ref:
self.estimate_graph_finish_time(graph_key, graph)
# collect potential allocation sizes
for chunk in graph:
if not isinstance(chunk.op, TensorFetchChunk) and chunk.key in targets:
# use estimated size as potential allocation size
calc_keys.add(chunk.key)
alloc_mem_batch[chunk.key] = chunk.nbytes * 2
alloc_cache_batch[chunk.key] = chunk.nbytes
else:
# use actual size as potential allocation size
input_chunk_keys[chunk.key] = data_sizes.get(chunk.key, chunk.nbytes)
calc_keys = list(calc_keys)
keys_to_pin = list(input_chunk_keys.keys())
try:
self._pin_requests[graph_key] = set(
self._chunk_holder_ref.pin_chunks(graph_key, keys_to_pin)
)
except PinChunkFailed:
# cannot pin input chunks: retry later
callback = self._callbacks[graph_key]
self._cleanup_graph(session_id, graph_key)
retry_delay = self._retry_delays[graph_key] + 0.5 + random.random()
self._retry_delays[graph_key] = min(1 + self._retry_delays[graph_key], 30)
self.ref().execute_graph(
session_id,
graph_key,
graph_ser,
io_meta,
data_sizes,
send_targets,
callback,
_tell=True,
_delay=retry_delay,
)
return
load_chunk_sizes = dict(
(k, v)
for k, v in input_chunk_keys.items()
if k not in self._pin_requests[graph_key]
)
alloc_mem_batch.update(
(self._build_load_key(graph_key, k), v)
for k, v in load_chunk_sizes.items()
if k in chunks_use_once
)
self._chunk_holder_ref.spill_size(sum(alloc_cache_batch.values()), _tell=True)
# build allocation promises
batch_alloc_promises = []
if alloc_mem_batch:
self._mem_requests[graph_key] = list(alloc_mem_batch.keys())
batch_alloc_promises.append(
self._mem_quota_ref.request_batch_quota(alloc_mem_batch, _promise=True)
)
@log_unhandled
def _prepare_inputs(*_):
if graph_key in self._stop_requests:
raise ExecutionInterrupted
logger.debug("Start preparing input data for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "prepare_inputs")
prepare_promises = []
handled_keys = set()
for chunk in graph:
if chunk.key in handled_keys:
continue
if not isinstance(chunk.op, TensorFetchChunk):
continue
handled_keys.add(chunk.key)
if self._chunk_holder_ref.is_stored(chunk.key):
# data already in plasma: we just pin it
pinned_keys = self._chunk_holder_ref.pin_chunks(graph_key, chunk.key)
if chunk.key in pinned_keys:
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
continue
if spill_exists(chunk.key):
if chunk.key in chunks_use_once:
# input only use in current operand, we only need to load it into process memory
continue
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
load_fun = partial(
lambda gk, ck, *_: self._chunk_holder_ref.pin_chunks(gk, ck),
graph_key,
chunk.key,
)
unspill_keys.append(chunk.key)
prepare_promises.append(
ensure_chunk(self, session_id, chunk.key, move_to_end=True).then(
load_fun
)
)
continue
# load data from another worker
chunk_meta = self.get_meta_ref(session_id, chunk.key).get_chunk_meta(
session_id, chunk.key
)
if chunk_meta is None:
raise DependencyMissing("Dependency %s not met on sending." % chunk.key)
worker_results = chunk_meta.workers
worker_priorities = []
for w in worker_results.children:
_, worker_ip = w.key.rsplit("/", 1)
# todo sort workers by speed of network and other possible factors
worker_priorities.append((worker_ip, (0,)))
transfer_keys.append(chunk.key)
# fetch data from other workers, if one fails, try another
sorted_workers = sorted(worker_priorities, key=lambda pr: pr[1])
p = self._fetch_remote_data(
session_id,
graph_key,
chunk.key,
sorted_workers[0][0],
ensure_cached=chunk.key not in chunks_use_once,
)
for wp in sorted_workers[1:]:
p = p.catch(
functools.partial(
self._fetch_remote_data,
session_id,
graph_key,
chunk.key,
wp[0],
ensure_cached=chunk.key not in chunks_use_once,
)
)
prepare_promises.append(p)
logger.debug(
"Graph key %s: Targets %r, unspill keys %r, transfer keys %r",
graph_key,
targets,
unspill_keys,
transfer_keys,
)
return promise.all_(prepare_promises)
@log_unhandled
def _wait_free_slot(*_):
logger.debug("Waiting for free CPU slot for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "fetch_free_slot")
return self._dispatch_ref.get_free_slot("cpu", _promise=True)
@log_unhandled
def _send_calc_request(calc_uid):
if graph_key in self._stop_requests:
raise ExecutionInterrupted
# get allocation for calc, in case that memory exhausts
target_allocs = dict()
for chunk in graph:
if isinstance(chunk.op, TensorFetchChunk):
if not self._chunk_holder_ref.is_stored(chunk.key):
alloc_key = self._build_load_key(graph_key, chunk.key)
if alloc_key in alloc_mem_batch:
target_allocs[alloc_key] = alloc_mem_batch[alloc_key]
elif chunk.key in targets:
target_allocs[chunk.key] = alloc_mem_batch[chunk.key]
logger.debug("Start calculation for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "calculate")
calc_ref = self.promise_ref(calc_uid)
self.estimate_graph_finish_time(graph_key, graph, calc_fetch=False)
# make sure that memory suffices before actually run execution
return (
self._mem_quota_ref.request_batch_quota(target_allocs, _promise=True)
.then(
lambda *_: self._deallocate_scheduler_resource(
session_id, graph_key, delay=2
)
)
.then(
lambda *_: calc_ref.calc(session_id, graph_ser, targets, _promise=True)
)
)
@log_unhandled
def _dump_cache(inproc_uid, save_sizes):
# do some clean up
self._deallocate_scheduler_resource(session_id, graph_key)
inproc_ref = self.promise_ref(inproc_uid)
if graph_key in self._stop_requests:
inproc_ref.remove_cache(calc_keys, _tell=True)
raise ExecutionInterrupted
self._update_stage_info(session_id, graph_key, graph_ops, "dump_cache")
logger.debug(
"Graph %s: Start putting %r into shared cache. Target actor uid %s.",
graph_key,
calc_keys,
inproc_uid,
)
self._chunk_holder_ref.unpin_chunks(
graph_key, list(set(c.key for c in graph)), _tell=True
)
if logger.getEffectiveLevel() <= logging.DEBUG:
self._dump_execution_stages()
# self._cache_ref.dump_cache_status(_tell=True)
self._size_cache[graph_key] = save_sizes
if not send_targets:
# no endpoints to send, dump keys into shared memory and return
logger.debug(
"Worker graph %s(%s) finished execution. Dumping %r into plasma...",
graph_key,
graph_ops,
calc_keys,
)
return inproc_ref.dump_cache(calc_keys, _promise=True)
else:
# dump keys into shared memory and send
logger.debug(
"Worker graph %s(%s) finished execution. Dumping %r into plasma "
"while actively transferring %r...",
graph_key,
graph_ops,
calc_keys,
send_targets,
)
return inproc_ref.dump_cache(calc_keys, _promise=True).then(
_do_active_transfer
)
@log_unhandled
def _do_active_transfer(*_):
# transfer the result chunk to expected endpoints
@log_unhandled
def _send_chunk(sender_uid, chunk_key, target_addrs):
sender_ref = self.promise_ref(sender_uid)
logger.debug("Request for chunk %s sent to %s", chunk_key, target_addrs)
return sender_ref.send_data(
session_id,
chunk_key,
target_addrs,
ensure_cached=False,
timeout=options.worker.prepare_data_timeout,
_promise=True,
)
if graph_key in self._mem_requests:
self._mem_quota_ref.release_quotas(
self._mem_requests[graph_key], _tell=True
)
del self._mem_requests[graph_key]
promises = []
for key, targets in send_targets.items():
promises.append(
self._dispatch_ref.get_free_slot("sender", _promise=True)
.then(partial(_send_chunk, chunk_key=key, target_addrs=targets))
.catch(lambda *_: None)
)
return promise.all_(promises)
@log_unhandled
def _handle_rejection(*exc):
# some error occurred...
logger.debug("Entering _handle_rejection() for graph %s", graph_key)
if logger.getEffectiveLevel() <= logging.DEBUG:
self._dump_execution_stages()
# self._cache_ref.dump_cache_status(_tell=True)
if graph_key in self._stop_requests:
self._stop_requests.remove(graph_key)
self._mem_quota_ref.cancel_requests(list(alloc_mem_batch.keys()), _tell=True)
if not issubclass(exc[0], _WORKER_RETRY_ERRORS):
# exception not retryable: call back to scheduler
if isinstance(exc[0], ExecutionInterrupted):
logger.warning("Execution of graph %s interrupted.", graph_key)
else:
try:
six.reraise(*exc)
except:
logger.exception(
"Unexpected error occurred in executing %s", graph_key
)
self._invoke_finish_callbacks(
session_id, graph_key, *exc, **dict(_accept=False)
)
return
logger.debug(
"Graph %s rejected from execution because of %s", graph_key, exc[0].__name__
)
cb = self._callbacks[graph_key]
self._cleanup_graph(session_id, graph_key)
if issubclass(exc[0], ObjectNotInPlasma):
retry_delay = 0
else:
retry_delay = self._retry_delays[graph_key] + 0.5 + random.random()
self._retry_delays[graph_key] = min(1 + self._retry_delays[graph_key], 30)
self.ref().execute_graph(
session_id,
graph_key,
graph_ser,
io_meta,
data_sizes,
send_targets,
cb,
_tell=True,
_delay=retry_delay,
)
promise.all_(batch_alloc_promises).then(_prepare_inputs).then(_wait_free_slot).then(
_send_calc_request
).then(_dump_cache).then(
lambda *_: self._invoke_finish_callbacks(
session_id, graph_key, self._size_cache.get(graph_key)
)
).catch(_handle_rejection)
|
https://github.com/mars-project/mars/issues/129
|
Unexpected error occurred in executing bd974beb010abfa6964bdee22c5d2080
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/worker/execution.py", line 457, in _handle_rejection
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/lib/six.py", line 703, in reraise
raise value
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/promise.py", line 86, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/utils.py", line 287, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/worker/execution.py", line 328, in _prepare_inputs
for w in worker_results.children:
AttributeError: 'tuple' object has no attribute 'children'
|
AttributeError
|
def _prepare_inputs(*_):
if graph_key in self._stop_requests:
raise ExecutionInterrupted
logger.debug("Start preparing input data for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "prepare_inputs")
prepare_promises = []
handled_keys = set()
for chunk in graph:
if chunk.key in handled_keys:
continue
if not isinstance(chunk.op, TensorFetchChunk):
continue
handled_keys.add(chunk.key)
if self._chunk_holder_ref.is_stored(chunk.key):
# data already in plasma: we just pin it
pinned_keys = self._chunk_holder_ref.pin_chunks(graph_key, chunk.key)
if chunk.key in pinned_keys:
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
continue
if spill_exists(chunk.key):
if chunk.key in chunks_use_once:
# input only use in current operand, we only need to load it into process memory
continue
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
load_fun = partial(
lambda gk, ck, *_: self._chunk_holder_ref.pin_chunks(gk, ck),
graph_key,
chunk.key,
)
unspill_keys.append(chunk.key)
prepare_promises.append(
ensure_chunk(self, session_id, chunk.key, move_to_end=True).then(
load_fun
)
)
continue
# load data from another worker
chunk_meta = self.get_meta_ref(session_id, chunk.key).get_chunk_meta(
session_id, chunk.key
)
if chunk_meta is None:
raise DependencyMissing("Dependency %s not met on sending." % chunk.key)
worker_priorities = []
for w in chunk_meta.workers:
# todo sort workers by speed of network and other possible factors
worker_priorities.append((w, (0,)))
transfer_keys.append(chunk.key)
# fetch data from other workers, if one fails, try another
sorted_workers = sorted(worker_priorities, key=lambda pr: pr[1])
p = self._fetch_remote_data(
session_id,
graph_key,
chunk.key,
sorted_workers[0][0],
ensure_cached=chunk.key not in chunks_use_once,
)
for wp in sorted_workers[1:]:
p = p.catch(
functools.partial(
self._fetch_remote_data,
session_id,
graph_key,
chunk.key,
wp[0],
ensure_cached=chunk.key not in chunks_use_once,
)
)
prepare_promises.append(p)
logger.debug(
"Graph key %s: Targets %r, unspill keys %r, transfer keys %r",
graph_key,
targets,
unspill_keys,
transfer_keys,
)
return promise.all_(prepare_promises)
|
def _prepare_inputs(*_):
if graph_key in self._stop_requests:
raise ExecutionInterrupted
logger.debug("Start preparing input data for graph %s", graph_key)
self._update_stage_info(session_id, graph_key, graph_ops, "prepare_inputs")
prepare_promises = []
handled_keys = set()
for chunk in graph:
if chunk.key in handled_keys:
continue
if not isinstance(chunk.op, TensorFetchChunk):
continue
handled_keys.add(chunk.key)
if self._chunk_holder_ref.is_stored(chunk.key):
# data already in plasma: we just pin it
pinned_keys = self._chunk_holder_ref.pin_chunks(graph_key, chunk.key)
if chunk.key in pinned_keys:
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
continue
if spill_exists(chunk.key):
if chunk.key in chunks_use_once:
# input only use in current operand, we only need to load it into process memory
continue
self._mem_quota_ref.release_quota(
self._build_load_key(graph_key, chunk.key)
)
load_fun = partial(
lambda gk, ck, *_: self._chunk_holder_ref.pin_chunks(gk, ck),
graph_key,
chunk.key,
)
unspill_keys.append(chunk.key)
prepare_promises.append(
ensure_chunk(self, session_id, chunk.key, move_to_end=True).then(
load_fun
)
)
continue
# load data from another worker
chunk_meta = self.get_meta_ref(session_id, chunk.key).get_chunk_meta(
session_id, chunk.key
)
if chunk_meta is None:
raise DependencyMissing("Dependency %s not met on sending." % chunk.key)
worker_results = chunk_meta.workers
worker_priorities = []
for w in worker_results.children:
_, worker_ip = w.key.rsplit("/", 1)
# todo sort workers by speed of network and other possible factors
worker_priorities.append((worker_ip, (0,)))
transfer_keys.append(chunk.key)
# fetch data from other workers, if one fails, try another
sorted_workers = sorted(worker_priorities, key=lambda pr: pr[1])
p = self._fetch_remote_data(
session_id,
graph_key,
chunk.key,
sorted_workers[0][0],
ensure_cached=chunk.key not in chunks_use_once,
)
for wp in sorted_workers[1:]:
p = p.catch(
functools.partial(
self._fetch_remote_data,
session_id,
graph_key,
chunk.key,
wp[0],
ensure_cached=chunk.key not in chunks_use_once,
)
)
prepare_promises.append(p)
logger.debug(
"Graph key %s: Targets %r, unspill keys %r, transfer keys %r",
graph_key,
targets,
unspill_keys,
transfer_keys,
)
return promise.all_(prepare_promises)
|
https://github.com/mars-project/mars/issues/129
|
Unexpected error occurred in executing bd974beb010abfa6964bdee22c5d2080
Traceback (most recent call last):
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/worker/execution.py", line 457, in _handle_rejection
six.reraise(*exc)
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/lib/six.py", line 703, in reraise
raise value
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/promise.py", line 86, in _wrapped
result = func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/utils.py", line 287, in _wrapped
return func(*args, **kwargs)
File "/home/admin/work/_public-mars-0.1.0b1.zip/mars/worker/execution.py", line 328, in _prepare_inputs
for w in worker_results.children:
AttributeError: 'tuple' object has no attribute 'children'
|
AttributeError
|
def __init__(
self,
session_id,
graph_key,
serialized_tensor_graph,
target_tensors=None,
serialized_chunk_graph=None,
state=GraphState.UNSCHEDULED,
final_state=None,
):
super(GraphActor, self).__init__()
self._graph_key = graph_key
self._session_id = session_id
self._serialized_tensor_graph = serialized_tensor_graph
self._serialized_chunk_graph = serialized_chunk_graph
self._state = state
self._final_state = final_state
self._start_time = None
self._end_time = None
self._nodes_num = None
self._cluster_info_ref = None
self._assigner_actor_ref = None
self._resource_actor_ref = None
self._kv_store_ref = None
self._chunk_meta_ref = None
self._graph_meta_ref = None
self._tensor_graph_cache = None
self._chunk_graph_cache = None
self._op_key_to_chunk = defaultdict(list)
self._resource_actor = None
self._tensor_key_opid_to_tiled = defaultdict(list)
self._tensor_key_to_opid = dict()
self._terminal_chunk_op_tensor = defaultdict(set)
self._terminated_tensors = set()
self._operand_infos = dict()
if target_tensors:
self._target_tensor_chunk_ops = dict((k, set()) for k in target_tensors)
self._target_tensor_finished = dict(
(k, set()) for k in self._target_tensor_chunk_ops
)
else:
self._target_tensor_chunk_ops = dict()
self._target_tensor_finished = dict()
|
def __init__(
self,
session_id,
graph_key,
serialized_tensor_graph,
target_tensors=None,
serialized_chunk_graph=None,
state=GraphState.UNSCHEDULED,
final_state=None,
):
super(GraphActor, self).__init__()
self._graph_key = graph_key
self._session_id = session_id
self._serialized_tensor_graph = serialized_tensor_graph
self._serialized_chunk_graph = serialized_chunk_graph
self._state = state
self._final_state = final_state
self._start_time = None
self._end_time = None
self._nodes_num = None
self._cluster_info_ref = None
self._assigner_actor_ref = None
self._resource_actor_ref = None
self._kv_store_ref = None
self._chunk_meta_ref = None
self._graph_meta_ref = None
self._tensor_graph_cache = None
self._chunk_graph_cache = None
self._op_key_to_chunk = defaultdict(list)
self._resource_actor = None
self._tensor_to_tiled = defaultdict(list)
self._terminal_chunk_op_tensor = defaultdict(set)
self._terminated_tensors = set()
self._operand_infos = dict()
if target_tensors:
self._target_tensor_chunk_ops = dict((k, set()) for k in target_tensors)
self._target_tensor_finished = dict(
(k, set()) for k in self._target_tensor_chunk_ops
)
else:
self._target_tensor_chunk_ops = dict()
self._target_tensor_finished = dict()
|
https://github.com/mars-project/mars/issues/99
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 421, in prepare_graph
for n in tensor_to_tiled[tk][-1].chunks:
IndexError: list index out of range
|
IndexError
|
def execute_graph(self):
"""
Start graph execution
"""
def _detect_cancel(callback=None):
if self.reload_state() == GraphState.CANCELLING:
logger.info("Cancel detected, stopping")
if callback:
callback()
else:
self._end_time = time.time()
self.state = GraphState.CANCELLED
raise ExecutionInterrupted
self._start_time = time.time()
self.state = GraphState.PREPARING
try:
self.prepare_graph()
_detect_cancel()
self.scan_node()
_detect_cancel()
self.place_initial_chunks()
_detect_cancel()
self.create_operand_actors()
_detect_cancel(self.stop_graph)
except ExecutionInterrupted:
pass
except: # noqa: E722
logger.exception("Failed to start graph execution.")
self.stop_graph()
self.state = GraphState.FAILED
raise
|
def execute_graph(self):
"""
Start graph execution
"""
def _detect_cancel(callback=None):
if self.reload_state() == GraphState.CANCELLING:
logger.info("Cancel detected, stopping")
if callback:
callback()
else:
self._end_time = time.time()
self.state = GraphState.CANCELLED
raise ExecutionInterrupted
self._start_time = time.time()
self.state = GraphState.PREPARING
try:
self.prepare_graph()
_detect_cancel()
self.scan_node()
_detect_cancel()
self.place_initial_chunks()
_detect_cancel()
self.create_operand_actors()
_detect_cancel(self.stop_graph)
except ExecutionInterrupted:
pass
except:
logger.exception("Failed to start graph execution.")
self.stop_graph()
self.state = GraphState.FAILED
raise
|
https://github.com/mars-project/mars/issues/99
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 421, in prepare_graph
for n in tensor_to_tiled[tk][-1].chunks:
IndexError: list index out of range
|
IndexError
|
def stop_graph(self):
"""
Stop graph execution
"""
from .operand import OperandActor
if self.state == GraphState.CANCELLED:
return
self.state = GraphState.CANCELLING
try:
chunk_graph = self.get_chunk_graph()
except (KeyError, GraphNotExists):
self.state = GraphState.CANCELLED
return
has_stopping = False
for chunk in chunk_graph:
if chunk.op.key not in self._operand_infos:
continue
if self._operand_infos[chunk.op.key]["state"] in (
OperandState.READY,
OperandState.RUNNING,
OperandState.FINISHED,
):
# we only need to stop on ready, running and finished operands
op_uid = OperandActor.gen_uid(self._session_id, chunk.op.key)
scheduler_addr = self.get_scheduler(op_uid)
ref = self.ctx.actor_ref(op_uid, address=scheduler_addr)
has_stopping = True
ref.stop_operand(_tell=True)
if not has_stopping:
self.state = GraphState.CANCELLED
|
def stop_graph(self):
"""
Stop graph execution
"""
from .operand import OperandActor
if self.state == GraphState.CANCELLED:
return
self.state = GraphState.CANCELLING
try:
chunk_graph = self.get_chunk_graph()
except (KeyError, GraphNotExists):
self.state = GraphState.CANCELLED
return
for chunk in chunk_graph:
if chunk.op.key not in self._operand_infos:
continue
if self._operand_infos[chunk.op.key]["state"] in (
OperandState.READY,
OperandState.RUNNING,
OperandState.FINISHED,
):
# we only need to stop on ready, running and finished operands
op_uid = OperandActor.gen_uid(self._session_id, chunk.op.key)
scheduler_addr = self.get_scheduler(op_uid)
ref = self.ctx.actor_ref(op_uid, address=scheduler_addr)
ref.stop_operand(_tell=True)
|
https://github.com/mars-project/mars/issues/99
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 421, in prepare_graph
for n in tensor_to_tiled[tk][-1].chunks:
IndexError: list index out of range
|
IndexError
|
def prepare_graph(self, compose=True):
"""
Tile and compose tensor graph into chunk graph
:param compose: if True, do compose after tiling
"""
tensor_graph = deserialize_graph(self._serialized_tensor_graph)
self._tensor_graph_cache = tensor_graph
logger.debug(
"Begin preparing graph %s with %d tensors to chunk graph.",
self._graph_key,
len(tensor_graph),
)
# mark target tensor steps
if not self._target_tensor_chunk_ops:
for tn in tensor_graph:
if not tensor_graph.count_successors(tn):
self._target_tensor_chunk_ops[tn.key] = set()
self._target_tensor_finished[tn.key] = set()
if self._serialized_chunk_graph:
serialized_chunk_graph = self._serialized_chunk_graph
chunk_graph = DAG.from_pb(serialized_chunk_graph)
else:
chunk_graph = DAG()
key_to_chunk = {c.key: c for c in chunk_graph}
tensor_key_opid_to_tiled = self._tensor_key_opid_to_tiled
for t in tensor_graph:
self._tensor_key_to_opid[t.key] = t.op.id
if (t.key, t.op.id) not in tensor_key_opid_to_tiled:
continue
t._chunks = [
key_to_chunk[k] for k in [tensor_key_opid_to_tiled[(t.key, t.op.id)][-1]]
]
tq = deque()
for t in tensor_graph:
if t.inputs and not all(
(ti.key, ti.op.id) in tensor_key_opid_to_tiled for ti in t.inputs
):
continue
tq.append(t)
while tq:
tensor = tq.popleft()
if (
not tensor.is_coarse()
or (tensor.key, tensor.op.id) in tensor_key_opid_to_tiled
):
continue
inputs = [
tensor_key_opid_to_tiled[(it.key, it.op.id)][-1]
for it in tensor.inputs or ()
]
op = tensor.op.copy()
_ = op.new_tensors(
inputs,
[o.shape for o in tensor.op.outputs], # noqa: F841
dtype=[o.dtype for o in tensor.op.outputs],
**tensor.params,
)
total_tiled = []
for j, t, to_tile in zip(itertools.count(0), tensor.op.outputs, op.outputs):
# replace inputs with tiled ones
if not total_tiled:
try:
td = handler.dispatch(to_tile)
except DataNotReady:
continue
if isinstance(td, (tuple, list)):
total_tiled.extend(td)
else:
total_tiled.append(td)
tiled = total_tiled[j]
tensor_key_opid_to_tiled[(t.key, t.op.id)].append(tiled)
# add chunks to fine grained graph
q = deque([tiled_c.data for tiled_c in tiled.chunks])
input_chunk_keys = set(
itertools.chain(
*(
[(it.key, it.id) for it in input.chunks]
for input in to_tile.inputs
)
)
)
while len(q) > 0:
c = q.popleft()
if (c.key, c.id) in input_chunk_keys:
continue
if c not in chunk_graph:
chunk_graph.add_node(c)
for ic in c.inputs or []:
if ic not in chunk_graph:
chunk_graph.add_node(ic)
q.append(ic)
chunk_graph.add_edge(ic, c)
for succ in tensor_graph.successors(t):
if any(
(t.key, t.op.id) not in tensor_key_opid_to_tiled
for t in succ.inputs
):
continue
tq.append(succ)
# record the chunk nodes in graph
reserve_chunk = set()
result_chunk_keys = list()
for tk_topid in tensor_key_opid_to_tiled:
for n in [c.data for t in tensor_key_opid_to_tiled[tk_topid] for c in t.chunks]:
result_chunk_keys.append(n.key)
dq_predecessors = deque([n])
while dq_predecessors:
current = dq_predecessors.popleft()
reserve_chunk.update(n.op.outputs)
predecessors = chunk_graph.predecessors(current)
dq_predecessors.extend(
[p for p in predecessors if p not in reserve_chunk]
)
reserve_chunk.update(predecessors)
# delete redundant chunk
for n in list(chunk_graph.iter_nodes()):
if n not in reserve_chunk:
chunk_graph.remove_node(n)
if compose:
chunk_graph.compose(keys=result_chunk_keys)
for tk, topid in tensor_key_opid_to_tiled:
if tk not in self._target_tensor_chunk_ops:
continue
for n in tensor_key_opid_to_tiled[(tk, topid)][-1].chunks:
self._terminal_chunk_op_tensor[n.op.key].add(tk)
self._target_tensor_chunk_ops[tk].add(n.op.key)
# sync chunk graph to kv store
if self._kv_store_ref is not None:
graph_path = "/sessions/%s/graphs/%s" % (self._session_id, self._graph_key)
self._kv_store_ref.write(
"%s/chunk_graph" % graph_path,
serialize_graph(chunk_graph, compress=True),
_tell=True,
_wait=False,
)
self._nodes_num = len(chunk_graph)
self._chunk_graph_cache = chunk_graph
for n in self._chunk_graph_cache:
self._op_key_to_chunk[n.op.key].append(n)
|
def prepare_graph(self, compose=True):
"""
Tile and compose tensor graph into chunk graph
:param compose: if True, do compose after tiling
"""
tensor_graph = deserialize_graph(self._serialized_tensor_graph)
self._tensor_graph_cache = tensor_graph
logger.debug(
"Begin preparing graph %s with %d tensors to chunk graph.",
self._graph_key,
len(tensor_graph),
)
# mark target tensor steps
if not self._target_tensor_chunk_ops:
for tn in tensor_graph:
if not tensor_graph.count_successors(tn):
self._target_tensor_chunk_ops[tn.key] = set()
self._target_tensor_finished[tn.key] = set()
if self._serialized_chunk_graph:
serialized_chunk_graph = self._serialized_chunk_graph
chunk_graph = DAG.from_pb(serialized_chunk_graph)
else:
chunk_graph = DAG()
key_to_chunk = {c.key: c for c in chunk_graph}
tensor_to_tiled = self._tensor_to_tiled
for t in tensor_graph:
if t.key not in tensor_to_tiled:
continue
t._chunks = [key_to_chunk[k] for k in [tensor_to_tiled[t.key][-1]]]
tq = deque()
for t in tensor_graph:
if t.inputs and not all(ti.key in tensor_to_tiled for ti in t.inputs):
continue
tq.append(t)
while tq:
tensor = tq.popleft()
if not tensor.is_coarse() or tensor.key in tensor_to_tiled:
continue
inputs = [tensor_to_tiled[it.key][-1] for it in tensor.inputs or ()]
op = tensor.op.copy()
_ = op.new_tensors(
inputs,
[o.shape for o in tensor.op.outputs], # noqa: F841
dtype=[o.dtype for o in tensor.op.outputs],
**tensor.params,
)
total_tiled = []
for j, t, to_tile in zip(itertools.count(0), tensor.op.outputs, op.outputs):
# replace inputs with tiled ones
if not total_tiled:
try:
td = handler.dispatch(to_tile)
except DataNotReady:
continue
if isinstance(td, (tuple, list)):
total_tiled.extend(td)
else:
total_tiled.append(td)
tiled = total_tiled[j]
tensor_to_tiled[t.key].append(tiled)
# add chunks to fine grained graph
q = deque([tiled_c.data for tiled_c in tiled.chunks])
input_chunk_keys = set(
itertools.chain(
*(
[(it.key, it.id) for it in input.chunks]
for input in to_tile.inputs
)
)
)
while len(q) > 0:
c = q.popleft()
if (c.key, c.id) in input_chunk_keys:
continue
if c not in chunk_graph:
chunk_graph.add_node(c)
for ic in c.inputs or []:
if ic not in chunk_graph:
chunk_graph.add_node(ic)
q.append(ic)
chunk_graph.add_edge(ic, c)
for succ in tensor_graph.iter_successors(t):
if any(t.key not in tensor_to_tiled for t in succ.inputs):
continue
tq.append(succ)
# record the chunk nodes in graph
reserve_chunk = set()
result_chunk_keys = list()
for tk in self._target_tensor_chunk_ops:
for n in [c.data for t in tensor_to_tiled[tk] for c in t.chunks]:
result_chunk_keys.append(n.key)
dq_predecessors = deque([n])
while dq_predecessors:
current = dq_predecessors.popleft()
reserve_chunk.update(n.op.outputs)
predecessors = chunk_graph.predecessors(current)
dq_predecessors.extend(
[p for p in predecessors if p not in reserve_chunk]
)
reserve_chunk.update(predecessors)
# delete redundant chunk
for n in list(chunk_graph.iter_nodes()):
if n not in reserve_chunk:
chunk_graph.remove_node(n)
if compose:
chunk_graph.compose(keys=result_chunk_keys)
for tk in tensor_to_tiled:
if tk not in self._target_tensor_chunk_ops:
continue
for n in tensor_to_tiled[tk][-1].chunks:
self._terminal_chunk_op_tensor[n.op.key].add(tk)
self._target_tensor_chunk_ops[tk].add(n.op.key)
# sync chunk graph to kv store
if self._kv_store_ref is not None:
graph_path = "/sessions/%s/graphs/%s" % (self._session_id, self._graph_key)
self._kv_store_ref.write(
"%s/chunk_graph" % graph_path,
serialize_graph(chunk_graph, compress=True),
_tell=True,
_wait=False,
)
self._nodes_num = len(chunk_graph)
self._chunk_graph_cache = chunk_graph
for n in self._chunk_graph_cache:
self._op_key_to_chunk[n.op.key].append(n)
|
https://github.com/mars-project/mars/issues/99
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 421, in prepare_graph
for n in tensor_to_tiled[tk][-1].chunks:
IndexError: list index out of range
|
IndexError
|
def free_tensor_data(self, tensor_key):
from .operand import OperandActor
tiled_tensor = self._get_tensor_by_key(tensor_key)
for chunk in tiled_tensor.chunks:
op_uid = OperandActor.gen_uid(self._session_id, chunk.op.key)
scheduler_addr = self.get_scheduler(op_uid)
op_ref = self.ctx.actor_ref(op_uid, address=scheduler_addr)
op_ref.free_data(_tell=True)
|
def free_tensor_data(self, tensor_key):
from .operand import OperandActor
tiled_tensor = self._tensor_to_tiled[tensor_key][-1]
for chunk in tiled_tensor.chunks:
op_uid = OperandActor.gen_uid(self._session_id, chunk.op.key)
scheduler_addr = self.get_scheduler(op_uid)
op_ref = self.ctx.actor_ref(op_uid, address=scheduler_addr)
op_ref.free_data(_tell=True)
|
https://github.com/mars-project/mars/issues/99
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 421, in prepare_graph
for n in tensor_to_tiled[tk][-1].chunks:
IndexError: list index out of range
|
IndexError
|
def build_tensor_merge_graph(self, tensor_key):
from ..tensor.expressions.merge.concatenate import TensorConcatenate
from ..tensor.expressions.datasource import TensorFetchChunk
tiled_tensor = self._get_tensor_by_key(tensor_key)
graph = DAG()
if len(tiled_tensor.chunks) == 1:
# only one chunk, just trigger fetch
c = tiled_tensor.chunks[0]
op = TensorFetchChunk(dtype=c.dtype, to_fetch_key=c.key)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
else:
fetch_chunks = []
for c in tiled_tensor.chunks:
op = TensorFetchChunk(dtype=c.dtype, to_fetch_key=c.key)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
fetch_chunks.append(fetch_chunk)
chunk = (
TensorConcatenate(dtype=tiled_tensor.op.dtype)
.new_chunk(fetch_chunks, tiled_tensor.shape)
.data
)
graph.add_node(chunk)
[graph.add_edge(fetch_chunk, chunk) for fetch_chunk in fetch_chunks]
return serialize_graph(graph)
|
def build_tensor_merge_graph(self, tensor_key):
from ..tensor.expressions.merge.concatenate import TensorConcatenate
from ..tensor.expressions.datasource import TensorFetchChunk
tiled_tensor = self._tensor_to_tiled[tensor_key][-1]
graph = DAG()
if len(tiled_tensor.chunks) == 1:
# only one chunk, just trigger fetch
c = tiled_tensor.chunks[0]
op = TensorFetchChunk(dtype=c.dtype, to_fetch_key=c.key)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
else:
fetch_chunks = []
for c in tiled_tensor.chunks:
op = TensorFetchChunk(dtype=c.dtype, to_fetch_key=c.key)
fetch_chunk = op.new_chunk(None, c.shape, index=c.index, _key=c.key).data
graph.add_node(fetch_chunk)
fetch_chunks.append(fetch_chunk)
chunk = (
TensorConcatenate(dtype=tiled_tensor.op.dtype)
.new_chunk(fetch_chunks, tiled_tensor.shape)
.data
)
graph.add_node(chunk)
[graph.add_edge(fetch_chunk, chunk) for fetch_chunk in fetch_chunks]
return serialize_graph(graph)
|
https://github.com/mars-project/mars/issues/99
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 421, in prepare_graph
for n in tensor_to_tiled[tk][-1].chunks:
IndexError: list index out of range
|
IndexError
|
def fetch_tensor_result(self, tensor_key):
from ..worker.transfer import ResultSenderActor
# TODO for test
tiled_tensor = self._get_tensor_by_key(tensor_key)
if tensor_key not in self._terminated_tensors:
return None
ctx = dict()
for chunk_key in [c.key for c in tiled_tensor.chunks]:
if chunk_key in ctx:
continue
endpoints = self._chunk_meta_ref.get_workers(self._session_id, chunk_key)
sender_ref = self.ctx.actor_ref(
ResultSenderActor.default_name(), address=endpoints[-1]
)
ctx[chunk_key] = loads(sender_ref.fetch_data(self._session_id, chunk_key))
return dumps(merge_tensor_chunks(tiled_tensor, ctx))
|
def fetch_tensor_result(self, tensor_key):
from ..worker.transfer import ResultSenderActor
# TODO for test
tiled_tensor = self._tensor_to_tiled[tensor_key][-1]
if tensor_key not in self._terminated_tensors:
return None
ctx = dict()
for chunk_key in [c.key for c in tiled_tensor.chunks]:
if chunk_key in ctx:
continue
endpoints = self._chunk_meta_ref.get_workers(self._session_id, chunk_key)
sender_ref = self.ctx.actor_ref(
ResultSenderActor.default_name(), address=endpoints[-1]
)
ctx[chunk_key] = loads(sender_ref.fetch_data(self._session_id, chunk_key))
return dumps(merge_tensor_chunks(tiled_tensor, ctx))
|
https://github.com/mars-project/mars/issues/99
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/wenjun.swj/Code/mars/mars/scheduler/graph.py", line 421, in prepare_graph
for n in tensor_to_tiled[tk][-1].chunks:
IndexError: list index out of range
|
IndexError
|
def run(self, *tensors, **kw):
from . import tensor as mt
ret_list = False
if len(tensors) == 1 and isinstance(tensors[0], (tuple, list)):
ret_list = True
tensors = tensors[0]
elif len(tensors) > 1:
ret_list = True
tensors = tuple(mt.tensor(t) for t in tensors)
run_tensors = []
fetch_results = dict()
# those executed tensors should fetch data directly, submit the others
for t in tensors:
if t.key in self._executed_keys:
fetch_results[t.key] = self.fetch(t)
else:
run_tensors.append(t)
if all([t.key in fetch_results for t in tensors]):
results = [fetch_results[t.key] for t in tensors]
return results if ret_list else results[0]
result = self._sess.run(*run_tensors, **kw)
self._executed_keys.update(t.key for t in run_tensors)
for t in run_tensors:
t._execute_session = self
ret = []
for r, t in zip(result, tensors):
if r is None:
ret.append(r)
continue
if t.isscalar() and hasattr(r, "item"):
ret.append(np.asscalar(r))
else:
ret.append(r)
results = []
result_iter = iter(ret)
for k in [t.key for t in tensors]:
if k in fetch_results:
results.append(fetch_results[k])
else:
results.append(next(result_iter))
if ret_list:
return results
return results[0]
|
def run(self, *tensors, **kw):
from . import tensor as mt
ret_list = False
if len(tensors) == 1 and isinstance(tensors[0], (tuple, list)):
ret_list = True
tensors = tensors[0]
elif len(tensors) > 1:
ret_list = True
tensors = tuple(mt.tensor(t) for t in tensors)
result = self._sess.run(*tensors, **kw)
self._executed_keys.update(t.key for t in tensors)
for t in tensors:
t._execute_session = self
ret = []
for r, t in zip(result, tensors):
if r is None:
ret.append(r)
continue
if t.isscalar() and hasattr(r, "item"):
ret.append(np.asscalar(r))
else:
ret.append(r)
if ret_list:
return ret
return ret[0]
|
https://github.com/mars-project/mars/issues/17
|
Creating operand actors for graph 93ba7372-e748-40ab-af14-6c250f845ba4
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 156, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 569, in create_operand_actors
[future.result() for future in itertools.chain(six.itervalues(op_refs), [write_future])]
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 569, in <listcomp>
[future.result() for future in itertools.chain(six.itervalues(op_refs), [write_future])]
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 758, in mars.actors.pool.gevent_pool.Communicator._create_local_actor
File "mars/actors/pool/gevent_pool.pyx", line 237, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
mars.actors.errors.ActorAlreadyExist: Actor s:operator$2724de06-fc4a-11e8-8d24-47a7eeef77d5$c84e3ef10df7fabaf2dc028149609284 already exist, cannot create
|
mars.actors.errors.ActorAlreadyExist
|
def decref(self, *keys):
self._executed_keys = self._executed_keys.difference(keys)
if hasattr(self._sess, "decref"):
self._sess.decref(*keys)
|
def decref(self, *keys):
if hasattr(self._sess, "decref"):
self._sess.decref(*keys)
|
https://github.com/mars-project/mars/issues/17
|
Creating operand actors for graph 93ba7372-e748-40ab-af14-6c250f845ba4
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 156, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 569, in create_operand_actors
[future.result() for future in itertools.chain(six.itervalues(op_refs), [write_future])]
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 569, in <listcomp>
[future.result() for future in itertools.chain(six.itervalues(op_refs), [write_future])]
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 758, in mars.actors.pool.gevent_pool.Communicator._create_local_actor
File "mars/actors/pool/gevent_pool.pyx", line 237, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
mars.actors.errors.ActorAlreadyExist: Actor s:operator$2724de06-fc4a-11e8-8d24-47a7eeef77d5$c84e3ef10df7fabaf2dc028149609284 already exist, cannot create
|
mars.actors.errors.ActorAlreadyExist
|
def __exit__(self, *_):
self.close()
|
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
|
https://github.com/mars-project/mars/issues/17
|
Creating operand actors for graph 93ba7372-e748-40ab-af14-6c250f845ba4
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 156, in execute_graph
self.create_operand_actors()
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 569, in create_operand_actors
[future.result() for future in itertools.chain(six.itervalues(op_refs), [write_future])]
File "/Users/hekaisheng/Documents/mars/mars/scheduler/graph.py", line 569, in <listcomp>
[future.result() for future in itertools.chain(six.itervalues(op_refs), [write_future])]
File "src/gevent/event.py", line 457, in gevent._event.AsyncResult.result
File "src/gevent/event.py", line 381, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 399, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 379, in gevent._event.AsyncResult._raise_exception
File "/Users/hekaisheng/miniconda3/lib/python3.6/site-packages/gevent/_compat.py", line 47, in reraise
raise value.with_traceback(tb)
File "mars/actors/pool/gevent_pool.pyx", line 758, in mars.actors.pool.gevent_pool.Communicator._create_local_actor
File "mars/actors/pool/gevent_pool.pyx", line 237, in mars.actors.pool.gevent_pool.LocalActorPool.create_actor
mars.actors.errors.ActorAlreadyExist: Actor s:operator$2724de06-fc4a-11e8-8d24-47a7eeef77d5$c84e3ef10df7fabaf2dc028149609284 already exist, cannot create
|
mars.actors.errors.ActorAlreadyExist
|
def prepare_graph(self, compose=True):
"""
Tile and compose tensor graph into chunk graph
:param compose: if True, do compose after tiling
"""
tensor_graph = deserialize_graph(self._serialized_tensor_graph)
self._tensor_graph_cache = tensor_graph
logger.debug(
"Begin preparing graph %s with %d tensors to chunk graph.",
self._graph_key,
len(tensor_graph),
)
# mark target tensor steps
if not self._target_tensor_chunk_ops:
for tn in tensor_graph:
if not tensor_graph.count_successors(tn):
self._target_tensor_chunk_ops[tn.key] = set()
self._target_tensor_finished[tn.key] = set()
if self._serialized_chunk_graph:
serialized_chunk_graph = self._serialized_chunk_graph
chunk_graph = DAG.from_pb(serialized_chunk_graph)
else:
chunk_graph = DAG()
key_to_chunk = {c.key: c for c in chunk_graph}
tensor_to_tiled = self._tensor_to_tiled
for t in tensor_graph:
if t.key not in tensor_to_tiled:
continue
t._chunks = [key_to_chunk[k] for k in [tensor_to_tiled[t.key][-1]]]
tq = deque()
for t in tensor_graph:
if t.inputs and not all(ti.key in tensor_to_tiled for ti in t.inputs):
continue
tq.append(t)
while tq:
tensor = tq.popleft()
if not tensor.is_coarse() or tensor.key in tensor_to_tiled:
continue
inputs = [tensor_to_tiled[it.key][-1] for it in tensor.inputs or ()]
op = tensor.op.copy()
_ = op.new_tensors(
inputs,
[o.shape for o in tensor.op.outputs], # noqa: F841
dtype=[o.dtype for o in tensor.op.outputs],
**tensor.params,
)
total_tiled = []
for j, t, to_tile in zip(itertools.count(0), tensor.op.outputs, op.outputs):
# replace inputs with tiled ones
if not total_tiled:
try:
td = handler.dispatch(to_tile)
except DataNotReady:
continue
if isinstance(td, (tuple, list)):
total_tiled.extend(td)
else:
total_tiled.append(td)
tiled = total_tiled[j]
tensor_to_tiled[t.key].append(tiled)
# add chunks to fine grained graph
q = deque([tiled_c.data for tiled_c in tiled.chunks])
input_chunk_keys = set(
itertools.chain(
*(
[(it.key, it.id) for it in input.chunks]
for input in to_tile.inputs
)
)
)
while len(q) > 0:
c = q.popleft()
if (c.key, c.id) in input_chunk_keys:
continue
if c not in chunk_graph:
chunk_graph.add_node(c)
for ic in c.inputs or []:
if ic not in chunk_graph:
chunk_graph.add_node(ic)
q.append(ic)
chunk_graph.add_edge(ic, c)
for succ in tensor_graph.iter_successors(t):
if any(t.key not in tensor_to_tiled for t in succ.inputs):
continue
tq.append(succ)
# record the chunk nodes in graph
reserve_chunk = set()
result_chunk_keys = list()
for tk in self._target_tensor_chunk_ops:
for n in [c.data for t in tensor_to_tiled[tk] for c in t.chunks]:
result_chunk_keys.append(n.key)
dq_predecessors = deque([n])
while dq_predecessors:
current = dq_predecessors.popleft()
reserve_chunk.update(n.op.outputs)
predecessors = chunk_graph.predecessors(current)
dq_predecessors.extend(
[p for p in predecessors if p not in reserve_chunk]
)
reserve_chunk.update(predecessors)
# delete redundant chunk
for n in list(chunk_graph.iter_nodes()):
if n not in reserve_chunk:
chunk_graph.remove_node(n)
if compose:
chunk_graph.compose(keys=result_chunk_keys)
for tk in tensor_to_tiled:
if tk not in self._target_tensor_chunk_ops:
continue
for n in tensor_to_tiled[tk][-1].chunks:
self._terminal_chunk_op_tensor[n.op.key].add(tk)
self._target_tensor_chunk_ops[tk].add(n.op.key)
# sync chunk graph to kv store
if self._kv_store_ref is not None:
graph_path = "/sessions/%s/graphs/%s" % (self._session_id, self._graph_key)
self._kv_store_ref.write(
"%s/chunk_graph" % graph_path,
serialize_graph(chunk_graph, compress=True),
_tell=True,
_wait=False,
)
self._nodes_num = len(chunk_graph)
self._chunk_graph_cache = chunk_graph
for n in self._chunk_graph_cache:
self._op_key_to_chunk[n.op.key].append(n)
|
def prepare_graph(self, compose=True):
"""
Tile and compose tensor graph into chunk graph
:param compose: if True, do compose after tiling
"""
tensor_graph = deserialize_graph(self._serialized_tensor_graph)
self._tensor_graph_cache = tensor_graph
logger.debug(
"Begin preparing graph %s with %d tensors to chunk graph.",
self._graph_key,
len(tensor_graph),
)
# mark target tensor steps
if not self._target_tensor_chunk_ops:
for tn in tensor_graph:
if not tensor_graph.count_successors(tn):
self._target_tensor_chunk_ops[tn.key] = set()
self._target_tensor_finished[tn.key] = set()
if self._serialized_chunk_graph:
serialized_chunk_graph = self._serialized_chunk_graph
chunk_graph = DAG.from_pb(serialized_chunk_graph)
else:
chunk_graph = DAG()
key_to_chunk = {c.key: c for c in chunk_graph}
tensor_to_tiled = self._tensor_to_tiled
for t in tensor_graph:
if t.key not in tensor_to_tiled:
continue
t._chunks = [key_to_chunk[k] for k in [tensor_to_tiled[t.key][-1]]]
tq = deque()
for t in tensor_graph:
if t.inputs and not all(ti.key in tensor_to_tiled for ti in t.inputs):
continue
tq.append(t)
while tq:
tensor = tq.popleft()
if not tensor.is_coarse() or tensor.key in tensor_to_tiled:
continue
inputs = [tensor_to_tiled[it.key][-1] for it in tensor.inputs or ()]
op = tensor.op.copy()
_ = op.new_tensors(
inputs,
[o.shape for o in tensor.op.outputs], # noqa: F841
dtype=[o.dtype for o in tensor.op.outputs],
**tensor.params,
)
total_tiled = []
for j, t, to_tile in zip(itertools.count(0), tensor.op.outputs, op.outputs):
# replace inputs with tiled ones
if not total_tiled:
try:
td = handler.dispatch(to_tile)
except DataNotReady:
continue
if isinstance(td, (tuple, list)):
total_tiled.extend(td)
else:
total_tiled.append(td)
tiled = total_tiled[j]
tensor_to_tiled[t.key].append(tiled)
# add chunks to fine grained graph
q = deque([tiled_c.data for tiled_c in tiled.chunks])
input_chunk_keys = set(
itertools.chain(
*(
[(it.key, it.id) for it in input.chunks]
for input in to_tile.inputs
)
)
)
while len(q) > 0:
c = q.popleft()
if (c.key, c.id) in input_chunk_keys:
continue
if c not in chunk_graph:
chunk_graph.add_node(c)
for ic in c.inputs or []:
if ic not in chunk_graph:
chunk_graph.add_node(ic)
q.append(ic)
chunk_graph.add_edge(ic, c)
for succ in tensor_graph.iter_successors(t):
if any(t.key not in tensor_to_tiled for t in succ.inputs):
continue
tq.append(succ)
# record the chunk nodes in graph
reserve_chunk = set()
result_chunk_keys = list()
for tk in self._target_tensor_chunk_ops:
for n in [c.data for t in tensor_to_tiled[tk] for c in t.chunks]:
result_chunk_keys.append(n.key)
dq_predecessors = deque([n])
while dq_predecessors:
current = dq_predecessors.popleft()
reserve_chunk.add(n)
predecessors = chunk_graph.predecessors(current)
dq_predecessors.extend(
[p for p in predecessors if p not in reserve_chunk]
)
reserve_chunk.update(predecessors)
# delete redundant chunk
for n in list(chunk_graph.iter_nodes()):
if n not in reserve_chunk:
chunk_graph.remove_node(n)
if compose:
chunk_graph.compose(keys=result_chunk_keys)
for tk in tensor_to_tiled:
if tk not in self._target_tensor_chunk_ops:
continue
for n in tensor_to_tiled[tk][-1].chunks:
self._terminal_chunk_op_tensor[n.op.key].add(tk)
self._target_tensor_chunk_ops[tk].add(n.op.key)
# sync chunk graph to kv store
if self._kv_store_ref is not None:
graph_path = "/sessions/%s/graphs/%s" % (self._session_id, self._graph_key)
self._kv_store_ref.write(
"%s/chunk_graph" % graph_path,
serialize_graph(chunk_graph, compress=True),
_tell=True,
_wait=False,
)
self._nodes_num = len(chunk_graph)
self._chunk_graph_cache = chunk_graph
for n in self._chunk_graph_cache:
self._op_key_to_chunk[n.op.key].append(n)
|
https://github.com/mars-project/mars/issues/56
|
In [16]: a = mt.random.rand(20, 10, chunk_size=10)
In [19]: _, s, _ = mt.linalg.svd(a)
In [20]: s.build_graph(tiled=False)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-20-77797d4ed4a0> in <module>
----> 1 s.build_graph(tiled=False)
~/Workspace/mars/mars/tensor/core.py in build_graph(self, graph, cls, tiled, compose)
298 if not graph.contains(chunk):
299 graph.add_node(chunk)
--> 300 children = chunk.inputs or []
301 for c in children:
302 if not graph.contains(c):
AttributeError: 'NoneType' object has no attribute 'inputs'
|
AttributeError
|
def execute_chunk(
chunk,
executor=None,
ref_counts=None,
chunk_result=None,
finishes=None,
visited=None,
q=None,
lock=None,
semaphore=None,
has_error=None,
preds=None,
succs=None,
mock=False,
sparse_mock_percent=1.0,
):
try:
with lock:
if (chunk.key, chunk.id) in visited:
return
visited.add((chunk.key, chunk.id))
finished = finishes.get(chunk.key)
if not finished:
if not mock:
# do real execution
if chunk.key not in chunk_result:
executor.handle(chunk, chunk_result)
else:
percent = sparse_mock_percent if chunk.op.sparse else 1.0
# we put the estimated size of data into the chunk_result
chunk_result[chunk.key] = (
np.prod(chunk.shape) * chunk.dtype.itemsize * percent
)
with lock:
for output in chunk.op.outputs:
finishes[output.key] = True
if (
output.key in ref_counts
and ref_counts[output.key] == 0
and output.key in chunk_result
):
# some op have more than 1 outputs,
# and some of the outputs are not in the result ones
del chunk_result[output.key]
for pred_key in preds[chunk.key]:
with lock:
if pred_key not in ref_counts:
continue
ref_counts[pred_key] -= 1
if ref_counts[pred_key] == 0:
del chunk_result[pred_key]
for succ in succs[chunk.key, chunk.id]:
with lock:
if (succ.key, succ.id) in visited:
continue
if len(preds[succ.key]) == 0 or all(
finishes.get(k, False) for k in preds[succ.key]
):
q.insert(0, succ)
except Exception:
has_error.set()
raise
finally:
semaphore.release()
|
def execute_chunk(
chunk,
executor=None,
ref_counts=None,
chunk_result=None,
finishes=None,
visited=None,
q=None,
lock=None,
semaphore=None,
has_error=None,
preds=None,
succs=None,
mock=False,
sparse_mock_percent=1.0,
):
try:
with lock:
if (chunk.key, chunk.id) in visited:
return
visited.add((chunk.key, chunk.id))
finished = finishes.get(chunk.key)
if not finished:
if not mock:
# do real execution
if chunk.key not in chunk_result:
executor.handle(chunk, chunk_result)
else:
percent = sparse_mock_percent if chunk.op.sparse else 1.0
# we put the estimated size of data into the chunk_result
chunk_result[chunk.key] = (
np.prod(chunk.shape) * chunk.dtype.itemsize * percent
)
with lock:
for output in chunk.op.outputs:
finishes[output.key] = True
for pred_key in preds[chunk.key]:
with lock:
if pred_key not in ref_counts:
continue
ref_counts[pred_key] -= 1
if ref_counts[pred_key] == 0:
del chunk_result[pred_key]
for succ in succs[chunk.key, chunk.id]:
with lock:
if (succ.key, succ.id) in visited:
continue
if len(preds[succ.key]) == 0 or all(
finishes.get(k, False) for k in preds[succ.key]
):
q.insert(0, succ)
except Exception:
has_error.set()
raise
finally:
semaphore.release()
|
https://github.com/mars-project/mars/issues/56
|
In [16]: a = mt.random.rand(20, 10, chunk_size=10)
In [19]: _, s, _ = mt.linalg.svd(a)
In [20]: s.build_graph(tiled=False)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-20-77797d4ed4a0> in <module>
----> 1 s.build_graph(tiled=False)
~/Workspace/mars/mars/tensor/core.py in build_graph(self, graph, cls, tiled, compose)
298 if not graph.contains(chunk):
299 graph.add_node(chunk)
--> 300 children = chunk.inputs or []
301 for c in children:
302 if not graph.contains(c):
AttributeError: 'NoneType' object has no attribute 'inputs'
|
AttributeError
|
def new_tensors(
self,
inputs,
shape,
dtype=None,
chunks=None,
nsplits=None,
output_limit=None,
kws=None,
**kw,
):
tensor_cls = SparseTensor if getattr(self, "issparse")() else Tensor
output_limit = (
getattr(self, "output_limit") if output_limit is None else output_limit
)
self.check_inputs(inputs)
getattr(self, "_set_inputs")(inputs)
if getattr(self, "_key", None) is None:
getattr(self, "update_key")() # update key when inputs are set
if (
isinstance(shape, (list, tuple))
and len(shape) > 0
and isinstance(shape[0], (list, tuple))
):
if not np.isinf(output_limit) and len(shape) != output_limit:
raise ValueError(
"shape size must be equal to output limit, expect {0}, got {1}".format(
output_limit, len(shape)
)
)
else:
shape = [shape] * output_limit
if kws is not None and kw:
raise ValueError("can only pass kws or kw")
tensors = []
raw_chunks = chunks
raw_nsplits = nsplits
for i, s in enumerate(shape):
dt = None
if kws:
kw = kws[i]
chunks = kw.pop("chunks", raw_chunks)
nsplits = kw.pop("nsplits", raw_nsplits)
dt = kw.pop("dtype", None)
if nsplits is not None:
kw["_nsplits"] = nsplits
if dt is None:
dt = dtype[i] if isinstance(dtype, (tuple, list)) else dtype
data = TensorData(_shape=s, _dtype=dt, _op=self, _chunks=chunks, **kw)
tensors.append(tensor_cls(data))
setattr(self, "outputs", tensors)
if len(tensors) > 1:
# for each output tensor, hold the reference to the other outputs
# so that either no one or everyone are gc collected
for i, t in enumerate(tensors):
t.data._siblings = [
tensor.data for tensor in tensors[:i] + tensors[i + 1 :]
]
return tensors
|
def new_tensors(
self,
inputs,
shape,
dtype=None,
chunks=None,
nsplits=None,
output_limit=None,
kws=None,
**kw,
):
tensor_cls = SparseTensor if getattr(self, "issparse")() else Tensor
output_limit = (
getattr(self, "output_limit") if output_limit is None else output_limit
)
self.check_inputs(inputs)
getattr(self, "_set_inputs")(inputs)
if getattr(self, "_key", None) is None:
getattr(self, "update_key")() # update key when inputs are set
if (
isinstance(shape, (list, tuple))
and len(shape) > 0
and isinstance(shape[0], (list, tuple))
):
if not np.isinf(output_limit) and len(shape) != output_limit:
raise ValueError(
"shape size must be equal to output limit, expect {0}, got {1}".format(
output_limit, len(shape)
)
)
else:
shape = [shape] * output_limit
if kws is not None and kw:
raise ValueError("can only pass kws or kw")
tensors = []
raw_chunks = chunks
raw_nsplits = nsplits
for i, s in enumerate(shape):
dt = None
if kws:
kw = kws[i]
chunks = kw.pop("chunks", raw_chunks)
nsplits = kw.pop("nsplits", raw_nsplits)
dt = kw.pop("dtype", None)
if nsplits is not None:
kw["_nsplits"] = nsplits
if dt is None:
dt = dtype[i] if isinstance(dtype, (tuple, list)) else dtype
data = TensorData(_shape=s, _dtype=dt, _op=self, _chunks=chunks, **kw)
tensors.append(tensor_cls(data))
setattr(self, "outputs", tensors)
return tensors
|
https://github.com/mars-project/mars/issues/56
|
In [16]: a = mt.random.rand(20, 10, chunk_size=10)
In [19]: _, s, _ = mt.linalg.svd(a)
In [20]: s.build_graph(tiled=False)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-20-77797d4ed4a0> in <module>
----> 1 s.build_graph(tiled=False)
~/Workspace/mars/mars/tensor/core.py in build_graph(self, graph, cls, tiled, compose)
298 if not graph.contains(chunk):
299 graph.add_node(chunk)
--> 300 children = chunk.inputs or []
301 for c in children:
302 if not graph.contains(c):
AttributeError: 'NoneType' object has no attribute 'inputs'
|
AttributeError
|
def tile(cls, op):
raise NotSupportTile(
"TensorFuseChunk is a chunk operand which does not support tile"
)
|
def tile(cls, op):
raise NotSupportTile("FetchChunk is a chunk operand which does not support tile")
|
https://github.com/mars-project/mars/issues/56
|
In [16]: a = mt.random.rand(20, 10, chunk_size=10)
In [19]: _, s, _ = mt.linalg.svd(a)
In [20]: s.build_graph(tiled=False)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-20-77797d4ed4a0> in <module>
----> 1 s.build_graph(tiled=False)
~/Workspace/mars/mars/tensor/core.py in build_graph(self, graph, cls, tiled, compose)
298 if not graph.contains(chunk):
299 graph.add_node(chunk)
--> 300 children = chunk.inputs or []
301 for c in children:
302 if not graph.contains(c):
AttributeError: 'NoneType' object has no attribute 'inputs'
|
AttributeError
|
def tile(cls, op):
from ..merge.concatenate import TensorConcatenate
from ..indexing.slice import TensorSlice
from .dot import TensorDot
from .qr import TensorQR
from .svd import TensorSVD
calc_svd = cls._is_svd()
a = op.input
tinyq, tinyr = np.linalg.qr(np.ones((1, 1), dtype=a.dtype))
q_dtype, r_dtype = tinyq.dtype, tinyr.dtype
if a.chunk_shape[1] != 1:
new_chunk_size = decide_chunk_sizes(a.shape, {1: a.shape[1]}, a.dtype.itemsize)
a = a.rechunk(new_chunk_size).single_tiles()
# stage 1, map phase
stage1_q_chunks, stage1_r_chunks = stage1_chunks = [[], []] # Q and R chunks
for c in a.chunks:
x, y = c.shape
q_shape = c.shape if x > y else (x, x)
r_shape = c.shape if x < y else (y, y)
qr_op = TensorQR()
qr_chunks = qr_op.new_chunks(
[c],
[q_shape, r_shape],
index=c.index,
kws=[{"side": "q", "dtype": q_dtype}, {"side": "r", "dtype": r_dtype}],
)
stage1_chunks[0].append(qr_chunks[0])
stage1_chunks[1].append(qr_chunks[1])
# stage 2, reduce phase
# concatenate all r chunks into one
shape = (sum(c.shape[0] for c in stage1_r_chunks), stage1_r_chunks[0].shape[1])
concat_op = TensorConcatenate(axis=0, dtype=stage1_r_chunks[0].dtype)
concat_r_chunk = concat_op.new_chunk(stage1_r_chunks, shape, index=(0, 0))
qr_op = TensorQR()
qr_shapes = concat_r_chunk.shape, (concat_r_chunk.shape[1],) * 2
qr_chunks = qr_op.new_chunks(
[concat_r_chunk],
qr_shapes,
index=concat_r_chunk.index,
kws=[{"side": "q", "dtype": q_dtype}, {"side": "r", "dtype": r_dtype}],
)
stage2_q_chunk, stage2_r_chunk = qr_chunks
# stage 3, map phase
# split stage2_q_chunk into the same size as stage1_q_chunks
q_splits = np.cumsum([c.shape[1] for c in stage1_q_chunks])
q_slices = [
slice(q_splits[i]) if i == 0 else slice(q_splits[i - 1], q_splits[i])
for i in range(len(q_splits))
]
stage2_q_chunks = []
for c, s in zip(stage1_q_chunks, q_slices):
slice_op = TensorSlice(slices=[s], dtype=c.dtype)
stage2_q_chunks.append(
slice_op.new_chunk([stage2_q_chunk], c.shape, index=c.index)
)
stage3_q_chunks = []
for c1, c2 in izip(stage1_q_chunks, stage2_q_chunks):
dot_op = TensorDot(dtype=q_dtype)
shape = (c1.shape[0], c2.shape[1])
stage3_q_chunks.append(dot_op.new_chunk([c1, c2], shape, index=c1.index))
if not calc_svd:
q, r = op.outputs
new_op = op.copy()
q_nsplits = ((c.shape[0] for c in stage3_q_chunks), (1,))
r_nsplits = ((stage2_r_chunk.shape[0],), (stage2_r_chunk.shape[1],))
kws = [
# Q
{"chunks": stage3_q_chunks, "nsplits": q_nsplits, "dtype": q.dtype},
# R, calculate from stage2
{"chunks": [stage2_r_chunk], "nsplits": r_nsplits, "dtype": r.dtype},
]
return new_op.new_tensors(op.inputs, [q.shape, r.shape], kws=kws)
else:
U, s, V = op.outputs
U_dtype, s_dtype, V_dtype = U.dtype, s.dtype, V.dtype
U_shape, s_shape, V_shape = U.shape, s.shape, V.shape
svd_op = TensorSVD()
u_shape = stage2_r_chunk.shape
s_shape = (stage2_r_chunk.shape[1],)
v_shape = (stage2_r_chunk.shape[1],) * 2
stage2_usv_chunks = svd_op.new_chunks(
[stage2_r_chunk],
[u_shape, s_shape, v_shape],
kws=[
{"side": "U", "dtype": U_dtype, "index": stage2_r_chunk.index},
{"side": "s", "dtype": s_dtype, "index": stage2_r_chunk.index[1:]},
{"side": "V", "dtype": V_dtype, "index": stage2_r_chunk.index},
],
)
stage2_u_chunk, stage2_s_chunk, stage2_v_chunk = stage2_usv_chunks
# stage 4, U = Q @ u
stage4_u_chunks = []
if U is not None: # U is not garbage collected
for c1 in stage3_q_chunks:
dot_op = TensorDot(dtype=U_dtype)
shape = (c1.shape[0], stage2_u_chunk.shape[1])
stage4_u_chunks.append(
dot_op.new_chunk([c1, stage2_u_chunk], shape, index=c1.index)
)
new_op = op.copy()
u_nsplits = ((c.shape[0] for c in stage4_u_chunks), (1,))
s_nsplits = ((stage2_s_chunk.shape[0],),)
v_nsplits = ((stage2_v_chunk.shape[0],), (stage2_v_chunk.shape[1],))
kws = [
{"chunks": stage4_u_chunks, "nsplits": u_nsplits, "dtype": U_dtype}, # U
{"chunks": [stage2_s_chunk], "nsplits": s_nsplits, "dtype": s_dtype}, # s
{"chunks": [stage2_v_chunk], "nsplits": v_nsplits, "dtype": V_dtype}, # V
]
return new_op.new_tensors(op.inputs, [U_shape, s_shape, V_shape], kws=kws)
|
def tile(cls, op):
from ..merge.concatenate import TensorConcatenate
from ..indexing.slice import TensorSlice
from .dot import TensorDot
from .qr import TensorQR
from .svd import TensorSVD
calc_svd = cls._is_svd()
get_obj_attr = cls._get_obj_attr
a = op.input
tinyq, tinyr = np.linalg.qr(np.ones((1, 1), dtype=a.dtype))
q_dtype, r_dtype = tinyq.dtype, tinyr.dtype
if a.chunk_shape[1] != 1:
new_chunk_size = decide_chunk_sizes(a.shape, {1: a.shape[1]}, a.dtype.itemsize)
a = a.rechunk(new_chunk_size).single_tiles()
# stage 1, map phase
stage1_q_chunks, stage1_r_chunks = stage1_chunks = [[], []] # Q and R chunks
for c in a.chunks:
x, y = c.shape
q_shape = c.shape if x > y else (x, x)
r_shape = c.shape if x < y else (y, y)
qr_op = TensorQR()
qr_chunks = qr_op.new_chunks(
[c],
[q_shape, r_shape],
index=c.index,
kws=[{"side": "q", "dtype": q_dtype}, {"side": "r", "dtype": r_dtype}],
)
stage1_chunks[0].append(qr_chunks[0])
stage1_chunks[1].append(qr_chunks[1])
# stage 2, reduce phase
# concatenate all r chunks into one
shape = (sum(c.shape[0] for c in stage1_r_chunks), stage1_r_chunks[0].shape[1])
concat_op = TensorConcatenate(axis=0, dtype=stage1_r_chunks[0].dtype)
concat_r_chunk = concat_op.new_chunk(stage1_r_chunks, shape, index=(0, 0))
qr_op = TensorQR()
qr_shapes = concat_r_chunk.shape, (concat_r_chunk.shape[1],) * 2
qr_chunks = qr_op.new_chunks(
[concat_r_chunk],
qr_shapes,
index=concat_r_chunk.index,
kws=[{"side": "q", "dtype": q_dtype}, {"side": "r", "dtype": r_dtype}],
)
stage2_q_chunk, stage2_r_chunk = qr_chunks
# stage 3, map phase
# split stage2_q_chunk into the same size as stage1_q_chunks
q_splits = np.cumsum([c.shape[1] for c in stage1_q_chunks])
q_slices = [
slice(q_splits[i]) if i == 0 else slice(q_splits[i - 1], q_splits[i])
for i in range(len(q_splits))
]
stage2_q_chunks = []
for c, s in zip(stage1_q_chunks, q_slices):
slice_op = TensorSlice(slices=[s], dtype=c.dtype)
stage2_q_chunks.append(
slice_op.new_chunk([stage2_q_chunk], c.shape, index=c.index)
)
stage3_q_chunks = []
for c1, c2 in izip(stage1_q_chunks, stage2_q_chunks):
dot_op = TensorDot(dtype=q_dtype)
shape = (c1.shape[0], c2.shape[1])
stage3_q_chunks.append(dot_op.new_chunk([c1, c2], shape, index=c1.index))
if not calc_svd:
q, r = op.outputs
new_op = op.copy()
q_nsplits = ((c.shape[0] for c in stage3_q_chunks), (1,))
r_nsplits = ((stage2_r_chunk.shape[0],), (stage2_r_chunk.shape[1],))
kws = [
# Q
{
"chunks": stage3_q_chunks,
"nsplits": q_nsplits,
"dtype": get_obj_attr(q, "dtype"),
},
# R, calculate from stage2
{
"chunks": [stage2_r_chunk],
"nsplits": r_nsplits,
"dtype": get_obj_attr(r, "dtype"),
},
]
return new_op.new_tensors(
op.inputs, [get_obj_attr(q, "shape"), get_obj_attr(r, "shape")], kws=kws
)
else:
U, s, V = op.outputs
U_dtype, s_dtype, V_dtype = (
get_obj_attr(U, "dtype"),
get_obj_attr(s, "dtype"),
get_obj_attr(V, "dtype"),
)
U_shape, s_shape, V_shape = (
get_obj_attr(U, "shape"),
get_obj_attr(s, "shape"),
get_obj_attr(V, "shape"),
)
svd_op = TensorSVD()
u_shape = stage2_r_chunk.shape
s_shape = (stage2_r_chunk.shape[1],)
v_shape = (stage2_r_chunk.shape[1],) * 2
stage2_usv_chunks = svd_op.new_chunks(
[stage2_r_chunk],
[u_shape, s_shape, v_shape],
kws=[
{"side": "U", "dtype": U_dtype, "index": stage2_r_chunk.index},
{"side": "s", "dtype": s_dtype, "index": stage2_r_chunk.index[1:]},
{"side": "V", "dtype": V_dtype, "index": stage2_r_chunk.index},
],
)
stage2_u_chunk, stage2_s_chunk, stage2_v_chunk = stage2_usv_chunks
# stage 4, U = Q @ u
stage4_u_chunks = []
if U is not None: # U is not garbage collected
for c1 in stage3_q_chunks:
dot_op = TensorDot(dtype=U_dtype)
shape = (c1.shape[0], stage2_u_chunk.shape[1])
stage4_u_chunks.append(
dot_op.new_chunk([c1, stage2_u_chunk], shape, index=c1.index)
)
new_op = op.copy()
u_nsplits = ((c.shape[0] for c in stage4_u_chunks), (1,))
s_nsplits = ((stage2_s_chunk.shape[0],),)
v_nsplits = ((stage2_v_chunk.shape[0],), (stage2_v_chunk.shape[1],))
kws = [
{"chunks": stage4_u_chunks, "nsplits": u_nsplits, "dtype": U_dtype}, # U
{"chunks": [stage2_s_chunk], "nsplits": s_nsplits, "dtype": s_dtype}, # s
{"chunks": [stage2_v_chunk], "nsplits": v_nsplits, "dtype": V_dtype}, # V
]
return new_op.new_tensors(op.inputs, [U_shape, s_shape, V_shape], kws=kws)
|
https://github.com/mars-project/mars/issues/56
|
In [16]: a = mt.random.rand(20, 10, chunk_size=10)
In [19]: _, s, _ = mt.linalg.svd(a)
In [20]: s.build_graph(tiled=False)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-20-77797d4ed4a0> in <module>
----> 1 s.build_graph(tiled=False)
~/Workspace/mars/mars/tensor/core.py in build_graph(self, graph, cls, tiled, compose)
298 if not graph.contains(chunk):
299 graph.add_node(chunk)
--> 300 children = chunk.inputs or []
301 for c in children:
302 if not graph.contains(c):
AttributeError: 'NoneType' object has no attribute 'inputs'
|
AttributeError
|
def tile(cls, op):
q, r = op.outputs
q_dtype, r_dtype = q.dtype, r.dtype
q_shape, r_shape = q.shape, r.shape
in_tensor = op.input
if in_tensor.chunk_shape == (1, 1):
in_chunk = in_tensor.chunks[0]
chunk_op = op.copy().reset_key()
qr_chunks = chunk_op.new_chunks(
[in_chunk],
(q_shape, r_shape),
index=in_chunk.index,
kws=[{"side": "q"}, {"side": "r"}],
)
q_chunk, r_chunk = qr_chunks
new_op = op.copy()
kws = [
{"chunks": [q_chunk], "nsplits": ((1,), (1,)), "dtype": q_dtype},
{"chunks": [r_chunk], "nsplits": ((1,), (1,)), "dtype": r_dtype},
]
return new_op.new_tensors(op.inputs, [q_shape, r_shape], kws=kws)
elif op.method == "tsqr":
return super(TensorQR, cls).tile(op)
# TODO(hks): support sfqr(short-and-fat qr)
else:
raise NotImplementedError("Only tsqr method supported for now")
|
def tile(cls, op):
q, r = op.outputs
q_dtype, r_dtype = cls._get_obj_attr(q, "dtype"), cls._get_obj_attr(r, "dtype")
q_shape, r_shape = cls._get_obj_attr(q, "shape"), cls._get_obj_attr(r, "shape")
in_tensor = op.input
if in_tensor.chunk_shape == (1, 1):
in_chunk = in_tensor.chunks[0]
chunk_op = op.copy().reset_key()
qr_chunks = chunk_op.new_chunks(
[in_chunk],
(q_shape, r_shape),
index=in_chunk.index,
kws=[{"side": "q"}, {"side": "r"}],
)
q_chunk, r_chunk = qr_chunks
new_op = op.copy()
kws = [
{"chunks": [q_chunk], "nsplits": ((1,), (1,)), "dtype": q_dtype},
{"chunks": [r_chunk], "nsplits": ((1,), (1,)), "dtype": r_dtype},
]
return new_op.new_tensors(op.inputs, [q_shape, r_shape], kws=kws)
elif op.method == "tsqr":
return super(TensorQR, cls).tile(op)
# TODO(hks): support sfqr(short-and-fat qr)
else:
raise NotImplementedError("Only tsqr method supported for now")
|
https://github.com/mars-project/mars/issues/56
|
In [16]: a = mt.random.rand(20, 10, chunk_size=10)
In [19]: _, s, _ = mt.linalg.svd(a)
In [20]: s.build_graph(tiled=False)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-20-77797d4ed4a0> in <module>
----> 1 s.build_graph(tiled=False)
~/Workspace/mars/mars/tensor/core.py in build_graph(self, graph, cls, tiled, compose)
298 if not graph.contains(chunk):
299 graph.add_node(chunk)
--> 300 children = chunk.inputs or []
301 for c in children:
302 if not graph.contains(c):
AttributeError: 'NoneType' object has no attribute 'inputs'
|
AttributeError
|
def tile(cls, op):
U, s, V = op.outputs
U_dtype, s_dtype, V_dtype = U.dtype, s.dtype, V.dtype
U_shape, s_shape, V_shape = U.shape, s.shape, V.shape
in_tensor = op.input
if in_tensor.chunk_shape == (1, 1):
in_chunk = in_tensor.chunks[0]
chunk_op = op.copy().reset_key()
svd_chunks = chunk_op.new_chunks(
[in_chunk],
(U_shape, s_shape, V_shape),
kws=[
{"side": "U", "dtype": U_dtype, "index": in_chunk.index},
{"side": "s", "dtype": s_dtype, "index": in_chunk.index[1:]},
{"side": "V", "dtype": V_dtype, "index": in_chunk.index},
],
)
U_chunk, s_chunk, V_chunk = svd_chunks
new_op = op.copy()
kws = [
{
"chunks": [U_chunk],
"nsplits": tuple((s,) for s in U_shape),
"dtype": U_dtype,
},
{
"chunks": [s_chunk],
"nsplits": tuple((s,) for s in s_shape),
"dtype": s_dtype,
},
{
"chunks": [V_chunk],
"nsplits": tuple((s,) for s in V_shape),
"dtype": V_dtype,
},
]
return new_op.new_tensors(op.inputs, [U_shape, s_shape, V_shape], kws=kws)
elif op.method == "tsqr":
return super(TensorSVD, cls).tile(op)
else:
raise NotImplementedError("Only tsqr method supported for now")
|
def tile(cls, op):
get_obj_attr = cls._get_obj_attr
U, s, V = op.outputs
U_dtype, s_dtype, V_dtype = (
get_obj_attr(U, "dtype"),
get_obj_attr(s, "dtype"),
get_obj_attr(V, "dtype"),
)
U_shape, s_shape, V_shape = (
get_obj_attr(U, "shape"),
get_obj_attr(s, "shape"),
get_obj_attr(V, "shape"),
)
in_tensor = op.input
if in_tensor.chunk_shape == (1, 1):
in_chunk = in_tensor.chunks[0]
chunk_op = op.copy().reset_key()
svd_chunks = chunk_op.new_chunks(
[in_chunk],
(U_shape, s_shape, V_shape),
kws=[
{"side": "U", "dtype": U_dtype, "index": in_chunk.index},
{"side": "s", "dtype": s_dtype, "index": in_chunk.index[1:]},
{"side": "V", "dtype": V_dtype, "index": in_chunk.index},
],
)
U_chunk, s_chunk, V_chunk = svd_chunks
new_op = op.copy()
kws = [
{
"chunks": [U_chunk],
"nsplits": tuple((s,) for s in U_shape),
"dtype": U_dtype,
},
{
"chunks": [s_chunk],
"nsplits": tuple((s,) for s in s_shape),
"dtype": s_dtype,
},
{
"chunks": [V_chunk],
"nsplits": tuple((s,) for s in V_shape),
"dtype": V_dtype,
},
]
return new_op.new_tensors(op.inputs, [U_shape, s_shape, V_shape], kws=kws)
elif op.method == "tsqr":
return super(TensorSVD, cls).tile(op)
else:
raise NotImplementedError("Only tsqr method supported for now")
|
https://github.com/mars-project/mars/issues/56
|
In [16]: a = mt.random.rand(20, 10, chunk_size=10)
In [19]: _, s, _ = mt.linalg.svd(a)
In [20]: s.build_graph(tiled=False)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-20-77797d4ed4a0> in <module>
----> 1 s.build_graph(tiled=False)
~/Workspace/mars/mars/tensor/core.py in build_graph(self, graph, cls, tiled, compose)
298 if not graph.contains(chunk):
299 graph.add_node(chunk)
--> 300 children = chunk.inputs or []
301 for c in children:
302 if not graph.contains(c):
AttributeError: 'NoneType' object has no attribute 'inputs'
|
AttributeError
|
def _index_set_value(ctx, chunk):
indexes = [
ctx[index.key] if hasattr(index, "key") else index for index in chunk.op.indexes
]
input = ctx[chunk.inputs[0].key].copy()
value = (
ctx[chunk.op.value.key] if hasattr(chunk.op.value, "key") else chunk.op.value
)
if hasattr(input, "flags") and not input.flags.writeable:
input.setflags(write=True)
input[tuple(indexes)] = value
ctx[chunk.key] = input
|
def _index_set_value(ctx, chunk):
indexes = [
ctx[index.key] if hasattr(index, "key") else index for index in chunk.op.indexes
]
input = ctx[chunk.inputs[0].key]
value = (
ctx[chunk.op.value.key] if hasattr(chunk.op.value, "key") else chunk.op.value
)
if hasattr(input, "flags") and not input.flags.writeable:
input.setflags(write=True)
input[tuple(indexes)] = value
ctx[chunk.key] = input
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def new_tensors(self, inputs, shape, **kw):
indexes = kw.pop("indexes", None)
with self._handle_params(inputs, indexes) as mix_inputs:
return super(TensorIndex, self).new_tensors(mix_inputs, shape, **kw)
|
def new_tensors(self, inputs, shape, **kw):
tensor, indexes = inputs
self._indexes = indexes
inputs = self._handle_inputs(inputs)
return super(TensorIndex, self).new_tensors(inputs, shape, **kw)
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
def new_chunks(self, inputs, shape, **kw):
indexes = kw.pop("indexes", None)
with self._handle_params(inputs, indexes) as mix_inputs:
return super(TensorIndex, self).new_chunks(mix_inputs, shape, **kw)
|
def new_chunks(self, inputs, shape, **kw):
chunk, indexes = inputs
self._indexes = indexes
inputs = self._handle_inputs(inputs)
return super(TensorIndex, self).new_chunks(inputs, shape, **kw)
|
https://github.com/mars-project/mars/issues/64
|
In [1]: from mars.deploy.local import new_cluster
In [2]: cluster = new_cluster(scheduler_n_process=2, worker_n_process=3, web=True)
In [3]: import mars.tensor as mt
In [4]: t = mt.random.rand(10, 10)
In [5]: mt.split(t, 5).execute()
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "mars/actors/pool/gevent_pool.pyx", line 88, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 91, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 102, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/pool/gevent_pool.pyx", line 96, in mars.actors.pool.gevent_pool.ActorExecutionContext.fire_run
File "mars/actors/core.pyx", line 108, in mars.actors.core.FunctionActor.on_receive
File "mars/actors/core.pyx", line 110, in mars.actors.core.FunctionActor.on_receive
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/scheduler/graph.py", line 527, in get_executable_operand_dag
kws=kws)
File "/Users/hekaisheng/Documents/mars_dev/mars/mars/tensor/expressions/indexing/getitem.py", line 58, in new_chunks
chunk, indexes = inputs
ValueError: not enough values to unpack (expected 2, got 1)
2018-12-25T02:23:13Z <Greenlet "Greenlet-0" at 0x11a9f47b8: <built-in method fire_run of mars.actors.pool.gevent_pool.ActorExecutionContext object at 0x11b154458>> failed with ValueError
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.