after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def run(self, edit, message=None):
self.commit_on_close = self.view.settings().get("git_savvy.commit_on_close")
if self.commit_on_close:
# make sure the view would not be closed by commiting synchronously
self.run_async(commit_message=message)
else:
sublime.set_timeout_async(lambda: self.run_async(commit_message=message), 0)
|
def run(self, edit, message=None):
sublime.set_timeout_async(lambda: self.run_async(commit_message=message), 0)
|
https://github.com/timbrel/GitSavvy/issues/772
|
Traceback (most recent call last):
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 103, in git
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 297, in repo_path
RuntimeError: Window does not exist.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "core.commands.commit in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 142, in <lambda>
File "core.commands.commit in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 162, in run_async
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 106, in git
File "core.exceptions in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 7, in __init__
TypeError: GitSavvyError does not take keyword arguments
|
RuntimeError
|
def run_async(self, commit_message=None):
if commit_message is None:
view_text = self.view.substr(sublime.Region(0, self.view.size()))
help_text = self.view.settings().get("git_savvy.commit_view.help_text")
commit_message = view_text.split(help_text)[0]
include_unstaged = self.view.settings().get(
"git_savvy.commit_view.include_unstaged"
)
show_panel_overrides = sublime.load_settings("GitSavvy.sublime-settings").get(
"show_panel_for"
)
self.git(
"commit",
"-q" if "commit" not in show_panel_overrides else None,
"-a" if include_unstaged else None,
"--amend" if self.view.settings().get("git_savvy.commit_view.amend") else None,
"-F",
"-",
stdin=commit_message,
)
is_commit_view = self.view.settings().get("git_savvy.commit_view")
if is_commit_view and not self.commit_on_close:
self.view.window().focus_view(self.view)
self.view.set_scratch(True) # ignore dirty on actual commit
self.view.window().run_command("close_file")
sublime.set_timeout_async(
lambda: util.view.refresh_gitsavvy(sublime.active_window().active_view())
)
|
def run_async(self, commit_message=None):
if commit_message is None:
view_text = self.view.substr(sublime.Region(0, self.view.size()))
help_text = self.view.settings().get("git_savvy.commit_view.help_text")
commit_message = view_text.split(help_text)[0]
include_unstaged = self.view.settings().get(
"git_savvy.commit_view.include_unstaged"
)
show_panel_overrides = sublime.load_settings("GitSavvy.sublime-settings").get(
"show_panel_for"
)
self.git(
"commit",
"-q" if "commit" not in show_panel_overrides else None,
"-a" if include_unstaged else None,
"--amend" if self.view.settings().get("git_savvy.commit_view.amend") else None,
"-F",
"-",
stdin=commit_message,
)
# ensure view is not already closed (i.e.: when "commit_on_close" enabled)
is_commit_view = self.view.settings().get("git_savvy.commit_view")
if is_commit_view and self.view.window():
self.view.window().focus_view(self.view)
self.view.set_scratch(True) # ignore dirty on actual commit
self.view.window().run_command("close_file")
else:
sublime.set_timeout_async(
lambda: util.view.refresh_gitsavvy(sublime.active_window().active_view())
)
|
https://github.com/timbrel/GitSavvy/issues/772
|
Traceback (most recent call last):
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 103, in git
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 297, in repo_path
RuntimeError: Window does not exist.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "core.commands.commit in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 142, in <lambda>
File "core.commands.commit in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 162, in run_async
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 106, in git
File "core.exceptions in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 7, in __init__
TypeError: GitSavvyError does not take keyword arguments
|
RuntimeError
|
def run(self, edit):
if self.view.settings().get("git_savvy.commit_on_close"):
view_text = self.view.substr(sublime.Region(0, self.view.size()))
help_text = self.view.settings().get("git_savvy.commit_view.help_text")
message_txt = view_text.split(help_text)[0] if help_text in view_text else ""
message_txt = message_txt.strip()
if message_txt:
self.view.run_command("gs_commit_view_do_commit", {"message": message_txt})
|
def run(self, edit):
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
if savvy_settings.get("commit_on_close"):
view_text = self.view.substr(sublime.Region(0, self.view.size()))
help_text = self.view.settings().get("git_savvy.commit_view.help_text")
message_txt = view_text.split(help_text)[0] if help_text in view_text else ""
message_txt = message_txt.strip()
if message_txt:
self.view.run_command("gs_commit_view_do_commit", {"message": message_txt})
|
https://github.com/timbrel/GitSavvy/issues/772
|
Traceback (most recent call last):
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 103, in git
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 297, in repo_path
RuntimeError: Window does not exist.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "core.commands.commit in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 142, in <lambda>
File "core.commands.commit in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 162, in run_async
File "core.git_command in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 106, in git
File "core.exceptions in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 7, in __init__
TypeError: GitSavvyError does not take keyword arguments
|
RuntimeError
|
def add_ignore(self, path_or_pattern):
"""
Add the provided relative path or pattern to the repo's `.gitignore` file.
"""
global linesep
if not linesep:
# Use native line ending on Windows only when `autocrlf` is set to `true`.
if os.name == "nt":
autocrlf = (
self.git(
"config", "--global", "core.autocrlf", throw_on_stderr=False
).strip()
== "true"
)
linesep = os.linesep if autocrlf else "\n"
else:
linesep = os.linesep
gitignore = os.path.join(self.repo_path, ".gitignore")
with util.file.safe_open(gitignore, "at", encoding="utf-8") as ignore_file:
ignore_file.write(linesep + path_or_pattern + linesep)
|
def add_ignore(self, path_or_pattern):
"""
Add the provided relative path or pattern to the repo's `.gitignore` file.
"""
global linesep
if not linesep:
# Use native line ending on Windows only when `autocrlf` is set to `true`.
if os.name == "nt":
autocrlf = (
self.git(
"config", "--global", "core.autocrlf", throw_on_stderr=False
).strip()
== "true"
)
linesep = os.linesep if autocrlf else "\n"
else:
linesep = os.linesep
with util.file.safe_open(
os.path.join(self.repo_path, ".gitignore"), "at"
) as ignore_file:
ignore_file.write(linesep + path_or_pattern + linesep)
|
https://github.com/timbrel/GitSavvy/issues/498
|
Traceback (most recent call last):
File "/Applications/Sublime Text.app/Contents/MacOS/sublime_plugin.py", line 574, in run_
return self.run(edit)
File "/Users/alextegelid/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/interfaces/status.py", line 566, in run
self.add_ignore(os.path.join("/", fpath))
File "/Users/alextegelid/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/git_mixins/ignore.py", line 24, in add_ignore
ignore_file.write(linesep + path_or_pattern + linesep)
UnicodeEncodeError: 'ascii' codec can't encode character '\xe4' in position 4: ordinal not in range(128)
|
UnicodeEncodeError
|
def run(self):
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
if savvy_settings.get("disable_git_init_prompt"):
return
active_view_id = self.window.active_view().id()
if active_view_id not in views_with_offer_made and sublime.ok_cancel_dialog(
NO_REPO_MESSAGE
):
self.window.run_command("gs_init")
else:
views_with_offer_made.add(active_view_id)
|
def run(self):
active_view_id = self.window.active_view().id()
if active_view_id not in views_with_offer_made and sublime.ok_cancel_dialog(
NO_REPO_MESSAGE
):
self.window.run_command("gs_init")
else:
views_with_offer_made.add(active_view_id)
|
https://github.com/timbrel/GitSavvy/issues/589
|
Traceback (most recent call last):
File "core.git_command in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 133, in git
File "core.git_command in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 215, in repo_path
File "core.git_command in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 255, in _repo_path
File "core.git_command in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 156, in git
File "core.git_command in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 119, in raise_error
GitSavvy.core.git_command.GitSavvyError: `C:\Program Files\Git\cmd\git.EXE rev-parse --show-toplevel` failed with following output:
fatal: Not a git repository (or any of the parent directories): .git
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "common.ui in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 289, in run_async
File "common.ui in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 65, in __init__
File "common.ui in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 101, in render
File "common.ui in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 117, in _render_template
File "common.ui in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 155, in get_keyed_content
File "./python3.3/collections/__init__.py", line 56, in __init__
SublimeLinter: glualint: sv_purchasing.lua ['C:\\Users\\fpeij\\Programs\\glualint\\dist\\build\\glualint\\glualint.exe'] File "./python3.3/collections/abc.py", line 578, in update
File "common.ui in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 155, in <genexpr>
File "core.interfaces.status in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 189, in render_stashes
File "core.git_mixins.stash in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 13, in get_stashes
File "core.git_command in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 141, in git
File "core.git_command in C:\Users\fpeij\AppData\Roaming\Sublime Text 3\Installed Packages\GitSavvy.sublime-package", line 119, in raise_error
GitSavvy.core.git_command.GitSavvyError: `C:\Program Files\Git\cmd\git.EXE rev-parse --show-toplevel` failed with following output:
fatal: Not a git repository (or any of the parent directories): .git
|
GitSavvy.core.git_command.GitSavvyError
|
def run_async(self, settings=None, cached=False):
if settings is None:
file_view = self.window.active_view()
syntax_file = file_view.settings().get("syntax")
settings = {
"git_savvy.file_path": self.file_path,
"git_savvy.repo_path": self.repo_path,
}
else:
syntax_file = settings["syntax"]
del settings["syntax"]
view_key = "{0}+{1}".format(cached, settings["git_savvy.file_path"])
if (
view_key in inline_diff_views
and inline_diff_views[view_key] in sublime.active_window().views()
):
diff_view = inline_diff_views[view_key]
else:
diff_view = util.view.get_scratch_view(self, "inline_diff", read_only=True)
title = INLINE_DIFF_CACHED_TITLE if cached else INLINE_DIFF_TITLE
diff_view.set_name(title + os.path.basename(settings["git_savvy.file_path"]))
diff_view.set_syntax_file(syntax_file)
file_ext = util.file.get_file_extension(
os.path.basename(settings["git_savvy.file_path"])
)
self.augment_color_scheme(diff_view, file_ext)
diff_view.settings().set("git_savvy.inline_diff.cached", cached)
for k, v in settings.items():
diff_view.settings().set(k, v)
inline_diff_views[view_key] = diff_view
file_binary = util.file.get_file_contents_binary(
settings["git_savvy.repo_path"], settings["git_savvy.file_path"]
)
try:
file_binary.decode()
except UnicodeDecodeError as unicode_err:
try:
file_binary.decode("latin-1")
diff_view.settings().set("git_savvy.inline_diff.encoding", "latin-1")
except UnicodeDecodeError as unicode_err:
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
fallback_encoding = savvy_settings.get("fallback_encoding")
diff_view.settings().set(
"git_savvy.inline_diff.encoding", fallback_encoding
)
self.window.focus_view(diff_view)
diff_view.run_command("gs_inline_diff_refresh")
diff_view.run_command("gs_handle_vintageous")
|
def run_async(self, settings=None, cached=False):
if settings is None:
file_view = self.window.active_view()
syntax_file = file_view.settings().get("syntax")
settings = {
"git_savvy.file_path": self.file_path,
"git_savvy.repo_path": self.repo_path,
}
else:
syntax_file = settings["syntax"]
del settings["syntax"]
view_key = "{0}+{1}".format(cached, settings["git_savvy.file_path"])
if (
view_key in inline_diff_views
and inline_diff_views[view_key] in sublime.active_window().views()
):
diff_view = inline_diff_views[view_key]
else:
diff_view = util.view.get_scratch_view(self, "inline_diff", read_only=True)
title = INLINE_DIFF_CACHED_TITLE if cached else INLINE_DIFF_TITLE
diff_view.set_name(title + os.path.basename(settings["git_savvy.file_path"]))
diff_view.set_syntax_file(syntax_file)
file_ext = util.file.get_file_extension(
os.path.basename(settings["git_savvy.file_path"])
)
self.augment_color_scheme(diff_view, file_ext)
diff_view.settings().set("git_savvy.inline_diff.cached", cached)
for k, v in settings.items():
diff_view.settings().set(k, v)
inline_diff_views[view_key] = diff_view
file_binary = util.file.get_file_contents_binary(self.repo_path, self.file_path)
try:
file_binary.decode()
except UnicodeDecodeError as unicode_err:
try:
file_binary.decode("latin-1")
diff_view.settings().set("git_savvy.inline_diff.encoding", "latin-1")
except UnicodeDecodeError as unicode_err:
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
fallback_encoding = savvy_settings.get("fallback_encoding")
diff_view.settings().set(
"git_savvy.inline_diff.encoding", fallback_encoding
)
self.window.focus_view(diff_view)
diff_view.run_command("gs_inline_diff_refresh")
diff_view.run_command("gs_handle_vintageous")
|
https://github.com/timbrel/GitSavvy/issues/624
|
Traceback (most recent call last):
File "core.commands.inline_diff in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 37, in <lambda>
File "core.commands.inline_diff in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 70, in run_async
File "common.util.file in /Users/user/Library/Application Support/Sublime Text 3/Installed Packages/GitSavvy.sublime-package", line 55, in get_file_contents_binary
File "./python3.3/posixpath.py", line 80, in join
AttributeError: 'NoneType' object has no attribute 'startswith'
|
AttributeError
|
def do_clone(self):
sublime.status_message("Start cloning {}".format(self.git_url))
self.git("clone", self.git_url, self.suggested_git_root, working_dir=".")
sublime.status_message("Cloned repo successfully.")
self.open_folder()
util.view.refresh_gitsavvy(self.window.active_view())
|
def do_clone(self):
sublime.status_message("Start cloning {}".format(self.git_url))
self.git("clone", self.git_url, self.suggested_git_root)
sublime.status_message("Cloned repo successfully.")
self.open_folder()
util.view.refresh_gitsavvy(self.window.active_view())
|
https://github.com/timbrel/GitSavvy/issues/613
|
Traceback (most recent call last):
File "/Users/pavel/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/git_command.py", line 126, in git
cwd=working_dir or self.repo_path,
File "/Users/pavel/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/git_command.py", line 213, in repo_path
return self._repo_path()
File "/Users/pavel/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/git_command.py", line 246, in _repo_path
return invalid_repo()
File "/Users/pavel/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/git_command.py", line 230, in invalid_repo
raise ValueError("Unable to determine Git repo path.")
ValueError: Unable to determine Git repo path.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/pavel/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/commands/init.py", line 131, in do_clone
self.git("clone", self.git_url, self.suggested_git_root)
File "/Users/pavel/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/git_command.py", line 134, in git
raise_error(e)
File "/Users/pavel/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/git_command.py", line 112, in raise_error
raise GitSavvyError(msg)
GitSavvy.core.exceptions.GitSavvyError: Unable to determine Git repo path.
|
ValueError
|
def run_async(self):
log, cursor = self.interface.get_log()
if log is None or cursor is None or cursor == -1:
return
branch_name, ref, _ = self.interface.get_branch_state()
current = log[cursor]
if current["branch_name"] != branch_name:
sublime.error_message("Current branch does not match expected. Cannot undo.")
return
try:
self.checkout_ref(current["ref_before"])
self.git("branch", "-f", branch_name, "HEAD")
cursor -= 1
except Exception as e:
sublime.error_message("Error encountered. Cannot undo.")
raise e
finally:
self.checkout_ref(branch_name)
self.interface.set_log(log, cursor)
util.view.refresh_gitsavvy(self.view)
|
def run_async(self):
log, cursor = self.interface.get_log()
if log is None or cursor is None or cursor == -1:
return
branch_name, ref = self.interface.get_branch_state()
current = log[cursor]
if current["branch_name"] != branch_name:
sublime.error_message("Current branch does not match expected. Cannot undo.")
return
try:
self.checkout_ref(current["ref_before"])
self.git("branch", "-f", branch_name, "HEAD")
cursor -= 1
except Exception as e:
sublime.error_message("Error encountered. Cannot undo.")
raise e
finally:
self.checkout_ref(branch_name)
self.interface.set_log(log, cursor)
util.view.refresh_gitsavvy(self.view)
|
https://github.com/timbrel/GitSavvy/issues/490
|
Traceback (most recent call last):
File "/Users/kyank/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/interfaces/rebase.py", line 353, in run_async
branch_name, ref = self.interface.get_branch_state()
ValueError: too many values to unpack (expected 2)
|
ValueError
|
def run_async(self):
log, cursor = self.interface.get_log()
if log is None or cursor is None or cursor == len(log) - 1:
return
branch_name, ref, _ = self.interface.get_branch_state()
undone_action = log[cursor + 1]
if undone_action["branch_name"] != branch_name:
sublime.error_message("Current branch does not match expected. Cannot redo.")
return
try:
self.checkout_ref(undone_action["ref_after"])
self.git("branch", "-f", branch_name, "HEAD")
cursor += 1
except Exception as e:
sublime.error_message("Error encountered. Cannot redo.")
raise e
finally:
self.checkout_ref(branch_name)
self.interface.set_log(log, cursor)
util.view.refresh_gitsavvy(self.view)
|
def run_async(self):
log, cursor = self.interface.get_log()
if log is None or cursor is None or cursor == len(log) - 1:
return
branch_name, ref = self.interface.get_branch_state()
undone_action = log[cursor + 1]
if undone_action["branch_name"] != branch_name:
sublime.error_message("Current branch does not match expected. Cannot redo.")
return
try:
self.checkout_ref(undone_action["ref_after"])
self.git("branch", "-f", branch_name, "HEAD")
cursor += 1
except Exception as e:
sublime.error_message("Error encountered. Cannot redo.")
raise e
finally:
self.checkout_ref(branch_name)
self.interface.set_log(log, cursor)
util.view.refresh_gitsavvy(self.view)
|
https://github.com/timbrel/GitSavvy/issues/490
|
Traceback (most recent call last):
File "/Users/kyank/Library/Application Support/Sublime Text 3/Packages/GitSavvy/core/interfaces/rebase.py", line 353, in run_async
branch_name, ref = self.interface.get_branch_state()
ValueError: too many values to unpack (expected 2)
|
ValueError
|
def search_user_directory(self, term: str) -> List[User]:
"""
Search user directory for a given term, returning a list of users
Args:
term: term to be searched for
Returns:
user_list: list of users returned by server-side search
"""
try:
response = self.api._send(
"POST", "/user_directory/search", {"search_term": term}
)
except MatrixRequestError as ex:
if ex.code >= 500:
log.error(
"Ignoring Matrix error in `search_user_directory`",
exc_info=ex,
term=term,
)
return list()
else:
raise ex
try:
return [
User(self.api, _user["user_id"], _user["display_name"])
for _user in response["results"]
]
except KeyError:
return list()
|
def search_user_directory(self, term: str) -> List[User]:
"""
Search user directory for a given term, returning a list of users
Args:
term: term to be searched for
Returns:
user_list: list of users returned by server-side search
"""
response = self.api._send("POST", "/user_directory/search", {"search_term": term})
try:
return [
User(self.api, _user["user_id"], _user["display_name"])
for _user in response["results"]
]
except KeyError:
return []
|
https://github.com/raiden-network/raiden/issues/6584
|
Traceback (most recent call last):
File "raiden/ui/cli.py", line 617, in run
File "raiden/ui/runners.py", line 21, in run_services
File "raiden/ui/app.py", line 446, in run_app
File "raiden/app.py", line 90, in start
File "raiden/raiden_service.py", line 444, in start
File "raiden/raiden_service.py", line 552, in _start_transport
File "raiden/network/transport/matrix/transport.py", line 449, in start
File "raiden/network/transport/matrix/utils.py", line 711, in login
File "raiden/network/transport/matrix/utils.py", line 623, in first_login
File "matrix_client/client.py", line 270, in login
File "matrix_client/api.py", line 160, in login
File "raiden/network/transport/matrix/client.py", line 180, in _send
File "matrix_client/api.py", line 691, in _send
matrix_client.errors.MatrixRequestError: 500: {"errcode":"M_UNKNOWN","error":"Internal server error"}
``
|
matrix_client.errors.MatrixRequestError
|
def warm_users(self, users: List[User]) -> None:
for user in users:
user_id = user.user_id
cached_displayname = self.userid_to_displayname.get(user_id)
if cached_displayname is None:
# The cache is cold, query and warm it.
if not user.displayname:
# Handles an edge case where the Matrix federation does not
# have the profile for a given userid. The server response
# is roughly:
#
# {"errcode":"M_NOT_FOUND","error":"Profile was not found"} or
# {"errcode":"M_UNKNOWN","error":"Failed to fetch profile"}
try:
user.get_display_name()
except MatrixRequestError as ex:
# We ignore the error here and set user presence: SERVER_ERROR at the
# calling site
log.error(
"Ignoring Matrix error in `get_display_name`",
exc_info=ex,
user_id=user.user_id,
)
if user.displayname is not None:
self.userid_to_displayname[user.user_id] = user.displayname
elif user.displayname is None:
user.displayname = cached_displayname
elif user.displayname != cached_displayname:
log.debug(
"User displayname changed!",
cached=cached_displayname,
current=user.displayname,
)
self.userid_to_displayname[user.user_id] = user.displayname
|
def warm_users(self, users: List[User]) -> None:
for user in users:
user_id = user.user_id
cached_displayname = self.userid_to_displayname.get(user_id)
if cached_displayname is None:
# The cache is cold, query and warm it.
if not user.displayname:
# Handles an edge case where the Matrix federation does not
# have the profile for a given userid. The server response
# is roughly:
#
# {"errcode":"M_NOT_FOUND","error":"Profile was not found"} or
# {"errcode":"M_UNKNOWN","error":"Failed to fetch profile"}
try:
user.get_display_name()
except MatrixRequestError as ex:
# We ignore the error here and set user presence: SERVER_ERROR at the
# calling site
log.error(
f"Ignoring failed `get_display_name` for user {user}",
exc_info=ex,
)
if user.displayname is not None:
self.userid_to_displayname[user.user_id] = user.displayname
elif user.displayname is None:
user.displayname = cached_displayname
elif user.displayname != cached_displayname:
log.debug(
"User displayname changed!",
cached=cached_displayname,
current=user.displayname,
)
self.userid_to_displayname[user.user_id] = user.displayname
|
https://github.com/raiden-network/raiden/issues/6584
|
Traceback (most recent call last):
File "raiden/ui/cli.py", line 617, in run
File "raiden/ui/runners.py", line 21, in run_services
File "raiden/ui/app.py", line 446, in run_app
File "raiden/app.py", line 90, in start
File "raiden/raiden_service.py", line 444, in start
File "raiden/raiden_service.py", line 552, in _start_transport
File "raiden/network/transport/matrix/transport.py", line 449, in start
File "raiden/network/transport/matrix/utils.py", line 711, in login
File "raiden/network/transport/matrix/utils.py", line 623, in first_login
File "matrix_client/client.py", line 270, in login
File "matrix_client/api.py", line 160, in login
File "raiden/network/transport/matrix/client.py", line 180, in _send
File "matrix_client/api.py", line 691, in _send
matrix_client.errors.MatrixRequestError: 500: {"errcode":"M_UNKNOWN","error":"Internal server error"}
``
|
matrix_client.errors.MatrixRequestError
|
def first_login(
client: GMatrixClient, signer: Signer, username: str, cap_str: str
) -> User:
"""Login within a server.
There are multiple cases where a previous auth token can become invalid and
a new login is necessary:
- The server is configured to automatically invalidate tokens after a while
(not the default)
- A server operator may manually wipe or invalidate existing access tokens
- A node may have roamed to a different server (e.g. because the original
server was temporarily unavailable) and is now 'returning' to the
previously used server.
This relies on the Matrix server having the `eth_auth_provider` plugin
installed, the plugin will automatically create the user on the first
login. The plugin requires the password to be the signature of the server
hostname, verified by the server to prevent account creation spam.
Displayname is the signature of the whole user_id (including homeserver),
to be verified by other peers and prevent impersonation attacks.
"""
server_url = client.api.base_url
server_name = urlparse(server_url).netloc
# The plugin `eth_auth_provider` expects a signature of the server_name as
# the user's password.
#
# For a honest matrix server:
#
# - This prevents impersonation attacks / name squatting, since the plugin
# will validate the username by recovering the address from the signature
# and check the recovered address and the username matches.
#
# For a badly configured server (one without the plugin):
#
# - An attacker can front run and register the username before the honest
# user:
# - Because the attacker cannot guess the correct password, when the
# honest node tries to login it will fail, which tells us the server is
# improperly configured and should be blacklisted.
# - The attacker cannot forge a signature to use as a display name, so
# the partner node can tell there is a malicious node trying to
# eavesdrop the conversation and that matrix server should be
# blacklisted.
# - The username is available, but because the plugin is not installed the
# login will fail since the user is not registered. Here too one can infer
# the server is improperly configured and blacklist the server.
password = encode_hex(signer.sign(server_name.encode()))
# Disabling sync because login is done before the transport is fully
# initialized, i.e. the inventory rooms don't have the callbacks installed.
client.login(username, password, sync=False, device_id="raiden")
# Because this is the first login, the display name has to be set, this
# prevents the impersonation mentioned above. subsequent calls will reuse
# the authentication token and the display name will be properly set.
signature_bytes = signer.sign(client.user_id.encode())
signature_hex = encode_hex(signature_bytes)
user = client.get_user(client.user_id)
try:
current_display_name = user.get_display_name()
except MatrixRequestError as ex:
# calling site
log.error(
"Ignoring Matrix error in `get_display_name`",
exc_info=ex,
user_id=user.user_id,
)
current_display_name = ""
# Only set the display name if necessary, since this is a slow operation.
if current_display_name != signature_hex:
user.set_display_name(signature_hex)
try:
current_capabilities = user.get_avatar_url() or ""
except MatrixRequestError as ex:
log.error(
"Ignoring Matrix error in `get_avatar_url`",
exc_info=ex,
user_id=user.user_id,
)
current_capabilities = ""
# Only set the capabilities if necessary.
if current_capabilities != cap_str:
user.set_avatar_url(cap_str)
log.debug(
"Logged in",
node=to_checksum_address(username),
homeserver=server_name,
server_url=server_url,
)
return user
|
def first_login(
client: GMatrixClient, signer: Signer, username: str, cap_str: str
) -> User:
"""Login within a server.
There are multiple cases where a previous auth token can become invalid and
a new login is necessary:
- The server is configured to automatically invalidate tokens after a while
(not the default)
- A server operator may manually wipe or invalidate existing access tokens
- A node may have roamed to a different server (e.g. because the original
server was temporarily unavailable) and is now 'returning' to the
previously used server.
This relies on the Matrix server having the `eth_auth_provider` plugin
installed, the plugin will automatically create the user on the first
login. The plugin requires the password to be the signature of the server
hostname, verified by the server to prevent account creation spam.
Displayname is the signature of the whole user_id (including homeserver),
to be verified by other peers and prevent impersonation attacks.
"""
server_url = client.api.base_url
server_name = urlparse(server_url).netloc
# The plugin `eth_auth_provider` expects a signature of the server_name as
# the user's password.
#
# For a honest matrix server:
#
# - This prevents impersonation attacks / name squatting, since the plugin
# will validate the username by recovering the address from the signature
# and check the recovered address and the username matches.
#
# For a badly configured server (one without the plugin):
#
# - An attacker can front run and register the username before the honest
# user:
# - Because the attacker cannot guess the correct password, when the
# honest node tries to login it will fail, which tells us the server is
# improperly configured and should be blacklisted.
# - The attacker cannot forge a signature to use as a display name, so
# the partner node can tell there is a malicious node trying to
# eavesdrop the conversation and that matrix server should be
# blacklisted.
# - The username is available, but because the plugin is not installed the
# login will fail since the user is not registered. Here too one can infer
# the server is improperly configured and blacklist the server.
password = encode_hex(signer.sign(server_name.encode()))
# Disabling sync because login is done before the transport is fully
# initialized, i.e. the inventory rooms don't have the callbacks installed.
client.login(username, password, sync=False, device_id="raiden")
# Because this is the first login, the display name has to be set, this
# prevents the impersonation mentioned above. subsequent calls will reuse
# the authentication token and the display name will be properly set.
signature_bytes = signer.sign(client.user_id.encode())
signature_hex = encode_hex(signature_bytes)
user = client.get_user(client.user_id)
current_display_name = user.get_display_name()
# Only set the display name if necessary, since this is a slow operation.
if current_display_name != signature_hex:
user.set_display_name(signature_hex)
current_capabilities = user.get_avatar_url() or ""
# Only set the capabilities if necessary.
if current_capabilities != cap_str:
user.set_avatar_url(cap_str)
log.debug(
"Logged in",
node=to_checksum_address(username),
homeserver=server_name,
server_url=server_url,
)
return user
|
https://github.com/raiden-network/raiden/issues/6584
|
Traceback (most recent call last):
File "raiden/ui/cli.py", line 617, in run
File "raiden/ui/runners.py", line 21, in run_services
File "raiden/ui/app.py", line 446, in run_app
File "raiden/app.py", line 90, in start
File "raiden/raiden_service.py", line 444, in start
File "raiden/raiden_service.py", line 552, in _start_transport
File "raiden/network/transport/matrix/transport.py", line 449, in start
File "raiden/network/transport/matrix/utils.py", line 711, in login
File "raiden/network/transport/matrix/utils.py", line 623, in first_login
File "matrix_client/client.py", line 270, in login
File "matrix_client/api.py", line 160, in login
File "raiden/network/transport/matrix/client.py", line 180, in _send
File "matrix_client/api.py", line 691, in _send
matrix_client.errors.MatrixRequestError: 500: {"errcode":"M_UNKNOWN","error":"Internal server error"}
``
|
matrix_client.errors.MatrixRequestError
|
def validate_userid_signature(user: User) -> Optional[Address]:
"""Validate a userId format and signature on displayName, and return its address"""
# display_name should be an address in the USERID_RE format
match = USERID_RE.match(user.user_id)
if not match:
log.warning("Invalid user id", user=user.user_id)
return None
displayname = user.displayname
if displayname is None:
log.warning("Displayname not set", user=user.user_id)
return None
encoded_address = match.group(1)
address: Address = to_canonical_address(encoded_address)
try:
if DISPLAY_NAME_HEX_RE.match(displayname):
signature_bytes = decode_hex(displayname)
else:
log.warning(
"Displayname invalid format", user=user.user_id, displayname=displayname
)
return None
recovered = recover(
data=user.user_id.encode(), signature=Signature(signature_bytes)
)
if not (address and recovered and recovered == address):
log.warning("Unexpected signer of displayname", user=user.user_id)
return None
except (
DecodeError,
TypeError,
InvalidSignature,
MatrixRequestError,
json.decoder.JSONDecodeError,
):
return None
return address
|
def validate_userid_signature(user: User) -> Optional[Address]:
"""Validate a userId format and signature on displayName, and return its address"""
# display_name should be an address in the USERID_RE format
match = USERID_RE.match(user.user_id)
if not match:
return None
msg = (
"The User instance provided to validate_userid_signature must have the "
"displayname attribute set. Make sure to warm the value using the "
"DisplayNameCache."
)
displayname = user.displayname
assert displayname is not None, msg
encoded_address = match.group(1)
address: Address = to_canonical_address(encoded_address)
try:
if DISPLAY_NAME_HEX_RE.match(displayname):
signature_bytes = decode_hex(displayname)
else:
return None
recovered = recover(
data=user.user_id.encode(), signature=Signature(signature_bytes)
)
if not (address and recovered and recovered == address):
return None
except (
DecodeError,
TypeError,
InvalidSignature,
MatrixRequestError,
json.decoder.JSONDecodeError,
):
return None
return address
|
https://github.com/raiden-network/raiden/issues/6555
|
Traceback (most recent call last):
File "raiden/ui/cli.py", line 617, in run
File "raiden/ui/runners.py", line 21, in run_services
File "raiden/ui/app.py", line 446, in run_app
File "raiden/app.py", line 90, in start
File "raiden/raiden_service.py", line 444, in start
File "raiden/raiden_service.py", line 552, in _start_transport
File "raiden/network/transport/matrix/transport.py", line 467, in start
File "raiden/network/transport/matrix/transport.py", line 790, in _initialize_first_sync
File "raiden/network/transport/matrix/client.py", line 567, in blocking_sync
File "raiden/network/transport/matrix/client.py", line 774, in _handle_responses
File "raiden/network/transport/matrix/transport.py", line 1063, in _handle_member_join
File "raiden/network/transport/matrix/transport.py", line 824, in _leave_unexpected_rooms
File "raiden/network/transport/matrix/transport.py", line 932, in _extract_addresses
File "raiden/network/transport/matrix/transport.py", line 932, in <genexpr>
File "cachetools/decorators.py", line 36, in wrapper
File "raiden/network/transport/matrix/utils.py", line 728, in validate_userid_signature
AssertionError: The User instance provided to validate_userid_signature must have the displayname attribute set. Make sure to warm the value using the DisplayNameCache.
FATAL: An unexpected exception occurred. A traceback has been written to /tmp/raiden-exception-2020-09-21T14-498mov1ija.txt
The User instance provided to validate_userid_signature must have the displayname attribute set. Make sure to warm the value using the DisplayNameCache.
|
AssertionError
|
def get(self, token_address: TokenAddress, target_address: Address) -> Response:
kwargs = validate_query_params(self.get_schema)
return self.rest_api.get_raiden_events_payment_history_with_timestamps(
registry_address=self.rest_api.raiden_api.raiden.default_registry.address,
token_address=token_address,
target_address=target_address,
**kwargs,
)
|
def get(
self, token_address: TokenAddress = None, target_address: Address = None
) -> Response:
kwargs = validate_query_params(self.get_schema)
return self.rest_api.get_raiden_events_payment_history_with_timestamps(
registry_address=self.rest_api.raiden_api.raiden.default_registry.address,
token_address=token_address,
target_address=target_address,
**kwargs,
)
|
https://github.com/raiden-network/raiden/issues/6217
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "raiden/api/rest.py", line 409, in _run
File "gevent/baseserver.py", line 389, in serve_forever
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
TypeError: post() missing 1 required positional argument: 'target_address'
2020-06-02T08:47:33Z <Greenlet "APIServer|Greenlet-0" at 0x7f885b0175f0: _run> failed with TypeError
|
TypeError
|
def __init__(
self,
rpc_client: JSONRPCClient,
proxy_manager: ProxyManager,
query_start_block: BlockNumber,
default_registry: TokenNetworkRegistry,
default_secret_registry: SecretRegistry,
default_service_registry: Optional[ServiceRegistry],
default_one_to_n_address: Optional[OneToNAddress],
default_msc_address: Optional[MonitoringServiceAddress],
transport: MatrixTransport,
raiden_event_handler: EventHandler,
message_handler: MessageHandler,
routing_mode: RoutingMode,
config: RaidenConfig,
user_deposit: UserDeposit = None,
api_server: Optional[APIServer] = None,
) -> None:
super().__init__()
self.tokennetworkaddrs_to_connectionmanagers: ConnectionManagerDict = dict()
self.targets_to_identifiers_to_statuses: StatusesDict = defaultdict(dict)
self.rpc_client = rpc_client
self.proxy_manager = proxy_manager
self.default_registry = default_registry
self.query_start_block = query_start_block
self.default_one_to_n_address = default_one_to_n_address
self.default_secret_registry = default_secret_registry
self.default_service_registry = default_service_registry
self.default_msc_address = default_msc_address
self.routing_mode = routing_mode
self.config = config
self.signer: Signer = LocalSigner(self.rpc_client.privkey)
self.address = self.signer.address
self.transport = transport
self.user_deposit = user_deposit
self.alarm = AlarmTask(
proxy_manager=proxy_manager, sleep_time=self.config.blockchain.query_interval
)
self.raiden_event_handler = raiden_event_handler
self.message_handler = message_handler
self.blockchain_events: Optional[BlockchainEvents] = None
self.api_server: Optional[APIServer] = api_server
self.raiden_api: Optional[RaidenAPI] = None
self.rest_api: Optional[RestAPI] = None
if api_server is not None:
self.raiden_api = RaidenAPI(self)
self.rest_api = api_server.rest_api
self.stop_event = Event()
self.stop_event.set() # inits as stopped
self.greenlets: List[Greenlet] = list()
self.last_log_time = time.monotonic()
self.last_log_block = BlockNumber(0)
self.contract_manager = ContractManager(config.contracts_path)
self.wal: Optional[WriteAheadLog] = None
if self.config.database_path != ":memory:":
database_dir = os.path.dirname(config.database_path)
os.makedirs(database_dir, exist_ok=True)
self.database_dir: Optional[str] = database_dir
# Two raiden processes must not write to the same database. Even
# though it's possible the database itself would not be corrupt,
# the node's state could. If a database was shared among multiple
# nodes, the database WAL would be the union of multiple node's
# WAL. During a restart a single node can't distinguish its state
# changes from the others, and it would apply it all, meaning that
# a node would execute the actions of itself and the others.
#
# Additionally the database snapshots would be corrupt, because it
# would not represent the effects of applying all the state changes
# in order.
lock_file = os.path.join(self.database_dir, ".lock")
self.db_lock = filelock.FileLock(lock_file)
else:
self.database_dir = None
self.serialization_file = None
self.db_lock = None
self.payment_identifier_lock = gevent.lock.Semaphore()
# A list is not hashable, so use tuple as key here
self.route_to_feedback_token: Dict[Tuple[Address, ...], UUID] = dict()
# Flag used to skip the processing of all Raiden events during the
# startup.
#
# Rationale: At the startup, the latest snapshot is restored and all
# state changes which are not 'part' of it are applied. The criteria to
# re-apply the state changes is their 'absence' in the snapshot, /not/
# their completeness. Because these state changes are re-executed
# in-order and some of their side-effects will already have been
# completed, the events should be delayed until the state is
# synchronized (e.g. an open channel state change, which has already
# been mined).
#
# Incomplete events, i.e. the ones which don't have their side-effects
# applied, will be executed once the blockchain state is synchronized
# because of the node's queues.
self.ready_to_process_events = False
|
def __init__(
self,
rpc_client: JSONRPCClient,
proxy_manager: ProxyManager,
query_start_block: BlockNumber,
default_registry: TokenNetworkRegistry,
default_secret_registry: SecretRegistry,
default_service_registry: Optional[ServiceRegistry],
default_one_to_n_address: Optional[OneToNAddress],
default_msc_address: Optional[MonitoringServiceAddress],
transport: MatrixTransport,
raiden_event_handler: EventHandler,
message_handler: MessageHandler,
routing_mode: RoutingMode,
config: RaidenConfig,
user_deposit: UserDeposit = None,
api_server: Optional[APIServer] = None,
) -> None:
super().__init__()
self.tokennetworkaddrs_to_connectionmanagers: ConnectionManagerDict = dict()
self.targets_to_identifiers_to_statuses: StatusesDict = defaultdict(dict)
self.rpc_client = rpc_client
self.proxy_manager = proxy_manager
self.default_registry = default_registry
self.query_start_block = query_start_block
self.default_one_to_n_address = default_one_to_n_address
self.default_secret_registry = default_secret_registry
self.default_service_registry = default_service_registry
self.default_msc_address = default_msc_address
self.routing_mode = routing_mode
self.config = config
self.signer: Signer = LocalSigner(self.rpc_client.privkey)
self.address = self.signer.address
self.transport = transport
self.user_deposit = user_deposit
self.alarm = AlarmTask(
proxy_manager=proxy_manager, sleep_time=self.config.blockchain.query_interval
)
self.raiden_event_handler = raiden_event_handler
self.message_handler = message_handler
self.blockchain_events: Optional[BlockchainEvents] = None
self.api_server: Optional[APIServer] = api_server
self.raiden_api: Optional[RaidenAPI] = None
self.rest_api: Optional[RestAPI] = None
if api_server is not None:
self.raiden_api = RaidenAPI(self)
self.rest_api = api_server.rest_api
self.stop_event = Event()
self.stop_event.set() # inits as stopped
self.greenlets: List[Greenlet] = list()
self.last_log_time = datetime.now()
self.last_log_block = BlockNumber(0)
self.contract_manager = ContractManager(config.contracts_path)
self.wal: Optional[WriteAheadLog] = None
if self.config.database_path != ":memory:":
database_dir = os.path.dirname(config.database_path)
os.makedirs(database_dir, exist_ok=True)
self.database_dir: Optional[str] = database_dir
# Two raiden processes must not write to the same database. Even
# though it's possible the database itself would not be corrupt,
# the node's state could. If a database was shared among multiple
# nodes, the database WAL would be the union of multiple node's
# WAL. During a restart a single node can't distinguish its state
# changes from the others, and it would apply it all, meaning that
# a node would execute the actions of itself and the others.
#
# Additionally the database snapshots would be corrupt, because it
# would not represent the effects of applying all the state changes
# in order.
lock_file = os.path.join(self.database_dir, ".lock")
self.db_lock = filelock.FileLock(lock_file)
else:
self.database_dir = None
self.serialization_file = None
self.db_lock = None
self.payment_identifier_lock = gevent.lock.Semaphore()
# A list is not hashable, so use tuple as key here
self.route_to_feedback_token: Dict[Tuple[Address, ...], UUID] = dict()
# Flag used to skip the processing of all Raiden events during the
# startup.
#
# Rationale: At the startup, the latest snapshot is restored and all
# state changes which are not 'part' of it are applied. The criteria to
# re-apply the state changes is their 'absence' in the snapshot, /not/
# their completeness. Because these state changes are re-executed
# in-order and some of their side-effects will already have been
# completed, the events should be delayed until the state is
# synchronized (e.g. an open channel state change, which has already
# been mined).
#
# Incomplete events, i.e. the ones which don't have their side-effects
# applied, will be executed once the blockchain state is synchronized
# because of the node's queues.
self.ready_to_process_events = False
|
https://github.com/raiden-network/raiden/issues/6081
|
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 387, in _make_request
six.raise_from(e, None)
File "<string>", line 2, in raise_from
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 383, in _make_request
httplib_response = conn.getresponse()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 1344, in getresponse
response.begin()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 267, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 589, in readinto
return self._sock.recv_into(b)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/gevent/_socket3.py", line 496, in recv_into
self._wait(self._read_event)
File "src/gevent/_hub_primitives.py", line 290, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 295, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 286, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 287, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 55, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_waiter.py", line 151, in gevent.__waiter.Waiter.get
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
socket.timeout: timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 641, in urlopen
_stacktrace=sys.exc_info()[2])
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/util/retry.py", line 368, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/packages/six.py", line 686, in reraise
raise value
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 603, in urlopen
chunked=chunked)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 389, in _make_request
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 307, in _raise_timeout
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
urllib3.exceptions.ReadTimeoutError: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/cli.py", line 610, in run
run_services(kwargs)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/runners.py", line 21, in run_services
app = run_app(**options)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/app.py", line 438, in run_app
raiden_app.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/app.py", line 91, in start
self.raiden.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 364, in start
self._synchronize_with_blockchain()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 670, in _synchronize_with_blockchain
self._poll_until_target(latest_confirmed_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 993, in _poll_until_target
poll_result = self.blockchain_events.fetch_logs_in_batch(target_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 488, in fetch_logs_in_batch
decoded_result = self._query_and_track(from_block, to_block)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 556, in _query_and_track
RPCEndpoint("eth_getLogs"), [filter_params]
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 149, in request_blocking
response = self._make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 128, in _make_request
return request_func(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/cache.py", line 134, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/gas_price_strategy.py", line 34, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/attrdict.py", line 33, in middleware
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/normalize_errors.py", line 25, in middleware
result = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/exception_retry_request.py", line 104, in middleware
return make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/providers/rpc.py", line 83, in make_request
**self.get_request_kwargs()
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/_utils/request.py", line 34, in make_post_request
response = session.post(endpoint_uri, data=data, *args, **kwargs) # type: ignore
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 581, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 529, in send
raise ReadTimeout(e, request=request)
requests.exceptions.ReadTimeout: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
|
urllib3.exceptions.ReadTimeoutError
|
def _log_sync_progress(
self, polled_block_number: BlockNumber, target_block: BlockNumber
) -> None:
"""Print a message if there are many blocks to be fetched, or if the
time in-between polls is high.
"""
now = time.monotonic()
blocks_until_target = target_block - polled_block_number
polled_block_count = polled_block_number - self.last_log_block
elapsed = now - self.last_log_time
if blocks_until_target > 100 or elapsed > 15.0:
log.info(
"Synchronizing blockchain events",
remaining_blocks_to_sync=blocks_until_target,
blocks_per_second=polled_block_count / elapsed,
to_block=target_block,
elapsed=elapsed,
)
self.last_log_time = time.monotonic()
self.last_log_block = polled_block_number
|
def _log_sync_progress(self, to_block: BlockNumber) -> None:
"""Print a message if there are many blocks to be fetched, or if the
time in-between polls is high.
"""
now = datetime.now()
blocks_to_sync = to_block - self.last_log_block
elapsed = (now - self.last_log_time).total_seconds()
if blocks_to_sync > 100 or elapsed > 15.0:
log.info(
"Synchronizing blockchain events",
blocks_to_sync=blocks_to_sync,
blocks_per_second=blocks_to_sync / elapsed,
to_block=to_block,
elapsed=elapsed,
)
self.last_log_time = now
self.last_log_block = to_block
|
https://github.com/raiden-network/raiden/issues/6081
|
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 387, in _make_request
six.raise_from(e, None)
File "<string>", line 2, in raise_from
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 383, in _make_request
httplib_response = conn.getresponse()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 1344, in getresponse
response.begin()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 267, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 589, in readinto
return self._sock.recv_into(b)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/gevent/_socket3.py", line 496, in recv_into
self._wait(self._read_event)
File "src/gevent/_hub_primitives.py", line 290, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 295, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 286, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 287, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 55, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_waiter.py", line 151, in gevent.__waiter.Waiter.get
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
socket.timeout: timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 641, in urlopen
_stacktrace=sys.exc_info()[2])
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/util/retry.py", line 368, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/packages/six.py", line 686, in reraise
raise value
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 603, in urlopen
chunked=chunked)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 389, in _make_request
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 307, in _raise_timeout
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
urllib3.exceptions.ReadTimeoutError: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/cli.py", line 610, in run
run_services(kwargs)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/runners.py", line 21, in run_services
app = run_app(**options)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/app.py", line 438, in run_app
raiden_app.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/app.py", line 91, in start
self.raiden.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 364, in start
self._synchronize_with_blockchain()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 670, in _synchronize_with_blockchain
self._poll_until_target(latest_confirmed_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 993, in _poll_until_target
poll_result = self.blockchain_events.fetch_logs_in_batch(target_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 488, in fetch_logs_in_batch
decoded_result = self._query_and_track(from_block, to_block)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 556, in _query_and_track
RPCEndpoint("eth_getLogs"), [filter_params]
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 149, in request_blocking
response = self._make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 128, in _make_request
return request_func(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/cache.py", line 134, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/gas_price_strategy.py", line 34, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/attrdict.py", line 33, in middleware
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/normalize_errors.py", line 25, in middleware
result = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/exception_retry_request.py", line 104, in middleware
return make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/providers/rpc.py", line 83, in make_request
**self.get_request_kwargs()
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/_utils/request.py", line 34, in make_post_request
response = session.post(endpoint_uri, data=data, *args, **kwargs) # type: ignore
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 581, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 529, in send
raise ReadTimeout(e, request=request)
requests.exceptions.ReadTimeout: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
|
urllib3.exceptions.ReadTimeoutError
|
def _synchronize_with_blockchain(self) -> None:
"""Prepares the alarm task callback and synchronize with the blockchain
since the last run.
Notes about setup order:
- The filters must be polled after the node state has been primed,
otherwise the state changes won't have effect.
- The synchronization must be done before the transport is started, to
reject messages for closed/settled channels.
"""
msg = (
f"Transport must not be started before the node has synchronized "
f"with the blockchain, otherwise the node may accept transfers to a "
f"closed channel. node:{self!r}"
)
assert not self.transport, msg
assert self.wal, f"The database must have been initialized. node:{self!r}"
chain_state = views.state_from_raiden(self)
# The `Block` state change is dispatched only after all the events for
# that given block have been processed, filters can be safely installed
# starting from this position without missing events.
last_block_number = views.block_number(chain_state)
filters = smart_contract_filters_from_node_state(
chain_state,
self.contract_manager,
self.default_registry.address,
self.default_secret_registry.address,
)
blockchain_events = BlockchainEvents(
web3=self.rpc_client.web3,
chain_id=chain_state.chain_id,
contract_manager=self.contract_manager,
last_fetched_block=last_block_number,
event_filters=filters,
block_batch_size_config=self.config.blockchain.block_batch_size_config,
)
self.last_log_block = last_block_number
self.last_log_time = time.monotonic()
latest_block_num = self.rpc_client.block_number()
latest_confirmed_block_number = BlockNumber(
max(GENESIS_BLOCK_NUMBER, latest_block_num - self.confirmation_blocks)
)
# `blockchain_events` is a requirement for `_poll_until_target`, so it
# must be set before calling it
self.blockchain_events = blockchain_events
self._poll_until_target(latest_confirmed_block_number)
self.alarm.register_callback(self._callback_new_block)
|
def _synchronize_with_blockchain(self) -> None:
"""Prepares the alarm task callback and synchronize with the blockchain
since the last run.
Notes about setup order:
- The filters must be polled after the node state has been primed,
otherwise the state changes won't have effect.
- The synchronization must be done before the transport is started, to
reject messages for closed/settled channels.
"""
msg = (
f"Transport must not be started before the node has synchronized "
f"with the blockchain, otherwise the node may accept transfers to a "
f"closed channel. node:{self!r}"
)
assert not self.transport, msg
assert self.wal, f"The database must have been initialized. node:{self!r}"
chain_state = views.state_from_raiden(self)
# The `Block` state change is dispatched only after all the events for
# that given block have been processed, filters can be safely installed
# starting from this position without missing events.
last_block_number = views.block_number(chain_state)
filters = smart_contract_filters_from_node_state(
chain_state,
self.contract_manager,
self.default_registry.address,
self.default_secret_registry.address,
)
blockchain_events = BlockchainEvents(
web3=self.rpc_client.web3,
chain_id=chain_state.chain_id,
contract_manager=self.contract_manager,
last_fetched_block=last_block_number,
event_filters=filters,
block_batch_size_config=self.config.blockchain.block_batch_size_config,
)
latest_block_num = self.rpc_client.block_number()
latest_confirmed_block_number = BlockNumber(
max(GENESIS_BLOCK_NUMBER, latest_block_num - self.confirmation_blocks)
)
# `blockchain_events` is a requirement for `_poll_until_target`, so it
# must be set before calling it
self.blockchain_events = blockchain_events
self._poll_until_target(latest_confirmed_block_number)
self.alarm.register_callback(self._callback_new_block)
|
https://github.com/raiden-network/raiden/issues/6081
|
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 387, in _make_request
six.raise_from(e, None)
File "<string>", line 2, in raise_from
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 383, in _make_request
httplib_response = conn.getresponse()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 1344, in getresponse
response.begin()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 267, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 589, in readinto
return self._sock.recv_into(b)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/gevent/_socket3.py", line 496, in recv_into
self._wait(self._read_event)
File "src/gevent/_hub_primitives.py", line 290, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 295, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 286, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 287, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 55, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_waiter.py", line 151, in gevent.__waiter.Waiter.get
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
socket.timeout: timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 641, in urlopen
_stacktrace=sys.exc_info()[2])
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/util/retry.py", line 368, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/packages/six.py", line 686, in reraise
raise value
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 603, in urlopen
chunked=chunked)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 389, in _make_request
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 307, in _raise_timeout
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
urllib3.exceptions.ReadTimeoutError: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/cli.py", line 610, in run
run_services(kwargs)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/runners.py", line 21, in run_services
app = run_app(**options)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/app.py", line 438, in run_app
raiden_app.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/app.py", line 91, in start
self.raiden.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 364, in start
self._synchronize_with_blockchain()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 670, in _synchronize_with_blockchain
self._poll_until_target(latest_confirmed_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 993, in _poll_until_target
poll_result = self.blockchain_events.fetch_logs_in_batch(target_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 488, in fetch_logs_in_batch
decoded_result = self._query_and_track(from_block, to_block)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 556, in _query_and_track
RPCEndpoint("eth_getLogs"), [filter_params]
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 149, in request_blocking
response = self._make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 128, in _make_request
return request_func(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/cache.py", line 134, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/gas_price_strategy.py", line 34, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/attrdict.py", line 33, in middleware
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/normalize_errors.py", line 25, in middleware
result = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/exception_retry_request.py", line 104, in middleware
return make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/providers/rpc.py", line 83, in make_request
**self.get_request_kwargs()
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/_utils/request.py", line 34, in make_post_request
response = session.post(endpoint_uri, data=data, *args, **kwargs) # type: ignore
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 581, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 529, in send
raise ReadTimeout(e, request=request)
requests.exceptions.ReadTimeout: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
|
urllib3.exceptions.ReadTimeoutError
|
def _poll_until_target(self, target_block_number: BlockNumber) -> None:
"""Poll blockchain events up to `target_block_number`.
Multiple queries may be necessary on restarts, because the node may
have been offline for an extend period of time. During normal
operation, this must not happen, because in this case the node may have
missed important events, like a channel close, while the transport
layer is running, this can lead to loss of funds.
It is very important for `confirmed_target_block_number` to be an
confirmed block, otherwise reorgs may cause havoc. This is problematic
since some operations are irreversible, namely sending a balance proof.
Once a node accepts a deposit, these tokens can be used to do mediated
transfers, and if a reorg removes the deposit tokens could be lost.
This function takes care of fetching blocks in batches and confirming
their result. This is important to keep memory usage low and to speed
up restarts. Memory usage can get a hit if the node is asleep for a
long period of time and on the first run, since all the missing
confirmed blocks have to be fetched before the node is in a working
state. Restarts get a hit if the node is closed while it was
synchronizing, without regularly saving that work, if the node is
killed while synchronizing, it only gets gradually slower.
Returns:
int: number of polling queries required to synchronized with
`target_block_number`.
"""
msg = (
f"The blockchain event handler has to be instantiated before the "
f"alarm task is started. node:{self!r}"
)
assert self.blockchain_events, msg
sync_start = datetime.now()
while self.blockchain_events.last_fetched_block < target_block_number:
poll_result = self.blockchain_events.fetch_logs_in_batch(target_block_number)
if poll_result is None:
# No blocks could be fetched (due to timeout), retry
continue
pendingtokenregistration: Dict[
TokenNetworkAddress, Tuple[TokenNetworkRegistryAddress, TokenAddress]
] = dict()
state_changes: List[StateChange] = list()
for event in poll_result.events:
state_changes.extend(
blockchainevent_to_statechange(
self,
event,
poll_result.polled_block_number,
pendingtokenregistration,
)
)
# On restarts the node has to pick up all events generated since the
# last run. To do this the node will set the filters' from_block to
# the value of the latest block number known to have *all* events
# processed.
#
# To guarantee the above the node must either:
#
# - Dispatch the state changes individually, leaving the Block
# state change last, so that it knows all the events for the
# given block have been processed. On restarts this can result in
# the same event being processed twice.
# - Dispatch all the smart contract events together with the Block
# state change in a single transaction, either all or nothing will
# be applied, and on a restart the node picks up from where it
# left.
#
# The approach used below is to dispatch the Block and the
# blockchain events in a single transaction. This is the preferred
# approach because it guarantees that no events will be missed and
# it fixes race conditions on the value of the block number value,
# that can lead to crashes.
#
# Example: The user creates a new channel with an initial deposit
# of X tokens. This is done with two operations, the first is to
# open the new channel, the second is to deposit the requested
# tokens in it. Once the node fetches the event for the new channel,
# it will immediately request the deposit, which leaves a window for
# a race condition. If the Block state change was not yet
# processed, the block hash used as the triggering block for the
# deposit will be off-by-one, and it will point to the block
# immediately before the channel existed. This breaks a proxy
# precondition which crashes the client.
block_state_change = Block(
block_number=poll_result.polled_block_number,
gas_limit=poll_result.polled_block_gas_limit,
block_hash=poll_result.polled_block_hash,
)
state_changes.append(block_state_change)
# It's important to /not/ block here, because this function can
# be called from the alarm task greenlet, which should not
# starve. This was a problem when the node decided to send a new
# transaction, since the proxies block until the transaction is
# mined and confirmed (e.g. the settle window is over and the
# node sends the settle transaction).
self.handle_and_track_state_changes(state_changes)
self._log_sync_progress(poll_result.polled_block_number, target_block_number)
sync_end = datetime.now()
log.debug(
"Synchronized to a new confirmed block",
event_filters_qty=len(self.blockchain_events._address_to_filters),
sync_elapsed=sync_end - sync_start,
)
|
def _poll_until_target(self, target_block_number: BlockNumber) -> None:
"""Poll blockchain events up to `target_block_number`.
Multiple queries may be necessary on restarts, because the node may
have been offline for an extend period of time. During normal
operation, this must not happen, because in this case the node may have
missed important events, like a channel close, while the transport
layer is running, this can lead to loss of funds.
It is very important for `confirmed_target_block_number` to be an
confirmed block, otherwise reorgs may cause havoc. This is problematic
since some operations are irreversible, namely sending a balance proof.
Once a node accepts a deposit, these tokens can be used to do mediated
transfers, and if a reorg removes the deposit tokens could be lost.
This function takes care of fetching blocks in batches and confirming
their result. This is important to keep memory usage low and to speed
up restarts. Memory usage can get a hit if the node is asleep for a
long period of time and on the first run, since all the missing
confirmed blocks have to be fetched before the node is in a working
state. Restarts get a hit if the node is closed while it was
synchronizing, without regularly saving that work, if the node is
killed while synchronizing, it only gets gradually slower.
Returns:
int: number of polling queries required to synchronized with
`target_block_number`.
"""
msg = (
f"The blockchain event handler has to be instantiated before the "
f"alarm task is started. node:{self!r}"
)
assert self.blockchain_events, msg
log.debug(
"Poll until target",
target=target_block_number,
last_fetched_block=self.blockchain_events.last_fetched_block,
)
sync_start = datetime.now()
while self.blockchain_events.last_fetched_block < target_block_number:
self._log_sync_progress(target_block_number)
poll_result = self.blockchain_events.fetch_logs_in_batch(target_block_number)
if poll_result is None:
# No blocks could be fetched (due to timeout), retry
continue
pendingtokenregistration: Dict[
TokenNetworkAddress, Tuple[TokenNetworkRegistryAddress, TokenAddress]
] = dict()
state_changes: List[StateChange] = list()
for event in poll_result.events:
state_changes.extend(
blockchainevent_to_statechange(
self,
event,
poll_result.polled_block_number,
pendingtokenregistration,
)
)
# On restarts the node has to pick up all events generated since the
# last run. To do this the node will set the filters' from_block to
# the value of the latest block number known to have *all* events
# processed.
#
# To guarantee the above the node must either:
#
# - Dispatch the state changes individually, leaving the Block
# state change last, so that it knows all the events for the
# given block have been processed. On restarts this can result in
# the same event being processed twice.
# - Dispatch all the smart contract events together with the Block
# state change in a single transaction, either all or nothing will
# be applied, and on a restart the node picks up from where it
# left.
#
# The approach used below is to dispatch the Block and the
# blockchain events in a single transaction. This is the preferred
# approach because it guarantees that no events will be missed and
# it fixes race conditions on the value of the block number value,
# that can lead to crashes.
#
# Example: The user creates a new channel with an initial deposit
# of X tokens. This is done with two operations, the first is to
# open the new channel, the second is to deposit the requested
# tokens in it. Once the node fetches the event for the new channel,
# it will immediately request the deposit, which leaves a window for
# a race condition. If the Block state change was not yet
# processed, the block hash used as the triggering block for the
# deposit will be off-by-one, and it will point to the block
# immediately before the channel existed. This breaks a proxy
# precondition which crashes the client.
block_state_change = Block(
block_number=poll_result.polled_block_number,
gas_limit=poll_result.polled_block_gas_limit,
block_hash=poll_result.polled_block_hash,
)
state_changes.append(block_state_change)
# It's important to /not/ block here, because this function can
# be called from the alarm task greenlet, which should not
# starve. This was a problem when the node decided to send a new
# transaction, since the proxies block until the transaction is
# mined and confirmed (e.g. the settle window is over and the
# node sends the settle transaction).
self.handle_and_track_state_changes(state_changes)
sync_end = datetime.now()
log.debug(
"Synchronized to a new confirmed block",
event_filters_qty=len(self.blockchain_events._address_to_filters),
sync_elapsed=sync_end - sync_start,
)
|
https://github.com/raiden-network/raiden/issues/6081
|
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 387, in _make_request
six.raise_from(e, None)
File "<string>", line 2, in raise_from
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 383, in _make_request
httplib_response = conn.getresponse()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 1344, in getresponse
response.begin()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 306, in begin
version, status, reason = self._read_status()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/http/client.py", line 267, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/socket.py", line 589, in readinto
return self._sock.recv_into(b)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/gevent/_socket3.py", line 496, in recv_into
self._wait(self._read_event)
File "src/gevent/_hub_primitives.py", line 290, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 295, in gevent.__hub_primitives.wait_on_socket
File "src/gevent/_hub_primitives.py", line 286, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 287, in gevent.__hub_primitives._primitive_wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 46, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_hub_primitives.py", line 55, in gevent.__hub_primitives.WaitOperationsGreenlet.wait
File "src/gevent/_waiter.py", line 151, in gevent.__waiter.Waiter.get
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
socket.timeout: timed out
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 449, in send
timeout=timeout
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 641, in urlopen
_stacktrace=sys.exc_info()[2])
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/util/retry.py", line 368, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/packages/six.py", line 686, in reraise
raise value
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 603, in urlopen
chunked=chunked)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 389, in _make_request
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/urllib3/connectionpool.py", line 307, in _raise_timeout
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
urllib3.exceptions.ReadTimeoutError: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/cli.py", line 610, in run
run_services(kwargs)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/runners.py", line 21, in run_services
app = run_app(**options)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/ui/app.py", line 438, in run_app
raiden_app.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/app.py", line 91, in start
self.raiden.start()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 364, in start
self._synchronize_with_blockchain()
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 670, in _synchronize_with_blockchain
self._poll_until_target(latest_confirmed_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/raiden_service.py", line 993, in _poll_until_target
poll_result = self.blockchain_events.fetch_logs_in_batch(target_block_number)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 488, in fetch_logs_in_batch
decoded_result = self._query_and_track(from_block, to_block)
File "/Users/ulo/Dropbox/devel/brainbot/raiden/raiden/blockchain/events.py", line 556, in _query_and_track
RPCEndpoint("eth_getLogs"), [filter_params]
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 149, in request_blocking
response = self._make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/manager.py", line 128, in _make_request
return request_func(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/cache.py", line 134, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/gas_price_strategy.py", line 34, in middleware
return make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/attrdict.py", line 33, in middleware
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/normalize_errors.py", line 25, in middleware
result = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 73, in apply_formatters
response = make_request(method, formatted_params)
File "cytoolz/functoolz.pyx", line 250, in cytoolz.functoolz.curry.__call__
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/formatting.py", line 75, in apply_formatters
response = make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/middleware/exception_retry_request.py", line 104, in middleware
return make_request(method, params)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/providers/rpc.py", line 83, in make_request
**self.get_request_kwargs()
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/web3/_utils/request.py", line 34, in make_post_request
response = session.post(endpoint_uri, data=data, *args, **kwargs) # type: ignore
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 581, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/Users/ulo/Envs/raiden/lib/python3.7/site-packages/requests/adapters.py", line 529, in send
raise ReadTimeout(e, request=request)
requests.exceptions.ReadTimeout: HTTPConnectionPool(host='geth.mainnet.ethnodes.brainbot.com', port=8545): Read timed out. (read timeout=10)
|
urllib3.exceptions.ReadTimeoutError
|
def transfer_async(
self,
registry_address: TokenNetworkRegistryAddress,
token_address: TokenAddress,
amount: PaymentAmount,
target: TargetAddress,
identifier: PaymentID = None,
secret: Secret = None,
secrethash: SecretHash = None,
lock_timeout: BlockTimeout = None,
) -> "PaymentStatus":
current_state = views.state_from_raiden(self.raiden)
token_network_registry_address = self.raiden.default_registry.address
if not isinstance(amount, int): # pragma: no unittest
raise InvalidAmount("Amount not a number")
if Address(target) == self.address:
raise SamePeerAddress("Address must be different than ours")
if amount <= 0:
raise InvalidAmount("Amount negative")
if amount > UINT256_MAX:
raise InvalidAmount("Amount too large")
if not is_binary_address(token_address):
raise InvalidBinaryAddress("token address is not valid.")
if token_address not in views.get_token_identifiers(
current_state, registry_address
):
raise UnknownTokenAddress("Token address is not known.")
if not is_binary_address(target):
raise InvalidBinaryAddress("target address is not valid.")
valid_tokens = views.get_token_identifiers(
views.state_from_raiden(self.raiden), registry_address
)
if token_address not in valid_tokens:
raise UnknownTokenAddress("Token address is not known.")
if secret is not None and not isinstance(secret, T_Secret):
raise InvalidSecret("secret is not valid.")
if secrethash is not None and not isinstance(secrethash, T_SecretHash):
raise InvalidSecretHash("secrethash is not valid.")
if identifier is None:
identifier = create_default_identifier()
if identifier <= 0:
raise InvalidPaymentIdentifier("Payment identifier cannot be 0 or negative")
if identifier > UINT64_MAX:
raise InvalidPaymentIdentifier("Payment identifier is too large")
log.debug(
"Initiating transfer",
initiator=to_checksum_address(self.raiden.address),
target=to_checksum_address(target),
token=to_checksum_address(token_address),
amount=amount,
identifier=identifier,
)
token_network_address = views.get_token_network_address_by_token_address(
chain_state=current_state,
token_network_registry_address=token_network_registry_address,
token_address=token_address,
)
if token_network_address is None:
raise UnknownTokenAddress(
f"Token {to_checksum_address(token_address)} is not registered "
f"with the network {to_checksum_address(registry_address)}."
)
payment_status = self.raiden.mediated_transfer_async(
token_network_address=token_network_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secrethash=secrethash,
lock_timeout=lock_timeout,
)
return payment_status
|
def transfer_async(
self,
registry_address: TokenNetworkRegistryAddress,
token_address: TokenAddress,
amount: PaymentAmount,
target: TargetAddress,
identifier: PaymentID = None,
secret: Secret = None,
secrethash: SecretHash = None,
lock_timeout: BlockTimeout = None,
) -> "PaymentStatus":
current_state = views.state_from_raiden(self.raiden)
token_network_registry_address = self.raiden.default_registry.address
if not isinstance(amount, int): # pragma: no unittest
raise InvalidAmount("Amount not a number")
if amount <= 0:
raise InvalidAmount("Amount negative")
if amount > UINT256_MAX:
raise InvalidAmount("Amount too large")
if not is_binary_address(token_address):
raise InvalidBinaryAddress("token address is not valid.")
if token_address not in views.get_token_identifiers(
current_state, registry_address
):
raise UnknownTokenAddress("Token address is not known.")
if not is_binary_address(target):
raise InvalidBinaryAddress("target address is not valid.")
valid_tokens = views.get_token_identifiers(
views.state_from_raiden(self.raiden), registry_address
)
if token_address not in valid_tokens:
raise UnknownTokenAddress("Token address is not known.")
if secret is not None and not isinstance(secret, T_Secret):
raise InvalidSecret("secret is not valid.")
if secrethash is not None and not isinstance(secrethash, T_SecretHash):
raise InvalidSecretHash("secrethash is not valid.")
if identifier is None:
identifier = create_default_identifier()
if identifier <= 0:
raise InvalidPaymentIdentifier("Payment identifier cannot be 0 or negative")
if identifier > UINT64_MAX:
raise InvalidPaymentIdentifier("Payment identifier is too large")
log.debug(
"Initiating transfer",
initiator=to_checksum_address(self.raiden.address),
target=to_checksum_address(target),
token=to_checksum_address(token_address),
amount=amount,
identifier=identifier,
)
token_network_address = views.get_token_network_address_by_token_address(
chain_state=current_state,
token_network_registry_address=token_network_registry_address,
token_address=token_address,
)
if token_network_address is None:
raise UnknownTokenAddress(
f"Token {to_checksum_address(token_address)} is not registered "
f"with the network {to_checksum_address(registry_address)}."
)
payment_status = self.raiden.mediated_transfer_async(
token_network_address=token_network_address,
amount=amount,
target=target,
identifier=identifier,
secret=secret,
secrethash=secrethash,
lock_timeout=lock_timeout,
)
return payment_status
|
https://github.com/raiden-network/raiden/issues/5922
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix/transport.py", line 629, in _health_check_worker
File "site-packages/raiden/network/transport/matrix/transport.py", line 606, in immediate_health_check_for
File "site-packages/raiden/network/transport/matrix/transport.py", line 577, in whitelist
File "site-packages/raiden/network/transport/matrix/transport.py", line 1194, in _maybe_create_room_for_address
File "site-packages/raiden/network/transport/matrix/utils.py", line 876, in my_place_or_yours
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "Greenlet<fn:_health_check_worker>" at 0x7f1f5e02a050: <bound method MatrixTransport._health_check_worker of <MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>>> failed with ValueError
2020-02-19 14:55:13.088967 [error ] Runnable subtask died! [raiden.utils.runnable] exc=ValueError('Addresses to compare must differ') running=True subtask=<Greenlet "Greenlet<fn:_health_check_worker>" at 0x7f1f5e02a050: <bound method MatrixTransport._health_check_worker of <MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>>> this=<MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix/transport.py", line 496, in _run
File "site-packages/raiden/network/transport/matrix/transport.py", line 718, in _broadcast_worker
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "MatrixTransport._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f9a3a70: _run> failed with ValueError
2020-02-19 14:55:13.259801 [error ] Runnable subtask died! [raiden.utils.runnable] exc=ValueError('Addresses to compare must differ') running=True subtask=<Greenlet "MatrixTransport._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f9a3a70: _run> this=<RaidenService node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/raiden_service.py", line 375, in _run
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "RaidenService._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f625b90: _run> failed with ValueError
FATAL: An unexpected exception occured. A traceback has been written to /tmp/raiden-exception-2020-02-19T14-5542cjz658.txt
Addresses to compare must differ
ReturnCode.FATAL
|
ValueError
|
def initiate_payment(
self,
registry_address: TokenNetworkRegistryAddress,
token_address: TokenAddress,
target_address: TargetAddress,
amount: PaymentAmount,
identifier: PaymentID,
secret: Secret,
secret_hash: SecretHash,
lock_timeout: BlockTimeout,
) -> Response:
log.debug(
"Initiating payment",
node=to_checksum_address(self.raiden_api.address),
registry_address=to_checksum_address(registry_address),
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
amount=amount,
payment_identifier=identifier,
secret=secret,
secret_hash=secret_hash,
lock_timeout=lock_timeout,
)
if identifier is None:
identifier = create_default_identifier()
try:
payment_status = self.raiden_api.transfer(
registry_address=registry_address,
token_address=token_address,
target=target_address,
amount=amount,
identifier=identifier,
secret=secret,
secrethash=secret_hash,
lock_timeout=lock_timeout,
)
except (
InvalidAmount,
InvalidBinaryAddress,
InvalidSecret,
InvalidSecretHash,
InvalidPaymentIdentifier,
PaymentConflict,
SamePeerAddress,
UnknownTokenAddress,
) as e:
return api_error(errors=str(e), status_code=HTTPStatus.CONFLICT)
except InsufficientFunds as e:
return api_error(errors=str(e), status_code=HTTPStatus.PAYMENT_REQUIRED)
result = payment_status.payment_done.get()
if isinstance(result, EventPaymentSentFailed):
return api_error(
errors=f"Payment couldn't be completed because: {result.reason}",
status_code=HTTPStatus.CONFLICT,
)
assert isinstance(result, EventPaymentSentSuccess)
payment = {
"initiator_address": self.raiden_api.address,
"registry_address": registry_address,
"token_address": token_address,
"target_address": target_address,
"amount": amount,
"identifier": identifier,
"secret": result.secret,
"secret_hash": sha256(result.secret).digest(),
}
result = self.payment_schema.dump(payment)
return api_response(result=result)
|
def initiate_payment(
self,
registry_address: TokenNetworkRegistryAddress,
token_address: TokenAddress,
target_address: TargetAddress,
amount: PaymentAmount,
identifier: PaymentID,
secret: Secret,
secret_hash: SecretHash,
lock_timeout: BlockTimeout,
) -> Response:
log.debug(
"Initiating payment",
node=to_checksum_address(self.raiden_api.address),
registry_address=to_checksum_address(registry_address),
token_address=to_checksum_address(token_address),
target_address=to_checksum_address(target_address),
amount=amount,
payment_identifier=identifier,
secret=secret,
secret_hash=secret_hash,
lock_timeout=lock_timeout,
)
if identifier is None:
identifier = create_default_identifier()
try:
payment_status = self.raiden_api.transfer(
registry_address=registry_address,
token_address=token_address,
target=target_address,
amount=amount,
identifier=identifier,
secret=secret,
secrethash=secret_hash,
lock_timeout=lock_timeout,
)
except (
InvalidAmount,
InvalidBinaryAddress,
InvalidSecret,
InvalidSecretHash,
InvalidPaymentIdentifier,
PaymentConflict,
UnknownTokenAddress,
) as e:
return api_error(errors=str(e), status_code=HTTPStatus.CONFLICT)
except InsufficientFunds as e:
return api_error(errors=str(e), status_code=HTTPStatus.PAYMENT_REQUIRED)
result = payment_status.payment_done.get()
if isinstance(result, EventPaymentSentFailed):
return api_error(
errors=f"Payment couldn't be completed because: {result.reason}",
status_code=HTTPStatus.CONFLICT,
)
assert isinstance(result, EventPaymentSentSuccess)
payment = {
"initiator_address": self.raiden_api.address,
"registry_address": registry_address,
"token_address": token_address,
"target_address": target_address,
"amount": amount,
"identifier": identifier,
"secret": result.secret,
"secret_hash": sha256(result.secret).digest(),
}
result = self.payment_schema.dump(payment)
return api_response(result=result)
|
https://github.com/raiden-network/raiden/issues/5922
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix/transport.py", line 629, in _health_check_worker
File "site-packages/raiden/network/transport/matrix/transport.py", line 606, in immediate_health_check_for
File "site-packages/raiden/network/transport/matrix/transport.py", line 577, in whitelist
File "site-packages/raiden/network/transport/matrix/transport.py", line 1194, in _maybe_create_room_for_address
File "site-packages/raiden/network/transport/matrix/utils.py", line 876, in my_place_or_yours
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "Greenlet<fn:_health_check_worker>" at 0x7f1f5e02a050: <bound method MatrixTransport._health_check_worker of <MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>>> failed with ValueError
2020-02-19 14:55:13.088967 [error ] Runnable subtask died! [raiden.utils.runnable] exc=ValueError('Addresses to compare must differ') running=True subtask=<Greenlet "Greenlet<fn:_health_check_worker>" at 0x7f1f5e02a050: <bound method MatrixTransport._health_check_worker of <MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>>> this=<MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix/transport.py", line 496, in _run
File "site-packages/raiden/network/transport/matrix/transport.py", line 718, in _broadcast_worker
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "MatrixTransport._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f9a3a70: _run> failed with ValueError
2020-02-19 14:55:13.259801 [error ] Runnable subtask died! [raiden.utils.runnable] exc=ValueError('Addresses to compare must differ') running=True subtask=<Greenlet "MatrixTransport._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f9a3a70: _run> this=<RaidenService node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/raiden_service.py", line 375, in _run
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "RaidenService._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f625b90: _run> failed with ValueError
FATAL: An unexpected exception occured. A traceback has been written to /tmp/raiden-exception-2020-02-19T14-5542cjz658.txt
Addresses to compare must differ
ReturnCode.FATAL
|
ValueError
|
def _maybe_create_room_for_address(self, address: Address) -> None:
if self._stop_event.ready():
return None
if self._get_room_for_address(address):
return None
assert self._raiden_service is not None, "_raiden_service not set"
# The rooms creation is asymmetric, only the node with the lower
# address is responsible to create the room. This fixes race conditions
# were the two nodes try to create a room with each other at the same
# time, leading to communications problems if the nodes choose a
# different room.
#
# This does not introduce a new attack vector, since not creating the
# room is the same as being unresponsive.
room_creator_address = my_place_or_yours(
our_address=self._raiden_service.address, partner_address=address
)
if self._raiden_service.address != room_creator_address:
self.log.debug(
"This node should not create the room",
partner_address=to_checksum_address(address),
)
return None
with self.room_creation_lock[address]:
candidates = self._client.search_user_directory(to_normalized_address(address))
self._displayname_cache.warm_users(candidates)
partner_users = [
user for user in candidates if validate_userid_signature(user) == address
]
partner_user_ids = [user.user_id for user in partner_users]
if not partner_users:
self.log.error(
"Partner doesn't have a user",
partner_address=to_checksum_address(address),
)
return None
room = self._client.create_room(
None, invitees=partner_user_ids, is_public=False
)
self.log.debug("Created private room", room=room, invitees=partner_users)
self.log.debug(
"Fetching room members",
room=room,
partner_address=to_checksum_address(address),
)
def partner_joined(fetched_members: List[User]) -> bool:
if fetched_members is None:
return False
return any(member.user_id in partner_user_ids for member in fetched_members)
members = self.retry_api_call(
room.get_joined_members, verify_response=partner_joined, force_resync=True
)
assert members is not None, "fetching members failed"
if not partner_joined(members):
self.log.debug(
"Peer has not joined from invite yet, should join eventually",
room=room,
partner_address=to_checksum_address(address),
retry_interval=RETRY_INTERVAL,
)
# Here, the list of valid user ids is composed of
# all known partner user ids along with our own.
# If our partner roams, the user will be invited to
# the room, resulting in multiple user ids for the partner.
# If we roam, a new user and room will be created and only
# the new user shall be in the room.
valid_user_ids = partner_user_ids + [self._client.user_id]
has_unexpected_user_ids = any(
member.user_id not in valid_user_ids for member in members
)
if has_unexpected_user_ids:
self._leave_unexpected_rooms(
[room], "Private room has unexpected participants"
)
return None
self._address_mgr.add_userids_for_address(
address, {user.user_id for user in partner_users}
)
self._set_room_id_for_address(address, room.room_id)
self.log.debug(
"Channel room", peer_address=to_checksum_address(address), room=room
)
return room
|
def _maybe_create_room_for_address(self, address: Address) -> None:
if self._stop_event.ready():
return None
if self._get_room_for_address(address):
return None
assert self._raiden_service is not None, "_raiden_service not set"
# The rooms creation is assymetric, only the node with the lower
# address is responsible to create the room. This fixes race conditions
# were the two nodes try to create a room with each other at the same
# time, leading to communications problems if the nodes choose a
# different room.
#
# This does not introduce a new attack vector, since not creating the
# room is the same as being unresponsible.
room_creator_address = my_place_or_yours(
our_address=self._raiden_service.address, partner_address=address
)
if self._raiden_service.address != room_creator_address:
self.log.debug(
"This node should not create the room",
partner_address=to_checksum_address(address),
)
return None
with self.room_creation_lock[address]:
candidates = self._client.search_user_directory(to_normalized_address(address))
self._displayname_cache.warm_users(candidates)
partner_users = [
user for user in candidates if validate_userid_signature(user) == address
]
partner_user_ids = [user.user_id for user in partner_users]
if not partner_users:
self.log.error(
"Partner doesn't have a user",
partner_address=to_checksum_address(address),
)
return None
room = self._client.create_room(
None, invitees=partner_user_ids, is_public=False
)
self.log.debug("Created private room", room=room, invitees=partner_users)
self.log.debug(
"Fetching room members",
room=room,
partner_address=to_checksum_address(address),
)
def partner_joined(fetched_members: List[User]) -> bool:
if fetched_members is None:
return False
return any(member.user_id in partner_user_ids for member in fetched_members)
members = self.retry_api_call(
room.get_joined_members, verify_response=partner_joined, force_resync=True
)
assert members is not None, "fetching members failed"
if not partner_joined(members):
self.log.debug(
"Peer has not joined from invite yet, should join eventually",
room=room,
partner_address=to_checksum_address(address),
retry_interval=RETRY_INTERVAL,
)
# Here, the list of valid user ids is composed of
# all known partner user ids along with our own.
# If our partner roams, the user will be invited to
# the room, resulting in multiple user ids for the partner.
# If we roam, a new user and room will be created and only
# the new user shall be in the room.
valid_user_ids = partner_user_ids + [self._client.user_id]
has_unexpected_user_ids = any(
member.user_id not in valid_user_ids for member in members
)
if has_unexpected_user_ids:
self._leave_unexpected_rooms(
[room], "Private room has unexpected participants"
)
return None
self._address_mgr.add_userids_for_address(
address, {user.user_id for user in partner_users}
)
self._set_room_id_for_address(address, room.room_id)
self.log.debug(
"Channel room", peer_address=to_checksum_address(address), room=room
)
return room
|
https://github.com/raiden-network/raiden/issues/5922
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix/transport.py", line 629, in _health_check_worker
File "site-packages/raiden/network/transport/matrix/transport.py", line 606, in immediate_health_check_for
File "site-packages/raiden/network/transport/matrix/transport.py", line 577, in whitelist
File "site-packages/raiden/network/transport/matrix/transport.py", line 1194, in _maybe_create_room_for_address
File "site-packages/raiden/network/transport/matrix/utils.py", line 876, in my_place_or_yours
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "Greenlet<fn:_health_check_worker>" at 0x7f1f5e02a050: <bound method MatrixTransport._health_check_worker of <MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>>> failed with ValueError
2020-02-19 14:55:13.088967 [error ] Runnable subtask died! [raiden.utils.runnable] exc=ValueError('Addresses to compare must differ') running=True subtask=<Greenlet "Greenlet<fn:_health_check_worker>" at 0x7f1f5e02a050: <bound method MatrixTransport._health_check_worker of <MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>>> this=<MatrixTransport node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9 id:585de50e-f064-4a93-b3f4-df2bf40f8078>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix/transport.py", line 496, in _run
File "site-packages/raiden/network/transport/matrix/transport.py", line 718, in _broadcast_worker
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "MatrixTransport._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f9a3a70: _run> failed with ValueError
2020-02-19 14:55:13.259801 [error ] Runnable subtask died! [raiden.utils.runnable] exc=ValueError('Addresses to compare must differ') running=True subtask=<Greenlet "MatrixTransport._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f9a3a70: _run> this=<RaidenService node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/raiden_service.py", line 375, in _run
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: Addresses to compare must differ
2020-02-19T14:55:13Z <Greenlet "RaidenService._run node:0x812f85143bdF6d28228FC580Ef649AA95ba6Fcd9" at 0x7f1f5f625b90: _run> failed with ValueError
FATAL: An unexpected exception occured. A traceback has been written to /tmp/raiden-exception-2020-02-19T14-5542cjz658.txt
Addresses to compare must differ
ReturnCode.FATAL
|
ValueError
|
def register_token(
self, registry_address: TokenNetworkRegistryAddress, token_address: TokenAddress
) -> Response:
if self.raiden_api.raiden.config.environment_type == Environment.PRODUCTION:
return api_error(
errors="Registering a new token is currently disabled in production mode",
status_code=HTTPStatus.NOT_IMPLEMENTED,
)
conflict_exceptions = (
AddressWithoutCode,
AlreadyRegisteredTokenAddress,
BrokenPreconditionError,
InvalidBinaryAddress,
InvalidToken,
InvalidTokenAddress,
RaidenRecoverableError,
)
log.debug(
"Registering token",
node=to_checksum_address(self.raiden_api.address),
registry_address=to_checksum_address(registry_address),
token_address=to_checksum_address(token_address),
)
try:
token_network_address = self.raiden_api.token_network_register(
registry_address=registry_address,
token_address=token_address,
channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
token_network_deposit_limit=TokenAmount(UINT256_MAX),
)
except conflict_exceptions as e:
return api_error(errors=str(e), status_code=HTTPStatus.CONFLICT)
except InsufficientEth as e:
return api_error(errors=str(e), status_code=HTTPStatus.PAYMENT_REQUIRED)
except MaxTokenNetworkNumberReached as e:
return api_error(errors=str(e), status_code=HTTPStatus.FORBIDDEN)
return api_response(
result=dict(token_network_address=to_checksum_address(token_network_address)),
status_code=HTTPStatus.CREATED,
)
|
def register_token(
self, registry_address: TokenNetworkRegistryAddress, token_address: TokenAddress
) -> Response:
if self.raiden_api.raiden.config.environment_type == Environment.PRODUCTION:
return api_error(
errors="Registering a new token is currently disabled in production mode",
status_code=HTTPStatus.NOT_IMPLEMENTED,
)
conflict_exceptions = (
AddressWithoutCode,
AlreadyRegisteredTokenAddress,
InvalidBinaryAddress,
InvalidToken,
InvalidTokenAddress,
RaidenRecoverableError,
)
log.debug(
"Registering token",
node=to_checksum_address(self.raiden_api.address),
registry_address=to_checksum_address(registry_address),
token_address=to_checksum_address(token_address),
)
try:
token_network_address = self.raiden_api.token_network_register(
registry_address=registry_address,
token_address=token_address,
channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
token_network_deposit_limit=TokenAmount(UINT256_MAX),
)
except conflict_exceptions as e:
return api_error(errors=str(e), status_code=HTTPStatus.CONFLICT)
except InsufficientEth as e:
return api_error(errors=str(e), status_code=HTTPStatus.PAYMENT_REQUIRED)
except MaxTokenNetworkNumberReached as e:
return api_error(errors=str(e), status_code=HTTPStatus.FORBIDDEN)
return api_response(
result=dict(token_network_address=to_checksum_address(token_network_address)),
status_code=HTTPStatus.CREATED,
)
|
https://github.com/raiden-network/raiden/issues/5779
|
2020-01-29 18:24:57.713109 [info ] 127.0.0.1 - - [2020-01-29 19:24:57] "PUT /api/v1/tokens/0x95B2d84De40a0121061b105E6B54016a49621B44 HTTP/1.1" 500 161 3.064439 [raiden.api.rest.pywsgi]
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "/home/raphael/code/brainbot/raiden/client/raiden/api/rest.py", line 439, in _run
self.wsgiserver.serve_forever()
File "/home/raphael/.virtualenvs/raiden/lib/python3.7/site-packages/gevent/baseserver.py", line 389, in serve_forever
self._stop_event.wait()
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
raiden.exceptions.BrokenPreconditionError: Number of token networks will exceed the max of 5
2020-01-29T18:24:57Z <Greenlet "APIServer|Greenlet-3" at 0x7f945954b4d0: _run> failed with BrokenPreconditionError
FATAL: An unexpected exception occured. A traceback has been written to /tmp/raiden-exception-2020-01-29T18-2580ljbh9m.txt
Number of token networks will exceed the max of 5
ReturnCode.FATAL
|
raiden.exceptions.BrokenPreconditionError
|
def register_token(
self, registry_address: TokenNetworkRegistryAddress, token_address: TokenAddress
) -> Response:
if self.raiden_api.raiden.config.environment_type == Environment.PRODUCTION:
return api_error(
errors="Registering a new token is currently disabled in production mode",
status_code=HTTPStatus.NOT_IMPLEMENTED,
)
conflict_exceptions = (
AddressWithoutCode,
AlreadyRegisteredTokenAddress,
InvalidBinaryAddress,
InvalidToken,
InvalidTokenAddress,
RaidenRecoverableError,
)
log.debug(
"Registering token",
node=to_checksum_address(self.raiden_api.address),
registry_address=to_checksum_address(registry_address),
token_address=to_checksum_address(token_address),
)
try:
token_network_address = self.raiden_api.token_network_register(
registry_address=registry_address,
token_address=token_address,
channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
token_network_deposit_limit=TokenAmount(UINT256_MAX),
)
except conflict_exceptions as e:
return api_error(errors=str(e), status_code=HTTPStatus.CONFLICT)
except InsufficientEth as e:
return api_error(errors=str(e), status_code=HTTPStatus.PAYMENT_REQUIRED)
except MaxTokenNetworkNumberReached as e:
return api_error(errors=str(e), status_code=HTTPStatus.FORBIDDEN)
return api_response(
result=dict(token_network_address=to_checksum_address(token_network_address)),
status_code=HTTPStatus.CREATED,
)
|
def register_token(
self, registry_address: TokenNetworkRegistryAddress, token_address: TokenAddress
) -> Response:
if self.raiden_api.raiden.config.environment_type == Environment.PRODUCTION:
return api_error(
errors="Registering a new token is currently disabled in production mode",
status_code=HTTPStatus.NOT_IMPLEMENTED,
)
conflict_exceptions = (
AddressWithoutCode,
AlreadyRegisteredTokenAddress,
InvalidBinaryAddress,
InvalidToken,
InvalidTokenAddress,
RaidenRecoverableError,
)
log.debug(
"Registering token",
node=to_checksum_address(self.raiden_api.address),
registry_address=to_checksum_address(registry_address),
token_address=to_checksum_address(token_address),
)
try:
token_network_address = self.raiden_api.token_network_register(
registry_address=registry_address,
token_address=token_address,
channel_participant_deposit_limit=TokenAmount(UINT256_MAX),
token_network_deposit_limit=TokenAmount(UINT256_MAX),
)
except conflict_exceptions as e:
return api_error(errors=str(e), status_code=HTTPStatus.CONFLICT)
except InsufficientEth as e:
return api_error(errors=str(e), status_code=HTTPStatus.PAYMENT_REQUIRED)
return api_response(
result=dict(token_network_address=to_checksum_address(token_network_address)),
status_code=HTTPStatus.CREATED,
)
|
https://github.com/raiden-network/raiden/issues/5779
|
2020-01-29 18:24:57.713109 [info ] 127.0.0.1 - - [2020-01-29 19:24:57] "PUT /api/v1/tokens/0x95B2d84De40a0121061b105E6B54016a49621B44 HTTP/1.1" 500 161 3.064439 [raiden.api.rest.pywsgi]
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "/home/raphael/code/brainbot/raiden/client/raiden/api/rest.py", line 439, in _run
self.wsgiserver.serve_forever()
File "/home/raphael/.virtualenvs/raiden/lib/python3.7/site-packages/gevent/baseserver.py", line 389, in serve_forever
self._stop_event.wait()
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
raiden.exceptions.BrokenPreconditionError: Number of token networks will exceed the max of 5
2020-01-29T18:24:57Z <Greenlet "APIServer|Greenlet-3" at 0x7f945954b4d0: _run> failed with BrokenPreconditionError
FATAL: An unexpected exception occured. A traceback has been written to /tmp/raiden-exception-2020-01-29T18-2580ljbh9m.txt
Number of token networks will exceed the max of 5
ReturnCode.FATAL
|
raiden.exceptions.BrokenPreconditionError
|
def add_token(
self,
token_address: TokenAddress,
channel_participant_deposit_limit: TokenAmount,
token_network_deposit_limit: TokenAmount,
given_block_identifier: BlockSpecification,
) -> TokenNetworkAddress:
"""
Register token of `token_address` with the token network.
The limits apply for version 0.13.0 and above of raiden-contracts,
since instantiation also takes the limits as constructor arguments.
"""
if given_block_identifier == "latest":
raise ValueError(
'Calling a proxy with "latest" is usually wrong because '
"the result of the precondition check is not precisely predictable."
)
if token_address == NULL_ADDRESS_BYTES:
raise InvalidTokenAddress("The call to register a token at 0x00..00 will fail.")
if token_network_deposit_limit <= 0:
raise InvalidTokenNetworkDepositLimit(
f"Token network deposit limit must be larger than zero, "
f"{token_network_deposit_limit} given."
)
if channel_participant_deposit_limit <= 0:
raise InvalidTokenNetworkDepositLimit(
f"Participant deposit limit must be larger than zero, "
f"{channel_participant_deposit_limit} given"
)
if channel_participant_deposit_limit > token_network_deposit_limit:
raise InvalidChannelParticipantDepositLimit(
f"Participant deposit limit must be smaller than the network "
f"deposit limit, {channel_participant_deposit_limit} is larger "
f"than {token_network_deposit_limit}."
)
token_proxy = self.proxy_manager.token(token_address, given_block_identifier)
try:
token_supply = token_proxy.total_supply(block_identifier=given_block_identifier)
already_registered = self.get_token_network(
token_address=token_address, block_identifier=given_block_identifier
)
deprecation_executor = self.get_deprecation_executor(
block_identifier=given_block_identifier
)
settlement_timeout_min = self.settlement_timeout_min(
block_identifier=given_block_identifier
)
settlement_timeout_max = self.settlement_timeout_max(
block_identifier=given_block_identifier
)
chain_id = self.get_chain_id(block_identifier=given_block_identifier)
secret_registry_address = self.get_secret_registry_address(
block_identifier=given_block_identifier
)
max_token_networks = self.get_max_token_networks(
block_identifier=given_block_identifier
)
token_networks_created = self.get_token_network_created(
block_identifier=given_block_identifier
)
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be performed
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
if token_networks_created >= max_token_networks:
raise MaxTokenNetworkNumberReached(
f"Number of token networks will exceed the maximum of {max_token_networks}"
)
if token_supply is None:
raise InvalidToken(
"Given token address does not follow the "
"ERC20 standard (missing `totalSupply()`)"
)
if already_registered:
raise BrokenPreconditionError(
"The token is already registered in the TokenNetworkRegistry."
)
if deprecation_executor == NULL_ADDRESS_BYTES:
raise BrokenPreconditionError(
"The deprecation executor property for the TokenNetworkRegistry is invalid."
)
if chain_id == 0:
raise BrokenPreconditionError(
"The chain ID property for the TokenNetworkRegistry is invalid."
)
if chain_id != self.rpc_client.chain_id:
raise BrokenPreconditionError(
f"The provided chain ID {chain_id} does not match the "
f"network Raiden is running on: {self.rpc_client.chain_id}."
)
if secret_registry_address == NULL_ADDRESS_BYTES:
raise BrokenPreconditionError(
"The secret registry address for the token network is invalid."
)
if settlement_timeout_min == 0:
raise BrokenPreconditionError(
"The minimum settlement timeout for the token network "
"should be larger than zero."
)
if settlement_timeout_max <= settlement_timeout_min:
raise BrokenPreconditionError(
"The maximum settlement timeout for the token network "
"should be larger than the minimum settlement timeout."
)
log_details = {
"node": to_checksum_address(self.node_address),
"contract": to_checksum_address(self.address),
"token_address": to_checksum_address(token_address),
"given_block_identifier": format_block_id(given_block_identifier),
"channel_participant_deposit_limit": channel_participant_deposit_limit,
"token_network_deposit_limit": token_network_deposit_limit,
}
with log_transaction(log, "add_token", log_details):
return self._add_token(
token_address=token_address,
channel_participant_deposit_limit=channel_participant_deposit_limit,
token_network_deposit_limit=token_network_deposit_limit,
log_details=log_details,
)
|
def add_token(
self,
token_address: TokenAddress,
channel_participant_deposit_limit: TokenAmount,
token_network_deposit_limit: TokenAmount,
given_block_identifier: BlockSpecification,
) -> TokenNetworkAddress:
"""
Register token of `token_address` with the token network.
The limits apply for version 0.13.0 and above of raiden-contracts,
since instantiation also takes the limits as constructor arguments.
"""
if given_block_identifier == "latest":
raise ValueError(
'Calling a proxy with "latest" is usually wrong because '
"the result of the precondition check is not precisely predictable."
)
if token_address == NULL_ADDRESS_BYTES:
raise InvalidTokenAddress("The call to register a token at 0x00..00 will fail.")
if token_network_deposit_limit <= 0:
raise InvalidTokenNetworkDepositLimit(
f"Token network deposit limit must be larger than zero, "
f"{token_network_deposit_limit} given."
)
if channel_participant_deposit_limit <= 0:
raise InvalidTokenNetworkDepositLimit(
f"Participant deposit limit must be larger than zero, "
f"{channel_participant_deposit_limit} given"
)
if channel_participant_deposit_limit > token_network_deposit_limit:
raise InvalidChannelParticipantDepositLimit(
f"Participant deposit limit must be smaller than the network "
f"deposit limit, {channel_participant_deposit_limit} is larger "
f"than {token_network_deposit_limit}."
)
token_proxy = self.proxy_manager.token(token_address, given_block_identifier)
try:
token_supply = token_proxy.total_supply(block_identifier=given_block_identifier)
already_registered = self.get_token_network(
token_address=token_address, block_identifier=given_block_identifier
)
deprecation_executor = self.get_deprecation_executor(
block_identifier=given_block_identifier
)
settlement_timeout_min = self.settlement_timeout_min(
block_identifier=given_block_identifier
)
settlement_timeout_max = self.settlement_timeout_max(
block_identifier=given_block_identifier
)
chain_id = self.get_chain_id(block_identifier=given_block_identifier)
secret_registry_address = self.get_secret_registry_address(
block_identifier=given_block_identifier
)
max_token_networks = self.get_max_token_networks(
block_identifier=given_block_identifier
)
token_networks_created = self.get_token_network_created(
block_identifier=given_block_identifier
)
except ValueError:
# If `given_block_identifier` has been pruned the checks cannot be performed
pass
except BadFunctionCallOutput:
raise_on_call_returned_empty(given_block_identifier)
else:
if token_networks_created >= max_token_networks:
raise BrokenPreconditionError(
f"Number of token networks will exceed the max of {max_token_networks}"
)
if token_supply is None:
raise InvalidToken(
"Given token address does not follow the "
"ERC20 standard (missing `totalSupply()`)"
)
if already_registered:
raise BrokenPreconditionError(
"The token is already registered in the TokenNetworkRegistry."
)
if deprecation_executor == NULL_ADDRESS_BYTES:
raise BrokenPreconditionError(
"The deprecation executor property for the TokenNetworkRegistry is invalid."
)
if chain_id == 0:
raise BrokenPreconditionError(
"The chain ID property for the TokenNetworkRegistry is invalid."
)
if chain_id != self.rpc_client.chain_id:
raise BrokenPreconditionError(
f"The provided chain ID {chain_id} does not match the "
f"network Raiden is running on: {self.rpc_client.chain_id}."
)
if secret_registry_address == NULL_ADDRESS_BYTES:
raise BrokenPreconditionError(
"The secret registry address for the token network is invalid."
)
if settlement_timeout_min == 0:
raise BrokenPreconditionError(
"The minimum settlement timeout for the token network "
"should be larger than zero."
)
if settlement_timeout_max <= settlement_timeout_min:
raise BrokenPreconditionError(
"The maximum settlement timeout for the token network "
"should be larger than the minimum settlement timeout."
)
log_details = {
"node": to_checksum_address(self.node_address),
"contract": to_checksum_address(self.address),
"token_address": to_checksum_address(token_address),
"given_block_identifier": format_block_id(given_block_identifier),
"channel_participant_deposit_limit": channel_participant_deposit_limit,
"token_network_deposit_limit": token_network_deposit_limit,
}
with log_transaction(log, "add_token", log_details):
return self._add_token(
token_address=token_address,
channel_participant_deposit_limit=channel_participant_deposit_limit,
token_network_deposit_limit=token_network_deposit_limit,
log_details=log_details,
)
|
https://github.com/raiden-network/raiden/issues/5779
|
2020-01-29 18:24:57.713109 [info ] 127.0.0.1 - - [2020-01-29 19:24:57] "PUT /api/v1/tokens/0x95B2d84De40a0121061b105E6B54016a49621B44 HTTP/1.1" 500 161 3.064439 [raiden.api.rest.pywsgi]
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._greenlet.Greenlet.run
File "/home/raphael/code/brainbot/raiden/client/raiden/api/rest.py", line 439, in _run
self.wsgiserver.serve_forever()
File "/home/raphael/.virtualenvs/raiden/lib/python3.7/site-packages/gevent/baseserver.py", line 389, in serve_forever
self._stop_event.wait()
File "src/gevent/event.py", line 133, in gevent._event.Event.wait
File "src/gevent/_abstract_linkable.py", line 208, in gevent.__abstract_linkable.AbstractLinkable._wait
File "src/gevent/_abstract_linkable.py", line 178, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 196, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_abstract_linkable.py", line 182, in gevent.__abstract_linkable.AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 61, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 65, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
raiden.exceptions.BrokenPreconditionError: Number of token networks will exceed the max of 5
2020-01-29T18:24:57Z <Greenlet "APIServer|Greenlet-3" at 0x7f945954b4d0: _run> failed with BrokenPreconditionError
FATAL: An unexpected exception occured. A traceback has been written to /tmp/raiden-exception-2020-01-29T18-2580ljbh9m.txt
Number of token networks will exceed the max of 5
ReturnCode.FATAL
|
raiden.exceptions.BrokenPreconditionError
|
def check_handler(self) -> None:
"""Check handler executed after the poll backend returns.
Note:
- For each of the watchers in the ready state there will be a callback,
which will do work related to the watcher (e.g. read from a socket).
This time must not be accounted for in the Idle timeout, therefore
this handler must have a high priority.
"""
curr_time = time.time()
# It is possible for the check_handler to be executed before the
# prepare_handler, this happens when the watchers are installed by a
# greenlet that was switched onto because of IO (IOW, Idle.enable is
# called while the event loop is executing watchers, after the `poll`)
if self.before_poll is not None:
self.measurements.append( # pylint: disable=no-member
IdleMeasurement(self.before_poll, curr_time)
)
while (
self.measurements
and curr_time - self.measurements_start > self.measurement_interval
):
self.measurements.pop() # pylint: disable=no-member
if curr_time - self.last_print >= self.measurement_interval:
self.log()
self.last_print = curr_time
|
def check_handler(self) -> None:
"""Check handler executed after the poll backend returns.
Note:
- For each of the watchers in the ready state there will be a callback,
which will do work related to the watcher (e.g. read from a socket).
This time must not be accounted for in the Idle timeout, therefore
this handler must have a high priority.
"""
curr_time = time.time()
# It is possible for the check_handler to be executed before the
# prepare_handler, this happens when the watchers are installed by a
# greenlet that was switched onto because of IO (IOW, Idle.enable is
# called while the event loop is executing watchers, after the `poll`)
if self.before_poll is not None:
self.measurements.append( # pylint: disable=no-member
IdleMeasurement(self.before_poll, curr_time)
)
while curr_time - self.measurements_start > self.measurement_interval:
self.measurements.pop() # pylint: disable=no-member
if curr_time - self.last_print >= self.measurement_interval:
self.log()
self.last_print = curr_time
|
https://github.com/raiden-network/raiden/issues/5738
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/raiden/utils/debugging.py", line 123, in check_handler
while curr_time - self.measurements_start > self.measurement_interval:
File "/usr/local/lib/python3.7/site-packages/raiden/utils/debugging.py", line 137, in measurements_start
return self.measurements[0].before_poll
IndexError: list index out of range
2020-01-23T05:20:56Z <check at 0x7ff9417d3978 active callback=<bound method Idle.check_handler of Idle(measurement_interval=10, before_poll=1579756855.8080018, last_print=1579756845.8452065, measurements=[])> args=()> failed with IndexError
|
IndexError
|
def log(self) -> None:
if not self.measurements:
log.debug(
"Idle",
context_switches=self.context_switches,
measurements=self.measurements,
)
return
log.debug(
"Idle",
start=self.measurements_start,
context_switches=self.context_switches,
idled=self.idled,
interval=self.running_interval,
idle_pct=self.idled_pct,
measurements=self.measurements,
)
|
def log(self) -> None:
log.debug(
"Idle",
start=self.measurements_start,
context_switches=self.context_switches,
idled=self.idled,
interval=self.running_interval,
idle_pct=self.idled_pct,
measurements=self.measurements,
)
|
https://github.com/raiden-network/raiden/issues/5738
|
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/raiden/utils/debugging.py", line 123, in check_handler
while curr_time - self.measurements_start > self.measurement_interval:
File "/usr/local/lib/python3.7/site-packages/raiden/utils/debugging.py", line 137, in measurements_start
return self.measurements[0].before_poll
IndexError: list index out of range
2020-01-23T05:20:56Z <check at 0x7ff9417d3978 active callback=<bound method Idle.check_handler of Idle(measurement_interval=10, before_poll=1579756855.8080018, last_print=1579756845.8452065, measurements=[])> args=()> failed with IndexError
|
IndexError
|
def _check_and_send(self) -> None:
"""Check and send all pending/queued messages that are not waiting on retry timeout
After composing the to-be-sent message, also message queue from messages that are not
present in the respective SendMessageEvent queue anymore
"""
if not self.transport.greenlet:
self.log.warning("Can't retry", reason="Transport not yet started")
return
if self.transport._stop_event.ready():
self.log.warning("Can't retry", reason="Transport stopped")
return
assert self._lock.locked(), (
"RetryQueue lock must be held while messages are being sent"
)
# On startup protocol messages must be sent only after the monitoring
# services are updated. For more details refer to the method
# `RaidenService._initialize_monitoring_services_queue`
if self.transport._prioritize_broadcast_messages:
self.transport._broadcast_queue.join()
self.log.debug("Retrying message(s)", receiver=to_checksum_address(self.receiver))
status = self.transport._address_mgr.get_address_reachability(self.receiver)
if status is not AddressReachability.REACHABLE:
# if partner is not reachable, return
self.log.debug(
"Partner not reachable. Skipping.",
partner=to_checksum_address(self.receiver),
status=status,
)
return
def message_is_in_queue(message_data: _RetryQueue._MessageData) -> bool:
if message_data.queue_identifier not in self.transport._queueids_to_queues:
# The Raiden queue for this queue identifier has been removed
return False
return any(
isinstance(message_data.message, RetrieableMessage)
and send_event.message_identifier == message_data.message.message_identifier
for send_event in self.transport._queueids_to_queues[
message_data.queue_identifier
]
)
message_texts: List[str] = list()
for message_data in self._message_queue[:]:
# Messages are sent on two conditions:
# - Non-retryable (e.g. Delivered)
# - Those are immediately remove from the local queue since they are only sent once
# - Retryable
# - Those are retried according to their retry generator as long as they haven't been
# removed from the Raiden queue
remove = False
if isinstance(message_data.message, (Delivered, Ping, Pong)):
# e.g. Delivered, send only once and then clear
# TODO: Is this correct? Will a missed Delivered be 'fixed' by the
# later `Processed` message?
remove = True
message_texts.append(message_data.text)
elif not message_is_in_queue(message_data):
remove = True
self.log.debug(
"Stopping message send retry",
queue=message_data.queue_identifier,
message=message_data.message,
reason="Message was removed from queue or queue was removed",
)
else:
# The message is still eligible for retry, consult the expiration generator if
# it should be retried now
if next(message_data.expiration_generator):
message_texts.append(message_data.text)
if remove:
self._message_queue.remove(message_data)
if message_texts:
self.log.debug(
"Send", receiver=to_checksum_address(self.receiver), messages=message_texts
)
for message_batch in make_message_batches(message_texts):
self.transport._send_raw(self.receiver, message_batch)
|
def _check_and_send(self) -> None:
"""Check and send all pending/queued messages that are not waiting on retry timeout
After composing the to-be-sent message, also message queue from messages that are not
present in the respective SendMessageEvent queue anymore
"""
if not self.transport.greenlet:
self.log.warning("Can't retry", reason="Transport not yet started")
return
if self.transport._stop_event.ready():
self.log.warning("Can't retry", reason="Transport stopped")
return
assert self._lock.locked(), (
"RetryQueue lock must be held while messages are being sent"
)
# On startup protocol messages must be sent only after the monitoring
# services are updated. For more details refer to the method
# `RaidenService._initialize_monitoring_services_queue`
if self.transport._prioritize_broadcast_messages:
self.transport._broadcast_queue.join()
self.log.debug("Retrying message(s)", receiver=to_checksum_address(self.receiver))
status = self.transport._address_mgr.get_address_reachability(self.receiver)
if status is not AddressReachability.REACHABLE:
# if partner is not reachable, return
self.log.debug(
"Partner not reachable. Skipping.",
partner=to_checksum_address(self.receiver),
status=status,
)
return
def message_is_in_queue(message_data: _RetryQueue._MessageData) -> bool:
if message_data.queue_identifier not in self.transport._queueids_to_queues:
# The Raiden queue for this queue identifier has been removed
return False
return any(
isinstance(message_data.message, RetrieableMessage)
and send_event.message_identifier == message_data.message.message_identifier
for send_event in self.transport._queueids_to_queues[
message_data.queue_identifier
]
)
message_texts: List[str] = list()
for message_data in self._message_queue[:]:
# Messages are sent on two conditions:
# - Non-retryable (e.g. Delivered)
# - Those are immediately remove from the local queue since they are only sent once
# - Retryable
# - Those are retried according to their retry generator as long as they haven't been
# removed from the Raiden queue
remove = False
if isinstance(message_data.message, (Delivered, Ping, Pong)):
# e.g. Delivered, send only once and then clear
# TODO: Is this correct? Will a missed Delivered be 'fixed' by the
# later `Processed` message?
remove = True
message_texts.append(message_data.text)
elif not message_is_in_queue(message_data):
remove = True
self.log.debug(
"Stopping message send retry",
queue=message_data.queue_identifier,
message=message_data.message,
reason="Message was removed from queue or queue was removed",
)
else:
# The message is still eligible for retry, consult the expiration generator if
# it should be retried now
if next(message_data.expiration_generator):
message_texts.append(message_data.text)
if remove:
self._message_queue.remove(message_data)
if message_texts:
self.log.debug(
"Send", receiver=to_checksum_address(self.receiver), messages=message_texts
)
self.transport._send_raw(self.receiver, "\n".join(message_texts))
|
https://github.com/raiden-network/raiden/issues/3512
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix.py", line 263, in _run
File "site-packages/raiden/network/transport/matrix.py", line 254, in _check_and_send
File "site-packages/raiden/network/transport/matrix.py", line 1023, in _send_raw
File "site-packages/matrix_client/room.py", line 105, in send_text
File "site-packages/matrix_client/api.py", line 336, in send_message
File "site-packages/matrix_client/api.py", line 256, in send_message_event
File "site-packages/raiden_libs/network/matrix/client.py", line 75, in _send
File "site-packages/matrix_client/api.py", line 691, in _send
matrix_client.errors.MatrixRequestError: 413: {"errcode":"M_TOO_LARGE","error":"event too large"}
2019-02-19T12:10:29Z <Greenlet "RetryQueue:b2c94bba" at 0x7f077d3a9158: <bound method _RetryQueue._run of <_RetryQueue for 0xb2c94bbaf307c6dfbf70cc2537385553385a345b>>> failed with MatrixRequestError
|
matrix_client.errors.MatrixRequestError
|
def _broadcast_worker(self) -> None:
def _broadcast(room_name: str, serialized_message: str) -> None:
if not any(suffix in room_name for suffix in self._config.broadcast_rooms):
raise RuntimeError(
f'Broadcast called on non-public room "{room_name}". '
f"Known public rooms: {self._config.broadcast_rooms}."
)
room_name = make_room_alias(self.chain_id, room_name)
if room_name not in self._broadcast_rooms:
room = join_broadcast_room(
self._client, f"#{room_name}:{self._server_name}"
)
self._broadcast_rooms[room_name] = room
existing_room = self._broadcast_rooms.get(room_name)
assert existing_room, f"Unknown broadcast room: {room_name!r}"
self.log.debug(
"Broadcast",
room_name=room_name,
room=existing_room,
data=serialized_message.replace("\n", "\\n"),
)
existing_room.send_text(serialized_message)
while not self._stop_event.ready():
self._broadcast_event.clear()
messages: Dict[str, List[Message]] = defaultdict(list)
while self._broadcast_queue.qsize() > 0:
room_name, message = self._broadcast_queue.get()
messages[room_name].append(message)
for room_name, messages_for_room in messages.items():
serialized_messages = (
MessageSerializer.serialize(message) for message in messages_for_room
)
for message_batch in make_message_batches(serialized_messages):
_broadcast(room_name, message_batch)
for _ in messages_for_room:
# Every message needs to be marked as done.
# Unfortunately there's no way to do that in one call :(
# https://github.com/gevent/gevent/issues/1436
self._broadcast_queue.task_done()
# Stop prioritizing broadcast messages after initial queue has been emptied
self._prioritize_broadcast_messages = False
self._broadcast_event.wait(self._config.retry_interval)
|
def _broadcast_worker(self) -> None:
def _broadcast(room_name: str, serialized_message: str) -> None:
if not any(suffix in room_name for suffix in self._config.broadcast_rooms):
raise RuntimeError(
f'Broadcast called on non-public room "{room_name}". '
f"Known public rooms: {self._config.broadcast_rooms}."
)
room_name = make_room_alias(self.chain_id, room_name)
if room_name not in self._broadcast_rooms:
room = join_broadcast_room(
self._client, f"#{room_name}:{self._server_name}"
)
self._broadcast_rooms[room_name] = room
existing_room = self._broadcast_rooms.get(room_name)
assert existing_room, f"Unknown broadcast room: {room_name!r}"
self.log.debug(
"Broadcast",
room_name=room_name,
room=existing_room,
data=serialized_message.replace("\n", "\\n"),
)
existing_room.send_text(serialized_message)
while not self._stop_event.ready():
self._broadcast_event.clear()
messages: Dict[str, List[Message]] = defaultdict(list)
while self._broadcast_queue.qsize() > 0:
room_name, message = self._broadcast_queue.get()
messages[room_name].append(message)
for room_name, messages_for_room in messages.items():
message_text = "\n".join(
MessageSerializer.serialize(message) for message in messages_for_room
)
_broadcast(room_name, message_text)
for _ in messages_for_room:
# Every message needs to be marked as done.
# Unfortunately there's no way to do that in one call :(
# https://github.com/gevent/gevent/issues/1436
self._broadcast_queue.task_done()
# Stop prioritizing broadcast messages after initial queue has been emptied
self._prioritize_broadcast_messages = False
self._broadcast_event.wait(self._config.retry_interval)
|
https://github.com/raiden-network/raiden/issues/3512
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix.py", line 263, in _run
File "site-packages/raiden/network/transport/matrix.py", line 254, in _check_and_send
File "site-packages/raiden/network/transport/matrix.py", line 1023, in _send_raw
File "site-packages/matrix_client/room.py", line 105, in send_text
File "site-packages/matrix_client/api.py", line 336, in send_message
File "site-packages/matrix_client/api.py", line 256, in send_message_event
File "site-packages/raiden_libs/network/matrix/client.py", line 75, in _send
File "site-packages/matrix_client/api.py", line 691, in _send
matrix_client.errors.MatrixRequestError: 413: {"errcode":"M_TOO_LARGE","error":"event too large"}
2019-02-19T12:10:29Z <Greenlet "RetryQueue:b2c94bba" at 0x7f077d3a9158: <bound method _RetryQueue._run of <_RetryQueue for 0xb2c94bbaf307c6dfbf70cc2537385553385a345b>>> failed with MatrixRequestError
|
matrix_client.errors.MatrixRequestError
|
def _check_and_send(self) -> None:
"""Check and send all pending/queued messages that are not waiting on retry timeout
After composing the to-be-sent message, also message queue from messages that are not
present in the respective SendMessageEvent queue anymore
"""
if not self.transport.greenlet:
self.log.warning("Can't retry", reason="Transport not yet started")
return
if self.transport._stop_event.ready():
self.log.warning("Can't retry", reason="Transport stopped")
return
# On startup protocol messages must be sent only after the monitoring
# services are updated. For more details refer to the method
# `RaidenService._initialize_monitoring_services_queue`
if self.transport._prioritize_broadcast_messages:
self.transport._broadcast_queue.join()
self.log.debug("Retrying message", receiver=to_checksum_address(self.receiver))
status = self.transport._address_mgr.get_address_reachability(self.receiver)
if status is not AddressReachability.REACHABLE:
# if partner is not reachable, return
self.log.debug(
"Partner not reachable. Skipping.",
partner=to_checksum_address(self.receiver),
status=status,
)
return
message_texts = [
data.text
for data in self._message_queue
# if expired_gen generator yields False, message was sent recently, so skip it
if next(data.expiration_generator)
]
def message_is_in_queue(data: _RetryQueue._MessageData) -> bool:
return any(
isinstance(data.message, RetrieableMessage)
and send_event.message_identifier == data.message.message_identifier
for send_event in self.transport._queueids_to_queues[data.queue_identifier]
)
# clean after composing, so any queued messages (e.g. Delivered) are sent at least once
for msg_data in self._message_queue[:]:
remove = False
if isinstance(msg_data.message, (Delivered, Ping, Pong)):
# e.g. Delivered, send only once and then clear
# TODO: Is this correct? Will a missed Delivered be 'fixed' by the
# later `Processed` message?
remove = True
elif msg_data.queue_identifier not in self.transport._queueids_to_queues:
remove = True
self.log.debug(
"Stopping message send retry",
queue=msg_data.queue_identifier,
message=msg_data.message,
reason="Raiden queue is gone",
)
elif not message_is_in_queue(msg_data):
remove = True
self.log.debug(
"Stopping message send retry",
queue=msg_data.queue_identifier,
message=msg_data.message,
reason="Message was removed from queue",
)
if remove:
self._message_queue.remove(msg_data)
if message_texts:
self.log.debug(
"Send", receiver=to_checksum_address(self.receiver), messages=message_texts
)
for message_batch in make_message_batches(message_texts):
self.transport._send_raw(self.receiver, message_batch)
|
def _check_and_send(self) -> None:
"""Check and send all pending/queued messages that are not waiting on retry timeout
After composing the to-be-sent message, also message queue from messages that are not
present in the respective SendMessageEvent queue anymore
"""
if not self.transport.greenlet:
self.log.warning("Can't retry", reason="Transport not yet started")
return
if self.transport._stop_event.ready():
self.log.warning("Can't retry", reason="Transport stopped")
return
# On startup protocol messages must be sent only after the monitoring
# services are updated. For more details refer to the method
# `RaidenService._initialize_monitoring_services_queue`
if self.transport._prioritize_broadcast_messages:
self.transport._broadcast_queue.join()
self.log.debug("Retrying message", receiver=to_checksum_address(self.receiver))
status = self.transport._address_mgr.get_address_reachability(self.receiver)
if status is not AddressReachability.REACHABLE:
# if partner is not reachable, return
self.log.debug(
"Partner not reachable. Skipping.",
partner=to_checksum_address(self.receiver),
status=status,
)
return
message_texts = [
data.text
for data in self._message_queue
# if expired_gen generator yields False, message was sent recently, so skip it
if next(data.expiration_generator)
]
def message_is_in_queue(data: _RetryQueue._MessageData) -> bool:
return any(
isinstance(data.message, RetrieableMessage)
and send_event.message_identifier == data.message.message_identifier
for send_event in self.transport._queueids_to_queues[data.queue_identifier]
)
# clean after composing, so any queued messages (e.g. Delivered) are sent at least once
for msg_data in self._message_queue[:]:
remove = False
if isinstance(msg_data.message, (Delivered, Ping, Pong)):
# e.g. Delivered, send only once and then clear
# TODO: Is this correct? Will a missed Delivered be 'fixed' by the
# later `Processed` message?
remove = True
elif msg_data.queue_identifier not in self.transport._queueids_to_queues:
remove = True
self.log.debug(
"Stopping message send retry",
queue=msg_data.queue_identifier,
message=msg_data.message,
reason="Raiden queue is gone",
)
elif not message_is_in_queue(msg_data):
remove = True
self.log.debug(
"Stopping message send retry",
queue=msg_data.queue_identifier,
message=msg_data.message,
reason="Message was removed from queue",
)
if remove:
self._message_queue.remove(msg_data)
if message_texts:
self.log.debug(
"Send", receiver=to_checksum_address(self.receiver), messages=message_texts
)
self.transport._send_raw(self.receiver, "\n".join(message_texts))
|
https://github.com/raiden-network/raiden/issues/3512
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "site-packages/raiden/network/transport/matrix.py", line 263, in _run
File "site-packages/raiden/network/transport/matrix.py", line 254, in _check_and_send
File "site-packages/raiden/network/transport/matrix.py", line 1023, in _send_raw
File "site-packages/matrix_client/room.py", line 105, in send_text
File "site-packages/matrix_client/api.py", line 336, in send_message
File "site-packages/matrix_client/api.py", line 256, in send_message_event
File "site-packages/raiden_libs/network/matrix/client.py", line 75, in _send
File "site-packages/matrix_client/api.py", line 691, in _send
matrix_client.errors.MatrixRequestError: 413: {"errcode":"M_TOO_LARGE","error":"event too large"}
2019-02-19T12:10:29Z <Greenlet "RetryQueue:b2c94bba" at 0x7f077d3a9158: <bound method _RetryQueue._run of <_RetryQueue for 0xb2c94bbaf307c6dfbf70cc2537385553385a345b>>> failed with MatrixRequestError
|
matrix_client.errors.MatrixRequestError
|
def start( # type: ignore
self,
raiden_service: "RaidenService",
whitelist: List[Address],
prev_auth_data: Optional[str],
) -> None:
if not self._stop_event.ready():
raise RuntimeError(f"{self!r} already started")
self.log.debug("Matrix starting")
self._stop_event.clear()
self._starting = True
self._raiden_service = raiden_service
self._address_mgr.start()
try:
login(
client=self._client,
signer=self._raiden_service.signer,
prev_auth_data=prev_auth_data,
)
except ValueError:
# `ValueError` may be raised if `get_user` provides invalid data to
# the `User` constructor. This is either a bug in the login, that
# tries to get the user after a failed login, or a bug in the
# Matrix SDK.
raise RaidenUnrecoverableError("Matrix SDK failed to properly set the userid")
except MatrixHttpLibError:
raise RaidenUnrecoverableError("The Matrix homeserver seems to be unavailable.")
self.log = log.bind(
current_user=self._user_id,
node=to_checksum_address(self._raiden_service.address),
transport_uuid=str(self._uuid),
)
self._initialize_first_sync()
self._initialize_broadcast_rooms()
self._initialize_room_inventory()
def on_success(greenlet: gevent.Greenlet) -> None:
if greenlet in self.greenlets:
self.greenlets.remove(greenlet)
self._client.start_listener_thread()
assert isinstance(self._client.sync_thread, gevent.Greenlet)
self._client.sync_thread.link_exception(self.on_error)
self._client.sync_thread.link_value(on_success)
self.greenlets = [self._client.sync_thread]
self._client.set_presence_state(UserPresence.ONLINE.value)
# (re)start any _RetryQueue which was initialized before start
for retrier in self._address_to_retrier.values():
if not retrier:
self.log.debug("Starting retrier", retrier=retrier)
retrier.start()
super().start() # start greenlet
self._starting = False
self._started = True
pool = Pool(size=10)
greenlets = set(
pool.apply_async(self.whitelist, [address]) for address in whitelist
)
gevent.joinall(greenlets, raise_error=True)
self.log.debug("Matrix started", config=self._config)
# Handle any delayed invites in the future
self._schedule_new_greenlet(self._process_queued_invites, in_seconds_from_now=1)
|
def start( # type: ignore
self,
raiden_service: "RaidenService",
whitelist: List[Address],
prev_auth_data: Optional[str],
) -> None:
if not self._stop_event.ready():
raise RuntimeError(f"{self!r} already started")
self.log.debug("Matrix starting")
self._stop_event.clear()
self._starting = True
self._raiden_service = raiden_service
self._address_mgr.start()
try:
login(
client=self._client,
signer=self._raiden_service.signer,
prev_auth_data=prev_auth_data,
)
except ValueError:
# `ValueError` may be raised if `get_user` provides invalid data to
# the `User` constructor. This is either a bug in the login, that
# tries to get the user after a failed login, or a bug in the
# Matrix SDK.
raise RaidenUnrecoverableError("Matrix SDK failed to properly set the userid")
except MatrixHttpLibError:
raise RaidenUnrecoverableError("The Matrix homeserver seems to be unavailable.")
self.log = log.bind(
current_user=self._user_id,
node=to_checksum_address(self._raiden_service.address),
transport_uuid=str(self._uuid),
)
self._initialize_first_sync()
self._initialize_broadcast_rooms()
self._initialize_inventory_rooms()
def on_success(greenlet: gevent.Greenlet) -> None:
if greenlet in self.greenlets:
self.greenlets.remove(greenlet)
self._client.start_listener_thread()
assert isinstance(self._client.sync_thread, gevent.Greenlet)
self._client.sync_thread.link_exception(self.on_error)
self._client.sync_thread.link_value(on_success)
self.greenlets = [self._client.sync_thread]
self._client.set_presence_state(UserPresence.ONLINE.value)
# (re)start any _RetryQueue which was initialized before start
for retrier in self._address_to_retrier.values():
if not retrier:
self.log.debug("Starting retrier", retrier=retrier)
retrier.start()
super().start() # start greenlet
self._starting = False
self._started = True
pool = Pool(size=10)
greenlets = set(
pool.apply_async(self.whitelist, [address]) for address in whitelist
)
gevent.joinall(greenlets, raise_error=True)
self.log.debug("Matrix started", config=self._config)
# Handle any delayed invites in the future
self._schedule_new_greenlet(self._process_queued_invites, in_seconds_from_now=1)
|
https://github.com/raiden-network/raiden/issues/5276
|
{
"task": "<TransferTask: {'from': 0, 'to': 1, 'amount': 500000000000000000, 'expected_http_status': 200}>",
"event": "Task errored",
"logger": "scenario_player.tasks.base",
"level": "error",
"timestamp": "2019-11-12 01:58:24.322286",
"exception": "Traceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nhttp.client.RemoteDisconnected: Remote end closed connection without response\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 449, in send\n timeout=timeout\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 641, in urlopen\n _stacktrace=sys.exc_info()[2])\n File \"/usr/local/lib/python3.7/site-packages/urllib3/util/retry.py\", line 368, in increment\n raise six.reraise(type(error), error, _stacktrace)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/packages/six.py\", line 685, in reraise\n raise value.with_traceback(tb)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nurllib3.exceptions.ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 51, in _run\n method=self._method, url=url, json=self._request_params, timeout=self._timeout\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 533, in request\n resp = self.send(prep, **send_kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 646, in send\n r = adapter.send(request, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 498, in send\n raise ConnectionError(err, request=request)\nrequests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/base.py\", line 74, in __call__\n return self._run(*args, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 56, in _run\n raise RESTAPIError(f\"Error performing REST-API call: {self._name}\") from ex\nscenario_player.exceptions.legacy.RESTAPIError: Error performing REST-API call: transfer"
}
|
nurllib3.exceptions.ProtocolError
|
def _handle_invite(self, room_id: _RoomID, state: dict) -> None:
"""Handle an invite request.
Always join a room, even if the partner is not whitelisted. That was
previously done to prevent a malicious node from inviting and spamming
the user. However, there are cases where nodes trying to create rooms
for a channel might race and an invite would be received by one node
which did not yet whitelist the inviting node, as a result the invite
would wrongfully be ignored. This change removes the whitelist check.
To prevent spam, we make sure we ignore presence updates and messages
from non-whitelisted nodes.
"""
if self._stop_event.ready():
return
if self._starting:
self.log.debug("Queueing invite", room_id=room_id)
self._invite_queue.append((room_id, state))
return
invite_events = [
event
for event in state["events"]
if event["type"] == "m.room.member"
and event["content"].get("membership") == "invite"
and event["state_key"] == self._user_id
]
if not invite_events or not invite_events[0]:
self.log.debug("Invite: no invite event found", room_id=room_id)
return # there should always be one and only one invite membership event for us
self.log.debug("Got invite", room_id=room_id)
sender = invite_events[0]["sender"]
user = self._client.get_user(sender)
self._displayname_cache.warm_users([user])
peer_address = validate_userid_signature(user)
if not peer_address:
self.log.debug(
"Got invited to a room by invalid signed user - ignoring",
room_id=room_id,
user=user,
)
return
sender_join_events = [
event
for event in state["events"]
if event["type"] == "m.room.member"
and event["content"].get("membership") == "join"
and event["state_key"] == sender
]
if not sender_join_events or not sender_join_events[0]:
self.log.debug("Invite: no sender join event", room_id=room_id)
return # there should always be one and only one join membership event for the sender
join_rules_events = [
event for event in state["events"] if event["type"] == "m.room.join_rules"
]
# room privacy as seen from the event
private_room: bool = False
if join_rules_events:
join_rules_event = join_rules_events[0]
private_room = join_rules_event["content"].get("join_rule") == "invite"
# we join room and _set_room_id_for_address despite room privacy and requirements,
# _get_room_ids_for_address will take care of returning only matching rooms and
# _leave_unused_rooms will clear it in the future, if and when needed
room: Optional[Room] = None
last_ex: Optional[Exception] = None
retry_interval = 0.1
for _ in range(JOIN_RETRIES):
try:
room = self._client.join_room(room_id)
except MatrixRequestError as e:
last_ex = e
if self._stop_event.wait(retry_interval):
break
retry_interval = retry_interval * 2
else:
break
else:
assert last_ex is not None
raise last_ex # re-raise if couldn't succeed in retries
assert room is not None, f"joining room {room} failed"
if self._is_broadcast_room(room):
# This shouldn't happen with well behaving nodes but we need to defend against it
# Since we already are a member of all broadcast rooms, the `join()` above is in
# effect a no-op
self.log.warning("Got invite to broadcast room, ignoring", inviting_user=user)
return
if not room.listeners:
room.add_listener(self._handle_message, "m.room.message")
# room state may not populated yet, so we populate 'invite_only' from event
room.invite_only = private_room
self._set_room_id_for_address(address=peer_address, room_id=room_id)
self.log.debug(
"Joined from invite",
room_id=room_id,
aliases=room.aliases,
inviting_address=to_checksum_address(peer_address),
)
|
def _handle_invite(self, room_id: _RoomID, state: dict) -> None:
"""Handle an invite request.
Always join a room, even if the partner is not whitelisted. That was
previously done to prevent a malicious node from inviting and spamming
the user. However, there are cases where nodes trying to create rooms
for a channel might race and an invite would be received by one node
which did not yet whitelist the inviting node, as a result the invite
would wrongfully be ignored. This change removes the whitelist check.
To prevent spam, we make sure we ignore presence updates and messages
from non-whitelisted nodes.
"""
if self._stop_event.ready():
return
if self._starting:
self.log.debug("Queueing invite", room_id=room_id)
self._invite_queue.append((room_id, state))
return
invite_events = [
event
for event in state["events"]
if event["type"] == "m.room.member"
and event["content"].get("membership") == "invite"
and event["state_key"] == self._user_id
]
if not invite_events or not invite_events[0]:
self.log.debug("Invite: no invite event found", room_id=room_id)
return # there should always be one and only one invite membership event for us
self.log.debug("Got invite", room_id=room_id)
sender = invite_events[0]["sender"]
user = self._client.get_user(sender)
self._displayname_cache.warm_users([user])
peer_address = validate_userid_signature(user)
if not peer_address:
self.log.debug(
"Got invited to a room by invalid signed user - ignoring",
room_id=room_id,
user=user,
)
return
sender_join_events = [
event
for event in state["events"]
if event["type"] == "m.room.member"
and event["content"].get("membership") == "join"
and event["state_key"] == sender
]
if not sender_join_events or not sender_join_events[0]:
self.log.debug("Invite: no sender join event", room_id=room_id)
return # there should always be one and only one join membership event for the sender
join_rules_events = [
event for event in state["events"] if event["type"] == "m.room.join_rules"
]
# room privacy as seen from the event
private_room: bool = False
if join_rules_events:
join_rules_event = join_rules_events[0]
private_room = join_rules_event["content"].get("join_rule") == "invite"
# we join room and _set_room_id_for_address despite room privacy and requirements,
# _get_room_ids_for_address will take care of returning only matching rooms and
# _leave_unused_rooms will clear it in the future, if and when needed
room: Optional[Room] = None
last_ex: Optional[Exception] = None
retry_interval = 0.1
for _ in range(JOIN_RETRIES):
try:
room = self._client.join_room(room_id)
except MatrixRequestError as e:
last_ex = e
if self._stop_event.wait(retry_interval):
break
retry_interval = retry_interval * 2
else:
break
else:
assert last_ex is not None
raise last_ex # re-raise if couldn't succeed in retries
assert room is not None, f"joining room {room} failed"
if not room.listeners:
room.add_listener(self._handle_message, "m.room.message")
# room state may not populated yet, so we populate 'invite_only' from event
room.invite_only = private_room
self._set_room_id_for_address(address=peer_address, room_id=room_id)
self.log.debug(
"Joined from invite",
room_id=room_id,
aliases=room.aliases,
inviting_address=to_checksum_address(peer_address),
)
|
https://github.com/raiden-network/raiden/issues/5276
|
{
"task": "<TransferTask: {'from': 0, 'to': 1, 'amount': 500000000000000000, 'expected_http_status': 200}>",
"event": "Task errored",
"logger": "scenario_player.tasks.base",
"level": "error",
"timestamp": "2019-11-12 01:58:24.322286",
"exception": "Traceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nhttp.client.RemoteDisconnected: Remote end closed connection without response\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 449, in send\n timeout=timeout\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 641, in urlopen\n _stacktrace=sys.exc_info()[2])\n File \"/usr/local/lib/python3.7/site-packages/urllib3/util/retry.py\", line 368, in increment\n raise six.reraise(type(error), error, _stacktrace)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/packages/six.py\", line 685, in reraise\n raise value.with_traceback(tb)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nurllib3.exceptions.ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 51, in _run\n method=self._method, url=url, json=self._request_params, timeout=self._timeout\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 533, in request\n resp = self.send(prep, **send_kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 646, in send\n r = adapter.send(request, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 498, in send\n raise ConnectionError(err, request=request)\nrequests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/base.py\", line 74, in __call__\n return self._run(*args, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 56, in _run\n raise RESTAPIError(f\"Error performing REST-API call: {self._name}\") from ex\nscenario_player.exceptions.legacy.RESTAPIError: Error performing REST-API call: transfer"
}
|
nurllib3.exceptions.ProtocolError
|
def _handle_message(self, room: Room, event: Dict[str, Any]) -> bool:
"""Handle text messages sent to listening rooms"""
if self._stop_event.ready():
return False
is_valid_type = (
event["type"] == "m.room.message" and event["content"]["msgtype"] == "m.text"
)
if not is_valid_type:
return False
sender_id = event["sender"]
if sender_id == self._user_id:
# Ignore our own messages
return False
user = self._client.get_user(sender_id)
self._displayname_cache.warm_users([user])
peer_address = validate_userid_signature(user)
if not peer_address:
self.log.debug(
"Ignoring message from user with an invalid display name signature",
peer_user=user.user_id,
room=room,
)
return False
if self._is_broadcast_room(room):
# This must not happen. Nodes must not listen on broadcast rooms.
raise RuntimeError(
f"Received message in broadcast room {room.aliases[0]}. Sending user: {user}"
)
if not self._address_mgr.is_address_known(peer_address):
self.log.debug(
"Ignoring message from non-whitelisted peer",
sender=user,
sender_address=to_checksum_address(peer_address),
room=room,
)
return False
# rooms we created and invited user, or were invited specifically by them
room_ids = self._get_room_ids_for_address(peer_address)
if room.room_id not in room_ids:
self.log.debug(
"Ignoring invalid message",
peer_user=user.user_id,
peer_address=to_checksum_address(peer_address),
room=room,
expected_room_ids=room_ids,
reason="unknown room for user",
)
return False
messages = validate_and_parse_message(event["content"]["body"], peer_address)
if not messages:
return False
self.log.debug(
"Incoming messages",
messages=messages,
sender=to_checksum_address(peer_address),
sender_user=user,
room=room,
)
for message in messages:
if not isinstance(message, (SignedRetrieableMessage, SignedMessage)):
self.log.warning(
"Received invalid message",
message=redact_secret(DictSerializer.serialize(message)),
)
if isinstance(message, Delivered):
self._receive_delivered(message)
elif isinstance(message, Processed):
self._receive_message(message)
else:
assert isinstance(message, SignedRetrieableMessage)
self._receive_message(message)
return True
|
def _handle_message(self, room: Room, event: Dict[str, Any]) -> bool:
"""Handle text messages sent to listening rooms"""
if self._stop_event.ready():
return False
is_valid_type = (
event["type"] == "m.room.message" and event["content"]["msgtype"] == "m.text"
)
if not is_valid_type:
return False
sender_id = event["sender"]
if sender_id == self._user_id:
# Ignore our own messages
return False
user = self._client.get_user(sender_id)
self._displayname_cache.warm_users([user])
peer_address = validate_userid_signature(user)
if not peer_address:
self.log.debug(
"Ignoring message from user with an invalid display name signature",
peer_user=user.user_id,
room=room,
)
return False
if not self._address_mgr.is_address_known(peer_address):
self.log.debug(
"Ignoring message from non-whitelisted peer",
sender=user,
sender_address=to_checksum_address(peer_address),
room=room,
)
return False
# rooms we created and invited user, or were invited specifically by them
room_ids = self._get_room_ids_for_address(peer_address)
if room.room_id not in room_ids:
self.log.debug(
"Ignoring invalid message",
peer_user=user.user_id,
peer_address=to_checksum_address(peer_address),
room=room,
expected_room_ids=room_ids,
reason="unknown room for user",
)
return False
# TODO: With the condition in the TODO above restored this one won't have an effect, check
# if it can be removed after the above is solved
if not room_ids or room.room_id != room_ids[0]:
if self._is_broadcast_room(room):
# This must not happen. Nodes must not listen on broadcast rooms.
raise RuntimeError(f"Received message in broadcast room {room.aliases}.")
self.log.debug(
"Received message triggered new comms room for peer",
peer_user=user.user_id,
peer_address=to_checksum_address(peer_address),
known_user_rooms=room_ids,
room=room,
)
self._set_room_id_for_address(peer_address, room.room_id)
messages = validate_and_parse_message(event["content"]["body"], peer_address)
if not messages:
return False
self.log.debug(
"Incoming messages",
messages=messages,
sender=to_checksum_address(peer_address),
sender_user=user,
room=room,
)
for message in messages:
if not isinstance(message, (SignedRetrieableMessage, SignedMessage)):
self.log.warning(
"Received invalid message",
message=redact_secret(DictSerializer.serialize(message)),
)
if isinstance(message, Delivered):
self._receive_delivered(message)
elif isinstance(message, Processed):
self._receive_message(message)
else:
assert isinstance(message, SignedRetrieableMessage)
self._receive_message(message)
return True
|
https://github.com/raiden-network/raiden/issues/5276
|
{
"task": "<TransferTask: {'from': 0, 'to': 1, 'amount': 500000000000000000, 'expected_http_status': 200}>",
"event": "Task errored",
"logger": "scenario_player.tasks.base",
"level": "error",
"timestamp": "2019-11-12 01:58:24.322286",
"exception": "Traceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nhttp.client.RemoteDisconnected: Remote end closed connection without response\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 449, in send\n timeout=timeout\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 641, in urlopen\n _stacktrace=sys.exc_info()[2])\n File \"/usr/local/lib/python3.7/site-packages/urllib3/util/retry.py\", line 368, in increment\n raise six.reraise(type(error), error, _stacktrace)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/packages/six.py\", line 685, in reraise\n raise value.with_traceback(tb)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nurllib3.exceptions.ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 51, in _run\n method=self._method, url=url, json=self._request_params, timeout=self._timeout\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 533, in request\n resp = self.send(prep, **send_kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 646, in send\n r = adapter.send(request, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 498, in send\n raise ConnectionError(err, request=request)\nrequests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/base.py\", line 74, in __call__\n return self._run(*args, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 56, in _run\n raise RESTAPIError(f\"Error performing REST-API call: {self._name}\") from ex\nscenario_player.exceptions.legacy.RESTAPIError: Error performing REST-API call: transfer"
}
|
nurllib3.exceptions.ProtocolError
|
def _is_broadcast_room(self, room: Room) -> bool:
room_aliases = set(room.aliases)
if room.canonical_alias:
room_aliases.add(room.canonical_alias)
return any(
suffix in room_alias
for suffix in self._config.broadcast_rooms
for room_alias in room.aliases
)
|
def _is_broadcast_room(self, room: Room) -> bool:
return any(
suffix in room_alias
for suffix in self._config.broadcast_rooms
for room_alias in room.aliases
)
|
https://github.com/raiden-network/raiden/issues/5276
|
{
"task": "<TransferTask: {'from': 0, 'to': 1, 'amount': 500000000000000000, 'expected_http_status': 200}>",
"event": "Task errored",
"logger": "scenario_player.tasks.base",
"level": "error",
"timestamp": "2019-11-12 01:58:24.322286",
"exception": "Traceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nhttp.client.RemoteDisconnected: Remote end closed connection without response\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 449, in send\n timeout=timeout\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 641, in urlopen\n _stacktrace=sys.exc_info()[2])\n File \"/usr/local/lib/python3.7/site-packages/urllib3/util/retry.py\", line 368, in increment\n raise six.reraise(type(error), error, _stacktrace)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/packages/six.py\", line 685, in reraise\n raise value.with_traceback(tb)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 603, in urlopen\n chunked=chunked)\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 387, in _make_request\n six.raise_from(e, None)\n File \"<string>\", line 2, in raise_from\n File \"/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py\", line 383, in _make_request\n httplib_response = conn.getresponse()\n File \"/usr/local/lib/python3.7/http/client.py\", line 1321, in getresponse\n response.begin()\n File \"/usr/local/lib/python3.7/http/client.py\", line 296, in begin\n version, status, reason = self._read_status()\n File \"/usr/local/lib/python3.7/http/client.py\", line 265, in _read_status\n raise RemoteDisconnected(\"Remote end closed connection without\"\nurllib3.exceptions.ProtocolError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nDuring handling of the above exception, another exception occurred:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 51, in _run\n method=self._method, url=url, json=self._request_params, timeout=self._timeout\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 533, in request\n resp = self.send(prep, **send_kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/sessions.py\", line 646, in send\n r = adapter.send(request, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/requests/adapters.py\", line 498, in send\n raise ConnectionError(err, request=request)\nrequests.exceptions.ConnectionError: ('Connection aborted.', RemoteDisconnected('Remote end closed connection without response'))\n\nThe above exception was the direct cause of the following exception:\n\nTraceback (most recent call last):\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/base.py\", line 74, in __call__\n return self._run(*args, **kwargs)\n File \"/usr/local/lib/python3.7/site-packages/scenario_player/tasks/api_base.py\", line 56, in _run\n raise RESTAPIError(f\"Error performing REST-API call: {self._name}\") from ex\nscenario_player.exceptions.legacy.RESTAPIError: Error performing REST-API call: transfer"
}
|
nurllib3.exceptions.ProtocolError
|
def get_transfer_from_task(
secrethash: SecretHash, transfer_task: TransferTask
) -> Optional[LockedTransferType]:
transfer: LockedTransferType
if isinstance(transfer_task, InitiatorTask):
if secrethash in transfer_task.manager_state.initiator_transfers:
transfer = transfer_task.manager_state.initiator_transfers[
secrethash
].transfer
else:
return None
elif isinstance(transfer_task, MediatorTask):
pairs = transfer_task.mediator_state.transfers_pair
if pairs:
transfer = pairs[-1].payer_transfer
elif transfer_task.mediator_state.waiting_transfer:
transfer = transfer_task.mediator_state.waiting_transfer.transfer
elif isinstance(transfer_task, TargetTask):
transfer = transfer_task.target_state.transfer
else: # pragma: no unittest
raise ValueError("get_transfer_from_task for a non TransferTask argument")
return transfer
|
def get_transfer_from_task(
secrethash: SecretHash, transfer_task: TransferTask
) -> Tuple[LockedTransferType, str]:
role = views.role_from_transfer_task(transfer_task)
transfer: LockedTransferType
if isinstance(transfer_task, InitiatorTask):
transfer = transfer_task.manager_state.initiator_transfers[secrethash].transfer
elif isinstance(transfer_task, MediatorTask):
pairs = transfer_task.mediator_state.transfers_pair
if pairs:
transfer = pairs[-1].payer_transfer
elif transfer_task.mediator_state.waiting_transfer:
transfer = transfer_task.mediator_state.waiting_transfer.transfer
elif isinstance(transfer_task, TargetTask):
transfer = transfer_task.target_state.transfer
else: # pragma: no unittest
raise ValueError("get_transfer_from_task for a non TransferTask argument")
return transfer, role
|
https://github.com/raiden-network/raiden/issues/5480
|
2019-12-11 10:18:23.082312 [error ] UnlockFailed! [raiden.raiden_event_handler] node=0x65A148DE5d57279EB9FdFe8c11cF9A43E599a537 reason=route was canceled secrethash=0xa045514824f5fd112d2f42a67e26ec965dce904a2edd90dfdda77fc882339275
2019-12-11 10:18:23.083932 [error ] Error processing request [raiden.api.rest] errors=Payment couldn't be completed because: there is no route available status_code=<HTTPStatus.CONFLICT: 409>
2019-12-11 10:18:23.084729 [info ] 127.0.0.1 - - [2019-12-11 11:18:23] "POST /api/v1/payments/0x31aA9D3E2bd38d22CA3Ae9be7aae1D518fe46043/0x1111111111111111111111111111111111111111 HTTP/1.1" 409 288 1.289927 [raiden.api.rest.pywsgi]
2019-12-11 10:18:24.453252 [info ] 127.0.0.1 - - [2019-12-11 11:18:24] "GET /api/v1/channels HTTP/1.1" 200 2721 0.011393 [raiden.api.rest.pywsgi]
2019-12-11 10:18:27.349499 [critical ] Unhandled exception when processing endpoint request [raiden.api.rest] node=0x65A148DE5d57279EB9FdFe8c11cF9A43E599a537
Traceback (most recent call last):
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask/app.py", line 1832, in full_dispatch_request
rv = self.dispatch_request()
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask/app.py", line 1818, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask_restful/__init__.py", line 458, in wrapper
resp = resource(*args, **kwargs)
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask/views.py", line 88, in view
return self.dispatch_request(*args, **kwargs)
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask_restful/__init__.py", line 573, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/manuel/Projects/raiden/raiden/raiden/api/v1/resources.py", line 298, in get
return self.rest_api.get_pending_transfers()
File "/home/manuel/Projects/raiden/raiden/raiden/api/rest.py", line 1403, in get_pending_transfers
token_address=token_address, partner_address=partner_address
File "/home/manuel/Projects/raiden/raiden/raiden/api/python.py", line 1285, in get_pending_transfers
return transfer_tasks_view(transfer_tasks, token_address, channel_id)
File "/home/manuel/Projects/raiden/raiden/raiden/api/python.py", line 154, in transfer_tasks_view
transfer, role = get_transfer_from_task(secrethash, transfer_task)
File "/home/manuel/Projects/raiden/raiden/raiden/api/python.py", line 131, in get_transfer_from_task
transfer = transfer_task.manager_state.initiator_transfers[secrethash].transfer
KeyError: b'Hv\x1e\x1a\xdb\xccZ-\\k\x93\x05$\x0e\xad\tX\x1a\x96|\x15\x96^^\xb1#G\xb9\xfc\xf0\xe0f'
2019-12-11 10:18:28.355096 [info ] 127.0.0.1 - - [2019-12-11 11:18:28] "GET /api/v1/pending_transfers HTTP/1.1" 500 161 1.007112 [raiden.api.rest.pywsgi]
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "/home/manuel/Projects/raiden/raiden/raiden/api/rest.py", line 441, in _run
self.wsgiserver.serve_forever()
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/gevent/baseserver.py", line 364, in serve_forever
self._stop_event.wait()
File "src/gevent/event.py", line 240, in gevent._event.Event.wait
File "src/gevent/event.py", line 140, in gevent._event._AbstractLinkable._wait
File "src/gevent/event.py", line 117, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/event.py", line 119, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 63, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
KeyError: b'Hv\x1e\x1a\xdb\xccZ-\\k\x93\x05$\x0e\xad\tX\x1a\x96|\x15\x96^^\xb1#G\xb9\xfc\xf0\xe0f'
2019-12-11T10:18:28Z <Greenlet "APIServer|Greenlet-5" at 0x7f8d493edb90: <bound method APIServer._run of <raiden.api.rest.APIServer object at 0x7f8d4a7cdd10>>> failed with KeyError
|
KeyError
|
def transfer_tasks_view(
transfer_tasks: Dict[SecretHash, TransferTask],
token_address: TokenAddress = None,
channel_id: ChannelID = None,
) -> List[Dict[str, Any]]:
view = list()
for secrethash, transfer_task in transfer_tasks.items():
transfer = get_transfer_from_task(secrethash, transfer_task)
if transfer is None:
continue
if token_address is not None:
if transfer.token != token_address:
continue
elif channel_id is not None:
if transfer.balance_proof.channel_identifier != channel_id:
continue
role = views.role_from_transfer_task(transfer_task)
view.append(flatten_transfer(transfer, role))
return view
|
def transfer_tasks_view(
transfer_tasks: Dict[SecretHash, TransferTask],
token_address: TokenAddress = None,
channel_id: ChannelID = None,
) -> List[Dict[str, Any]]:
view = list()
for secrethash, transfer_task in transfer_tasks.items():
transfer, role = get_transfer_from_task(secrethash, transfer_task)
if transfer is None:
continue
if token_address is not None:
if transfer.token != token_address:
continue
elif channel_id is not None:
if transfer.balance_proof.channel_identifier != channel_id:
continue
view.append(flatten_transfer(transfer, role))
return view
|
https://github.com/raiden-network/raiden/issues/5480
|
2019-12-11 10:18:23.082312 [error ] UnlockFailed! [raiden.raiden_event_handler] node=0x65A148DE5d57279EB9FdFe8c11cF9A43E599a537 reason=route was canceled secrethash=0xa045514824f5fd112d2f42a67e26ec965dce904a2edd90dfdda77fc882339275
2019-12-11 10:18:23.083932 [error ] Error processing request [raiden.api.rest] errors=Payment couldn't be completed because: there is no route available status_code=<HTTPStatus.CONFLICT: 409>
2019-12-11 10:18:23.084729 [info ] 127.0.0.1 - - [2019-12-11 11:18:23] "POST /api/v1/payments/0x31aA9D3E2bd38d22CA3Ae9be7aae1D518fe46043/0x1111111111111111111111111111111111111111 HTTP/1.1" 409 288 1.289927 [raiden.api.rest.pywsgi]
2019-12-11 10:18:24.453252 [info ] 127.0.0.1 - - [2019-12-11 11:18:24] "GET /api/v1/channels HTTP/1.1" 200 2721 0.011393 [raiden.api.rest.pywsgi]
2019-12-11 10:18:27.349499 [critical ] Unhandled exception when processing endpoint request [raiden.api.rest] node=0x65A148DE5d57279EB9FdFe8c11cF9A43E599a537
Traceback (most recent call last):
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask/app.py", line 1832, in full_dispatch_request
rv = self.dispatch_request()
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask/app.py", line 1818, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask_restful/__init__.py", line 458, in wrapper
resp = resource(*args, **kwargs)
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask/views.py", line 88, in view
return self.dispatch_request(*args, **kwargs)
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/flask_restful/__init__.py", line 573, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/manuel/Projects/raiden/raiden/raiden/api/v1/resources.py", line 298, in get
return self.rest_api.get_pending_transfers()
File "/home/manuel/Projects/raiden/raiden/raiden/api/rest.py", line 1403, in get_pending_transfers
token_address=token_address, partner_address=partner_address
File "/home/manuel/Projects/raiden/raiden/raiden/api/python.py", line 1285, in get_pending_transfers
return transfer_tasks_view(transfer_tasks, token_address, channel_id)
File "/home/manuel/Projects/raiden/raiden/raiden/api/python.py", line 154, in transfer_tasks_view
transfer, role = get_transfer_from_task(secrethash, transfer_task)
File "/home/manuel/Projects/raiden/raiden/raiden/api/python.py", line 131, in get_transfer_from_task
transfer = transfer_task.manager_state.initiator_transfers[secrethash].transfer
KeyError: b'Hv\x1e\x1a\xdb\xccZ-\\k\x93\x05$\x0e\xad\tX\x1a\x96|\x15\x96^^\xb1#G\xb9\xfc\xf0\xe0f'
2019-12-11 10:18:28.355096 [info ] 127.0.0.1 - - [2019-12-11 11:18:28] "GET /api/v1/pending_transfers HTTP/1.1" 500 161 1.007112 [raiden.api.rest.pywsgi]
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "/home/manuel/Projects/raiden/raiden/raiden/api/rest.py", line 441, in _run
self.wsgiserver.serve_forever()
File "/home/manuel/.virtualenvs/raiden-nightly/lib/python3.7/site-packages/gevent/baseserver.py", line 364, in serve_forever
self._stop_event.wait()
File "src/gevent/event.py", line 240, in gevent._event.Event.wait
File "src/gevent/event.py", line 140, in gevent._event._AbstractLinkable._wait
File "src/gevent/event.py", line 117, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/event.py", line 119, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 63, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
KeyError: b'Hv\x1e\x1a\xdb\xccZ-\\k\x93\x05$\x0e\xad\tX\x1a\x96|\x15\x96^^\xb1#G\xb9\xfc\xf0\xe0f'
2019-12-11T10:18:28Z <Greenlet "APIServer|Greenlet-5" at 0x7f8d493edb90: <bound method APIServer._run of <raiden.api.rest.APIServer object at 0x7f8d4a7cdd10>>> failed with KeyError
|
KeyError
|
def _deserialize(self, value, attr, data, **kwargs): # pylint: disable=unused-argument
if not is_0x_prefixed(value):
self.fail("missing_prefix")
if not is_checksum_address(value):
self.fail("invalid_checksum")
try:
value = to_canonical_address(value)
except ValueError:
self.fail("invalid_data")
if len(value) != 20:
self.fail("invalid_size")
if value == NULL_ADDRESS_BYTES:
self.fail("null_address")
return value
|
def _deserialize(self, value, attr, data, **kwargs): # pylint: disable=unused-argument
if not is_0x_prefixed(value):
self.fail("missing_prefix")
if not is_checksum_address(value):
self.fail("invalid_checksum")
try:
value = to_canonical_address(value)
except ValueError:
self.fail("invalid_data")
if len(value) != 20:
self.fail("invalid_size")
return value
|
https://github.com/raiden-network/raiden/issues/5209
|
2019-11-04 14:09:33.074227 [critical ] Unhandled exception when processing endpoint request [raiden.api.rest] node=0xf5eAE6E37B2873D32E11C88E310956D3C9c712f0
Traceback (most recent call last):
File "flask/app.py", line 1832, in full_dispatch_request
File "flask/app.py", line 1818, in dispatch_request
File "flask_restful/__init__.py", line 458, in wrapper
File "flask/views.py", line 88, in view
File "flask_restful/__init__.py", line 573, in dispatch_request
File "webargs/core.py", line 464, in wrapper
File "raiden/api/v1/resources.py", line 57, in put
File "raiden/api/rest.py", line 652, in open
File "raiden/api/python.py", line 450, in channel_open
File "raiden/api/python.py", line 350, in is_already_existing_channel
File "raiden/network/proxies/token_network.py", line 435, in get_channel_identifier_or_none
File "raiden/network/proxies/token_network.py", line 409, in get_channel_identifier
File "raiden/network/proxies/token_network.py", line 96, in raise_if_invalid_address_pair
ValueError: The null address is not allowed as a channel participant.
2019-11-04 14:09:34.077496 [info ] 127.0.0.1 - - [2019-11-04 15:09:34] "PUT /api/v1/channels HTTP/1.1" 500 161 1.212333 [raiden.api.rest.pywsgi]
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "raiden/api/rest.py", line 437, in _run
File "gevent/baseserver.py", line 364, in serve_forever
File "src/gevent/event.py", line 240, in gevent._event.Event.wait
File "src/gevent/event.py", line 140, in gevent._event._AbstractLinkable._wait
File "src/gevent/event.py", line 117, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/event.py", line 119, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 63, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: The null address is not allowed as a channel participant.
2019-11-04T14:09:34Z <Greenlet "APIServer|Greenlet-4" at 0x7f3b9da91170: <bound method APIServer._run of <raiden.api.rest.APIServer object at 0x7f3b9e17efd0>>> failed with ValueError
FATAL: An unexpected exception occured. A traceback has been written to /tmp/raiden-exception-2019-11-04T14-09u_e2bh08.txt
The null address is not allowed as a channel participant.
2019-11-04 14:09:34.332260 [info ] Raiden stopped [raiden.app] node=0xf5eAE6E37B2873D32E11C88E310956D3C9c712f0
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "raiden/ui/runners.py", line 229, in stop_task
File "src/gevent/greenlet.py", line 633, in gevent._greenlet.Greenlet.get
File "src/gevent/greenlet.py", line 312, in gevent._greenlet.Greenlet._raise_exception
File "gevent/_compat.py", line 47, in reraise
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "raiden/api/rest.py", line 437, in _run
File "gevent/baseserver.py", line 364, in serve_forever
File "src/gevent/event.py", line 240, in gevent._event.Event.wait
File "src/gevent/event.py", line 140, in gevent._event._AbstractLinkable._wait
File "src/gevent/event.py", line 117, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/event.py", line 119, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 63, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: The null address is not allowed as a channel participant.
2019-11-04T14:09:34Z <Greenlet "Greenlet-3" at 0x7f3b9e29dcb0: stop_task(<raiden.api.rest.APIServer object at 0x7f3b9e17efd)> failed with ValueError
Traceback (most recent call last):
File "raiden-script.py", line 2, in <module>
File "raiden/__main__.py", line 13, in main
File "click/core.py", line 764, in __call__
File "click/core.py", line 717, in main
File "click/core.py", line 1114, in invoke
File "click/core.py", line 956, in invoke
File "click/core.py", line 555, in invoke
File "click/decorators.py", line 17, in new_func
File "raiden/ui/cli.py", line 568, in run
File "raiden/ui/runners.py", line 243, in run
File "raiden/ui/runners.py", line 234, in _start_services
File "src/gevent/greenlet.py", line 849, in gevent._greenlet.joinall
File "src/gevent/greenlet.py", line 865, in gevent._greenlet.joinall
File "src/gevent/greenlet.py", line 312, in gevent._greenlet.Greenlet._raise_exception
File "gevent/_compat.py", line 47, in reraise
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "raiden/ui/runners.py", line 229, in stop_task
File "src/gevent/greenlet.py", line 633, in gevent._greenlet.Greenlet.get
File "src/gevent/greenlet.py", line 312, in gevent._greenlet.Greenlet._raise_exception
File "gevent/_compat.py", line 47, in reraise
File "src/gevent/greenlet.py", line 716, in gevent._greenlet.Greenlet.run
File "raiden/api/rest.py", line 437, in _run
File "gevent/baseserver.py", line 364, in serve_forever
File "src/gevent/event.py", line 240, in gevent._event.Event.wait
File "src/gevent/event.py", line 140, in gevent._event._AbstractLinkable._wait
File "src/gevent/event.py", line 117, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/event.py", line 119, in gevent._event._AbstractLinkable._wait_core
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 59, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/_greenlet_primitives.py", line 63, in gevent.__greenlet_primitives.SwitchOutGreenletWithLoop.switch
File "src/gevent/__greenlet_primitives.pxd", line 35, in gevent.__greenlet_primitives._greenlet_switch
ValueError: The null address is not allowed as a channel participant.
[17657] Failed to execute script raiden-script
|
ValueError
|
def __init__(
self,
stats: Stats,
traj_logger: TrajLogger,
rng: np.random.RandomState,
instances: typing.List[str],
instance_specifics: typing.Mapping[str, np.ndarray] = None,
cutoff: typing.Optional[float] = None,
deterministic: bool = False,
initial_budget: typing.Optional[float] = None,
max_budget: typing.Optional[float] = None,
eta: float = 3,
num_initial_challengers: typing.Optional[int] = None,
run_obj_time: bool = True,
n_seeds: typing.Optional[int] = None,
instance_order: typing.Optional[str] = "shuffle_once",
adaptive_capping_slackfactor: float = 1.2,
inst_seed_pairs: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
min_chall: int = 1,
incumbent_selection: str = "highest_executed_budget",
identifier: int = 0,
) -> None:
super().__init__(
stats=stats,
traj_logger=traj_logger,
rng=rng,
instances=instances,
instance_specifics=instance_specifics,
cutoff=cutoff,
deterministic=deterministic,
run_obj_time=run_obj_time,
adaptive_capping_slackfactor=adaptive_capping_slackfactor,
min_chall=min_chall,
)
self.identifier = identifier
self.logger = logging.getLogger(
self.__module__ + "." + str(self.identifier) + "." + self.__class__.__name__
)
if self.min_chall > 1:
raise ValueError("Successive Halving cannot handle argument `min_chall` > 1.")
self.first_run = True
# INSTANCES
self.n_seeds = n_seeds if n_seeds else 1
self.instance_order = instance_order
# NOTE Remove after solving how to handle multiple seeds and 1 instance
if len(self.instances) == 1 and self.n_seeds > 1:
raise NotImplementedError(
"This case (multiple seeds and 1 instance) cannot be handled yet!"
)
# if instances are coming from Hyperband, skip the instance preprocessing section
# it is already taken care by Hyperband
if not inst_seed_pairs:
# set seed(s) for all SH runs
# - currently user gives the number of seeds to consider
if self.deterministic:
seeds = [0]
else:
seeds = self.rs.randint(low=0, high=MAXINT, size=self.n_seeds)
if self.n_seeds == 1:
self.logger.warning(
"The target algorithm is specified to be non deterministic, "
"but number of seeds to evaluate are set to 1. "
"Consider setting `n_seeds` > 1."
)
# storing instances & seeds as tuples
self.inst_seed_pairs = [(i, s) for s in seeds for i in self.instances]
# determine instance-seed pair order
if self.instance_order == "shuffle_once":
# randomize once
self.rs.shuffle(self.inst_seed_pairs)
else:
self.inst_seed_pairs = inst_seed_pairs
# successive halving parameters
self._init_sh_params(initial_budget, max_budget, eta, num_initial_challengers)
# adaptive capping
if (
self.instance_as_budget
and self.instance_order != "shuffle"
and self.run_obj_time
):
self.adaptive_capping = True
else:
self.adaptive_capping = False
# challengers can be repeated only if optimizing across multiple seeds or changing instance orders every run
# (this does not include having multiple instances)
if self.n_seeds > 1 or self.instance_order == "shuffle":
self.repeat_configs = True
else:
self.repeat_configs = False
# incumbent selection design
assert incumbent_selection in [
"highest_executed_budget",
"highest_budget",
"any_budget",
]
self.incumbent_selection = incumbent_selection
# Define state variables to please mypy
self.curr_inst_idx = 0
self.running_challenger = None
self.success_challengers = set() # type: typing.Set[Configuration]
self.do_not_advance_challengers = set() # type: typing.Set[Configuration]
self.fail_challengers = set() # type: typing.Set[Configuration]
self.fail_chal_offset = 0
# Track which configs were launched. This will allow to have an extra check to make sure
# that a successive halver deals only with the configs it launched,
# but also allows querying the status of the configs via the run history.
# In other works, the run history is agnostic of the origin of the configurations,
# that is, which successive halving instance created it. The RunInfo object
# is aware of this information, and for parallel execution, the routing of
# finish results is expected to use this information.
# Nevertheless, the common object among SMBO/intensifier, which is the
# run history, does not have this information and so we track locally. That way,
# when we access the complete list of configs from the run history, we filter
# the ones launched by the current succesive halver using self.run_tracker
self.run_tracker = {} # type: typing.Dict[typing.Tuple[Configuration, str, int, float], bool]
|
def __init__(
self,
stats: Stats,
traj_logger: TrajLogger,
rng: np.random.RandomState,
instances: typing.List[str],
instance_specifics: typing.Mapping[str, np.ndarray] = None,
cutoff: typing.Optional[float] = None,
deterministic: bool = False,
initial_budget: typing.Optional[float] = None,
max_budget: typing.Optional[float] = None,
eta: float = 3,
num_initial_challengers: typing.Optional[int] = None,
run_obj_time: bool = True,
n_seeds: typing.Optional[int] = None,
instance_order: typing.Optional[str] = "shuffle_once",
adaptive_capping_slackfactor: float = 1.2,
inst_seed_pairs: typing.Optional[typing.List[typing.Tuple[str, int]]] = None,
min_chall: int = 1,
incumbent_selection: str = "highest_executed_budget",
identifier: int = 0,
) -> None:
super().__init__(
stats=stats,
traj_logger=traj_logger,
rng=rng,
instances=instances,
instance_specifics=instance_specifics,
cutoff=cutoff,
deterministic=deterministic,
run_obj_time=run_obj_time,
adaptive_capping_slackfactor=adaptive_capping_slackfactor,
min_chall=min_chall,
)
self.identifier = identifier
self.logger = logging.getLogger(
self.__module__ + "." + str(self.identifier) + "." + self.__class__.__name__
)
if self.min_chall > 1:
raise ValueError("Successive Halving cannot handle argument `min_chall` > 1.")
self.first_run = True
# INSTANCES
self.n_seeds = n_seeds if n_seeds else 1
self.instance_order = instance_order
# NOTE Remove after solving how to handle multiple seeds and 1 instance
if len(self.instances) == 1 and self.n_seeds > 1:
raise NotImplementedError(
"This case (multiple seeds and 1 instance) cannot be handled yet!"
)
# if instances are coming from Hyperband, skip the instance preprocessing section
# it is already taken care by Hyperband
if not inst_seed_pairs:
# set seed(s) for all SH runs
# - currently user gives the number of seeds to consider
if self.deterministic:
seeds = [0]
else:
seeds = self.rs.randint(low=0, high=MAXINT, size=self.n_seeds)
if self.n_seeds == 1:
self.logger.warning(
"The target algorithm is specified to be non deterministic, "
"but number of seeds to evaluate are set to 1. "
"Consider setting `n_seeds` > 1."
)
# storing instances & seeds as tuples
self.inst_seed_pairs = [(i, s) for s in seeds for i in self.instances]
# determine instance-seed pair order
if self.instance_order == "shuffle_once":
# randomize once
self.rs.shuffle(self.inst_seed_pairs)
else:
self.inst_seed_pairs = inst_seed_pairs
# successive halving parameters
self._init_sh_params(initial_budget, max_budget, eta, num_initial_challengers)
# adaptive capping
if (
self.instance_as_budget
and self.instance_order != "shuffle"
and self.run_obj_time
):
self.adaptive_capping = True
else:
self.adaptive_capping = False
# challengers can be repeated only if optimizing across multiple seeds or changing instance orders every run
# (this does not include having multiple instances)
if self.n_seeds > 1 or self.instance_order == "shuffle":
self.repeat_configs = True
else:
self.repeat_configs = False
# incumbent selection design
assert incumbent_selection in [
"highest_executed_budget",
"highest_budget",
"any_budget",
]
self.incumbent_selection = incumbent_selection
# Define state variables to please mypy
self.curr_inst_idx = 0
self.running_challenger = None
self.success_challengers = set() # type: typing.Set[Configuration]
self.do_not_advance_challengers = set() # type: typing.Set[Configuration]
self.fail_challengers = set() # type: typing.Set[Configuration]
self.fail_chal_offset = 0
# Track which configs were launched. This will allow to have an extra check to make sure
# that a successive halver deals only with the configs it launched,
# but also allows querying the status of the configs via the run history.
# In other works, the run history is agnostic of the origin of the configurations,
# that is, which successive halving instance created it. The RunInfo object
# is aware of this information, and for parallel execution, the routing of
# finish results is expected to use this information.
# Nevertheless, the common object among SMBO/intensifier, which is the
# run history, does not have this information and so we track locally. That way,
# when we access the complete list of configs from the run history, we filter
# the ones launched by the current succesive halver using self.run_tracker
self.run_tracker = [] # type: typing.List[typing.Tuple[Configuration, str, int]]
|
https://github.com/automl/SMAC3/issues/695
|
INFO:smac.facade.smac_bohb_facade.BOHB4HPO:Estimated cost of incumbent: 2147483647.000000
Traceback (most recent call last):
File "BOHB4HPO_sgd_instances.py", line 79, in <module>
main()
File "BOHB4HPO_sgd_instances.py", line 75, in main
incumbent = smac.optimize()
File "/home/eggenspk/Work/git/SMAC3/smac/facade/smac_ac_facade.py", line 597, in optimize
incumbent = self.solver.run()
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 278, in run
self._incorporate_run_results(run_info, result, time_left)
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 481, in _incorporate_run_results
result=result,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/parallel_scheduling.py", line 256, in process_results
log_traj=log_traj,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/hyperband.py", line 169, in process_results
log_traj=log_traj)
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/successive_halving.py", line 340, in process_results
curr_budget = self.all_budgets[self.stage]
AttributeError: '_SuccessiveHalving' object has no attribute 'stage'
|
AttributeError
|
def process_results(
self,
run_info: RunInfo,
incumbent: typing.Optional[Configuration],
run_history: RunHistory,
time_bound: float,
result: RunValue,
log_traj: bool = True,
) -> typing.Tuple[Configuration, float]:
"""
The intensifier stage will be updated based on the results/status
of a configuration execution.
Also, a incumbent will be determined.
Parameters
----------
run_info : RunInfo
A RunInfo containing the configuration that was evaluated
incumbent : typing.Optional[Configuration]
Best configuration seen so far
run_history : RunHistory
stores all runs we ran so far
if False, an evaluated configuration will not be generated again
time_bound : float
time in [sec] available to perform intensify
result: RunValue
Contain the result (status and other methadata) of exercising
a challenger/incumbent.
log_traj: bool
Whether to log changes of incumbents in trajectory
Returns
-------
incumbent: Configuration
current (maybe new) incumbent configuration
inc_perf: float
empirical performance of incumbent configuration
"""
# Mark the fact that we processed this configuration
self.run_tracker[
(run_info.config, run_info.instance, run_info.seed, run_info.budget)
] = True
# If The incumbent is None and it is the first run, we use the challenger
if not incumbent and self.first_run:
self.logger.info(
"First run, no incumbent provided; challenger is assumed to be the incumbent"
)
incumbent = run_info.config
self.first_run = False
# Account for running instances across configurations, not only on the
# running configuration
n_insts_remaining = self._get_pending_instances_for_stage(run_history)
# Make sure that there is no Budget exhausted
if result.status == StatusType.CAPPED:
self.curr_inst_idx = np.inf
n_insts_remaining = 0
else:
self._ta_time += result.time
self.num_run += 1
self.curr_inst_idx += 1
# adding challengers to the list of evaluated challengers
# - Stop: CAPPED/CRASHED/TIMEOUT/MEMOUT/DONOTADVANCE (!= SUCCESS)
# - Advance to next stage: SUCCESS
# curr_challengers is a set, so "at least 1" success can be counted by set addition (no duplicates)
# If a configuration is successful, it is added to curr_challengers.
# if it fails it is added to fail_challengers.
if np.isfinite(self.curr_inst_idx) and result.status == StatusType.SUCCESS:
self.success_challengers.add(run_info.config) # successful configs
elif np.isfinite(self.curr_inst_idx) and result.status == StatusType.DONOTADVANCE:
self.do_not_advance_challengers.add(run_info.config)
else:
self.fail_challengers.add(
run_info.config
) # capped/crashed/do not advance configs
# We need to update the incumbent if this config we are processing
# completes all scheduled instance-seed pairs.
# Here, a config/seed/instance is going to be processed for the first time
# (it has been previously scheduled by get_next_run and marked False, indicating
# that it has not been processed yet. Entering process_results() this config/seed/instance
# is marked as TRUE as an indication that it has finished and should be processed)
# so if all configurations runs are marked as TRUE it means that this new config
# was the missing piece to have everything needed to compare against the incumbent
update_incumbent = all(
[v for k, v in self.run_tracker.items() if k[0] == run_info.config]
)
# get incumbent if all instances have been evaluated
if n_insts_remaining <= 0 or update_incumbent:
incumbent = self._compare_configs(
challenger=run_info.config,
incumbent=incumbent,
run_history=run_history,
log_traj=log_traj,
)
# if all configurations for the current stage have been evaluated, reset stage
num_chal_evaluated = (
len(
self.success_challengers
| self.fail_challengers
| self.do_not_advance_challengers
)
+ self.fail_chal_offset
)
if (
num_chal_evaluated == self.n_configs_in_stage[self.stage]
and n_insts_remaining <= 0
):
self.logger.info(
"Successive Halving iteration-step: %d-%d with "
"budget [%.2f / %d] - evaluated %d challenger(s)"
% (
self.sh_iters + 1,
self.stage + 1,
self.all_budgets[self.stage],
self.max_budget,
self.n_configs_in_stage[self.stage],
)
)
self._update_stage(run_history=run_history)
# get incumbent cost
inc_perf = run_history.get_cost(incumbent)
return incumbent, inc_perf
|
def process_results(
self,
run_info: RunInfo,
incumbent: typing.Optional[Configuration],
run_history: RunHistory,
time_bound: float,
result: RunValue,
log_traj: bool = True,
) -> typing.Tuple[Configuration, float]:
"""
The intensifier stage will be updated based on the results/status
of a configuration execution.
Also, a incumbent will be determined.
Parameters
----------
run_info : RunInfo
A RunInfo containing the configuration that was evaluated
incumbent : typing.Optional[Configuration]
Best configuration seen so far
run_history : RunHistory
stores all runs we ran so far
if False, an evaluated configuration will not be generated again
time_bound : float
time in [sec] available to perform intensify
result: RunValue
Contain the result (status and other methadata) of exercising
a challenger/incumbent.
log_traj: bool
Whether to log changes of incumbents in trajectory
Returns
-------
incumbent: Configuration
current (maybe new) incumbent configuration
inc_perf: float
empirical performance of incumbent configuration
"""
# If The incumbent is None and it is the first run, we use the challenger
if not incumbent and self.first_run:
self.logger.info(
"First run, no incumbent provided; challenger is assumed to be the incumbent"
)
incumbent = run_info.config
self.first_run = False
# selecting instance-seed subset for this budget, depending on the kind of budget
curr_budget = self.all_budgets[self.stage]
if self.instance_as_budget:
prev_budget = int(self.all_budgets[self.stage - 1]) if self.stage > 0 else 0
curr_insts = self.inst_seed_pairs[int(prev_budget) : int(curr_budget)]
else:
curr_insts = self.inst_seed_pairs
n_insts_remaining = len(curr_insts) - self.curr_inst_idx - 1
# Make sure that there is no Budget exhausted
if result.status == StatusType.CAPPED:
self.curr_inst_idx = np.inf
n_insts_remaining = 0
else:
self._ta_time += result.time
self.num_run += 1
self.curr_inst_idx += 1
# adding challengers to the list of evaluated challengers
# - Stop: CAPPED/CRASHED/TIMEOUT/MEMOUT/DONOTADVANCE (!= SUCCESS)
# - Advance to next stage: SUCCESS
# curr_challengers is a set, so "at least 1" success can be counted by set addition (no duplicates)
# If a configuration is successful, it is added to curr_challengers.
# if it fails it is added to fail_challengers.
if np.isfinite(self.curr_inst_idx) and result.status == StatusType.SUCCESS:
self.success_challengers.add(run_info.config) # successful configs
elif np.isfinite(self.curr_inst_idx) and result.status == StatusType.DONOTADVANCE:
self.do_not_advance_challengers.add(run_info.config)
else:
self.fail_challengers.add(
run_info.config
) # capped/crashed/do not advance configs
# get incumbent if all instances have been evaluated
if n_insts_remaining <= 0:
incumbent = self._compare_configs(
challenger=run_info.config,
incumbent=incumbent,
run_history=run_history,
log_traj=log_traj,
)
# if all configurations for the current stage have been evaluated, reset stage
num_chal_evaluated = (
len(
self.success_challengers
| self.fail_challengers
| self.do_not_advance_challengers
)
+ self.fail_chal_offset
)
if (
num_chal_evaluated == self.n_configs_in_stage[self.stage]
and n_insts_remaining <= 0
):
self.logger.info(
"Successive Halving iteration-step: %d-%d with "
"budget [%.2f / %d] - evaluated %d challenger(s)"
% (
self.sh_iters + 1,
self.stage + 1,
self.all_budgets[self.stage],
self.max_budget,
self.n_configs_in_stage[self.stage],
)
)
self._update_stage(run_history=run_history)
# get incumbent cost
inc_perf = run_history.get_cost(incumbent)
return incumbent, inc_perf
|
https://github.com/automl/SMAC3/issues/695
|
INFO:smac.facade.smac_bohb_facade.BOHB4HPO:Estimated cost of incumbent: 2147483647.000000
Traceback (most recent call last):
File "BOHB4HPO_sgd_instances.py", line 79, in <module>
main()
File "BOHB4HPO_sgd_instances.py", line 75, in main
incumbent = smac.optimize()
File "/home/eggenspk/Work/git/SMAC3/smac/facade/smac_ac_facade.py", line 597, in optimize
incumbent = self.solver.run()
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 278, in run
self._incorporate_run_results(run_info, result, time_left)
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 481, in _incorporate_run_results
result=result,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/parallel_scheduling.py", line 256, in process_results
log_traj=log_traj,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/hyperband.py", line 169, in process_results
log_traj=log_traj)
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/successive_halving.py", line 340, in process_results
curr_budget = self.all_budgets[self.stage]
AttributeError: '_SuccessiveHalving' object has no attribute 'stage'
|
AttributeError
|
def get_next_run(
self,
challengers: typing.Optional[typing.List[Configuration]],
incumbent: Configuration,
chooser: typing.Optional[EPMChooser],
run_history: RunHistory,
repeat_configs: bool = True,
num_workers: int = 1,
) -> typing.Tuple[RunInfoIntent, RunInfo]:
"""
Selects which challenger to use based on the iteration stage and set the iteration parameters.
First iteration will choose configurations from the ``chooser`` or input challengers,
while the later iterations pick top configurations from the previously selected challengers in that iteration
Parameters
----------
challengers : typing.List[Configuration]
promising configurations
incumbent: Configuration
incumbent configuration
chooser : smac.optimizer.epm_configuration_chooser.EPMChooser
optimizer that generates next configurations to use for racing
run_history : smac.runhistory.runhistory.RunHistory
stores all runs we ran so far
repeat_configs : bool
if False, an evaluated configuration will not be generated again
num_workers: int
the maximum number of workers available
at a given time.
Returns
-------
intent: RunInfoIntent
Indicator of how to consume the RunInfo object
run_info: RunInfo
An object that encapsulates the minimum information to
evaluate a configuration
"""
if num_workers > 1:
warnings.warn(
"Consider using ParallelSuccesiveHalving instead of "
"SuccesiveHalving. The later will halt on each stage "
"transition until all configs for the current stage are completed."
)
# if this is the first run, then initialize tracking variables
if not hasattr(self, "stage"):
self._update_stage(run_history=run_history)
# In the case of multiprocessing, we have runs in Running stage, which have not
# been processed via process_results(). get_next_run() is called agnostically by
# smbo. To prevent launching more configs, than the ones needed, we query if
# there is room for more configurations, else we wait for process_results()
# to trigger a new stage
if self._launched_all_configs_for_current_stage(run_history):
return RunInfoIntent.WAIT, RunInfo(
config=None,
instance=None,
instance_specific="0",
seed=0,
cutoff=self.cutoff,
capped=False,
budget=0.0,
source_id=self.identifier,
)
# sampling from next challenger marks the beginning of a new iteration
self.iteration_done = False
curr_budget = self.all_budgets[self.stage]
# if all instances have been executed, then reset and move on to next config
if self.instance_as_budget:
prev_budget = int(self.all_budgets[self.stage - 1]) if self.stage > 0 else 0
n_insts = int(curr_budget) - prev_budget
else:
n_insts = len(self.inst_seed_pairs)
# In the case of multiprocessing, we will have launched instance/seeds
# which are not completed, yet running. To proactively move to a new challenger,
# we account for them in the n_insts_remaining calculation
running_instances = self._count_running_instances_for_challenger(run_history)
n_insts_remaining = n_insts - (self.curr_inst_idx + running_instances)
# if there are instances pending, finish running configuration
if self.running_challenger and n_insts_remaining > 0:
challenger = self.running_challenger
new_challenger = False
else:
# select next configuration
if self.stage == 0:
# first stage, so sample from configurations/chooser provided
challenger = self._next_challenger(
challengers=challengers,
chooser=chooser,
run_history=run_history,
repeat_configs=repeat_configs,
)
if challenger is None:
# If no challenger was sampled from the EPM or
# initial challengers, it might mean that the EPM
# is proposing a configuration that is currently running.
# There is a filtering on the above _next_challenger to return
# None if the proposed config us already in the run history
# To get a new config, we wait for more data
return RunInfoIntent.WAIT, RunInfo(
config=None,
instance=None,
instance_specific="0",
seed=0,
cutoff=self.cutoff,
capped=False,
budget=0.0,
source_id=self.identifier,
)
new_challenger = True
else:
# sample top configs from previously sampled configurations
try:
challenger = self.configs_to_run.pop(0)
new_challenger = False
except IndexError:
# self.configs_to_run is populated via update_stage,
# which is triggered after the completion of a run
# If by there are no more configs to run (which is the case
# if we run into a IndexError),
return RunInfoIntent.SKIP, RunInfo(
config=None,
instance=None,
instance_specific="0",
seed=0,
cutoff=self.cutoff,
capped=False,
budget=0.0,
source_id=self.identifier,
)
if challenger:
# reset instance index for the new challenger
self.curr_inst_idx = 0
self._chall_indx += 1
self.running_challenger = challenger
# If there is a brand new challenger, there will be no
# running instances
running_instances = 0
# calculating the incumbent's performance for adaptive capping
# this check is required because:
# - there is no incumbent performance for the first ever 'intensify' run (from initial design)
# - during the 1st intensify run, the incumbent shouldn't be capped after being compared against itself
if incumbent and incumbent != challenger:
inc_runs = run_history.get_runs_for_config(
incumbent, only_max_observed_budget=True
)
inc_sum_cost = run_history.sum_cost(
config=incumbent, instance_seed_budget_keys=inc_runs
)
else:
inc_sum_cost = np.inf
if self.first_run:
self.logger.info(
"First run, no incumbent provided; challenger is assumed to be the incumbent"
)
incumbent = challenger
# selecting instance-seed subset for this budget, depending on the kind of budget
if self.instance_as_budget:
prev_budget = int(self.all_budgets[self.stage - 1]) if self.stage > 0 else 0
curr_insts = self.inst_seed_pairs[int(prev_budget) : int(curr_budget)]
else:
curr_insts = self.inst_seed_pairs
self.logger.debug(" Running challenger - %s" % str(challenger))
# run the next instance-seed pair for the given configuration
instance, seed = curr_insts[self.curr_inst_idx + running_instances]
# selecting cutoff if running adaptive capping
cutoff = self.cutoff
if self.run_obj_time:
cutoff = self._adapt_cutoff(
challenger=challenger, run_history=run_history, inc_sum_cost=inc_sum_cost
)
if cutoff is not None and cutoff <= 0:
# ran out of time to validate challenger
self.logger.debug(
"Stop challenger intensification due to adaptive capping."
)
self.curr_inst_idx = np.inf
self.logger.debug("Cutoff for challenger: %s" % str(cutoff))
# For testing purposes, this attribute highlights whether a
# new challenger is proposed or not. Not required from a functional
# perspective
self.new_challenger = new_challenger
capped = False
if (self.cutoff is not None) and (cutoff < self.cutoff): # type: ignore[operator] # noqa F821
capped = True
budget = 0.0 if self.instance_as_budget else curr_budget
self.run_tracker[(challenger, instance, seed, budget)] = False
return RunInfoIntent.RUN, RunInfo(
config=challenger,
instance=instance,
instance_specific=self.instance_specifics.get(instance, "0"),
seed=seed,
cutoff=cutoff,
capped=capped,
budget=budget,
source_id=self.identifier,
)
|
def get_next_run(
self,
challengers: typing.Optional[typing.List[Configuration]],
incumbent: Configuration,
chooser: typing.Optional[EPMChooser],
run_history: RunHistory,
repeat_configs: bool = True,
num_workers: int = 1,
) -> typing.Tuple[RunInfoIntent, RunInfo]:
"""
Selects which challenger to use based on the iteration stage and set the iteration parameters.
First iteration will choose configurations from the ``chooser`` or input challengers,
while the later iterations pick top configurations from the previously selected challengers in that iteration
Parameters
----------
challengers : typing.List[Configuration]
promising configurations
incumbent: Configuration
incumbent configuration
chooser : smac.optimizer.epm_configuration_chooser.EPMChooser
optimizer that generates next configurations to use for racing
run_history : smac.runhistory.runhistory.RunHistory
stores all runs we ran so far
repeat_configs : bool
if False, an evaluated configuration will not be generated again
num_workers: int
the maximum number of workers available
at a given time.
Returns
-------
intent: RunInfoIntent
Indicator of how to consume the RunInfo object
run_info: RunInfo
An object that encapsulates the minimum information to
evaluate a configuration
"""
if num_workers > 1:
warnings.warn(
"Consider using ParallelSuccesiveHalving instead of "
"SuccesiveHalving. The later will halt on each stage "
"transition until all configs for the current stage are completed."
)
# if this is the first run, then initialize tracking variables
if not hasattr(self, "stage"):
self._update_stage(run_history=run_history)
# In the case of multiprocessing, we have runs in Running stage, which have not
# been processed via process_results(). get_next_run() is called agnostically by
# smbo. To prevent launching more configs, than the ones needed, we query if
# there is room for more configurations, else we wait for process_results()
# to trigger a new stage
if self._launched_all_configs_for_current_stage(run_history):
return RunInfoIntent.WAIT, RunInfo(
config=None,
instance=None,
instance_specific="0",
seed=0,
cutoff=self.cutoff,
capped=False,
budget=0.0,
source_id=self.identifier,
)
# sampling from next challenger marks the beginning of a new iteration
self.iteration_done = False
curr_budget = self.all_budgets[self.stage]
# if all instances have been executed, then reset and move on to next config
if self.instance_as_budget:
prev_budget = int(self.all_budgets[self.stage - 1]) if self.stage > 0 else 0
n_insts = int(curr_budget) - prev_budget
else:
n_insts = len(self.inst_seed_pairs)
# In the case of multiprocessing, we will have launched instance/seeds
# which are not completed, yet running. To proactively move to a new challenger,
# we account for them in the n_insts_remaining calculation
running_instances = self._count_running_instances_for_challenger(run_history)
n_insts_remaining = n_insts - (self.curr_inst_idx + running_instances)
# if there are instances pending, finish running configuration
if self.running_challenger and n_insts_remaining > 0:
challenger = self.running_challenger
new_challenger = False
else:
# select next configuration
if self.stage == 0:
# first stage, so sample from configurations/chooser provided
challenger = self._next_challenger(
challengers=challengers,
chooser=chooser,
run_history=run_history,
repeat_configs=repeat_configs,
)
if challenger is None:
# If no challenger was sampled from the EPM or
# initial challengers, it might mean that the EPM
# is proposing a configuration that is currently running.
# There is a filtering on the above _next_challenger to return
# None if the proposed config us already in the run history
# To get a new config, we wait for more data
return RunInfoIntent.WAIT, RunInfo(
config=None,
instance=None,
instance_specific="0",
seed=0,
cutoff=self.cutoff,
capped=False,
budget=0.0,
source_id=self.identifier,
)
new_challenger = True
else:
# sample top configs from previously sampled configurations
try:
challenger = self.configs_to_run.pop(0)
new_challenger = False
except IndexError:
# self.configs_to_run is populated via update_stage,
# which is triggered after the completion of a run
# If by there are no more configs to run (which is the case
# if we run into a IndexError),
return RunInfoIntent.SKIP, RunInfo(
config=None,
instance=None,
instance_specific="0",
seed=0,
cutoff=self.cutoff,
capped=False,
budget=0.0,
source_id=self.identifier,
)
if challenger:
# reset instance index for the new challenger
self.curr_inst_idx = 0
self._chall_indx += 1
self.running_challenger = challenger
# If there is a brand new challenger, there will be no
# running instances
running_instances = 0
# calculating the incumbent's performance for adaptive capping
# this check is required because:
# - there is no incumbent performance for the first ever 'intensify' run (from initial design)
# - during the 1st intensify run, the incumbent shouldn't be capped after being compared against itself
if incumbent and incumbent != challenger:
inc_runs = run_history.get_runs_for_config(
incumbent, only_max_observed_budget=True
)
inc_sum_cost = run_history.sum_cost(
config=incumbent, instance_seed_budget_keys=inc_runs
)
else:
inc_sum_cost = np.inf
if self.first_run:
self.logger.info(
"First run, no incumbent provided; challenger is assumed to be the incumbent"
)
incumbent = challenger
# selecting instance-seed subset for this budget, depending on the kind of budget
if self.instance_as_budget:
prev_budget = int(self.all_budgets[self.stage - 1]) if self.stage > 0 else 0
curr_insts = self.inst_seed_pairs[int(prev_budget) : int(curr_budget)]
else:
curr_insts = self.inst_seed_pairs
self.logger.debug(" Running challenger - %s" % str(challenger))
# run the next instance-seed pair for the given configuration
instance, seed = curr_insts[self.curr_inst_idx + running_instances]
# selecting cutoff if running adaptive capping
cutoff = self.cutoff
if self.run_obj_time:
cutoff = self._adapt_cutoff(
challenger=challenger, run_history=run_history, inc_sum_cost=inc_sum_cost
)
if cutoff is not None and cutoff <= 0:
# ran out of time to validate challenger
self.logger.debug(
"Stop challenger intensification due to adaptive capping."
)
self.curr_inst_idx = np.inf
self.logger.debug("Cutoff for challenger: %s" % str(cutoff))
# For testing purposes, this attribute highlights whether a
# new challenger is proposed or not. Not required from a functional
# perspective
self.new_challenger = new_challenger
capped = False
if (self.cutoff is not None) and (cutoff < self.cutoff): # type: ignore[operator] # noqa F821
capped = True
self.run_tracker.append((challenger, instance, seed))
return RunInfoIntent.RUN, RunInfo(
config=challenger,
instance=instance,
instance_specific=self.instance_specifics.get(instance, "0"),
seed=seed,
cutoff=cutoff,
capped=capped,
budget=0.0 if self.instance_as_budget else curr_budget,
source_id=self.identifier,
)
|
https://github.com/automl/SMAC3/issues/695
|
INFO:smac.facade.smac_bohb_facade.BOHB4HPO:Estimated cost of incumbent: 2147483647.000000
Traceback (most recent call last):
File "BOHB4HPO_sgd_instances.py", line 79, in <module>
main()
File "BOHB4HPO_sgd_instances.py", line 75, in main
incumbent = smac.optimize()
File "/home/eggenspk/Work/git/SMAC3/smac/facade/smac_ac_facade.py", line 597, in optimize
incumbent = self.solver.run()
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 278, in run
self._incorporate_run_results(run_info, result, time_left)
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 481, in _incorporate_run_results
result=result,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/parallel_scheduling.py", line 256, in process_results
log_traj=log_traj,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/hyperband.py", line 169, in process_results
log_traj=log_traj)
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/successive_halving.py", line 340, in process_results
curr_budget = self.all_budgets[self.stage]
AttributeError: '_SuccessiveHalving' object has no attribute 'stage'
|
AttributeError
|
def _update_stage(self, run_history: RunHistory) -> None:
"""
Update tracking information for a new stage/iteration and update statistics.
This method is called to initialize stage variables and after all configurations
of a successive halving stage are completed.
Parameters
----------
run_history : smac.runhistory.runhistory.RunHistory
stores all runs we ran so far
"""
if not hasattr(self, "stage"):
# initialize all relevant variables for first run
# (this initialization is not a part of init because hyperband uses the same init method and has a )
# to track iteration and stage
self.sh_iters = 0
self.stage = 0
# to track challengers across stages
self.configs_to_run = [] # type: typing.List[Configuration]
self.curr_inst_idx = 0
self.running_challenger = None
self.success_challengers = set() # successful configs
self.do_not_advance_challengers = (
set()
) # configs which are successful, but should not be advanced
self.fail_challengers = set() # capped configs and other failures
self.fail_chal_offset = 0
else:
self.stage += 1
# only uncapped challengers are considered valid for the next iteration
valid_challengers = list(
(self.success_challengers | self.do_not_advance_challengers)
- self.fail_challengers
)
if self.stage < len(self.all_budgets) and len(valid_challengers) > 0:
# if this is the next stage in same iteration,
# use top 'k' from the evaluated configurations for next iteration
# determine 'k' for the next iteration - at least 1
next_n_chal = int(max(1, self.n_configs_in_stage[self.stage]))
# selecting the top 'k' challengers for the next iteration
configs_to_run = self._top_k(
configs=valid_challengers, run_history=run_history, k=next_n_chal
)
self.configs_to_run = [
config
for config in configs_to_run
if config not in self.do_not_advance_challengers
]
# if some runs were capped, top_k returns less than the required configurations
# to handle that, we keep track of how many configurations are missing
# (since they are technically failed here too)
missing_challengers = int(self.n_configs_in_stage[self.stage]) - len(
self.configs_to_run
)
if missing_challengers > 0:
self.fail_chal_offset = missing_challengers
else:
self.fail_chal_offset = 0
if next_n_chal == missing_challengers:
next_stage = True
self.logger.info(
"Successive Halving iteration-step: %d-%d with "
"budget [%.2f / %d] - expected %d new challenger(s), but "
"no configurations propagated to the next budget.",
self.sh_iters + 1,
self.stage + 1,
self.all_budgets[self.stage],
self.max_budget,
self.n_configs_in_stage[self.stage],
)
else:
next_stage = False
else:
next_stage = True
if next_stage:
# update stats for the prev iteration
self.stats.update_average_configs_per_intensify(n_configs=self._chall_indx)
# reset stats for the new iteration
self._ta_time = 0
self._chall_indx = 0
self.num_run = 0
self.iteration_done = True
self.sh_iters += 1
self.stage = 0
self.run_tracker = {}
self.configs_to_run = []
self.fail_chal_offset = 0
# randomize instance-seed pairs per successive halving run, if user specifies
if self.instance_order == "shuffle":
self.rs.shuffle(self.inst_seed_pairs)
# to track configurations for the next stage
self.success_challengers = set() # successful configs
self.do_not_advance_challengers = (
set()
) # successful, but should not be advanced to the next budget/stage
self.fail_challengers = set() # capped/failed configs
self.curr_inst_idx = 0
self.running_challenger = None
|
def _update_stage(self, run_history: RunHistory) -> None:
"""
Update tracking information for a new stage/iteration and update statistics.
This method is called to initialize stage variables and after all configurations
of a successive halving stage are completed.
Parameters
----------
run_history : smac.runhistory.runhistory.RunHistory
stores all runs we ran so far
"""
if not hasattr(self, "stage"):
# initialize all relevant variables for first run
# (this initialization is not a part of init because hyperband uses the same init method and has a )
# to track iteration and stage
self.sh_iters = 0
self.stage = 0
# to track challengers across stages
self.configs_to_run = [] # type: typing.List[Configuration]
self.curr_inst_idx = 0
self.running_challenger = None
self.success_challengers = set() # successful configs
self.do_not_advance_challengers = (
set()
) # configs which are successful, but should not be advanced
self.fail_challengers = set() # capped configs and other failures
self.fail_chal_offset = 0
else:
self.stage += 1
# only uncapped challengers are considered valid for the next iteration
valid_challengers = list(
(self.success_challengers | self.do_not_advance_challengers)
- self.fail_challengers
)
if self.stage < len(self.all_budgets) and len(valid_challengers) > 0:
# if this is the next stage in same iteration,
# use top 'k' from the evaluated configurations for next iteration
# determine 'k' for the next iteration - at least 1
next_n_chal = int(max(1, self.n_configs_in_stage[self.stage]))
# selecting the top 'k' challengers for the next iteration
configs_to_run = self._top_k(
configs=valid_challengers, run_history=run_history, k=next_n_chal
)
self.configs_to_run = [
config
for config in configs_to_run
if config not in self.do_not_advance_challengers
]
# if some runs were capped, top_k returns less than the required configurations
# to handle that, we keep track of how many configurations are missing
# (since they are technically failed here too)
missing_challengers = int(self.n_configs_in_stage[self.stage]) - len(
self.configs_to_run
)
if missing_challengers > 0:
self.fail_chal_offset = missing_challengers
else:
self.fail_chal_offset = 0
if next_n_chal == missing_challengers:
next_stage = True
self.logger.info(
"Successive Halving iteration-step: %d-%d with "
"budget [%.2f / %d] - expected %d new challenger(s), but "
"no configurations propagated to the next budget.",
self.sh_iters + 1,
self.stage + 1,
self.all_budgets[self.stage],
self.max_budget,
self.n_configs_in_stage[self.stage],
)
else:
next_stage = False
else:
next_stage = True
if next_stage:
# update stats for the prev iteration
self.stats.update_average_configs_per_intensify(n_configs=self._chall_indx)
# reset stats for the new iteration
self._ta_time = 0
self._chall_indx = 0
self.num_run = 0
self.iteration_done = True
self.sh_iters += 1
self.stage = 0
self.run_tracker = []
self.configs_to_run = []
self.fail_chal_offset = 0
# randomize instance-seed pairs per successive halving run, if user specifies
if self.instance_order == "shuffle":
self.rs.shuffle(self.inst_seed_pairs)
# to track configurations for the next stage
self.success_challengers = set() # successful configs
self.do_not_advance_challengers = (
set()
) # successful, but should not be advanced to the next budget/stage
self.fail_challengers = set() # capped/failed configs
self.curr_inst_idx = 0
self.running_challenger = None
|
https://github.com/automl/SMAC3/issues/695
|
INFO:smac.facade.smac_bohb_facade.BOHB4HPO:Estimated cost of incumbent: 2147483647.000000
Traceback (most recent call last):
File "BOHB4HPO_sgd_instances.py", line 79, in <module>
main()
File "BOHB4HPO_sgd_instances.py", line 75, in main
incumbent = smac.optimize()
File "/home/eggenspk/Work/git/SMAC3/smac/facade/smac_ac_facade.py", line 597, in optimize
incumbent = self.solver.run()
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 278, in run
self._incorporate_run_results(run_info, result, time_left)
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 481, in _incorporate_run_results
result=result,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/parallel_scheduling.py", line 256, in process_results
log_traj=log_traj,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/hyperband.py", line 169, in process_results
log_traj=log_traj)
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/successive_halving.py", line 340, in process_results
curr_budget = self.all_budgets[self.stage]
AttributeError: '_SuccessiveHalving' object has no attribute 'stage'
|
AttributeError
|
def _top_k(
self, configs: typing.List[Configuration], run_history: RunHistory, k: int
) -> typing.List[Configuration]:
"""
Selects the top 'k' configurations from the given list based on their performance.
This retrieves the performance for each configuration from the runhistory and checks
that the highest budget they've been evaluated on is the same for each of the configurations.
Parameters
----------
configs: typing.List[Configuration]
list of configurations to filter from
run_history: smac.runhistory.runhistory.RunHistory
stores all runs we ran so far
k: int
number of configurations to select
Returns
-------
typing.List[Configuration]
top challenger configurations, sorted in increasing costs
"""
# extracting costs for each given configuration
config_costs = {}
# sample list instance-seed-budget key to act as base
run_key = run_history.get_runs_for_config(configs[0], only_max_observed_budget=True)
for c in configs:
# ensuring that all configurations being compared are run on the same set of instance, seed & budget
cur_run_key = run_history.get_runs_for_config(c, only_max_observed_budget=True)
# Move to compare set -- get_runs_for_config queries form a dictionary
# which is not an ordered structure. Some queries to that dictionary returned unordered
# list which wrongly trigger the below if
if set(cur_run_key) != set(run_key):
raise ValueError(
"Cannot compare configs that were run on different instances-seeds-budgets: %s vs %s"
% (run_key, cur_run_key)
)
config_costs[c] = run_history.get_cost(c)
configs_sorted = [
k for k, v in sorted(config_costs.items(), key=lambda item: item[1])
]
# select top configurations only
top_configs = configs_sorted[:k]
return top_configs
|
def _top_k(
self, configs: typing.List[Configuration], run_history: RunHistory, k: int
) -> typing.List[Configuration]:
"""
Selects the top 'k' configurations from the given list based on their performance.
This retrieves the performance for each configuration from the runhistory and checks
that the highest budget they've been evaluated on is the same for each of the configurations.
Parameters
----------
configs: typing.List[Configuration]
list of configurations to filter from
run_history: smac.runhistory.runhistory.RunHistory
stores all runs we ran so far
k: int
number of configurations to select
Returns
-------
typing.List[Configuration]
top challenger configurations, sorted in increasing costs
"""
# extracting costs for each given configuration
config_costs = {}
# sample list instance-seed-budget key to act as base
run_key = run_history.get_runs_for_config(configs[0], only_max_observed_budget=True)
for c in configs:
# ensuring that all configurations being compared are run on the same set of instance, seed & budget
cur_run_key = run_history.get_runs_for_config(c, only_max_observed_budget=True)
if cur_run_key != run_key:
raise ValueError(
"Cannot compare configs that were run on different instances-seeds-budgets: %s vs %s"
% (run_key, cur_run_key)
)
config_costs[c] = run_history.get_cost(c)
configs_sorted = sorted(config_costs, key=config_costs.get)
# select top configurations only
top_configs = configs_sorted[:k]
return top_configs
|
https://github.com/automl/SMAC3/issues/695
|
INFO:smac.facade.smac_bohb_facade.BOHB4HPO:Estimated cost of incumbent: 2147483647.000000
Traceback (most recent call last):
File "BOHB4HPO_sgd_instances.py", line 79, in <module>
main()
File "BOHB4HPO_sgd_instances.py", line 75, in main
incumbent = smac.optimize()
File "/home/eggenspk/Work/git/SMAC3/smac/facade/smac_ac_facade.py", line 597, in optimize
incumbent = self.solver.run()
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 278, in run
self._incorporate_run_results(run_info, result, time_left)
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 481, in _incorporate_run_results
result=result,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/parallel_scheduling.py", line 256, in process_results
log_traj=log_traj,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/hyperband.py", line 169, in process_results
log_traj=log_traj)
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/successive_halving.py", line 340, in process_results
curr_budget = self.all_budgets[self.stage]
AttributeError: '_SuccessiveHalving' object has no attribute 'stage'
|
AttributeError
|
def _launched_all_configs_for_current_stage(self, run_history: RunHistory) -> bool:
"""
This procedure queries if the addition of currently finished configs
and running configs are sufficient for the current stage.
If more configs are needed, it will return False.
Parameters
----------
run_history : RunHistory
stores all runs we ran so far
Returns
-------
bool: Whether or not to launch more configurations/instances/seed pairs
"""
# selecting instance-seed subset for this budget, depending on the kind of budget
curr_budget = self.all_budgets[self.stage]
if self.instance_as_budget:
prev_budget = int(self.all_budgets[self.stage - 1]) if self.stage > 0 else 0
curr_insts = self.inst_seed_pairs[int(prev_budget) : int(curr_budget)]
else:
curr_insts = self.inst_seed_pairs
# _count_running_instances_for_challenger will count the running instances
# of the last challenger. It makes sense here, because we assume that if we
# moved to a new challenger, all instances have been launched for a previous
# challenger
running_instances = self._count_running_instances_for_challenger(run_history)
n_insts_remaining = len(curr_insts) - (self.curr_inst_idx + running_instances)
# Check which of the current configs is running
my_configs = [c for c, i, s, b in self.run_tracker]
running_configs = set()
tracked_configs = self.success_challengers.union(self.fail_challengers).union(
self.do_not_advance_challengers
)
for k, v in run_history.data.items():
# Our goal here is to account for number of challengers available
# We care if the challenger is running only if is is not tracked in
# success/fails/do not advance
# In other words, in each SH iteration we have to run N configs on
# M instance/seed pairs. This part of the code makes sure that N different
# configurations are launched (we only move to a new config after M
# instance-seed pairs on that config are launched)
# Notice that this number N of configs tracked in num_chal_available
# is a set of processed configurations + the running challengers
# so we do not want to double count configurations
# n_insts_remaining variable above accounts for the last active configuration only
if run_history.ids_config[k.config_id] in tracked_configs:
continue
if v.status == StatusType.RUNNING:
if run_history.ids_config[k.config_id] in my_configs:
running_configs.add(k.config_id)
# The total number of runs for this stage account for finished configurations
# (success + failed + do not advance) + the offset + running but not finished
# configurations. Also we account for the instances not launched for the
# currently running configuration
num_chal_available = (
len(
self.success_challengers
| self.fail_challengers
| self.do_not_advance_challengers
)
+ self.fail_chal_offset
+ len(running_configs)
)
if (
num_chal_available == self.n_configs_in_stage[self.stage]
and n_insts_remaining <= 0
):
return True
else:
return False
|
def _launched_all_configs_for_current_stage(self, run_history: RunHistory) -> bool:
"""
This procedure queries if the addition of currently finished configs
and running configs are sufficient for the current stage.
If more configs are needed, it will return False.
Parameters
----------
run_history : RunHistory
stores all runs we ran so far
Returns
-------
bool: Whether or not to launch more configurations/instances/seed pairs
"""
# selecting instance-seed subset for this budget, depending on the kind of budget
curr_budget = self.all_budgets[self.stage]
if self.instance_as_budget:
prev_budget = int(self.all_budgets[self.stage - 1]) if self.stage > 0 else 0
curr_insts = self.inst_seed_pairs[int(prev_budget) : int(curr_budget)]
else:
curr_insts = self.inst_seed_pairs
# _count_running_instances_for_challenger will count the running instances
# of the last challenger. It makes sense here, because we assume that if we
# moved to a new challenger, all instances have been launched for a previous
# challenger
running_instances = self._count_running_instances_for_challenger(run_history)
n_insts_remaining = len(curr_insts) - (self.curr_inst_idx + running_instances)
# Check which of the current configs is running
my_configs = [c for c, i, s in self.run_tracker]
running_configs = set()
for k, v in run_history.data.items():
if v.status == StatusType.RUNNING:
if run_history.ids_config[k.config_id] in my_configs:
running_configs.add(k.config_id)
# The total number of runs for this stage account for finished configurations
# (success + failed + do not advance) + the offset + running but not finished
# configurations. Also we account for the instances not launched for the
# currently running configuration
num_chal_available = (
len(
self.success_challengers
| self.fail_challengers
| self.do_not_advance_challengers
)
+ self.fail_chal_offset
+ len(running_configs)
)
if (
num_chal_available == self.n_configs_in_stage[self.stage]
and n_insts_remaining <= 0
):
return True
else:
return False
|
https://github.com/automl/SMAC3/issues/695
|
INFO:smac.facade.smac_bohb_facade.BOHB4HPO:Estimated cost of incumbent: 2147483647.000000
Traceback (most recent call last):
File "BOHB4HPO_sgd_instances.py", line 79, in <module>
main()
File "BOHB4HPO_sgd_instances.py", line 75, in main
incumbent = smac.optimize()
File "/home/eggenspk/Work/git/SMAC3/smac/facade/smac_ac_facade.py", line 597, in optimize
incumbent = self.solver.run()
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 278, in run
self._incorporate_run_results(run_info, result, time_left)
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 481, in _incorporate_run_results
result=result,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/parallel_scheduling.py", line 256, in process_results
log_traj=log_traj,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/hyperband.py", line 169, in process_results
log_traj=log_traj)
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/successive_halving.py", line 340, in process_results
curr_budget = self.all_budgets[self.stage]
AttributeError: '_SuccessiveHalving' object has no attribute 'stage'
|
AttributeError
|
def _get_runs(
self,
configs: Union[str, typing.List[Configuration]],
insts: Union[str, typing.List[str]],
repetitions: int = 1,
runhistory: RunHistory = None,
) -> typing.Tuple[typing.List[_Run], RunHistory]:
"""
Generate list of SMAC-TAE runs to be executed. This means
combinations of configs with all instances on a certain number of seeds.
side effect: Adds runs that don't need to be reevaluated to self.rh!
Parameters
----------
configs: str or list<Configuration>
string or directly a list of Configuration
str from [def, inc, def+inc, wallclock_time, cpu_time, all]
time evaluates at cpu- or wallclock-timesteps of:
[max_time/2^0, max_time/2^1, max_time/2^3, ..., default]
with max_time being the highest recorded time
insts: str or list<str>
what instances to use for validation, either from
[train, test, train+test] or directly a list of instances
repetitions: int
number of seeds per instance/config-pair to be evaluated
runhistory: RunHistory
optional, try to reuse this runhistory and save some runs
Returns
-------
runs: list<_Run>
list with _Runs
[_Run(config=CONFIG1,inst=INSTANCE1,seed=SEED1,inst_specs=INST_SPECIFICS1),
_Run(config=CONFIG2,inst=INSTANCE2,seed=SEED2,inst_specs=INST_SPECIFICS2),
...]
"""
# Get relevant configurations and instances
if isinstance(configs, str):
configs = self._get_configs(configs)
if isinstance(insts, str):
instances = sorted(self._get_instances(insts)) # type: typing.Sequence[typing.Union[str, None]]
elif insts is not None:
instances = sorted(insts)
else:
instances = [None]
# If no instances are given, fix the instances to one "None" instance
if not instances:
instances = [None]
# If algorithm is deterministic, fix repetitions to 1
if self.scen.deterministic and repetitions != 1: # type: ignore[attr-defined] # noqa F821
self.logger.warning(
"Specified %d repetitions, but fixing to 1, "
"because algorithm is deterministic.",
repetitions,
)
repetitions = 1
# Extract relevant information from given runhistory
inst_seed_config = self._process_runhistory(configs, instances, runhistory)
# Now create the actual run-list
runs = []
# Counter for runs without the need of recalculation
runs_from_rh = 0
# If we reuse runs, we want to return them as well
new_rh = RunHistory()
for i in instances:
for rep in range(repetitions):
# First, find a seed and add all the data we can take from the
# given runhistory to "our" validation runhistory.
configs_evaluated = [] # type: Configuration
if runhistory and i in inst_seed_config:
# Choose seed based on most often evaluated inst-seed-pair
seed, configs_evaluated = inst_seed_config[i].pop(0)
# Delete inst if all seeds are used
if not inst_seed_config[i]:
inst_seed_config.pop(i)
# Add runs to runhistory
for c in configs_evaluated[:]:
runkey = RunKey(runhistory.config_ids[c], i, seed)
cost, time, status, start, end, additional_info = runhistory.data[
runkey
]
if status in [
StatusType.CRASHED,
StatusType.ABORT,
StatusType.CAPPED,
]:
# Not properly executed target algorithm runs should be repeated
configs_evaluated.remove(c)
continue
new_rh.add(
c,
cost,
time,
status,
instance_id=i,
seed=seed,
starttime=start,
endtime=end,
additional_info=additional_info,
)
runs_from_rh += 1
else:
# If no runhistory or no entries for instance, get new seed
seed = self.rng.randint(MAXINT)
# We now have a seed and add all configs that are not already
# evaluated on that seed to the runs-list. This way, we
# guarantee the same inst-seed-pairs for all configs.
for config in [c for c in configs if c not in configs_evaluated]:
# Only use specifics if specific exists, else use string "0"
specs = (
self.scen.instance_specific[i]
if i and i in self.scen.instance_specific
else "0"
)
runs.append(_Run(config=config, inst=i, seed=seed, inst_specs=specs))
self.logger.info(
"Collected %d runs from %d configurations on %d "
"instances with %d repetitions. Reusing %d runs from "
"given runhistory.",
len(runs),
len(configs),
len(instances),
repetitions,
runs_from_rh,
)
return runs, new_rh
|
def _get_runs(
self,
configs: Union[str, typing.List[Configuration]],
insts: Union[str, typing.List[str]],
repetitions: int = 1,
runhistory: RunHistory = None,
) -> typing.Tuple[typing.List[_Run], RunHistory]:
"""
Generate list of SMAC-TAE runs to be executed. This means
combinations of configs with all instances on a certain number of seeds.
side effect: Adds runs that don't need to be reevaluated to self.rh!
Parameters
----------
configs: str or list<Configuration>
string or directly a list of Configuration
str from [def, inc, def+inc, wallclock_time, cpu_time, all]
time evaluates at cpu- or wallclock-timesteps of:
[max_time/2^0, max_time/2^1, max_time/2^3, ..., default]
with max_time being the highest recorded time
insts: str or list<str>
what instances to use for validation, either from
[train, test, train+test] or directly a list of instances
repetitions: int
number of seeds per instance/config-pair to be evaluated
runhistory: RunHistory
optional, try to reuse this runhistory and save some runs
Returns
-------
runs: list<_Run>
list with _Runs
[_Run(config=CONFIG1,inst=INSTANCE1,seed=SEED1,inst_specs=INST_SPECIFICS1),
_Run(config=CONFIG2,inst=INSTANCE2,seed=SEED2,inst_specs=INST_SPECIFICS2),
...]
"""
# Get relevant configurations and instances
if isinstance(configs, str):
configs = self._get_configs(configs)
if isinstance(insts, str):
instances = self._get_instances(insts) # type: typing.Sequence[typing.Union[str, None]]
elif insts is not None:
instances = insts
else:
instances = [None]
# If no instances are given, fix the instances to one "None" instance
if not instances:
instances = [None]
# If algorithm is deterministic, fix repetitions to 1
if self.scen.deterministic and repetitions != 1: # type: ignore[attr-defined] # noqa F821
self.logger.warning(
"Specified %d repetitions, but fixing to 1, "
"because algorithm is deterministic.",
repetitions,
)
repetitions = 1
# Extract relevant information from given runhistory
inst_seed_config = self._process_runhistory(configs, instances, runhistory)
# Now create the actual run-list
runs = []
# Counter for runs without the need of recalculation
runs_from_rh = 0
# If we reuse runs, we want to return them as well
new_rh = RunHistory()
for i in sorted(instances):
for rep in range(repetitions):
# First, find a seed and add all the data we can take from the
# given runhistory to "our" validation runhistory.
configs_evaluated = [] # type: Configuration
if runhistory and i in inst_seed_config:
# Choose seed based on most often evaluated inst-seed-pair
seed, configs_evaluated = inst_seed_config[i].pop(0)
# Delete inst if all seeds are used
if not inst_seed_config[i]:
inst_seed_config.pop(i)
# Add runs to runhistory
for c in configs_evaluated[:]:
runkey = RunKey(runhistory.config_ids[c], i, seed)
cost, time, status, start, end, additional_info = runhistory.data[
runkey
]
if status in [
StatusType.CRASHED,
StatusType.ABORT,
StatusType.CAPPED,
]:
# Not properly executed target algorithm runs should be repeated
configs_evaluated.remove(c)
continue
new_rh.add(
c,
cost,
time,
status,
instance_id=i,
seed=seed,
starttime=start,
endtime=end,
additional_info=additional_info,
)
runs_from_rh += 1
else:
# If no runhistory or no entries for instance, get new seed
seed = self.rng.randint(MAXINT)
# We now have a seed and add all configs that are not already
# evaluated on that seed to the runs-list. This way, we
# guarantee the same inst-seed-pairs for all configs.
for config in [c for c in configs if c not in configs_evaluated]:
# Only use specifics if specific exists, else use string "0"
specs = (
self.scen.instance_specific[i]
if i and i in self.scen.instance_specific
else "0"
)
runs.append(_Run(config=config, inst=i, seed=seed, inst_specs=specs))
self.logger.info(
"Collected %d runs from %d configurations on %d "
"instances with %d repetitions. Reusing %d runs from "
"given runhistory.",
len(runs),
len(configs),
len(instances),
repetitions,
runs_from_rh,
)
return runs, new_rh
|
https://github.com/automl/SMAC3/issues/695
|
INFO:smac.facade.smac_bohb_facade.BOHB4HPO:Estimated cost of incumbent: 2147483647.000000
Traceback (most recent call last):
File "BOHB4HPO_sgd_instances.py", line 79, in <module>
main()
File "BOHB4HPO_sgd_instances.py", line 75, in main
incumbent = smac.optimize()
File "/home/eggenspk/Work/git/SMAC3/smac/facade/smac_ac_facade.py", line 597, in optimize
incumbent = self.solver.run()
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 278, in run
self._incorporate_run_results(run_info, result, time_left)
File "/home/eggenspk/Work/git/SMAC3/smac/optimizer/smbo.py", line 481, in _incorporate_run_results
result=result,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/parallel_scheduling.py", line 256, in process_results
log_traj=log_traj,
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/hyperband.py", line 169, in process_results
log_traj=log_traj)
File "/home/eggenspk/Work/git/SMAC3/smac/intensification/successive_halving.py", line 340, in process_results
curr_budget = self.all_budgets[self.stage]
AttributeError: '_SuccessiveHalving' object has no attribute 'stage'
|
AttributeError
|
def _transform_arguments(self):
self.n_features = len(self.feature_dict)
self.feature_array = None
if self.overall_obj[:3] in ["PAR", "par"]:
par_str = self.overall_obj[3:]
elif self.overall_obj[:4] in ["mean", "MEAN"]:
par_str = self.overall_obj[4:]
# Check for par-value as in "par10"/ "mean5"
if len(par_str) > 0:
self.par_factor = int(par_str)
else:
self.logger.debug("No par-factor detected. Using 1 by default.")
self.par_factor = 1
# read instance files
if self.train_inst_fn:
if os.path.isfile(self.train_inst_fn):
self.train_insts = self.in_reader.read_instance_file(self.train_inst_fn)
else:
self.logger.error("Have not found instance file: %s" % (self.train_inst_fn))
sys.exit(1)
if self.test_inst_fn:
if os.path.isfile(self.test_inst_fn):
self.test_insts = self.in_reader.read_instance_file(self.test_inst_fn)
else:
self.logger.error(
"Have not found test instance file: %s" % (self.test_inst_fn)
)
sys.exit(1)
self.instance_specific = {}
def extract_instance_specific(instance_list):
insts = []
for inst in instance_list:
if len(inst) > 1:
self.instance_specific[inst[0]] = " ".join(inst[1:])
insts.append(inst[0])
return insts
self.train_insts = extract_instance_specific(self.train_insts)
if self.test_insts:
self.test_insts = extract_instance_specific(self.test_insts)
# read feature file
if self.feature_fn:
if os.path.isfile(self.feature_fn):
self.feature_dict = self.in_reader.read_instance_features_file(
self.feature_fn
)[1]
if self.feature_dict:
self.feature_array = []
for inst_ in self.train_insts:
self.feature_array.append(self.feature_dict[inst_])
self.feature_array = numpy.array(self.feature_array)
self.n_features = self.feature_array.shape[1]
# reduce dimensionality of features of larger than PCA_DIM
if self.feature_array.shape[1] > self.PCA_DIM:
X = self.feature_array
# scale features
X = MinMaxScaler().fit_transform(X)
X = numpy.nan_to_num(X) # if features with max == min
# PCA
pca = PCA(n_components=self.PCA_DIM)
self.feature_array = pca.fit_transform(X)
self.n_features = self.feature_array.shape[1]
# update feature dictionary
for feat, inst_ in zip(self.feature_array, self.train_insts):
self.feature_dict[inst_] = feat
# read pcs file
if self.pcs_fn and os.path.isfile(self.pcs_fn):
with open(self.pcs_fn) as fp:
pcs_str = fp.readlines()
self.cs = pcs.read(pcs_str)
self.cs.seed(42)
elif self.pcs_fn:
self.logger.error("Have not found pcs file: %s" % (self.pcs_fn))
sys.exit(1)
# you cannot set output dir to None directly
# because None is replaced by default always
if self.output_dir == "":
self.output_dir = None
self.logger.debug("Deactivate output directory.")
else:
self.logger.info("Output to %s" % (self.output_dir))
|
def _transform_arguments(self):
self.n_features = len(self.feature_dict)
self.feature_array = None
if self.overall_obj[:3] in ["PAR", "par"]:
self.par_factor = int(self.overall_obj[3:])
elif self.overall_obj[:4] in ["mean", "MEAN"]:
self.par_factor = int(self.overall_obj[4:])
else:
self.par_factor = 1
# read instance files
if self.train_inst_fn:
if os.path.isfile(self.train_inst_fn):
self.train_insts = self.in_reader.read_instance_file(self.train_inst_fn)
else:
self.logger.error("Have not found instance file: %s" % (self.train_inst_fn))
sys.exit(1)
if self.test_inst_fn:
if os.path.isfile(self.test_inst_fn):
self.test_insts = self.in_reader.read_instance_file(self.test_inst_fn)
else:
self.logger.error(
"Have not found test instance file: %s" % (self.test_inst_fn)
)
sys.exit(1)
self.instance_specific = {}
def extract_instance_specific(instance_list):
insts = []
for inst in instance_list:
if len(inst) > 1:
self.instance_specific[inst[0]] = " ".join(inst[1:])
insts.append(inst[0])
return insts
self.train_insts = extract_instance_specific(self.train_insts)
if self.test_insts:
self.test_insts = extract_instance_specific(self.test_insts)
# read feature file
if self.feature_fn:
if os.path.isfile(self.feature_fn):
self.feature_dict = self.in_reader.read_instance_features_file(
self.feature_fn
)[1]
if self.feature_dict:
self.feature_array = []
for inst_ in self.train_insts:
self.feature_array.append(self.feature_dict[inst_])
self.feature_array = numpy.array(self.feature_array)
self.n_features = self.feature_array.shape[1]
# reduce dimensionality of features of larger than PCA_DIM
if self.feature_array.shape[1] > self.PCA_DIM:
X = self.feature_array
# scale features
X = MinMaxScaler().fit_transform(X)
X = numpy.nan_to_num(X) # if features with max == min
# PCA
pca = PCA(n_components=self.PCA_DIM)
self.feature_array = pca.fit_transform(X)
self.n_features = self.feature_array.shape[1]
# update feature dictionary
for feat, inst_ in zip(self.feature_array, self.train_insts):
self.feature_dict[inst_] = feat
# read pcs file
if self.pcs_fn and os.path.isfile(self.pcs_fn):
with open(self.pcs_fn) as fp:
pcs_str = fp.readlines()
self.cs = pcs.read(pcs_str)
self.cs.seed(42)
elif self.pcs_fn:
self.logger.error("Have not found pcs file: %s" % (self.pcs_fn))
sys.exit(1)
# you cannot set output dir to None directly
# because None is replaced by default always
if self.output_dir == "":
self.output_dir = None
self.logger.debug("Deactivate output directory.")
else:
self.logger.info("Output to %s" % (self.output_dir))
|
https://github.com/automl/SMAC3/issues/165
|
Traceback (most recent call last):
File "/home/lindauer/git/SMAC3/scripts/smac", line 20, in <module>
smac.main_cli()
File "/home/lindauer/git/SMAC3/smac/smac_cli.py", line 48, in main_cli
scen = Scenario(args_.scenario_file, misc_args)
File "/home/lindauer/git/SMAC3/smac/scenario/scenario.py", line 94, in __init__
self._transform_arguments()
File "/home/lindauer/git/SMAC3/smac/scenario/scenario.py", line 265, in _transform_arguments
self.par_factor = int(self.overall_obj[4:])
ValueError: invalid literal for int() with base 10: ''
|
ValueError
|
def run(
self,
config,
instance=None,
cutoff=None,
memory_limit=None,
seed=12345,
instance_specific="0",
):
"""
runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics>
for at most <cutoff> seconds and random seed <seed>
Parameters
----------
config : dictionary (or similar)
dictionary param -> value
instance : str
problem instance
cutoff : int, optional
Wallclock time limit of the target algorithm. If no value is
provided no limit will be enforced.
memory_limit : int, optional
Memory limit in MB enforced on the target algorithm If no
value is provided no limit will be enforced.
seed : int
random seed
instance_specific: str
instance specific information (e.g., domain file or solution)
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality/runtime (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
arguments = {
"logger": logging.getLogger("pynisher"),
"wall_time_in_s": cutoff,
"mem_in_mb": memory_limit,
}
obj = pynisher.enforce_limits(**arguments)(self.ta)
if instance:
rval = obj(config, instance, seed)
else:
rval = obj(config, seed)
if isinstance(rval, tuple):
result = rval[0]
additional_run_info = rval[1]
else:
result = rval
additional_run_info = {}
if obj.exit_status is pynisher.TimeoutException:
status = StatusType.TIMEOUT
cost = 1234567890
elif obj.exit_status is pynisher.MemorylimitException:
status = StatusType.MEMOUT
cost = 1234567890
elif obj.exit_status == 0 and result is not None:
status = StatusType.SUCCESS
cost = result
else:
status = StatusType.CRASHED
cost = 1234567890 # won't be used for the model
runtime = float(obj.wall_clock_time)
return status, cost, runtime, additional_run_info
|
def run(
self,
config,
instance=None,
cutoff=None,
memory_limit=None,
seed=12345,
instance_specific="0",
):
"""
runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics>
for at most <cutoff> seconds and random seed <seed>
Parameters
----------
config : dictionary (or similar)
dictionary param -> value
instance : str
problem instance
cutoff : int, optional
Wallclock time limit of the target algorithm. If no value is
provided no limit will be enforced.
memory_limit : int, optional
Memory limit in MB enforced on the target algorithm If no
value is provided no limit will be enforced.
seed : int
random seed
instance_specific: str
instance specific information (e.g., domain file or solution)
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality/runtime (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
arguments = {
"logger": logging.getLogger("pynisher"),
"wall_time_in_s": cutoff,
"mem_in_mb": memory_limit,
}
obj = pynisher.enforce_limits(**arguments)(self.func)
if instance:
rval = obj(config, instance, seed)
else:
rval = obj(config, seed)
if isinstance(rval, tuple):
result = rval[0]
additional_run_info = rval[1]
else:
result = rval
additional_run_info = {}
if obj.exit_status is pynisher.TimeoutException:
status = StatusType.TIMEOUT
cost = 1234567890
elif obj.exit_status is pynisher.MemorylimitException:
status = StatusType.MEMOUT
cost = 1234567890
elif obj.exit_status == 0 and result is not None:
status = StatusType.SUCCESS
cost = result
else:
status = StatusType.CRASHED
cost = 1234567890 # won't be used for the model
runtime = float(obj.wall_clock_time)
if self.run_obj == "runtime":
if status != StatusType.SUCCESS:
cost = cutoff * self.par_factor
else:
cost = runtime
return status, cost, runtime, additional_run_info
|
https://github.com/automl/SMAC3/issues/56
|
Traceback (most recent call last):
File "/home/lindauer/git/SMAC3/scripts/smac", line 20, in <module>
smac.main_cli()
File "/home/lindauer/git/SMAC3/smac/smac_cli.py", line 47, in main_cli
smbo.run(max_iters=args_.max_iterations)
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 282, in run
self.incumbent = self.run_initial_design()
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 239, in run_initial_design
instance_specific=self.scenario.instance_specific.get(rand_inst, "0"))
File "/home/lindauer/git/SMAC3/smac/tae/execute_ta_run.py", line 103, in start
if self._supports_memory_limit is True:
AttributeError: 'ExecuteTARunOld' object has no attribute '_supports_memory_limit'
|
AttributeError
|
def __init__(self, ta, stats, run_obj="runtime", par_factor=1):
"""
Constructor
Parameters
----------
ta : list
target algorithm command line as list of arguments
stats: Stats()
stats object to collect statistics about runtime and so on
run_obj: str
run objective of SMAC
"""
self.ta = ta
self.stats = stats
self.run_obj = run_obj
self.par_factor = par_factor
self.logger = logging.getLogger(self.__class__.__name__)
self._supports_memory_limit = False
|
def __init__(self, ta, stats, run_obj="runtime"):
"""
Constructor
Parameters
----------
ta : list
target algorithm command line as list of arguments
stats: Stats()
stats object to collect statistics about runtime and so on
run_obj: str
run objective of SMAC
"""
self.ta = ta
self.stats = stats
self.logger = logging.getLogger("ExecuteTARun")
self._supports_memory_limit = False
|
https://github.com/automl/SMAC3/issues/56
|
Traceback (most recent call last):
File "/home/lindauer/git/SMAC3/scripts/smac", line 20, in <module>
smac.main_cli()
File "/home/lindauer/git/SMAC3/smac/smac_cli.py", line 47, in main_cli
smbo.run(max_iters=args_.max_iterations)
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 282, in run
self.incumbent = self.run_initial_design()
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 239, in run_initial_design
instance_specific=self.scenario.instance_specific.get(rand_inst, "0"))
File "/home/lindauer/git/SMAC3/smac/tae/execute_ta_run.py", line 103, in start
if self._supports_memory_limit is True:
AttributeError: 'ExecuteTARunOld' object has no attribute '_supports_memory_limit'
|
AttributeError
|
def start(
self,
config,
instance,
cutoff=None,
memory_limit=None,
seed=12345,
instance_specific="0",
):
"""
wrapper function for ExecuteTARun.run() to check configuration budget before the runs
and to update stats after run
Parameters
----------
config : dictionary
dictionary param -> value
instance : string
problem instance
cutoff : double
runtime cutoff
seed : int
random seed
instance_specific: str
instance specific information (e.g., domain file or solution)
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
if self.stats.is_budget_exhausted():
self.logger.debug(
"Skip target algorithm run due to exhausted configuration budget"
)
return StatusType.ABORT, np.nan, 0, {"misc": "exhausted bugdet -- ABORT"}
if cutoff is not None:
cutoff = int(math.ceil(cutoff))
if memory_limit is not None:
memory_limit = int(math.ceil(memory_limit))
additional_arguments = {}
if self._supports_memory_limit is True:
additional_arguments["memory_limit"] = memory_limit
elif self._supports_memory_limit is False and memory_limit is not None:
raise ValueError(
"Target algorithm executor %s does not support "
"restricting the memory usage." % self.__class__.__name__
)
status, cost, runtime, additional_info = self.run(
config=config,
instance=instance,
cutoff=cutoff,
seed=seed,
instance_specific=instance_specific,
**additional_arguments,
)
# update SMAC stats
self.stats.ta_runs += 1
self.stats.ta_time_used += float(runtime)
if self.run_obj == "runtime":
if status != StatusType.SUCCESS:
cost = cutoff * self.par_factor
else:
cost = runtime
self.logger.debug(
"Return: Status: %d, cost: %f, time. %f, additional: %s"
% (status, cost, runtime, str(additional_info))
)
return status, cost, runtime, additional_info
|
def start(
self,
config,
instance,
cutoff=None,
memory_limit=None,
seed=12345,
instance_specific="0",
):
"""
wrapper function for ExecuteTARun.run() to check configuration budget before the runs
and to update stats after run
Parameters
----------
config : dictionary
dictionary param -> value
instance : string
problem instance
cutoff : double
runtime cutoff
seed : int
random seed
instance_specific: str
instance specific information (e.g., domain file or solution)
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
if self.stats.is_budget_exhausted():
self.logger.debug(
"Skip target algorithm run due to exhausted configuration budget"
)
return StatusType.ABORT, np.nan, 0, {"misc": "exhausted bugdet -- ABORT"}
if cutoff is not None:
cutoff = int(math.ceil(cutoff))
if memory_limit is not None:
memory_limit = int(math.ceil(memory_limit))
additional_arguments = {}
if self._supports_memory_limit is True:
additional_arguments["memory_limit"] = memory_limit
else:
raise ValueError(
"Target algorithm executor %s does not support "
"restricting the memory usage." % self.__class__.__name__
)
status, cost, runtime, additional_info = self.run(
config=config,
instance=instance,
cutoff=cutoff,
seed=seed,
instance_specific=instance_specific,
**additional_arguments,
)
# update SMAC stats
self.stats.ta_runs += 1
self.stats.ta_time_used += float(runtime)
self.logger.debug(
"Return: Status: %d, cost: %f, time. %f, additional: %s"
% (status, cost, runtime, str(additional_info))
)
return status, cost, runtime, additional_info
|
https://github.com/automl/SMAC3/issues/56
|
Traceback (most recent call last):
File "/home/lindauer/git/SMAC3/scripts/smac", line 20, in <module>
smac.main_cli()
File "/home/lindauer/git/SMAC3/smac/smac_cli.py", line 47, in main_cli
smbo.run(max_iters=args_.max_iterations)
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 282, in run
self.incumbent = self.run_initial_design()
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 239, in run_initial_design
instance_specific=self.scenario.instance_specific.get(rand_inst, "0"))
File "/home/lindauer/git/SMAC3/smac/tae/execute_ta_run.py", line 103, in start
if self._supports_memory_limit is True:
AttributeError: 'ExecuteTARunOld' object has no attribute '_supports_memory_limit'
|
AttributeError
|
def run(self, config, instance=None, cutoff=None, seed=12345, instance_specific="0"):
"""
runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics>
for at most <cutoff> seconds and random seed <seed>
Parameters
----------
config : dictionary (or similar)
dictionary param -> value
instance : string
problem instance
cutoff : double
runtime cutoff
seed : int
random seed
instance_specific: str
instance specific information -- ignored here
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality/runtime (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
if instance is None:
instance = "0"
if cutoff is None:
cutoff = 99999999999999
# TOOD: maybe replace fixed instance specific and cutoff_length (0) to
# other value
cmd = []
cmd.extend(self.ta)
cmd.extend(
[
"--instance",
instance,
"--cutoff",
str(cutoff),
"--seed",
str(seed),
"--config",
]
)
for p in config:
if not config[p] is None:
cmd.extend(["-" + str(p), str(config[p])])
self.logger.debug("Calling: %s" % (" ".join(cmd)))
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, universal_newlines=True)
stdout_, stderr_ = p.communicate()
self.logger.debug("Stdout: %s" % (stdout_))
self.logger.debug("Stderr: %s" % (stderr_))
results = {"status": "CRASHED", "cost": 1234567890}
for line in stdout_.split("\n"):
if line.startswith("Result of this algorithm run:"):
fields = ":".join(line.split(":")[1:])
results = json.loads(fields)
if results["status"] in ["SAT", "UNSAT", "SUCCESS"]:
status = StatusType.SUCCESS
elif results["status"] in ["TIMEOUT"]:
status = StatusType.TIMEOUT
elif results["status"] in ["CRASHED"]:
status = StatusType.CRASHED
elif results["status"] in ["ABORT"]:
status = StatusType.ABORT
self.logger.error("Target algorithm returned ABORT -- Exit!")
sys.exit(43)
elif results["status"] in ["MEMOUT"]:
status = StatusType.MEMOUT
if status in [StatusType.CRASHED, StatusType.ABORT]:
self.logger.warn("Target algorithm crashed. Last 5 lines of stdout and stderr")
self.logger.warn("\n".join(stdout_.split("\n")[-5:]))
self.logger.warn("\n".join(stderr_.split("\n")[-5:]))
if results.get("runtime") is None:
self.logger.warn(
"The target algorithm has not returned a runtime -- imputed by 0."
)
# (TODO) Check 0
results["runtime"] = 0
runtime = float(results["runtime"])
if self.run_obj == "quality" and results.get("cost") is None:
self.logger.error(
"The target algorithm has not returned a quality/cost value"
+ "although we optimize cost."
)
# (TODO) Do not return 0
results["cost"] = 0
if self.run_obj == "runtime":
cost = float(results["runtime"])
else:
cost = float(results["cost"])
del results["status"]
try:
del results["runtime"]
except KeyError:
pass
try:
del results["cost"]
except KeyError:
pass
return status, cost, runtime, results
|
def run(self, config, instance=None, cutoff=None, seed=12345, instance_specific="0"):
"""
runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics>
for at most <cutoff> seconds and random seed <seed>
Parameters
----------
config : dictionary (or similar)
dictionary param -> value
instance : string
problem instance
cutoff : double
runtime cutoff
seed : int
random seed
instance_specific: str
instance specific information -- ignored here
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality/runtime (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
if instance is None:
instance = "0"
if cutoff is None:
cutoff = 99999999999999
# TOOD: maybe replace fixed instance specific and cutoff_length (0) to
# other value
cmd = []
cmd.extend(self.ta)
cmd.extend(
[
"--instance",
instance,
"--cutoff",
str(cutoff),
"--seed",
str(seed),
"--config",
]
)
for p in config:
if not config[p] is None:
cmd.extend(["-" + str(p), str(config[p])])
self.logger.debug("Calling: %s" % (" ".join(cmd)))
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, universal_newlines=True)
stdout_, stderr_ = p.communicate()
self.logger.debug("Stdout: %s" % (stdout_))
self.logger.debug("Stderr: %s" % (stderr_))
results = {"status": "CRASHED", "cost": 1234567890}
for line in stdout_.split("\n"):
if line.startswith("Result of this algorithm run:"):
fields = ":".join(line.split(":")[1:])
results = json.loads(fields)
if results["status"] in ["SAT", "UNSAT", "SUCCESS"]:
status = StatusType.SUCCESS
elif results["status"] in ["TIMEOUT"]:
status = StatusType.TIMEOUT
elif results["status"] in ["CRASHED"]:
status = StatusType.CRASHED
elif results["status"] in ["ABORT"]:
status = StatusType.ABORT
self.logger.error("Target algorithm returned ABORT -- Exit!")
sys.exit(43)
elif results["status"] in ["MEMOUT"]:
status = StatusType.MEMOUT
if status in [StatusType.CRASHED, StatusType.ABORT]:
self.logger.warn("Target algorithm crashed. Last 5 lines of stdout and stderr")
self.logger.warn("\n".join(stdout_.split("\n")[-5:]))
self.logger.warn("\n".join(stderr_.split("\n")[-5:]))
if results.get("runtime") is None:
self.logger.warn(
"The target algorithm has not returned a runtime -- imputed by 0."
)
# (TODO) Check 0
results["runtime"] = 0
runtime = float(results["runtime"])
if self.run_obj == "quality" and results.get("cost") is None:
self.logger.error(
"The target algorithm has not returned a quality/cost value"
+ "although we optimize cost."
)
# (TODO) Do not return 0
results["cost"] = 0
if self.run_obj == "runtime":
if status != StatusType.SUCCESS:
cost = float(results["runtime"]) * self.par_factor
else:
cost = float(results["runtime"])
else:
cost = float(results["cost"])
del results["status"]
try:
del results["runtime"]
except KeyError:
pass
try:
del results["cost"]
except KeyError:
pass
return status, cost, runtime, results
|
https://github.com/automl/SMAC3/issues/56
|
Traceback (most recent call last):
File "/home/lindauer/git/SMAC3/scripts/smac", line 20, in <module>
smac.main_cli()
File "/home/lindauer/git/SMAC3/smac/smac_cli.py", line 47, in main_cli
smbo.run(max_iters=args_.max_iterations)
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 282, in run
self.incumbent = self.run_initial_design()
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 239, in run_initial_design
instance_specific=self.scenario.instance_specific.get(rand_inst, "0"))
File "/home/lindauer/git/SMAC3/smac/tae/execute_ta_run.py", line 103, in start
if self._supports_memory_limit is True:
AttributeError: 'ExecuteTARunOld' object has no attribute '_supports_memory_limit'
|
AttributeError
|
def run(self, config, instance=None, cutoff=None, seed=12345, instance_specific="0"):
"""
runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics>
for at most <cutoff> seconds and random seed <seed>
Parameters
----------
config : dictionary (or similar)
dictionary param -> value
instance : string
problem instance
cutoff : double
runtime cutoff
seed : int
random seed
instance_specific: str
instance specific information (e.g., domain file or solution)
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality/runtime (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
if instance is None:
instance = "0"
if cutoff is None:
cutoff = 99999999999999.0
# TOOD: maybe replace fixed instance specific and cutoff_length (0) to
# other value
cmd = []
cmd.extend(self.ta)
cmd.extend([instance, instance_specific, str(cutoff), "0", str(seed)])
for p in config:
if not config[p] is None:
cmd.extend(["-" + str(p), str(config[p])])
self.logger.debug("Calling: %s" % (" ".join(cmd)))
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, universal_newlines=True)
stdout_, stderr_ = p.communicate()
self.logger.debug("Stdout: %s" % (stdout_))
self.logger.debug("Stderr: %s" % (stderr_))
status = "CRASHED"
quality = 1234567890
runtime = 1234567890
additional_info = {}
for line in stdout_.split("\n"):
if (
line.startswith("Result of this algorithm run:")
or line.startswith("Result for ParamILS")
or line.startswith("Result for SMAC")
):
fields = line.split(":")[1].split(",")
fields = list(map(lambda x: x.strip(" "), fields))
if len(fields) == 5:
status, runtime, runlength, quality, seed = fields
additional_info = {}
else:
status, runtime, runlength, quality, seed, additional_info = fields
additional_info = {"additional_info": additional_info}
runtime = min(float(runtime), cutoff)
quality = float(quality)
seed = int(seed)
if status in ["SAT", "UNSAT", "SUCCESS"]:
status = StatusType.SUCCESS
elif status in ["TIMEOUT"]:
status = StatusType.TIMEOUT
elif status in ["CRASHED"]:
status = StatusType.CRASHED
elif status in ["ABORT"]:
status = StatusType.ABORT
self.logger.error("Target algorithm returned ABORT -- Exit!")
sys.exit(43)
elif status in ["MEMOUT"]:
status = StatusType.MEMOUT
if status in [StatusType.CRASHED, StatusType.ABORT]:
self.logger.warn("Target algorithm crashed. Last 5 lines of stdout and stderr")
self.logger.warn("\n".join(stdout_.split("\n")[-5:]))
self.logger.warn("\n".join(stderr_.split("\n")[-5:]))
if self.run_obj == "runtime":
cost = runtime
else:
cost = quality
return status, cost, float(runtime), additional_info
|
def run(self, config, instance=None, cutoff=None, seed=12345, instance_specific="0"):
"""
runs target algorithm <self.ta> with configuration <config> on
instance <instance> with instance specifics <specifics>
for at most <cutoff> seconds and random seed <seed>
Parameters
----------
config : dictionary (or similar)
dictionary param -> value
instance : string
problem instance
cutoff : double
runtime cutoff
seed : int
random seed
instance_specific: str
instance specific information (e.g., domain file or solution)
Returns
-------
status: enum of StatusType (int)
{SUCCESS, TIMEOUT, CRASHED, ABORT}
cost: float
cost/regret/quality/runtime (float) (None, if not returned by TA)
runtime: float
runtime (None if not returned by TA)
additional_info: dict
all further additional run information
"""
if instance is None:
instance = "0"
if cutoff is None:
cutoff = 99999999999999.0
# TOOD: maybe replace fixed instance specific and cutoff_length (0) to
# other value
cmd = []
cmd.extend(self.ta)
cmd.extend([instance, instance_specific, str(cutoff), "0", str(seed)])
for p in config:
if not config[p] is None:
cmd.extend(["-" + str(p), str(config[p])])
self.logger.debug("Calling: %s" % (" ".join(cmd)))
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, universal_newlines=True)
stdout_, stderr_ = p.communicate()
self.logger.debug("Stdout: %s" % (stdout_))
self.logger.debug("Stderr: %s" % (stderr_))
status = "CRASHED"
quality = 1234567890
runtime = 1234567890
additional_info = {}
for line in stdout_.split("\n"):
if (
line.startswith("Result of this algorithm run:")
or line.startswith("Result for ParamILS")
or line.startswith("Result for SMAC")
):
fields = line.split(":")[1].split(",")
fields = list(map(lambda x: x.strip(" "), fields))
if len(fields) == 5:
status, runtime, runlength, quality, seed = fields
additional_info = {}
else:
status, runtime, runlength, quality, seed, additional_info = fields
additional_info = {"additional_info": additional_info}
runtime = min(float(runtime), cutoff)
quality = float(quality)
seed = int(seed)
if status in ["SAT", "UNSAT", "SUCCESS"]:
status = StatusType.SUCCESS
elif status in ["TIMEOUT"]:
status = StatusType.TIMEOUT
elif status in ["CRASHED"]:
status = StatusType.CRASHED
elif status in ["ABORT"]:
status = StatusType.ABORT
self.logger.error("Target algorithm returned ABORT -- Exit!")
sys.exit(43)
elif status in ["MEMOUT"]:
status = StatusType.MEMOUT
if status in [StatusType.CRASHED, StatusType.ABORT]:
self.logger.warn("Target algorithm crashed. Last 5 lines of stdout and stderr")
self.logger.warn("\n".join(stdout_.split("\n")[-5:]))
self.logger.warn("\n".join(stderr_.split("\n")[-5:]))
if self.run_obj == "runtime":
if status != StatusType.SUCCESS:
cost = runtime * self.par_factor
else:
cost = runtime
else:
cost = quality
return status, cost, float(runtime), additional_info
|
https://github.com/automl/SMAC3/issues/56
|
Traceback (most recent call last):
File "/home/lindauer/git/SMAC3/scripts/smac", line 20, in <module>
smac.main_cli()
File "/home/lindauer/git/SMAC3/smac/smac_cli.py", line 47, in main_cli
smbo.run(max_iters=args_.max_iterations)
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 282, in run
self.incumbent = self.run_initial_design()
File "/home/lindauer/git/SMAC3/smac/smbo/smbo.py", line 239, in run_initial_design
instance_specific=self.scenario.instance_specific.get(rand_inst, "0"))
File "/home/lindauer/git/SMAC3/smac/tae/execute_ta_run.py", line 103, in start
if self._supports_memory_limit is True:
AttributeError: 'ExecuteTARunOld' object has no attribute '_supports_memory_limit'
|
AttributeError
|
def add(
self, config, cost, time, status, instance_id=None, seed=None, additional_info=None
):
"""
adds a data of a new target algorithm (TA) run;
it will update data if the same key values are used
(config, instance_id, seed)
Attributes
----------
config : dict (or other type -- depending on config space module)
parameter configuratoin
cost: float
cost of TA run (will be minimized)
time: float
runtime of TA run
status: str
status in {SUCCESS, TIMEOUT, CRASHED, ABORT, MEMOUT}
instance_id: str
str representing an instance (default: None)
seed: int
random seed used by TA (default: None)
additional_info: dict
additional run infos (could include further returned
information from TA or fields such as start time and host_id)
"""
config_id = self.config_ids.get(config)
if config_id is None:
self._n_id += 1
self.config_ids[config] = self._n_id
config_id = self.config_ids.get(config)
self.ids_config[self._n_id] = config
k = self.RunKey(config_id, instance_id, seed)
v = self.RunValue(cost, time, status, additional_info)
self.data[k] = v
|
def add(
self, config, cost, time, status, instance_id=None, seed=None, additional_info=None
):
"""
adds a data of a new target algorithm (TA) run;
it will update data if the same key values are used
(config, instance_id, seed)
Attributes
----------
config : dict (or other type -- depending on config space module)
parameter configuratoin
cost: float
cost of TA run (will be minimized)
time: float
runtime of TA run
status: str
status in {SUCCESS, TIMEOUT, CRASHED, ABORT, MEMOUT}
instance_id: str
str representing an instance (default: None)
seed: int
random seed used by TA (default: None)
additional_info: dict
additional run infos (could include further returned
information from TA or fields such as start time and host_id)
"""
# TODO: replace str casting of config when we have something hashable
# as a config object
# TODO JTS: We might have to execute one config multiple times
# since the results can be noisy and then we can't simply
# overwrite the old config result here!
config_id = self.config_ids.get(config.__repr__())
if config_id is None:
self._n_id += 1
self.config_ids[config.__repr__()] = self._n_id
config_id = self.config_ids.get(config.__repr__())
self.ids_config[self._n_id] = config
k = self.RunKey(config_id, instance_id, seed)
v = self.RunValue(cost, time, status, additional_info)
self.data[k] = v
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def update_cost(self, config, cost):
config_id = self.config_ids[config]
self.cost_per_config[config_id] = cost
|
def update_cost(self, config, cost):
config_id = self.config_ids[config.__repr__()]
self.cost_per_config[config_id] = cost
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def get_cost(self, config):
config_id = self.config_ids[config]
return self.cost_per_config[config_id]
|
def get_cost(self, config):
config_id = self.config_ids[config.__repr__()]
return self.cost_per_config[config_id]
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def save_json(self, fn="runhistory.json"):
"""
saves runhistory on disk
Parameters
----------
fn : str
file name
"""
configs = {id_: conf.get_dictionary() for id_, conf in self.ids_config.items()}
data = [
(
[
int(k.config_id),
str(k.instance_id) if k.instance_id is not None else None,
int(k.seed),
],
list(v),
)
for k, v in self.data.items()
]
with open(fn, "w") as fp:
json.dump({"data": data, "configs": configs}, fp)
|
def save_json(self, fn="runhistory.json"):
"""
saves runhistory on disk
Parameters
----------
fn : str
file name
"""
id_vec = dict(
[(id_, conf.get_array().tolist()) for id_, conf in self.ids_config.items()]
)
data = [
(
[
int(k.config_id),
str(k.instance_id) if k.instance_id is not None else None,
int(k.seed),
],
list(v),
)
for k, v in self.data.items()
]
with open(fn, "w") as fp:
json.dump({"data": data, "id_config": id_vec}, fp)
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def load_json(self, fn, cs):
"""Load and runhistory in json representation from disk.
Overwrites current runthistory!
Parameters
----------
fn : str
file name to load from
cs : ConfigSpace
instance of configuration space
"""
with open(fn) as fp:
all_data = json.load(fp)
self.ids_config = {
int(id_): Configuration(cs, values=values)
for id_, values in all_data["configs"].items()
}
self.config_ids = {
Configuration(cs, values=values): int(id_)
for id_, values in all_data["configs"].items()
}
self._n_id = len(self.config_ids)
self.data = {
self.RunKey(int(k[0]), k[1], int(k[2])): self.RunValue(
float(v[0]), float(v[1]), v[2], v[3]
)
for k, v in all_data["data"]
}
|
def load_json(self, fn, cs):
"""Load and runhistory in json representation from disk.
Overwrites current runthistory!
Parameters
----------
fn : str
file name to load from
cs : ConfigSpace
instance of configuration space
"""
with open(fn) as fp:
all_data = json.load(fp)
self.ids_config = dict(
[
(int(id_), Configuration(cs, vector=numpy.array(vec)))
for id_, vec in all_data["id_config"].items()
]
)
self.config_ids = dict(
[
(Configuration(cs, vector=numpy.array(vec)).__repr__(), id_)
for id_, vec in all_data["id_config"].items()
]
)
self._n_id = len(self.config_ids)
self.data = dict(
[
(
self.RunKey(int(k[0]), k[1], int(k[2])),
self.RunValue(float(v[0]), float(v[1]), v[2], v[3]),
)
for k, v in all_data["data"]
]
)
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def _runtime(config, run_history, instance_seed_pairs=None):
"""Return array of all runtimes for the given config for further calculations.
Parameters
----------
config : Configuration
configuration to calculate objective for
run_history : RunHistory
RunHistory object from which the objective value is computed.
instance_seed_pairs : list, optional (default=None)
list of tuples of instance-seeds pairs. If None, the run_history is
queried for all runs of the given configuration.
Returns
----------
list
"""
try:
id_ = run_history.config_ids[config]
except KeyError: # challenger was not running so far
return []
if instance_seed_pairs is None:
instance_seed_pairs = run_history.get_runs_for_config(config)
runtimes = []
for i, r in instance_seed_pairs:
k = run_history.RunKey(id_, i, r)
runtimes.append(run_history.data[k].time)
return runtimes
|
def _runtime(config, run_history, instance_seed_pairs=None):
"""Return array of all runtimes for the given config for further calculations.
Parameters
----------
config : Configuration
configuration to calculate objective for
run_history : RunHistory
RunHistory object from which the objective value is computed.
instance_seed_pairs : list, optional (default=None)
list of tuples of instance-seeds pairs. If None, the run_history is
queried for all runs of the given configuration.
Returns
----------
list
"""
try:
id_ = run_history.config_ids[config.__repr__()]
except KeyError: # challenger was not running so far
return []
if instance_seed_pairs is None:
instance_seed_pairs = run_history.get_runs_for_config(config)
runtimes = []
for i, r in instance_seed_pairs:
k = run_history.RunKey(id_, i, r)
runtimes.append(run_history.data[k].time)
return runtimes
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def _cost(config, run_history, instance_seed_pairs=None):
"""Return array of all costs for the given config for further calculations.
Parameters
----------
config : Configuration
configuration to calculate objective for
run_history : RunHistory
RunHistory object from which the objective value is computed.
instance_seed_pairs : list, optional (default=None)
list of tuples of instance-seeds pairs. If None, the run_history is
queried for all runs of the given configuration.
Returns
----------
list
"""
try:
id_ = run_history.config_ids[config]
except KeyError: # challenger was not running so far
return []
if instance_seed_pairs is None:
instance_seed_pairs = run_history.get_runs_for_config(config)
costs = []
for i, r in instance_seed_pairs:
k = run_history.RunKey(id_, i, r)
costs.append(run_history.data[k].cost)
return costs
|
def _cost(config, run_history, instance_seed_pairs=None):
"""Return array of all costs for the given config for further calculations.
Parameters
----------
config : Configuration
configuration to calculate objective for
run_history : RunHistory
RunHistory object from which the objective value is computed.
instance_seed_pairs : list, optional (default=None)
list of tuples of instance-seeds pairs. If None, the run_history is
queried for all runs of the given configuration.
Returns
----------
list
"""
try:
id_ = run_history.config_ids[config.__repr__()]
except KeyError: # challenger was not running so far
return []
if instance_seed_pairs is None:
instance_seed_pairs = run_history.get_runs_for_config(config)
costs = []
for i, r in instance_seed_pairs:
k = run_history.RunKey(id_, i, r)
costs.append(run_history.data[k].cost)
return costs
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def _add_in_old_format(self, train_perf, incumbent_id, incumbent):
"""
adds entries to old SMAC2-like trajectory file
Parameters
----------
train_perf: float
estimated performance on training (sub)set
incumbent_id: int
id of incumbent
incumbent: Configuration()
current incumbent configuration
"""
conf = []
for p in incumbent:
if not incumbent[p] is None:
conf.append("%s='%s'" % (p, repr(incumbent[p])))
ta_time_used = self.stats.ta_time_used
wallclock_time = self.stats.get_used_wallclock_time()
with open(self.old_traj_fn, "a") as fp:
fp.write(
"%f, %f, %f, %d, %f, %s\n"
% (
ta_time_used,
train_perf,
wallclock_time,
incumbent_id,
wallclock_time - ta_time_used,
", ".join(conf),
)
)
|
def _add_in_old_format(self, train_perf, incumbent_id, incumbent):
"""
adds entries to old SMAC2-like trajectory file
Parameters
----------
train_perf: float
estimated performance on training (sub)set
incumbent_id: int
id of incumbent
incumbent: Configuration()
current incumbent configuration
"""
conf = []
for p in incumbent:
if not incumbent[p] is None:
conf.append("%s='%s'" % (p, incumbent[p]))
ta_time_used = self.stats.ta_time_used
wallclock_time = self.stats.get_used_wallclock_time()
with open(self.old_traj_fn, "a") as fp:
fp.write(
"%f, %f, %f, %d, %f, %s\n"
% (
ta_time_used,
train_perf,
wallclock_time,
incumbent_id,
wallclock_time - ta_time_used,
", ".join(conf),
)
)
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def _add_in_aclib_format(self, train_perf, incumbent_id, incumbent):
"""
adds entries to AClib2-like trajectory file
Parameters
----------
train_perf: float
estimated performance on training (sub)set
incumbent_id: int
id of incumbent
incumbent: Configuration()
current incumbent configuration
"""
conf = []
for p in incumbent:
if not incumbent[p] is None:
conf.append("%s='%s'" % (p, repr(incumbent[p])))
ta_time_used = self.stats.ta_time_used
wallclock_time = self.stats.get_used_wallclock_time()
traj_entry = {
"cpu_time": ta_time_used,
"total_cpu_time": None, # TODO: fix this
"wallclock_time": wallclock_time,
"evaluations": self.stats.ta_runs,
"cost": train_perf,
"incumbent": conf,
}
with open(self.aclib_traj_fn, "a") as fp:
json.dump(traj_entry, fp)
fp.write("\n")
|
def _add_in_aclib_format(self, train_perf, incumbent_id, incumbent):
"""
adds entries to AClib2-like trajectory file
Parameters
----------
train_perf: float
estimated performance on training (sub)set
incumbent_id: int
id of incumbent
incumbent: Configuration()
current incumbent configuration
"""
conf = []
for p in incumbent:
if not incumbent[p] is None:
conf.append("%s='%s'" % (p, incumbent[p]))
ta_time_used = self.stats.ta_time_used
wallclock_time = self.stats.get_used_wallclock_time()
traj_entry = {
"cpu_time": ta_time_used,
"total_cpu_time": None, # TODO: fix this
"wallclock_time": wallclock_time,
"evaluations": self.stats.ta_runs,
"cost": train_perf,
"incumbent": conf,
}
with open(self.aclib_traj_fn, "a") as fp:
json.dump(traj_entry, fp)
fp.write("\n")
|
https://github.com/automl/SMAC3/issues/17
|
KeyError Traceback (most recent call last)
<ipython-input-10-a9f6024df581> in <module>()
38 smbo = SMBO(scenario=scenario, rng=1, tae_runner=taf)
39 Stats.scenario = scenario
---> 40 smbo.run(max_iters=100)
41
42 print("Final Incumbent: %s" % (smbo.incumbent))
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/smbo.pyc in run(self, max_iters)
291 run_history=self.runhistory,
292 objective=self.objective,
--> 293 time_bound=max(0.01, time_spend))
294
295 # TODO: Write run history into database
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/intensification.pyc in intensify(self, challengers, incumbent, run_history, objective, time_bound)
226 chall_inst_seeds = set(map(lambda x: (
227 x.instance, x.seed), run_history.get_runs_for_config(challenger)))
--> 228 chal_perf = objective(challenger, run_history, chall_inst_seeds)
229 run_history.update_cost(challenger, chal_perf)
230
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in average_cost(config, run_history, instance_seed_pairs)
116 float
117 """
--> 118 return np.mean(_cost(config, run_history, instance_seed_pairs))
119
120 def sum_cost(config, run_history, instance_seed_pairs=None):
/home/feurerm/virtualenvs/ml2016/local/lib/python2.7/site-packages/smac/smbo/objective.pyc in _cost(config, run_history, instance_seed_pairs)
93 for i, r in instance_seed_pairs:
94 k = run_history.RunKey(id_, i, r)
---> 95 costs.append(run_history.data[k].cost)
96 return costs
97
KeyError: RunKey(config_id=36, instance_id='2', seed=931243971)
|
KeyError
|
def _update_package_cache(self):
if (
not self.package_caching
or not config.cache_packages_path
or not config.write_package_cache
or not self.success
):
return
# see PackageCache.add_variants_async
if not system.is_production_rez_install:
return
pkgcache = PackageCache(config.cache_packages_path)
pkgcache.add_variants_async(self.resolved_packages)
|
def _update_package_cache(self):
if (
not self.package_caching
or not config.cache_packages_path
or not config.write_package_cache
):
return
# see PackageCache.add_variants_async
if not system.is_production_rez_install:
return
pkgcache = PackageCache(config.cache_packages_path)
pkgcache.add_variants_async(self.resolved_packages)
|
https://github.com/nerdvegas/rez/issues/905
|
Traceback (most recent call last):
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/bin/rez/rez", line 8, in <module>
sys.exit(run_rez())
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/lib/python2.7/site-packages/rez/cli/_entry_points.py", line 64, in run_rez
return run()
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/lib/python2.7/site-packages/rez/cli/_main.py", line 160, in run
returncode = run_cmd()
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/lib/python2.7/site-packages/rez/cli/_main.py", line 152, in run_cmd
return func(opts, opts.parser, extra_arg_groups)
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/lib/python2.7/site-packages/rez/cli/env.py", line 211, in command
package_caching=(not opts.no_pkg_cache)
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/lib/python2.7/site-packages/rez/resolved_context.py", line 316, in __init__
self._update_package_cache()
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/lib/python2.7/site-packages/rez/resolved_context.py", line 1572, in _update_package_cache
pkgcache.add_variants_async(self.resolved_packages)
File "/library/rez/user_packages/rez-pre-release/rez/2.61.0.pr/os-CentOS-7/python-2.7/lib/python2.7/site-packages/rez/package_cache.py", line 379, in add_variants_async
for variant in variants:
TypeError: 'NoneType' object is not iterable
|
TypeError
|
def get_function(self):
def equal_scalar(vals):
return pd.Series(vals) == self.value
return equal_scalar
|
def get_function(self):
def equal_scalar(vals):
# case to correct pandas type for comparison
return pd.Series(vals).astype(pd.Series([self.value]).dtype) == self.value
return equal_scalar
|
https://github.com/alteryx/featuretools/issues/496
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-8bb8b3216cc3> in <module>
6 target_entity=MONTH_ENTITY_NAME,
7 features_only=not RUN_DFS,
----> 8 **dfs_definition
9 )
10
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\synthesis\dfs.py in dfs(entities, relationships, entityset, target_entity, cutoff_time, instance_ids, agg_primitives, trans_primitives, groupby_trans_primitives, allowed_paths, max_depth, ignore_entities, ignore_variables, seed_features, drop_contains, drop_exact, where_primitives, max_features, cutoff_time_in_index, save_progress, features_only, training_window, approximate, chunk_size, n_jobs, dask_kwargs, verbose, return_variable_types)
233 n_jobs=n_jobs,
234 dask_kwargs=dask_kwargs,
--> 235 verbose=verbose)
236 return feature_matrix, features
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_feature_matrix(features, entityset, cutoff_time, instance_ids, entities, relationships, cutoff_time_in_index, training_window, approximate, save_progress, verbose, chunk_size, n_jobs, dask_kwargs, profile)
252 target_time=target_time,
253 pass_columns=pass_columns,
--> 254 dask_kwargs=dask_kwargs or {})
255 else:
256 feature_matrix = linear_calculate_chunks(chunks=chunks,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in parallel_calculate_chunks(chunks, features, approximate, training_window, verbose, save_progress, entityset, n_jobs, no_unapproximated_aggs, cutoff_df_time_var, target_time, pass_columns, dask_kwargs)
600 pbar = make_tqdm_iterator(total=len(_chunks), bar_format=pbar_str)
601 for batch in iterator:
--> 602 results = client.gather(batch)
603 for result in results:
604 feature_matrix.append(result)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in gather(self, futures, errors, maxsize, direct, asynchronous)
1654 return self.sync(self._gather, futures, errors=errors,
1655 direct=direct, local_worker=local_worker,
-> 1656 asynchronous=asynchronous)
1657
1658 @gen.coroutine
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in sync(self, func, *args, **kwargs)
674 return future
675 else:
--> 676 return sync(self.loop, func, *args, **kwargs)
677
678 def __repr__(self):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in sync(loop, func, *args, **kwargs)
275 e.wait(10)
276 if error[0]:
--> 277 six.reraise(*error[0])
278 else:
279 return result[0]
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in f()
260 if timeout is not None:
261 future = gen.with_timeout(timedelta(seconds=timeout), future)
--> 262 result[0] = yield future
263 except Exception as exc:
264 error[0] = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
727
728 try:
--> 729 value = future.result()
730 except Exception:
731 exc_info = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
734 if exc_info is not None:
735 try:
--> 736 yielded = self.gen.throw(*exc_info) # type: ignore
737 finally:
738 # Break up a reference to itself
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in _gather(self, futures, errors, direct, local_worker)
1495 six.reraise(type(exception),
1496 exception,
-> 1497 traceback)
1498 if errors == 'skip':
1499 bad_keys.add(key)
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
690 value = tp()
691 if value.__traceback__ is not tb:
--> 692 raise value.with_traceback(tb)
693 raise value
694 finally:
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\dask\compatibility.py in apply()
91 def apply(func, args, kwargs=None):
92 if kwargs:
---> 93 return func(*args, **kwargs)
94 else:
95 return func(*args)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_chunk()
349 ids,
350 precalculated_features=precalculated_features,
--> 351 training_window=window)
352
353 id_name = _feature_matrix.index.name
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\utils.py in wrapped()
34 def wrapped(*args, **kwargs):
35 if save_progress is None:
---> 36 r = method(*args, **kwargs)
37 else:
38 time = args[0].to_pydatetime()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calc_results()
323 precalculated_features=precalculated_features,
324 ignored=all_approx_feature_set,
--> 325 profile=profile)
326 return matrix
327
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in calculate_all_features()
195
196 handler = self._feature_type_handler(test_feature)
--> 197 result_frame = handler(group, input_frames)
198
199 output_frames_type = self.feature_tree.output_frames_type(test_feature)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in _calculate_transform_features()
321 values = feature_func(*variable_data, time=self.time_last)
322 else:
--> 323 values = feature_func(*variable_data)
324
325 # if we don't get just the values, the assignment breaks when indexes don't match
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\primitives\standard\binary_transform.py in equal_scalar()
159 def equal_scalar(vals):
160 # case to correct pandas type for comparison
--> 161 return pd.Series(vals).astype(pd.Series([self.value]).dtype) == self.value
162 return equal_scalar
163
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\generic.py in astype()
5689 # else, only a single dtype is given
5690 new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
-> 5691 **kwargs)
5692 return self._constructor(new_data).__finalize__(self)
5693
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in astype()
529
530 def astype(self, dtype, **kwargs):
--> 531 return self.apply('astype', dtype=dtype, **kwargs)
532
533 def convert(self, **kwargs):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in apply()
393 copy=align_copy)
394
--> 395 applied = getattr(b, f)(**kwargs)
396 result_blocks = _extend_blocks(applied, result_blocks)
397
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in astype()
532 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
533 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 534 **kwargs)
535
536 def _astype(self, dtype, copy=False, errors='raise', values=None,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in _astype()
631
632 # _astype_nansafe works fine with 1-d only
--> 633 values = astype_nansafe(values.ravel(), dtype, copy=True)
634
635 # TODO(extension)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\dtypes\cast.py in astype_nansafe()
700 if copy or is_object_dtype(arr) or is_object_dtype(dtype):
701 # Explicit copy, or required since NumPy can't view from / to object.
--> 702 return arr.astype(dtype, copy=True)
703
704 return arr.view(dtype)
ValueError: could not convert string to float: 'Sázava'
|
ValueError
|
def equal_scalar(vals):
return pd.Series(vals) == self.value
|
def equal_scalar(vals):
# case to correct pandas type for comparison
return pd.Series(vals).astype(pd.Series([self.value]).dtype) == self.value
|
https://github.com/alteryx/featuretools/issues/496
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-8bb8b3216cc3> in <module>
6 target_entity=MONTH_ENTITY_NAME,
7 features_only=not RUN_DFS,
----> 8 **dfs_definition
9 )
10
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\synthesis\dfs.py in dfs(entities, relationships, entityset, target_entity, cutoff_time, instance_ids, agg_primitives, trans_primitives, groupby_trans_primitives, allowed_paths, max_depth, ignore_entities, ignore_variables, seed_features, drop_contains, drop_exact, where_primitives, max_features, cutoff_time_in_index, save_progress, features_only, training_window, approximate, chunk_size, n_jobs, dask_kwargs, verbose, return_variable_types)
233 n_jobs=n_jobs,
234 dask_kwargs=dask_kwargs,
--> 235 verbose=verbose)
236 return feature_matrix, features
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_feature_matrix(features, entityset, cutoff_time, instance_ids, entities, relationships, cutoff_time_in_index, training_window, approximate, save_progress, verbose, chunk_size, n_jobs, dask_kwargs, profile)
252 target_time=target_time,
253 pass_columns=pass_columns,
--> 254 dask_kwargs=dask_kwargs or {})
255 else:
256 feature_matrix = linear_calculate_chunks(chunks=chunks,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in parallel_calculate_chunks(chunks, features, approximate, training_window, verbose, save_progress, entityset, n_jobs, no_unapproximated_aggs, cutoff_df_time_var, target_time, pass_columns, dask_kwargs)
600 pbar = make_tqdm_iterator(total=len(_chunks), bar_format=pbar_str)
601 for batch in iterator:
--> 602 results = client.gather(batch)
603 for result in results:
604 feature_matrix.append(result)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in gather(self, futures, errors, maxsize, direct, asynchronous)
1654 return self.sync(self._gather, futures, errors=errors,
1655 direct=direct, local_worker=local_worker,
-> 1656 asynchronous=asynchronous)
1657
1658 @gen.coroutine
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in sync(self, func, *args, **kwargs)
674 return future
675 else:
--> 676 return sync(self.loop, func, *args, **kwargs)
677
678 def __repr__(self):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in sync(loop, func, *args, **kwargs)
275 e.wait(10)
276 if error[0]:
--> 277 six.reraise(*error[0])
278 else:
279 return result[0]
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in f()
260 if timeout is not None:
261 future = gen.with_timeout(timedelta(seconds=timeout), future)
--> 262 result[0] = yield future
263 except Exception as exc:
264 error[0] = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
727
728 try:
--> 729 value = future.result()
730 except Exception:
731 exc_info = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
734 if exc_info is not None:
735 try:
--> 736 yielded = self.gen.throw(*exc_info) # type: ignore
737 finally:
738 # Break up a reference to itself
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in _gather(self, futures, errors, direct, local_worker)
1495 six.reraise(type(exception),
1496 exception,
-> 1497 traceback)
1498 if errors == 'skip':
1499 bad_keys.add(key)
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
690 value = tp()
691 if value.__traceback__ is not tb:
--> 692 raise value.with_traceback(tb)
693 raise value
694 finally:
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\dask\compatibility.py in apply()
91 def apply(func, args, kwargs=None):
92 if kwargs:
---> 93 return func(*args, **kwargs)
94 else:
95 return func(*args)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_chunk()
349 ids,
350 precalculated_features=precalculated_features,
--> 351 training_window=window)
352
353 id_name = _feature_matrix.index.name
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\utils.py in wrapped()
34 def wrapped(*args, **kwargs):
35 if save_progress is None:
---> 36 r = method(*args, **kwargs)
37 else:
38 time = args[0].to_pydatetime()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calc_results()
323 precalculated_features=precalculated_features,
324 ignored=all_approx_feature_set,
--> 325 profile=profile)
326 return matrix
327
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in calculate_all_features()
195
196 handler = self._feature_type_handler(test_feature)
--> 197 result_frame = handler(group, input_frames)
198
199 output_frames_type = self.feature_tree.output_frames_type(test_feature)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in _calculate_transform_features()
321 values = feature_func(*variable_data, time=self.time_last)
322 else:
--> 323 values = feature_func(*variable_data)
324
325 # if we don't get just the values, the assignment breaks when indexes don't match
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\primitives\standard\binary_transform.py in equal_scalar()
159 def equal_scalar(vals):
160 # case to correct pandas type for comparison
--> 161 return pd.Series(vals).astype(pd.Series([self.value]).dtype) == self.value
162 return equal_scalar
163
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\generic.py in astype()
5689 # else, only a single dtype is given
5690 new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
-> 5691 **kwargs)
5692 return self._constructor(new_data).__finalize__(self)
5693
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in astype()
529
530 def astype(self, dtype, **kwargs):
--> 531 return self.apply('astype', dtype=dtype, **kwargs)
532
533 def convert(self, **kwargs):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in apply()
393 copy=align_copy)
394
--> 395 applied = getattr(b, f)(**kwargs)
396 result_blocks = _extend_blocks(applied, result_blocks)
397
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in astype()
532 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
533 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 534 **kwargs)
535
536 def _astype(self, dtype, copy=False, errors='raise', values=None,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in _astype()
631
632 # _astype_nansafe works fine with 1-d only
--> 633 values = astype_nansafe(values.ravel(), dtype, copy=True)
634
635 # TODO(extension)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\dtypes\cast.py in astype_nansafe()
700 if copy or is_object_dtype(arr) or is_object_dtype(dtype):
701 # Explicit copy, or required since NumPy can't view from / to object.
--> 702 return arr.astype(dtype, copy=True)
703
704 return arr.view(dtype)
ValueError: could not convert string to float: 'Sázava'
|
ValueError
|
def get_function(self):
def not_equal_scalar(vals):
return pd.Series(vals) != self.value
return not_equal_scalar
|
def get_function(self):
def not_equal_scalar(vals):
# case to correct pandas type for comparison
return pd.Series(vals).astype(pd.Series([self.value]).dtype) != self.value
return not_equal_scalar
|
https://github.com/alteryx/featuretools/issues/496
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-8bb8b3216cc3> in <module>
6 target_entity=MONTH_ENTITY_NAME,
7 features_only=not RUN_DFS,
----> 8 **dfs_definition
9 )
10
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\synthesis\dfs.py in dfs(entities, relationships, entityset, target_entity, cutoff_time, instance_ids, agg_primitives, trans_primitives, groupby_trans_primitives, allowed_paths, max_depth, ignore_entities, ignore_variables, seed_features, drop_contains, drop_exact, where_primitives, max_features, cutoff_time_in_index, save_progress, features_only, training_window, approximate, chunk_size, n_jobs, dask_kwargs, verbose, return_variable_types)
233 n_jobs=n_jobs,
234 dask_kwargs=dask_kwargs,
--> 235 verbose=verbose)
236 return feature_matrix, features
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_feature_matrix(features, entityset, cutoff_time, instance_ids, entities, relationships, cutoff_time_in_index, training_window, approximate, save_progress, verbose, chunk_size, n_jobs, dask_kwargs, profile)
252 target_time=target_time,
253 pass_columns=pass_columns,
--> 254 dask_kwargs=dask_kwargs or {})
255 else:
256 feature_matrix = linear_calculate_chunks(chunks=chunks,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in parallel_calculate_chunks(chunks, features, approximate, training_window, verbose, save_progress, entityset, n_jobs, no_unapproximated_aggs, cutoff_df_time_var, target_time, pass_columns, dask_kwargs)
600 pbar = make_tqdm_iterator(total=len(_chunks), bar_format=pbar_str)
601 for batch in iterator:
--> 602 results = client.gather(batch)
603 for result in results:
604 feature_matrix.append(result)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in gather(self, futures, errors, maxsize, direct, asynchronous)
1654 return self.sync(self._gather, futures, errors=errors,
1655 direct=direct, local_worker=local_worker,
-> 1656 asynchronous=asynchronous)
1657
1658 @gen.coroutine
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in sync(self, func, *args, **kwargs)
674 return future
675 else:
--> 676 return sync(self.loop, func, *args, **kwargs)
677
678 def __repr__(self):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in sync(loop, func, *args, **kwargs)
275 e.wait(10)
276 if error[0]:
--> 277 six.reraise(*error[0])
278 else:
279 return result[0]
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in f()
260 if timeout is not None:
261 future = gen.with_timeout(timedelta(seconds=timeout), future)
--> 262 result[0] = yield future
263 except Exception as exc:
264 error[0] = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
727
728 try:
--> 729 value = future.result()
730 except Exception:
731 exc_info = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
734 if exc_info is not None:
735 try:
--> 736 yielded = self.gen.throw(*exc_info) # type: ignore
737 finally:
738 # Break up a reference to itself
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in _gather(self, futures, errors, direct, local_worker)
1495 six.reraise(type(exception),
1496 exception,
-> 1497 traceback)
1498 if errors == 'skip':
1499 bad_keys.add(key)
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
690 value = tp()
691 if value.__traceback__ is not tb:
--> 692 raise value.with_traceback(tb)
693 raise value
694 finally:
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\dask\compatibility.py in apply()
91 def apply(func, args, kwargs=None):
92 if kwargs:
---> 93 return func(*args, **kwargs)
94 else:
95 return func(*args)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_chunk()
349 ids,
350 precalculated_features=precalculated_features,
--> 351 training_window=window)
352
353 id_name = _feature_matrix.index.name
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\utils.py in wrapped()
34 def wrapped(*args, **kwargs):
35 if save_progress is None:
---> 36 r = method(*args, **kwargs)
37 else:
38 time = args[0].to_pydatetime()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calc_results()
323 precalculated_features=precalculated_features,
324 ignored=all_approx_feature_set,
--> 325 profile=profile)
326 return matrix
327
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in calculate_all_features()
195
196 handler = self._feature_type_handler(test_feature)
--> 197 result_frame = handler(group, input_frames)
198
199 output_frames_type = self.feature_tree.output_frames_type(test_feature)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in _calculate_transform_features()
321 values = feature_func(*variable_data, time=self.time_last)
322 else:
--> 323 values = feature_func(*variable_data)
324
325 # if we don't get just the values, the assignment breaks when indexes don't match
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\primitives\standard\binary_transform.py in equal_scalar()
159 def equal_scalar(vals):
160 # case to correct pandas type for comparison
--> 161 return pd.Series(vals).astype(pd.Series([self.value]).dtype) == self.value
162 return equal_scalar
163
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\generic.py in astype()
5689 # else, only a single dtype is given
5690 new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
-> 5691 **kwargs)
5692 return self._constructor(new_data).__finalize__(self)
5693
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in astype()
529
530 def astype(self, dtype, **kwargs):
--> 531 return self.apply('astype', dtype=dtype, **kwargs)
532
533 def convert(self, **kwargs):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in apply()
393 copy=align_copy)
394
--> 395 applied = getattr(b, f)(**kwargs)
396 result_blocks = _extend_blocks(applied, result_blocks)
397
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in astype()
532 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
533 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 534 **kwargs)
535
536 def _astype(self, dtype, copy=False, errors='raise', values=None,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in _astype()
631
632 # _astype_nansafe works fine with 1-d only
--> 633 values = astype_nansafe(values.ravel(), dtype, copy=True)
634
635 # TODO(extension)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\dtypes\cast.py in astype_nansafe()
700 if copy or is_object_dtype(arr) or is_object_dtype(dtype):
701 # Explicit copy, or required since NumPy can't view from / to object.
--> 702 return arr.astype(dtype, copy=True)
703
704 return arr.view(dtype)
ValueError: could not convert string to float: 'Sázava'
|
ValueError
|
def not_equal_scalar(vals):
return pd.Series(vals) != self.value
|
def not_equal_scalar(vals):
# case to correct pandas type for comparison
return pd.Series(vals).astype(pd.Series([self.value]).dtype) != self.value
|
https://github.com/alteryx/featuretools/issues/496
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-6-8bb8b3216cc3> in <module>
6 target_entity=MONTH_ENTITY_NAME,
7 features_only=not RUN_DFS,
----> 8 **dfs_definition
9 )
10
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\synthesis\dfs.py in dfs(entities, relationships, entityset, target_entity, cutoff_time, instance_ids, agg_primitives, trans_primitives, groupby_trans_primitives, allowed_paths, max_depth, ignore_entities, ignore_variables, seed_features, drop_contains, drop_exact, where_primitives, max_features, cutoff_time_in_index, save_progress, features_only, training_window, approximate, chunk_size, n_jobs, dask_kwargs, verbose, return_variable_types)
233 n_jobs=n_jobs,
234 dask_kwargs=dask_kwargs,
--> 235 verbose=verbose)
236 return feature_matrix, features
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_feature_matrix(features, entityset, cutoff_time, instance_ids, entities, relationships, cutoff_time_in_index, training_window, approximate, save_progress, verbose, chunk_size, n_jobs, dask_kwargs, profile)
252 target_time=target_time,
253 pass_columns=pass_columns,
--> 254 dask_kwargs=dask_kwargs or {})
255 else:
256 feature_matrix = linear_calculate_chunks(chunks=chunks,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in parallel_calculate_chunks(chunks, features, approximate, training_window, verbose, save_progress, entityset, n_jobs, no_unapproximated_aggs, cutoff_df_time_var, target_time, pass_columns, dask_kwargs)
600 pbar = make_tqdm_iterator(total=len(_chunks), bar_format=pbar_str)
601 for batch in iterator:
--> 602 results = client.gather(batch)
603 for result in results:
604 feature_matrix.append(result)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in gather(self, futures, errors, maxsize, direct, asynchronous)
1654 return self.sync(self._gather, futures, errors=errors,
1655 direct=direct, local_worker=local_worker,
-> 1656 asynchronous=asynchronous)
1657
1658 @gen.coroutine
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in sync(self, func, *args, **kwargs)
674 return future
675 else:
--> 676 return sync(self.loop, func, *args, **kwargs)
677
678 def __repr__(self):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in sync(loop, func, *args, **kwargs)
275 e.wait(10)
276 if error[0]:
--> 277 six.reraise(*error[0])
278 else:
279 return result[0]
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\utils.py in f()
260 if timeout is not None:
261 future = gen.with_timeout(timedelta(seconds=timeout), future)
--> 262 result[0] = yield future
263 except Exception as exc:
264 error[0] = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
727
728 try:
--> 729 value = future.result()
730 except Exception:
731 exc_info = sys.exc_info()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\tornado\gen.py in run(self)
734 if exc_info is not None:
735 try:
--> 736 yielded = self.gen.throw(*exc_info) # type: ignore
737 finally:
738 # Break up a reference to itself
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\distributed\client.py in _gather(self, futures, errors, direct, local_worker)
1495 six.reraise(type(exception),
1496 exception,
-> 1497 traceback)
1498 if errors == 'skip':
1499 bad_keys.add(key)
~\AppData\Roaming\Python\Python37\site-packages\six.py in reraise(tp, value, tb)
690 value = tp()
691 if value.__traceback__ is not tb:
--> 692 raise value.with_traceback(tb)
693 raise value
694 finally:
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\dask\compatibility.py in apply()
91 def apply(func, args, kwargs=None):
92 if kwargs:
---> 93 return func(*args, **kwargs)
94 else:
95 return func(*args)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calculate_chunk()
349 ids,
350 precalculated_features=precalculated_features,
--> 351 training_window=window)
352
353 id_name = _feature_matrix.index.name
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\utils.py in wrapped()
34 def wrapped(*args, **kwargs):
35 if save_progress is None:
---> 36 r = method(*args, **kwargs)
37 else:
38 time = args[0].to_pydatetime()
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\calculate_feature_matrix.py in calc_results()
323 precalculated_features=precalculated_features,
324 ignored=all_approx_feature_set,
--> 325 profile=profile)
326 return matrix
327
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in calculate_all_features()
195
196 handler = self._feature_type_handler(test_feature)
--> 197 result_frame = handler(group, input_frames)
198
199 output_frames_type = self.feature_tree.output_frames_type(test_feature)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\computational_backends\pandas_backend.py in _calculate_transform_features()
321 values = feature_func(*variable_data, time=self.time_last)
322 else:
--> 323 values = feature_func(*variable_data)
324
325 # if we don't get just the values, the assignment breaks when indexes don't match
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\featuretools\primitives\standard\binary_transform.py in equal_scalar()
159 def equal_scalar(vals):
160 # case to correct pandas type for comparison
--> 161 return pd.Series(vals).astype(pd.Series([self.value]).dtype) == self.value
162 return equal_scalar
163
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\generic.py in astype()
5689 # else, only a single dtype is given
5690 new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
-> 5691 **kwargs)
5692 return self._constructor(new_data).__finalize__(self)
5693
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in astype()
529
530 def astype(self, dtype, **kwargs):
--> 531 return self.apply('astype', dtype=dtype, **kwargs)
532
533 def convert(self, **kwargs):
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\managers.py in apply()
393 copy=align_copy)
394
--> 395 applied = getattr(b, f)(**kwargs)
396 result_blocks = _extend_blocks(applied, result_blocks)
397
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in astype()
532 def astype(self, dtype, copy=False, errors='raise', values=None, **kwargs):
533 return self._astype(dtype, copy=copy, errors=errors, values=values,
--> 534 **kwargs)
535
536 def _astype(self, dtype, copy=False, errors='raise', values=None,
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\internals\blocks.py in _astype()
631
632 # _astype_nansafe works fine with 1-d only
--> 633 values = astype_nansafe(values.ravel(), dtype, copy=True)
634
635 # TODO(extension)
c:\users\jan.hynek\appdata\local\programs\python\python37-32\lib\site-packages\pandas\core\dtypes\cast.py in astype_nansafe()
700 if copy or is_object_dtype(arr) or is_object_dtype(dtype):
701 # Explicit copy, or required since NumPy can't view from / to object.
--> 702 return arr.astype(dtype, copy=True)
703
704 return arr.view(dtype)
ValueError: could not convert string to float: 'Sázava'
|
ValueError
|
def load_resource(self, item):
resource = super(ConfigTable, self).load_resource(item)
sse_info = resource.pop("Ssedescription", None)
if sse_info is None:
return resource
resource["SSEDescription"] = sse_info
for k, r in (("KmsmasterKeyArn", "KMSMasterKeyArn"), ("Ssetype", "SSEType")):
if k in sse_info:
sse_info[r] = sse_info.pop(k)
return resource
|
def load_resource(self, item):
resource = super(ConfigTable, self).load_resource(item)
resource["CreationDateTime"] = datetime.fromtimestamp(
resource["CreationDateTime"] / 1000.0
)
if (
"BillingModeSummary" in resource
and "LastUpdateToPayPerRequestDateTime" in resource["BillingModeSummary"]
):
resource["BillingModeSummary"]["LastUpdateToPayPerRequestDateTime"] = (
datetime.fromtimestamp(
resource["BillingModeSummary"]["LastUpdateToPayPerRequestDateTime"]
/ 1000.0
)
)
sse_info = resource.pop("Ssedescription", None)
if sse_info is None:
return resource
resource["SSEDescription"] = sse_info
for k, r in (("KmsmasterKeyArn", "KMSMasterKeyArn"), ("Ssetype", "SSEType")):
if k in sse_info:
sse_info[r] = sse_info.pop(k)
return resource
|
https://github.com/cloud-custodian/cloud-custodian/issues/6470
|
[ERROR] TypeError: unsupported operand type(s) for /: 'datetime.datetime' and 'float'
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 165, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1140, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 853, in run
resources = super(ConfigRuleMode, self).run(event, lambda_context)
File "/var/task/c7n/policy.py", line 437, in run
resources = self.resolve_resources(event)
File "/var/task/c7n/policy.py", line 835, in resolve_resources
return [source.load_resource(self.cfg_event['configurationItem'])]
File "/var/task/c7n/resources/dynamodb.py", line 23, in load_resource
resource['CreationDateTime'] = datetime.fromtimestamp(resource['CreationDateTime'] / 1000.0)
|
TypeError
|
def run(self, event, lambda_context):
cfg_event = json.loads(event["invokingEvent"])
resource_type = self.policy.resource_manager.resource_type.cfn_type
resource_id = self.policy.resource_manager.resource_type.id
client = self._get_client()
token = event.get("resultToken")
matched_resources = set()
for r in PullMode.run(self):
matched_resources.add(r[resource_id])
unmatched_resources = set()
for r in self.policy.resource_manager.get_resource_manager(
self.policy.resource_type
).resources():
if r[resource_id] not in matched_resources:
unmatched_resources.add(r[resource_id])
evaluations = [
dict(
ComplianceResourceType=resource_type,
ComplianceResourceId=r,
ComplianceType="NON_COMPLIANT",
OrderingTimestamp=cfg_event["notificationCreationTime"],
Annotation="The resource is not compliant with policy:%s."
% (self.policy.name),
)
for r in matched_resources
]
if evaluations and token:
self.put_evaluations(client, token, evaluations)
evaluations = [
dict(
ComplianceResourceType=resource_type,
ComplianceResourceId=r,
ComplianceType="COMPLIANT",
OrderingTimestamp=cfg_event["notificationCreationTime"],
Annotation="The resource is compliant with policy:%s." % (self.policy.name),
)
for r in unmatched_resources
]
if evaluations and token:
self.put_evaluations(client, token, evaluations)
return list(matched_resources)
|
def run(self, event, lambda_context):
cfg_event = json.loads(event["invokingEvent"])
resource_type = self.policy.resource_manager.resource_type.cfn_type
resource_id = self.policy.resource_manager.resource_type.id
client = self._get_client()
matched_resources = set()
for r in PullMode.run(self):
matched_resources.add(r[resource_id])
unmatched_resources = set()
for r in self.policy.resource_manager.get_resource_manager(
self.policy.resource_type
).resources():
if r[resource_id] not in matched_resources:
unmatched_resources.add(r[resource_id])
evaluations = [
dict(
ComplianceResourceType=resource_type,
ComplianceResourceId=r,
ComplianceType="NON_COMPLIANT",
OrderingTimestamp=cfg_event["notificationCreationTime"],
Annotation="The resource is not compliant with policy:%s."
% (self.policy.name),
)
for r in matched_resources
]
if evaluations:
self.policy.resource_manager.retry(
client.put_evaluations,
Evaluations=evaluations,
ResultToken=event.get("resultToken", "No token found."),
)
evaluations = [
dict(
ComplianceResourceType=resource_type,
ComplianceResourceId=r,
ComplianceType="COMPLIANT",
OrderingTimestamp=cfg_event["notificationCreationTime"],
Annotation="The resource is compliant with policy:%s." % (self.policy.name),
)
for r in unmatched_resources
]
if evaluations:
self.policy.resource_manager.retry(
client.put_evaluations,
Evaluations=evaluations,
ResultToken=event.get("resultToken", "No token found."),
)
return list(matched_resources)
|
https://github.com/cloud-custodian/cloud-custodian/issues/6457
|
[ERROR] 2021-02-16T16:23:00.481Z befa31da-95e0-41c6-b47c-c7eb9c0338d7 error during policy execution
Traceback (most recent call last):
File "/var/task/c7n/handler.py", line 165, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1140, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 790, in run
self.policy.resource_manager.retry(
File "/var/task/c7n/utils.py", line 444, in _retry
return func(*args, **kw)
File "/var/runtime/botocore/client.py", line 357, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/var/runtime/botocore/client.py", line 676, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.ValidationException: An error occurred (ValidationException) when calling the PutEvaluations operation: 1 validation error detected: Value '[com.amazonaws.starling.dove.Evaluation@27de0032, com.amazonaws.starling.dove.Evaluation@35e1583c, com.amazonaws.starling.dove.Evaluation@57c577dd, com.amazonaws.starling.dove.Evaluation@83d31304, com.amazonaws.starling.dove.Evaluation@88179743, com.amazonaws.starling.dove.Evaluation@43843883, com.amazonaws.starling.dove.Evaluation@5275a3f1, com.amazonaws.starling.dove.Evaluation@eb15ef74, com.amazonaws.starling.dove.Evaluation@85ae8df3, com.amazonaws.starling.dove.Evaluation@986519ca, com.amazonaws.starling.dove.Evaluation@a66adf4, com.amazonaws.starling.dove.Evaluation@14c8caae, com.amazonaws.starling.dove.Evaluation@732687a2, com.amazonaws.starling.dove.Evaluation@ab113137, com.amazonaws.starling.dove.Evaluation@f732086e, com.amazonaws.starling.dove.Evaluation@ee1be098, com.amazonaws.starling.dove.Evaluation@49e0cace, com.amazonaws.starling.dove.Evaluation@de6946db, com.amazonaws.starling.dove.Evaluation@5b4bc798, com.amazonaws.starling.dove.Evaluation@795bd423, com.amazonaws.starling.dove.Evaluation@ec34a188, com.amazonaws.starling.dove.Evaluation@a919f601, com.amazonaws.starling.dove.Evaluation@fa876e93, com.amazonaws.starling.dove.Evaluation@a8bd23c9, com.amazonaws.starling.dove.Evaluation@b4e94a84, com.amazonaws.starling.dove.Evaluation@cb541157, com.amazonaws.starling.dove.Evaluation@d5fe6f5a, com.amazonaws.starling.dove.Evaluation@5845d674, com.amazonaws.starling.dove.Evaluation@7a304149, com.amazonaws.starling.dove.Evaluation@c9ced591, com.amazonaws.starling.dove.Evaluation@17cebbd2, com.amazonaws.starling.dove.Evaluation@830c779b, com.amazonaws.starling.dove.Evaluation@197b7b20, com.amazonaws.starling.dove.Evaluation@e55e1cb8, com.amazonaws.starling.dove.Evaluation@e728aeb, com.amazonaws.starling.dove.Evaluation@77cc8b4e, com.amazonaws.starling.dove.Evaluation@25a3f0b0, com.amazonaws.starling.dove.Evaluation@fc7ea9c9, com.amazonaws.starling.dove.Evaluation@12ed64d9, com.amazonaws.starling.dove.Evaluation@49d3cedc, com.amazonaws.starling.dove.Evaluation@36cb954a, com.amazonaws.starling.dove.Evaluation@e913a4de, com.amazonaws.starling.dove.Evaluation@7d2fe739, com.amazonaws.starling.dove.Evaluation@39b2db73, com.amazonaws.starling.dove.Evaluation@54de31b4, com.amazonaws.starling.dove.Evaluation@ecf16a63, com.amazonaws.starling.dove.Evaluation@196bf89a, com.amazonaws.starling.dove.Evaluation@c6614340, com.amazonaws.starling.dove.Evaluation@4508b7b0, com.amazonaws.starling.dove.Evaluation@d2cbce8f, com.amazonaws.starling.dove.Evaluation@4829b45a, com.amazonaws.starling.dove.Evaluation@32db6718, com.amazonaws.starling.dove.Evaluation@10000039, com.amazonaws.starling.dove.Evaluation@e78f25b1, com.amazonaws.starling.dove.Evaluation@d840dd29, com.amazonaws.starling.dove.Evaluation@8490f6c8, com.amazonaws.starling.dove.Evaluation@b6f85207, com.amazonaws.starling.dove.Evaluation@bb73afa0, com.amazonaws.starling.dove.Evaluation@81d07ab3, com.amazonaws.starling.dove.Evaluation@af6c90ca, com.amazonaws.starling.dove.Evaluation@3a5e59ad, com.amazonaws.starling.dove.Evaluation@75610b6c, com.amazonaws.starling.dove.Evaluation@563ac45a, com.amazonaws.starling.dove.Evaluation@f24b0c9c, com.amazonaws.starling.dove.Evaluation@96899edb, com.amazonaws.starling.dove.Evaluation@2c852755, com.amazonaws.starling.dove.Evaluation@8d39927e, com.amazonaws.starling.dove.Evaluation@17f6fbf3, com.amazonaws.starling.dove.Evaluation@2ae3f156, com.amazonaws.starling.dove.Evaluation@31563b2a, com.amazonaws.starling.dove.Evaluation@ed11d1d, com.amazonaws.starling.dove.Evaluation@b8f93562, com.amazonaws.starling.dove.Evaluation@b6ec6db8, com.amazonaws.starling.dove.Evaluation@d18ba10, com.amazonaws.starling.dove.Evaluation@aac714f3, com.amazonaws.starling.dove.Evaluation@c8983162, com.amazonaws.starling.dove.Evaluation@29b97b21, com.amazonaws.starling.dove.Evaluation@675b2efe, com.amazonaws.starling.dove.Evaluation@34b6e207, com.amazonaws.starling.dove.Evaluation@657ef526, com.amazonaws.starling.dove.Evaluation@fd2aedee, com.amazonaws.starling.dove.Evaluation@656c316, com.amazonaws.starling.dove.Evaluation@9c977e7f, com.amazonaws.starling.dove.Evaluation@4cc810f7, com.amazonaws.starling.dove.Evaluation@f4110bc4, com.amazonaws.starling.dove.Evaluation@a3e31c83, com.amazonaws.starling.dove.Evaluation@da1c5818, com.amazonaws.starling.dove.Evaluation@24695f80, com.amazonaws.starling.dove.Evaluation@c35b521c, com.amazonaws.starling.dove.Evaluation@79e698c5, com.amazonaws.starling.dove.Evaluation@b959fff1, com.amazonaws.starling.dove.Evaluation@5a215163, com.amazonaws.starling.dove.Evaluation@d479f02d, com.amazonaws.starling.dove.Evaluation@5ae04031, com.amazonaws.starling.dove.Evaluation@90335f18, com.amazonaws.starling.dove.Evaluation@51ef96b1, com.amazonaws.starling.dove.Evaluation@16a4459d, com.amazonaws.starling.dove.Evaluation@57bf4387, com.amazonaws.starling.dove.Evaluation@c1f5b8ed, com.amazonaws.starling.dove.Evaluation@997681d5, com.amazonaws.starling.dove.Evaluation@8ecc23d2, com.amazonaws.starling.dove.Evaluation@d0d49359, com.amazonaws.starling.dove.Evaluation@a1a1361d, com.amazonaws.starling.dove.Evaluation@228372b5, com.amazonaws.starling.dove.Evaluation@e4bfafb1, com.amazonaws.starling.dove.Evaluation@7a48a110, com.amazonaws.starling.dove.Evaluation@e7602849, com.amazonaws.starling.dove.Evaluation@dca3e7dc, com.amazonaws.starling.dove.Evaluation@d91f6c04, com.amazonaws.starling.dove.Evaluation@89232735, com.amazonaws.starling.dove.Evaluation@616889dd, com.amazonaws.starling.dove.Evaluation@ac4b9420, com.amazonaws.starling.dove.Evaluation@e0b3d2ba, com.amazonaws.starling.dove.Evaluation@97f202a8, com.amazonaws.starling.dove.Evaluation@d1ea9d66, com.amazonaws.starling.dove.Evaluation@e84562e1, com.amazonaws.starling.dove.Evaluation@4d14e584, com.amazonaws.starling.dove.Evaluation@5a15dd5b, com.amazonaws.starling.dove.Evaluation@2187c06f, com.amazonaws.starling.dove.Evaluation@c536cd0b, com.amazonaws.starling.dove.Evaluation@a0dd940a, com.amazonaws.starling.dove.Evaluation@b2d6026f, com.amazonaws.starling.dove.Evaluation@e966adc1, com.amazonaws.starling.dove.Evaluation@1c9c77ca, com.amazonaws.starling.dove.Evaluation@bd051259, com.amazonaws.starling.dove.Evaluation@b99cd7c6, com.amazonaws.starling.dove.Evaluation@ab8355ed, com.amazonaws.starling.dove.Evaluation@7d7cd341, com.amazonaws.starling.dove.Evaluation@87b1f372, com.amazonaws.starling.dove.Evaluation@f28c8c97]' at 'evaluations' failed to satisfy constraint: Member must have length less than or equal to 100
|
botocore.errorfactory.ValidationException
|
def get_mailer_requirements():
deps = [
"azure-keyvault",
"azure-storage-queue",
"azure-storage-blob",
"sendgrid",
] + list(CORE_DEPS)
requirements = generate_requirements(
deps,
ignore=["boto3", "botocore", "pywin32"],
exclude=["pkg_resources"],
include_self=True,
)
return requirements
|
def get_mailer_requirements():
deps = [
"azure-keyvault",
"azure-storage-queue",
"azure-storage-blob",
"sendgrid",
] + list(CORE_DEPS)
requirements = generate_requirements(
deps, ignore=["boto3", "botocore", "pywin32"], include_self=True
)
return requirements
|
https://github.com/cloud-custodian/cloud-custodian/issues/6012
|
root@8b79684d85a3:/# c7n-mailer
Traceback (most recent call last):
File "/usr/local/bin/c7n-mailer", line 5, in <module>
from c7n_mailer.cli import main
File "/usr/local/lib/python3.8/dist-packages/c7n_mailer/cli.py", line 9, in <module>
from c7n_mailer import deploy, utils
File "/usr/local/lib/python3.8/dist-packages/c7n_mailer/deploy.py", line 19, in <module>
from c7n.mu import (
ModuleNotFoundError: No module named 'c7n'
|
ModuleNotFoundError
|
def process(self, resources):
client = local_session(self.manager.session_factory).client("iam")
age = self.data.get("age")
disable = self.data.get("disable")
matched = self.data.get("matched")
if age:
threshold_date = datetime.datetime.now(tz=tzutc()) - timedelta(age)
for r in resources:
if "c7n:AccessKeys" not in r:
r["c7n:AccessKeys"] = client.list_access_keys(UserName=r["UserName"])[
"AccessKeyMetadata"
]
keys = r["c7n:AccessKeys"]
if matched:
m_keys = resolve_credential_keys(
r.get(CredentialReport.matched_annotation_key), keys
)
# It is possible for a _user_ to match multiple credential filters
# without having any single key match them all.
if not m_keys:
continue
keys = m_keys
for k in keys:
if age:
if not k["CreateDate"] < threshold_date:
continue
if disable:
client.update_access_key(
UserName=r["UserName"],
AccessKeyId=k["AccessKeyId"],
Status="Inactive",
)
else:
client.delete_access_key(
UserName=r["UserName"], AccessKeyId=k["AccessKeyId"]
)
|
def process(self, resources):
client = local_session(self.manager.session_factory).client("iam")
age = self.data.get("age")
disable = self.data.get("disable")
matched = self.data.get("matched")
if age:
threshold_date = datetime.datetime.now(tz=tzutc()) - timedelta(age)
for r in resources:
if "c7n:AccessKeys" not in r:
r["c7n:AccessKeys"] = client.list_access_keys(UserName=r["UserName"])[
"AccessKeyMetadata"
]
keys = r["c7n:AccessKeys"]
if matched:
m_keys = resolve_credential_keys(
r.get(CredentialReport.matched_annotation_key), keys
)
assert m_keys, "shouldn't have gotten this far without keys"
keys = m_keys
for k in keys:
if age:
if not k["CreateDate"] < threshold_date:
continue
if disable:
client.update_access_key(
UserName=r["UserName"],
AccessKeyId=k["AccessKeyId"],
Status="Inactive",
)
else:
client.delete_access_key(
UserName=r["UserName"], AccessKeyId=k["AccessKeyId"]
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/6419
|
[ERROR] 2021-01-29T09:48:39.544Z 9911f614-77a7-465b-8544-743b132ff816 Error while executing policyTraceback (most recent call last): File "/var/task/c7n/policy.py", line 316, in run results = a.process(resources) File "/var/task/c7n/resources/iam.py", line 2211, in process assert m_keys, "shouldn't have gotten this far without keys"AssertionError: shouldn't have gotten this far without keys | [ERROR] 2021-01-29T09:48:39.544Z 9911f614-77a7-465b-8544-743b132ff816 Error while executing policy Traceback (most recent call last): File "/var/task/c7n/policy.py", line 316, in run results = a.process(resources) File "/var/task/c7n/resources/iam.py", line 2211, in process assert m_keys, "shouldn't have gotten this far without keys" AssertionError: shouldn't have gotten this far without keys
|
AssertionError
|
def process(self, resources, event):
if event is None:
return
user_info = self.get_tag_value(event)
# will skip writing their UserName tag and not overwrite pre-existing values
if not self.data.get("update", False):
untagged_resources = []
# iterating over all the resources the user spun up in this event
for resource in resources:
tag_already_set = False
for tag in resource.get("Tags", ()):
if tag["Key"] == self.data["tag"]:
tag_already_set = True
break
if not tag_already_set:
untagged_resources.append(resource)
# if update is set to True, we will overwrite the userName tag even if
# the user already set a value
else:
untagged_resources = resources
new_tags = {}
if user_info["user"]:
new_tags[self.data["tag"]] = user_info["user"]
# if principal_id_key is set (and value), we'll set the principalId tag.
principal_id_key = self.data.get("principal_id_tag", None)
if principal_id_key and user_info["id"]:
new_tags[principal_id_key] = user_info["id"]
if new_tags:
self.set_resource_tags(new_tags, untagged_resources)
return new_tags
|
def process(self, resources, event):
if event is None:
return
event = event["detail"]
utype = event["userIdentity"]["type"]
if utype not in self.data.get(
"user-type", ["AssumedRole", "IAMUser", "FederatedUser"]
):
return
user = None
if utype == "IAMUser":
user = event["userIdentity"]["userName"]
principal_id_value = event["userIdentity"].get("principalId", "")
elif utype == "AssumedRole" or utype == "FederatedUser":
user = event["userIdentity"]["arn"]
prefix, user = user.rsplit("/", 1)
principal_id_value = event["userIdentity"].get("principalId", "").split(":")[0]
# instance role
if user.startswith("i-"):
return
# lambda function (old style)
elif user.startswith("awslambda"):
return
if user is None:
return
# if the auto-tag-user policy set update to False (or it's unset) then we
# will skip writing their UserName tag and not overwrite pre-existing values
if not self.data.get("update", False):
untagged_resources = []
# iterating over all the resources the user spun up in this event
for resource in resources:
tag_already_set = False
for tag in resource.get("Tags", ()):
if tag["Key"] == self.data["tag"]:
tag_already_set = True
break
if not tag_already_set:
untagged_resources.append(resource)
# if update is set to True, we will overwrite the userName tag even if
# the user already set a value
else:
untagged_resources = resources
tag_action = self.manager.action_registry.get("tag")
new_tags = {self.data["tag"]: user}
# if principal_id_key is set (and value), we'll set the principalId tag.
principal_id_key = self.data.get("principal_id_tag", None)
if principal_id_key and principal_id_value:
new_tags[principal_id_key] = principal_id_value
for key, value in new_tags.items():
tag_action({"key": key, "value": value}, self.manager).process(
untagged_resources
)
return new_tags
|
https://github.com/cloud-custodian/cloud-custodian/issues/6251
|
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution
Traceback (most recent call last):
File "/var/task/c7n/handler.py", line 166, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1138, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 526, in run
return PullMode.run(self)
File "/var/task/c7n/policy.py", line 317, in run
results = a.process(resources)
File "/var/task/c7n/resources/asg.py", line 1212, in process
instance_count = sum(list(w.map(self.process_asg, asgs)))
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator
yield fs.pop().result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/task/c7n/resources/asg.py", line 1230, in process_asg
if not self.manager.config.dryrun and instances:
UnboundLocalError: local variable 'instances' referenced before assignment
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution Traceback (most recent call last): File "/var/task/c7n/handler.py", line 166, in dispatch_event p.push(event, context) File "/var/task/c7n/policy.py", line 1138, in push return mode.run(event, lambda_ctx) File "/var/task/c7n/policy.py", line 526, in run return PullMode.run(self) File "/var/task/c7n/policy.py", line 317, in run results = a.process(resources) File "/var/task/c7n/resources/asg.py", line 1212, in process instance_count = sum(list(w.map(self.process_asg, asgs))) File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator yield fs.pop().result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/var/task/c7n/resources/asg.py", line 1230, in process_asg if not self.manager.config.dryrun and instances: UnboundLocalError: local variable 'instances' referenced before assignment
|
UnboundLocalError
|
def process(self, asgs):
tags = self.get_tag_set()
error = None
client = self.get_client()
with self.executor_factory(max_workers=2) as w:
futures = {}
for asg_set in chunks(asgs, self.batch_size):
futures[w.submit(self.process_resource_set, client, asg_set, tags)] = (
asg_set
)
for f in as_completed(futures):
asg_set = futures[f]
if f.exception():
self.log.exception(
"Exception tagging tag:%s error:%s asg:%s"
% (
tags,
f.exception(),
", ".join([a["AutoScalingGroupName"] for a in asg_set]),
)
)
if error:
raise error
|
def process(self, asgs):
tags = self.get_tag_set()
error = None
client = self.get_client()
with self.executor_factory(max_workers=3) as w:
futures = {}
for asg_set in chunks(asgs, self.batch_size):
futures[w.submit(self.process_resource_set, client, asg_set, tags)] = (
asg_set
)
for f in as_completed(futures):
asg_set = futures[f]
if f.exception():
self.log.exception(
"Exception tagging tag:%s error:%s asg:%s"
% (
tags,
f.exception(),
", ".join([a["AutoScalingGroupName"] for a in asg_set]),
)
)
if error:
raise error
|
https://github.com/cloud-custodian/cloud-custodian/issues/6251
|
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution
Traceback (most recent call last):
File "/var/task/c7n/handler.py", line 166, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1138, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 526, in run
return PullMode.run(self)
File "/var/task/c7n/policy.py", line 317, in run
results = a.process(resources)
File "/var/task/c7n/resources/asg.py", line 1212, in process
instance_count = sum(list(w.map(self.process_asg, asgs)))
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator
yield fs.pop().result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/task/c7n/resources/asg.py", line 1230, in process_asg
if not self.manager.config.dryrun and instances:
UnboundLocalError: local variable 'instances' referenced before assignment
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution Traceback (most recent call last): File "/var/task/c7n/handler.py", line 166, in dispatch_event p.push(event, context) File "/var/task/c7n/policy.py", line 1138, in push return mode.run(event, lambda_ctx) File "/var/task/c7n/policy.py", line 526, in run return PullMode.run(self) File "/var/task/c7n/policy.py", line 317, in run results = a.process(resources) File "/var/task/c7n/resources/asg.py", line 1212, in process instance_count = sum(list(w.map(self.process_asg, asgs))) File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator yield fs.pop().result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/var/task/c7n/resources/asg.py", line 1230, in process_asg if not self.manager.config.dryrun and instances: UnboundLocalError: local variable 'instances' referenced before assignment
|
UnboundLocalError
|
def process_resource_set(self, client, asgs, tags):
tag_params = []
propagate = self.data.get("propagate", False)
for t in tags:
if "PropagateAtLaunch" not in t:
t["PropagateAtLaunch"] = propagate
for t in tags:
for a in asgs:
atags = dict(t)
atags["ResourceType"] = "auto-scaling-group"
atags["ResourceId"] = a["AutoScalingGroupName"]
tag_params.append(atags)
a.setdefault("Tags", []).append(atags)
self.manager.retry(client.create_or_update_tags, Tags=tag_params)
|
def process_resource_set(self, client, asgs, tags):
tag_params = []
propagate = self.data.get("propagate", False)
for t in tags:
if "PropagateAtLaunch" not in t:
t["PropagateAtLaunch"] = propagate
for t in tags:
for a in asgs:
atags = dict(t)
atags["ResourceType"] = "auto-scaling-group"
atags["ResourceId"] = a["AutoScalingGroupName"]
tag_params.append(atags)
self.manager.retry(client.create_or_update_tags, Tags=tag_params)
|
https://github.com/cloud-custodian/cloud-custodian/issues/6251
|
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution
Traceback (most recent call last):
File "/var/task/c7n/handler.py", line 166, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1138, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 526, in run
return PullMode.run(self)
File "/var/task/c7n/policy.py", line 317, in run
results = a.process(resources)
File "/var/task/c7n/resources/asg.py", line 1212, in process
instance_count = sum(list(w.map(self.process_asg, asgs)))
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator
yield fs.pop().result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/task/c7n/resources/asg.py", line 1230, in process_asg
if not self.manager.config.dryrun and instances:
UnboundLocalError: local variable 'instances' referenced before assignment
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution Traceback (most recent call last): File "/var/task/c7n/handler.py", line 166, in dispatch_event p.push(event, context) File "/var/task/c7n/policy.py", line 1138, in push return mode.run(event, lambda_ctx) File "/var/task/c7n/policy.py", line 526, in run return PullMode.run(self) File "/var/task/c7n/policy.py", line 317, in run results = a.process(resources) File "/var/task/c7n/resources/asg.py", line 1212, in process instance_count = sum(list(w.map(self.process_asg, asgs))) File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator yield fs.pop().result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/var/task/c7n/resources/asg.py", line 1230, in process_asg if not self.manager.config.dryrun and instances: UnboundLocalError: local variable 'instances' referenced before assignment
|
UnboundLocalError
|
def process_asg(self, asg):
instance_ids = [i["InstanceId"] for i in asg["Instances"]]
tag_map = {
t["Key"]: t["Value"]
for t in asg.get("Tags", [])
if t["PropagateAtLaunch"] and not t["Key"].startswith("aws:")
}
if self.data.get("tags"):
tag_map = {k: v for k, v in tag_map.items() if k in self.data["tags"]}
if not tag_map and not self.get("trim", False):
self.log.error(
"No tags found to propagate on asg:{} tags configured:{}".format(
asg["AutoScalingGroupName"], self.data.get("tags")
)
)
tag_set = set(tag_map)
client = local_session(self.manager.session_factory).client("ec2")
if self.data.get("trim", False):
instances = [self.instance_map[i] for i in instance_ids]
self.prune_instance_tags(client, asg, tag_set, instances)
if not self.manager.config.dryrun and instance_ids and tag_map:
client.create_tags(
Resources=instance_ids,
Tags=[{"Key": k, "Value": v} for k, v in tag_map.items()],
)
return len(instance_ids)
|
def process_asg(self, asg):
client = local_session(self.manager.session_factory).client("ec2")
instance_ids = [i["InstanceId"] for i in asg["Instances"]]
tag_map = {
t["Key"]: t["Value"]
for t in asg.get("Tags", [])
if t["PropagateAtLaunch"] and not t["Key"].startswith("aws:")
}
if self.data.get("tags"):
tag_map = {k: v for k, v in tag_map.items() if k in self.data["tags"]}
tag_set = set(tag_map)
if self.data.get("trim", False):
instances = [self.instance_map[i] for i in instance_ids]
self.prune_instance_tags(client, asg, tag_set, instances)
if not self.manager.config.dryrun and instances:
client.create_tags(
Resources=instance_ids,
Tags=[{"Key": k, "Value": v} for k, v in tag_map.items()],
)
return len(instance_ids)
|
https://github.com/cloud-custodian/cloud-custodian/issues/6251
|
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution
Traceback (most recent call last):
File "/var/task/c7n/handler.py", line 166, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1138, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 526, in run
return PullMode.run(self)
File "/var/task/c7n/policy.py", line 317, in run
results = a.process(resources)
File "/var/task/c7n/resources/asg.py", line 1212, in process
instance_count = sum(list(w.map(self.process_asg, asgs)))
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator
yield fs.pop().result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/task/c7n/resources/asg.py", line 1230, in process_asg
if not self.manager.config.dryrun and instances:
UnboundLocalError: local variable 'instances' referenced before assignment
[ERROR] 2020-10-27T18:30:32.480Z ec614241-19ef-4d44-b3a0-400229378905 error during policy execution Traceback (most recent call last): File "/var/task/c7n/handler.py", line 166, in dispatch_event p.push(event, context) File "/var/task/c7n/policy.py", line 1138, in push return mode.run(event, lambda_ctx) File "/var/task/c7n/policy.py", line 526, in run return PullMode.run(self) File "/var/task/c7n/policy.py", line 317, in run results = a.process(resources) File "/var/task/c7n/resources/asg.py", line 1212, in process instance_count = sum(list(w.map(self.process_asg, asgs))) File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 611, in result_iterator yield fs.pop().result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 439, in result return self.__get_result() File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result raise self._exception File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/var/task/c7n/resources/asg.py", line 1230, in process_asg if not self.manager.config.dryrun and instances: UnboundLocalError: local variable 'instances' referenced before assignment
|
UnboundLocalError
|
def __init__(self, policy, data):
self.policy = policy
self.data = data
self.filters = self.data.get("conditions", [])
# for value_from usage / we use the conditions class
# to mimic a resource manager interface. we can't use
# the actual resource manager as we're overriding block
# filters which work w/ resource type metadata and our
# resource here is effectively the execution variables.
self.config = self.policy.options
rm = self.policy.resource_manager
self._cache = rm._cache
self.session_factory = rm.session_factory
# used by c7n-org to extend evaluation conditions
self.env_vars = {}
self.initialized = False
|
def __init__(self, policy, data):
self.policy = policy
self.data = data
self.filters = self.data.get("conditions", [])
# for value_from usage / we use the conditions class
# to mimic a resource manager interface. we can't use
# the actual resource manager as we're overriding block
# filters which work w/ resource type metadata and our
# resource here is effectively the execution variables.
self.config = self.policy.options
rm = self.policy.resource_manager
self._cache = rm._cache
self.session_factory = rm.session_factory
# used by c7n-org to extend evaluation conditions
self.env_vars = {}
|
https://github.com/cloud-custodian/cloud-custodian/issues/6217
|
custodian run ~/Projects/policies/policy.yaml -v -s ~/Projects/policies/.custodian
2020-10-15 16:44:26,694: custodian.commands:DEBUG Loaded file /Users/kevinkessels/Projects/policies/policy.yaml. Contains 1 policies
Traceback (most recent call last):
File "/Users/kevinkessels/Projects/custodian/venv/bin/custodian", line 8, in <module>
sys.exit(main())
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/cli.py", line 352, in main
command(config)
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/commands.py", line 124, in _load_policies
p.validate()
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/policy.py", line 1046, in validate
self.conditions.validate()
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/policy.py", line 929, in validate
self.filters = self.filter_registry.parse(self.filters, self)
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/filters/core.py", line 122, in parse
results.append(self.factory(d, manager))
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/filters/core.py", line 146, in factory
filter_type = data.get('type')
AttributeError: 'ValueFilter' object has no attribute 'get'
|
AttributeError
|
def validate(self):
if not self.initialized:
self.filters.extend(self.convert_deprecated())
self.filters = self.filter_registry.parse(self.filters, self)
self.initialized = True
|
def validate(self):
self.filters.extend(self.convert_deprecated())
self.filters = self.filter_registry.parse(self.filters, self)
|
https://github.com/cloud-custodian/cloud-custodian/issues/6217
|
custodian run ~/Projects/policies/policy.yaml -v -s ~/Projects/policies/.custodian
2020-10-15 16:44:26,694: custodian.commands:DEBUG Loaded file /Users/kevinkessels/Projects/policies/policy.yaml. Contains 1 policies
Traceback (most recent call last):
File "/Users/kevinkessels/Projects/custodian/venv/bin/custodian", line 8, in <module>
sys.exit(main())
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/cli.py", line 352, in main
command(config)
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/commands.py", line 124, in _load_policies
p.validate()
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/policy.py", line 1046, in validate
self.conditions.validate()
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/policy.py", line 929, in validate
self.filters = self.filter_registry.parse(self.filters, self)
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/filters/core.py", line 122, in parse
results.append(self.factory(d, manager))
File "/Users/kevinkessels/Projects/custodian/venv/lib/python3.8/site-packages/c7n/filters/core.py", line 146, in factory
filter_type = data.get('type')
AttributeError: 'ValueFilter' object has no attribute 'get'
|
AttributeError
|
def augment(self, resources):
return [r for r in resources if self.manager.resource_type.id in r]
|
def augment(self, resources):
for r in resources:
r["Tags"] = r.pop("TagSet", [])
return resources
|
https://github.com/cloud-custodian/cloud-custodian/issues/6155
|
2020-09-25 19:37:54,488: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/Users/rgopala1/custodian/lib/python3.7/site-packages/c7n/policy.py", line 291, in run
resources = self.policy.resource_manager.resources()
File "/Users/rgopala1/custodian/lib/python3.7/site-packages/c7n/query.py", line 509, in resources
resources = self.filter_resources(resources)
File "/Users/rgopala1/custodian/lib/python3.7/site-packages/c7n/manager.py", line 121, in filter_resources
resources = f.process(resources, event)
File "/Users/rgopala1/custodian/lib/python3.7/site-packages/c7n/resources/shield.py", line 87, in process
for arn, r in zip(self.manager.get_arns(resources), resources):
File "/Users/rgopala1/custodian/lib/python3.7/site-packages/c7n/query.py", line 588, in get_arns
_id = r[id_key]
KeyError: 'AllocationId'
|
KeyError
|
def render_event_pattern(self):
event_type = self.data.get("type")
pattern = self.data.get("pattern")
payload = {}
if pattern:
payload.update(pattern)
if event_type == "cloudtrail":
payload["detail-type"] = ["AWS API Call via CloudTrail"]
self.resolve_cloudtrail_payload(payload)
if event_type == "cloudtrail":
if "signin.amazonaws.com" in payload["detail"]["eventSource"]:
payload["detail-type"] = ["AWS Console Sign In via CloudTrail"]
elif event_type == "guard-duty":
payload["source"] = ["aws.guardduty"]
payload["detail-type"] = ["GuardDuty Finding"]
if "resource-filter" in self.data:
payload.update(
{
"detail": {
"resource": {"resourceType": [self.data["resource-filter"]]}
}
}
)
elif event_type == "ec2-instance-state":
payload["source"] = ["aws.ec2"]
payload["detail-type"] = ["EC2 Instance State-change Notification"]
# Technically could let empty be all events, but likely misconfig
payload["detail"] = {"state": self.data.get("events", [])}
elif event_type == "asg-instance-state":
payload["source"] = ["aws.autoscaling"]
events = []
for e in self.data.get("events", []):
events.append(self.ASG_EVENT_MAPPING.get(e, e))
payload["detail-type"] = events
elif event_type == "phd":
payload["source"] = ["aws.health"]
payload.setdefault("detail", {})
if self.data.get("events"):
payload["detail"].update({"eventTypeCode": list(self.data["events"])})
if self.data.get("categories", []):
payload["detail"]["eventTypeCategory"] = self.data["categories"]
elif event_type == "hub-finding":
payload["source"] = ["aws.securityhub"]
payload["detail-type"] = ["Security Hub Findings - Imported"]
elif event_type == "hub-action":
payload["source"] = ["aws.securityhub"]
payload["detail-type"] = [
"Security Hub Findings - Custom Action",
"Security Hub Insight Results",
]
elif event_type == "periodic":
pass
else:
raise ValueError("Unknown lambda event source type: %s" % event_type)
if not payload:
return None
if self.data.get("pattern"):
payload = merge_dict(payload, self.data["pattern"])
return json.dumps(payload)
|
def render_event_pattern(self):
event_type = self.data.get("type")
pattern = self.data.get("pattern")
payload = {}
if pattern:
payload.update(pattern)
if event_type == "cloudtrail":
payload["detail-type"] = ["AWS API Call via CloudTrail"]
self.resolve_cloudtrail_payload(payload)
if event_type == "cloudtrail":
if "signin.amazonaws.com" in payload["detail"]["eventSource"]:
payload["detail-type"] = ["AWS Console Sign In via CloudTrail"]
elif event_type == "guard-duty":
payload["source"] = ["aws.guardduty"]
payload["detail-type"] = ["GuardDuty Finding"]
if "resource-filter" in self.data:
payload.update(
{
"detail": {
"resource": {"resourceType": [self.data["resource-filter"]]}
}
}
)
elif event_type == "ec2-instance-state":
payload["source"] = ["aws.ec2"]
payload["detail-type"] = ["EC2 Instance State-change Notification"]
# Technically could let empty be all events, but likely misconfig
payload["detail"] = {"state": self.data.get("events", [])}
elif event_type == "asg-instance-state":
payload["source"] = ["aws.autoscaling"]
events = []
for e in self.data.get("events", []):
events.append(self.ASG_EVENT_MAPPING.get(e, e))
payload["detail-type"] = events
elif event_type == "phd":
payload["source"] = ["aws.health"]
if self.data.get("events"):
payload["detail"] = {"eventTypeCode": list(self.data["events"])}
if self.data.get("categories", []):
payload["detail"]["eventTypeCategory"] = self.data["categories"]
elif event_type == "hub-finding":
payload["source"] = ["aws.securityhub"]
payload["detail-type"] = ["Security Hub Findings - Imported"]
elif event_type == "hub-action":
payload["source"] = ["aws.securityhub"]
payload["detail-type"] = [
"Security Hub Findings - Custom Action",
"Security Hub Insight Results",
]
elif event_type == "periodic":
pass
else:
raise ValueError("Unknown lambda event source type: %s" % event_type)
if not payload:
return None
if self.data.get("pattern"):
payload = merge_dict(payload, self.data["pattern"])
return json.dumps(payload)
|
https://github.com/cloud-custodian/cloud-custodian/issues/6135
|
2020-09-17 09:37:55,669: custodian.policy:INFO Provisioning policy lambda: health-event-notify region: us-east-1
2020-09-17 09:37:55,924: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/kapilt/projects/cloud-custodian/c7n/policy.py", line 508, in provision
return manager.publish(
File "/home/kapilt/projects/cloud-custodian/c7n/mu.py", line 397, in publish
if e.add(func):
File "/home/kapilt/projects/cloud-custodian/c7n/mu.py", line 1104, in add
pattern = self.render_event_pattern()
File "/home/kapilt/projects/cloud-custodian/c7n/mu.py", line 1080, in render_event_pattern
payload['detail']['eventTypeCategory'] = self.data['categories']
KeyError: 'detail'
|
KeyError
|
def process(self, resources):
client = local_session(self.manager.session_factory).client("iam")
error = None
if self.data.get("force", False):
policy_setter = self.manager.action_registry["set-policy"](
{"state": "detached", "arn": "*"}, self.manager
)
policy_setter.process(resources)
for r in resources:
if self.data.get("force", False):
self.detach_inline_policies(client, r)
try:
client.delete_role(RoleName=r["RoleName"])
except client.exceptions.DeleteConflictException as e:
self.log.warning(
"Role:%s cannot be deleted, set force to detach policy and delete"
% r["Arn"]
)
error = e
except (
client.exceptions.NoSuchEntityException,
client.exceptions.UnmodifiableEntityException,
):
continue
if error:
raise error
|
def process(self, resources):
client = local_session(self.manager.session_factory).client("iam")
error = None
if self.data.get("force", False):
policy_setter = self.manager.action_registry["set-policy"](
{"state": "detached", "arn": "*"}, self.manager
)
policy_setter.process(resources)
for r in resources:
try:
client.delete_role(RoleName=r["RoleName"])
except client.exceptions.DeleteConflictException as e:
self.log.warning(
"Role:%s cannot be deleted, set force to detach policy and delete"
% r["Arn"]
)
error = e
except client.exceptions.NoSuchEntityException:
continue
except client.exceptions.UnmodifiableEntityException:
continue
if error:
raise error
|
https://github.com/cloud-custodian/cloud-custodian/issues/6109
|
2020-09-08 10:48:33,898: custodian.actions:WARNING Role:arn:aws:iam::role cannot be deleted, set force to detach policy and delete
2020-09-08 10:48:34,042: custodian.output:ERROR Error while executing policy
and delete
2020-09-08 10:48:34,042: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "site-packages/c7n/policy.py", line 313, in run
results = a.process(resources)
File "site-packages/c7n/resources/iam.py", line 1095, in process
raise error
File "site-packages/c7n/resources/iam.py", line 1084, in process
client.delete_role(RoleName=r['RoleName'])
File "site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.DeleteConflictException: An error occurred (DeleteConflict) when calling the DeleteRole operation: Cannot delete entity, must delete policies first.
2020-09-08 10:48:34,044: custodian.commands:ERROR Error while executing policy remove-role, continuing
Traceback (most recent call last):
File "site-packages/c7n/commands.py", line 272, in run
policy()
File "site-packages/c7n/policy.py", line 1175, in __call__
resources = mode.run()
File "site-packages/c7n/policy.py", line 313, in run
results = a.process(resources)
File "site-packages/c7n/resources/iam.py", line 1095, in process
raise error
File "site-packages/c7n/resources/iam.py", line 1084, in process
client.delete_role(RoleName=r['RoleName'])
File "site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.errorfactory.DeleteConflictException: An error occurred (DeleteConflict) when calling the DeleteRole operation: Cannot delete entity, must delete policies first.
2020-09-08 10:48:34,044: custodian.commands:ERROR The following policies had errors while executing
- remove-role
|
botocore.errorfactory.DeleteConflictException
|
def process_resource(self, client, r, related_tags, tag_keys, tag_action):
tags = {}
resource_tags = {
t["Key"]: t["Value"]
for t in r.get("Tags", [])
if not t["Key"].startswith("aws:")
}
if tag_keys == "*":
tags = {
k: v
for k, v in related_tags.items()
if resource_tags.get(k) != v and not k.startswith("aws:")
}
else:
tags = {
k: v
for k, v in related_tags.items()
if k in tag_keys and resource_tags.get(k) != v
}
if not tags:
return
if not isinstance(tag_action, UniversalTag):
tags = [{"Key": k, "Value": v} for k, v in tags.items()]
tag_action.process_resource_set(client, resource_set=[r], tags=tags)
return True
|
def process_resource(self, client, r, related_tags, tag_keys, tag_action):
tags = {}
resource_tags = {
t["Key"]: t["Value"]
for t in r.get("Tags", [])
if not t["Key"].startswith("aws:")
}
if tag_keys == "*":
tags = {k: v for k, v in related_tags.items() if resource_tags.get(k) != v}
else:
tags = {
k: v
for k, v in related_tags.items()
if k in tag_keys and resource_tags.get(k) != v
}
if not tags:
return
if not isinstance(tag_action, UniversalTag):
tags = [{"Key": k, "Value": v} for k, v in tags.items()]
tag_action.process_resource_set(client, resource_set=[r], tags=tags)
return True
|
https://github.com/cloud-custodian/cloud-custodian/issues/5975
|
$ custodian run --verbose --cache-period 0 --region us-east-1 -p aws-copy-tags-from-instance-to-ebs-volume -s . daily_policies.yml
2020-07-21 15:46:54,904: custodian.cache:DEBUG Disabling cache
2020-07-21 15:46:54,905: custodian.commands:DEBUG Loaded file daily_policies.yml. Contains 3 policies
2020-07-21 15:46:55,510: custodian.output:DEBUG Storing output with <LogFile file://./aws-copy-tags-from-instance-to-ebs-volume/custodian-run.log>
2020-07-21 15:46:55,517: custodian.policy:DEBUG Running policy:aws-copy-tags-from-instance-to-ebs-volume resource:aws.ebs region:us-east-1 c7n:0.9.4
2020-07-21 15:47:00,540: custodian.resources.ebs:DEBUG Filtered from 1150 to 1118 ebs
2020-07-21 15:47:00,540: custodian.policy:INFO policy:aws-copy-tags-from-instance-to-ebs-volume resource:aws.ebs region:us-east-1 count:1118 time:5.02
2020-07-21 15:47:05,964: custodian.output:DEBUG metric:ResourceCount Count:1118 policy:aws-copy-tags-from-instance-to-ebs-volume restype:aws.ebs scope:policy
2020-07-21 15:47:05,965: custodian.output:DEBUG metric:PolicyException Count:1 policy:aws-copy-tags-from-instance-to-ebs-volume restype:aws.ebs
2020-07-21 15:47:05,965: custodian.output:DEBUG metric:ApiCalls Count:4 policy:aws-copy-tags-from-instance-to-ebs-volume restype:aws.ebs
2020-07-21 15:47:05,968: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/policy.py", line 323, in run
results = a.process(resources)
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/tags.py", line 1046, in process
elif self.process_resource(
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/tags.py", line 1071, in process_resource
tag_action.process_resource_set(
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/tags.py", line 430, in process_resource_set
self.manager.retry(
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/utils.py", line 373, in _retry
return func(*args, **kw)
File "/home/gshelby/.local/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/gshelby/.local/lib/python3.8/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidParameterValue) when calling the CreateTags operation: Value ( aws:autoscaling:groupName ) for parameter key is invalid. Tag keys starting with 'aws:' are reserved for internal use
2020-07-21 15:47:05,971: custodian.commands:ERROR Error while executing policy aws-copy-tags-from-instance-to-ebs-volume, continuing
Traceback (most recent call last):
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/commands.py", line 282, in run
policy()
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/policy.py", line 1176, in __call__
resources = mode.run()
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/policy.py", line 323, in run
results = a.process(resources)
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/tags.py", line 1046, in process
elif self.process_resource(
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/tags.py", line 1071, in process_resource
tag_action.process_resource_set(
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/tags.py", line 430, in process_resource_set
self.manager.retry(
File "/home/gshelby/.local/lib/python3.8/site-packages/c7n/utils.py", line 373, in _retry
return func(*args, **kw)
File "/home/gshelby/.local/lib/python3.8/site-packages/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/home/gshelby/.local/lib/python3.8/site-packages/botocore/client.py", line 635, in _make_api_call
raise error_class(parsed_response, operation_name)
botocore.exceptions.ClientError: An error occurred (InvalidParameterValue) when calling the CreateTags operation: Value ( aws:autoscaling:groupName ) for parameter key is invalid. Tag keys starting with 'aws:' are reserved for internal use
2020-07-21 15:47:05,985: custodian.commands:ERROR The following policies had errors while executing
- aws-copy-tags-from-instance-to-ebs-volume
|
botocore.exceptions.ClientError
|
def __init__(self, policy, data):
self.policy = policy
self.data = data
self.filters = self.data.get("conditions", [])
# for value_from usage / we use the conditions class
# to mimic a resource manager interface. we can't use
# the actual resource manager as we're overriding block
# filters which work w/ resource type metadata and our
# resource here is effectively the execution variables.
self.config = self.policy.options
rm = self.policy.resource_manager
self._cache = rm._cache
self.session_factory = rm.session_factory
# used by c7n-org to extend evaluation conditions
self.env_vars = {}
|
def __init__(self, policy, data):
self.policy = policy
self.data = data
self.filters = self.data.get("conditions", [])
# used by c7n-org to extend evaluation conditions
self.env_vars = {}
|
https://github.com/cloud-custodian/cloud-custodian/issues/5941
|
Traceback (most recent call last):
File "/root/c7n/lib/python3.8/site-packages/c7n/cli.py", line 362, in main
command(config)
File "/root/c7n/lib/python3.8/site-packages/c7n/commands.py", line 136, in _load_policies
return f(options, list(policies))
File "/root/c7n/lib/python3.8/site-packages/c7n/commands.py", line 282, in run
policy()
File "/root/c7n/lib/python3.8/site-packages/c7n/policy.py", line 1171, in __call__
elif not self.is_runnable():
File "/root/c7n/lib/python3.8/site-packages/c7n/policy.py", line 1011, in is_runnable
return self.conditions.evaluate(event)
File "/root/c7n/lib/python3.8/site-packages/c7n/policy.py", line 945, in evaluate
state = all([f.process([policy_vars], event) for f in self.filters])
File "/root/c7n/lib/python3.8/site-packages/c7n/policy.py", line 945, in <listcomp>
state = all([f.process([policy_vars], event) for f in self.filters])
File "/root/c7n/lib/python3.8/site-packages/c7n/filters/core.py", line 530, in process
return super(ValueFilter, self).process(resources, event)
File "/root/c7n/lib/python3.8/site-packages/c7n/filters/core.py", line 217, in process
return list(filter(self, resources))
File "/root/c7n/lib/python3.8/site-packages/c7n/filters/core.py", line 517, in __call__
matched = self.match(i)
File "/root/c7n/lib/python3.8/site-packages/c7n/filters/core.py", line 569, in match
values = ValuesFrom(self.data['value_from'], self.manager)
File "/root/c7n/lib/python3.8/site-packages/c7n/resolver.py", line 132, in __init__
'account_id': manager.config.account_id,
AttributeError: 'PolicyConditions' object has no attribute 'config'
/root/c7n/lib/python3.8/site-packages/c7n/resolver.py(132)__init__()
-> 'account_id': manager.config.account_id,
(Pdb)
|
AttributeError
|
def get_resources(self, resource_ids):
# augment will turn these into resource dictionaries
return resource_ids
|
def get_resources(self, resource_ids):
client = local_session(self.manager.session_factory).client("es")
return client.describe_elasticsearch_domains(DomainNames=resource_ids)[
"DomainStatusList"
]
|
https://github.com/cloud-custodian/cloud-custodian/issues/5916
|
2020-06-30T07:11:16.082-07:00
START RequestId: <sorry> Version: $LATEST
2020-06-30T07:11:16.731-07:00
[INFO] 2020-06-30T14:11:16.730Z <sorry> Processing event
{
"version": "0",
"id": "<sorry>",
"detail-type": "AWS API Call via CloudTrail",
"source": "aws.es",
"account": "<sorry>",
"time": "2020-06-30T14:10:46Z",
"region": "us-east-1",
"resources": [],
"detail": {
"eventVersion": "1.05",
"userIdentity": {
"type": "AssumedRole",
"principalId": "<sorry>",
"arn": "arn:aws:sts::<sorry>",
"accountId": "<sorry>",
"accessKeyId": "<sorry>",
"sessionContext": {
"sessionIssuer": {
"type": "Role",
"principalId": "<sorry>",
"arn": "arn:aws:iam::<sorry>",
"accountId": "<sorry>",
"userName": "<sorry>"
},
"webIdFederationData": {},
"attributes": {
"mfaAuthenticated": "false",
"creationDate": "2020-06-30T13:59:53Z"
}
}
},
"eventTime": "2020-06-30T14:10:46Z",
"eventSource": "es.amazonaws.com",
"eventName": "CreateElasticsearchDomain",
"awsRegion": "us-east-1",
"sourceIPAddress": "<sorry>",
"userAgent": "aws-internal/3 aws-sdk-java/1.11.753 Linux/4.9.217-0.1.ac.205.84.332.metal1.x86_64 OpenJDK_64-Bit_Server_VM/25.242-b08 java/1.8.0_242 vendor/Oracle_Corporation",
"requestParameters": {
"elasticsearchVersion": "7.4",
"vPCOptions": {
"subnetIds": [
"subnet-<sorry>"
],
"securityGroupIds": [
"sg-<sorry>"
]
},
"nodeToNodeEncryptionOptions": {
"enabled": true
},
"snapshotOptions": {},
"domainEndpointOptions": {
"enforceHTTPS": true
},
"encryptionAtRestOptions": {
"enabled": false
},
"eBSOptions": {
"eBSEnabled": true,
"volumeSize": 10,
"volumeType": "gp2"
},
"advancedOptions": {
"rest.action.multi.allow_explicit_index": "true"
},
"elasticsearchClusterConfig": {
"zoneAwarenessEnabled": false,
"instanceType": "r5.large.elasticsearch",
"dedicatedMasterEnabled": false,
"instanceCount": 1
},
"domainName": "not-rest-encrypted",
"accessPolicies": "<sorry>"
},
"responseElements": {
"domainStatus": {
"created": true,
"serviceSoftwareOptions": {
"description": "There is no software update available for this domain.",
"optionalDeployment": true,
"currentVersion": "",
"newVersion": "",
"updateAvailable": false,
"automatedUpdateDate": "Dec 31, 1969 4:00:00 PM",
"updateStatus": "COMPLETED",
"cancellable": false
},
"domainEndpointOptions": {
"enforceHTTPS": true,
"tLSSecurityPolicy": "Policy-Min-TLS-1-0-2019-07"
},
"elasticsearchClusterConfig": {
"zoneAwarenessEnabled": false,
"instanceType": "r5.large.elasticsearch",
"dedicatedMasterEnabled": false,
"instanceCount": 1,
"warmEnabled": false
},
"cognitoOptions": {
"enabled": false
},
"encryptionAtRestOptions": {
"enabled": false
},
"advancedOptions": {
"rest.action.multi.allow_explicit_index": "true"
},
"upgradeProcessing": false,
"snapshotOptions": {},
"eBSOptions": {
"eBSEnabled": true,
"volumeSize": 10,
"volumeType": "gp2"
},
"elasticsearchVersion": "7.4",
"vPCOptions": {
"vPCId": "vpc-<sorry>",
"availabilityZones": [
"us-east-1a"
],
"securityGroupIds": [
"sg-<sorry>"
],
"subnetIds": [
"subnet-<sorry>"
]
},
"nodeToNodeEncryptionOptions": {
"enabled": true
},
"processing": true,
"aRN": "arn:aws:es:us-east-1:<sorry>:domain/not-rest-encrypted",
"domainId": "<sorry>/not-rest-encrypted",
"deleted": false,
"domainName": "not-rest-encrypted",
"accessPolicies": "<sorry>",
"advancedSecurityOptions": {
"internalUserDatabaseEnabled": false,
"enabled": false
}
}
},
"requestID": "<sorry>",
"eventID": "<sorry>",
"eventType": "AwsApiCall"
},
"debug": true
}
2020-06-30T07:11:16.731-07:00
[DEBUG] 2020-06-30T14:11:16.731Z 6f9ce181-bead-4ae0-a0d3-27b1c55c01e4 Disabling cache
2020-06-30T07:11:16.731-07:00
[WARNING] 2020-06-30T14:11:16.731Z 6f9ce181-bead-4ae0-a0d3-27b1c55c01e4 Custodian reserves policy lambda tags starting with custodian - policy specifies custodian-info
2020-06-30T07:11:16.731-07:00
[INFO] 2020-06-30T14:11:16.731Z 6f9ce181-bead-4ae0-a0d3-27b1c55c01e4 Found resource ids:['not-rest-encrypted']
2020-06-30T07:11:17.066-07:00
[ERROR] 2020-06-30T14:11:17.60Z 6f9ce181-bead-4ae0-a0d3-27b1c55c01e4 error during policy execution
Traceback (most recent call last):
File "/var/task/c7n/handler.py", line 176, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1135, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 444, in run
resources = self.resolve_resources(event)
File "/var/task/c7n/policy.py", line 639, in resolve_resources
return super().resolve_resources(event)
File "/var/task/c7n/policy.py", line 426, in resolve_resources
resources = self.policy.resource_manager.get_resources(resource_ids)
File "/var/task/c7n/query.py", line 545, in get_resources
resources = self.augment(resources)
File "/var/task/c7n/query.py", line 557, in augment
return self.source.augment(resources)
File "/var/task/c7n/resources/elasticsearch.py", line 50, in augment
results.extend(_augment(resource_set))
File "/var/task/c7n/resources/elasticsearch.py", line 40, in _augment
resources = self.manager.retry(
File "/var/task/c7n/utils.py", line 373, in _retry
return func(*args, **kw)
File "/var/runtime/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/var/runtime/botocore/client.py", line 598, in _make_api_call
request_dict = self._convert_to_request_dict(
File "/var/runtime/botocore/client.py", line 646, in _convert_to_request_dict
request_dict = self._serializer.serialize_to_request(
File "/var/runtime/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Invalid type for parameter DomainNames[0], value: {'DomainId': '<sorry>/not-rest-encrypted', 'DomainName': 'not-rest-encrypted', 'ARN': 'arn:aws:es:us-east-1:<sorry>:domain/not-rest-encrypted', 'Created': True, 'Deleted': False, 'Processing': True, 'UpgradeProcessing': False, 'ElasticsearchVersion': '7.4', 'ElasticsearchClusterConfig': {'InstanceType': 'r5.large.elasticsearch', 'InstanceCount': 1, 'DedicatedMasterEnabled': False, 'ZoneAwarenessEnabled': False, 'WarmEnabled': False}, 'EBSOptions': {'EBSEnabled': True, 'VolumeType': 'gp2', 'VolumeSize': 10}, 'AccessPolicies': '{"Version":"2012-10-17","Statement":[{<sorry>"}]}', 'SnapshotOptions': {}, 'VPCOptions': {'VPCId': 'vpc-<sorry>', 'SubnetIds': ['subnet-<sorry>'], 'AvailabilityZones': ['us-east-1a'], 'SecurityGroupIds': ['sg-<sorry>']}, 'CognitoOptions': {'Enabled': False}, 'EncryptionAtRestOptions': {'Enabled': False}, 'NodeToNodeEncryptionOptions': {'Enabled': True}, 'AdvancedOptions': {'rest.action.multi.allow_explicit_index': 'true'}, 'ServiceSoftwareOptions': {'CurrentVersion': '', 'NewVersion': '', 'UpdateAvailable': False, 'Cancellable': False, 'UpdateStatus': 'COMPLETED', 'Description': 'There is no software update available for this domain.', 'AutomatedUpdateDate': datetime.datetime(1970, 1, 1, 0, 0, tzinfo=tzlocal()), 'OptionalDeployment': True}, 'DomainEndpointOptions': {'EnforceHTTPS': True, 'TLSSecurityPolicy': 'Policy-Min-TLS-1-0-2019-07'}, 'AdvancedSecurityOptions': {'Enabled': False, 'InternalUserDatabaseEnabled': False}}, type: <class 'dict'>, valid types: <class 'str'>
2020-06-30T07:11:17.067-07:00
[ERROR] ParamValidationError: Parameter validation failed:
Invalid type for parameter DomainNames[0], value: {'DomainId': '<sorry>/not-rest-encrypted', 'DomainName': 'not-rest-encrypted', 'ARN': 'arn:aws:es:us-east-1:<sorry>:domain/not-rest-encrypted', 'Created': True, 'Deleted': False, 'Processing': True, 'UpgradeProcessing': False, 'ElasticsearchVersion': '7.4', 'ElasticsearchClusterConfig': {'InstanceType': 'r5.large.elasticsearch', 'InstanceCount': 1, 'DedicatedMasterEnabled': False, 'ZoneAwarenessEnabled': False, 'WarmEnabled': False}, 'EBSOptions': {'EBSEnabled': True, 'VolumeType': 'gp2', 'VolumeSize': 10}, 'AccessPolicies': '{"Version":"2012-10-17","Statement":[{<sorry>}]}', 'SnapshotOptions': {}, 'VPCOptions': {'VPCId': 'vpc-<sorry>', 'SubnetIds': ['subnet-<sorry>'], 'AvailabilityZones': ['us-east-1a'], 'SecurityGroupIds': ['sg-<sorry>']}, 'CognitoOptions': {'Enabled': False}, 'EncryptionAtRestOptions': {'Enabled': False}, 'NodeToNodeEncryptionOptions': {'Enabled': True}, 'AdvancedOptions': {'rest.action.multi.allow_explicit_index': 'true'}, 'ServiceSoftwareOptions': {'CurrentVersion': '', 'NewVersion': '', 'UpdateAvailable': False, 'Cancellable': False, 'UpdateStatus': 'COMPLETED', 'Description': 'There is no software update available for this domain.', 'AutomatedUpdateDate': datetime.datetime(1970, 1, 1, 0, 0, tzinfo=tzlocal()), 'OptionalDeployment': True}, 'DomainEndpointOptions': {'EnforceHTTPS': True, 'TLSSecurityPolicy': 'Policy-Min-TLS-1-0-2019-07'}, 'AdvancedSecurityOptions': {'Enabled': False, 'InternalUserDatabaseEnabled': False}}, type: <class 'dict'>, valid types: <class 'str'>
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 176, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1135, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 444, in run
resources = self.resolve_resources(event)
File "/var/task/c7n/policy.py", line 639, in resolve_resources
return super().resolve_resources(event)
File "/var/task/c7n/policy.py", line 426, in resolve_resources
resources = self.policy.resource_manager.get_resources(resource_ids)
File "/var/task/c7n/query.py", line 545, in get_resources
resources = self.augment(resources)
File "/var/task/c7n/query.py", line 557, in augment
return self.source.augment(resources)
File "/var/task/c7n/resources/elasticsearch.py", line 50, in augment
results.extend(_augment(resource_set))
File "/var/task/c7n/resources/elasticsearch.py", line 40, in _augment
resources = self.manager.retry(
File "/var/task/c7n/utils.py", line 373, in _retry
return func(*args, **kw)
File "/var/runtime/botocore/client.py", line 316, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/var/runtime/botocore/client.py", line 598, in _make_api_call
request_dict = self._convert_to_request_dict(
File "/var/runtime/botocore/client.py", line 646, in _convert_to_request_dict
request_dict = self._serializer.serialize_to_request(
File "/var/runtime/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
2020-06-30T07:11:17.069-07:00
END RequestId: <sorry>
2020-06-30T07:11:17.069-07:00
REPORT RequestId: <sorry> Duration: 986.45 ms Billed Duration: 1000 ms Memory Size: 512 MB Max Memory Used: 81 MB Init Duration: 414.96 ms
|
botocore.exceptions.ParamValidationError
|
def augment(self, resources):
results = []
client = local_session(self.session_factory).client("ecs")
for task_def_set in resources:
response = self.retry(
client.describe_task_definition,
taskDefinition=task_def_set,
include=["TAGS"],
)
r = response["taskDefinition"]
r["tags"] = response.get("tags", [])
results.append(r)
ecs_tag_normalize(results)
return results
|
def augment(self, resources):
results = []
client = local_session(self.session_factory).client("ecs")
for task_def_set in resources:
response = client.describe_task_definition(
taskDefinition=task_def_set, include=["TAGS"]
)
r = response["taskDefinition"]
r["tags"] = response.get("tags", [])
results.append(r)
ecs_tag_normalize(results)
return results
|
https://github.com/cloud-custodian/cloud-custodian/issues/5911
|
15:22:29
2020-06-30 15:22:29,380: custodian.commands:ERROR Error while executing policy check-ecs-def, continuing
15:22:29
Traceback (most recent call last):
15:22:29
File "/usr/local/lib/python3.7/dist-packages/c7n/commands.py", line 282, in run
15:22:29
policy()
15:22:29
File "/usr/local/lib/python3.7/dist-packages/c7n/policy.py", line 1176, in __call__
15:22:29
resources = mode.run()
15:22:29
File "/usr/local/lib/python3.7/dist-packages/c7n/policy.py", line 291, in run
15:22:29
resources = self.policy.resource_manager.resources()
15:22:29
File "/usr/local/lib/python3.7/dist-packages/c7n/query.py", line 502, in resources
15:22:29
resources = self.augment(resources)
15:22:29
File "/usr/local/lib/python3.7/dist-packages/c7n/resources/ecs.py", line 489, in augment
15:22:29
include=['TAGS'])
15:22:29
File "/usr/local/lib/python3.7/dist-packages/botocore/client.py", line 316, in _api_call
15:22:29
return self._make_api_call(operation_name, kwargs)
15:22:29
File "/usr/local/lib/python3.7/dist-packages/botocore/client.py", line 626, in _make_api_call
15:22:29
raise error_class(parsed_response, operation_name)
15:22:29
botocore.exceptions.ClientError: An error occurred (ThrottlingException) when calling the DescribeTaskDefinition operation (reached max retries: 4): Rate exceeded
|
botocore.exceptions.ClientError
|
def load_data(
self, policy_data, file_uri, validate=None, session_factory=None, config=None
):
self.structure.validate(policy_data)
# Use passed in policy exec configuration or default on loader
config = config or self.policy_config
# track policy resource types and only load if needed.
rtypes = set(self.structure.get_resource_types(policy_data))
missing = load_resources(list(rtypes))
if missing:
self._handle_missing_resources(policy_data, missing)
if schema and (
validate is not False or (validate is None and self.default_schema_validate)
):
errors = self.validator.validate(policy_data, tuple(rtypes))
if errors:
raise PolicyValidationError(
"Failed to validate policy %s\n %s\n" % (errors[1], errors[0])
)
collection = self.collection_class.from_data(policy_data, config, session_factory)
# non schema validation of policies isnt optional its
# become a lazy initialization point for resources.
#
# it would be good to review where we do validation
# as we also have to do after provider policy
# initialization due to the region expansion.
#
# ie we should defer this to callers
# [p.validate() for p in collection]
return collection
|
def load_data(
self, policy_data, file_uri, validate=None, session_factory=None, config=None
):
self.structure.validate(policy_data)
# Use passed in policy exec configuration or default on loader
config = config or self.policy_config
# track policy resource types and only load if needed.
rtypes = set(self.structure.get_resource_types(policy_data))
missing = load_resources(list(rtypes))
if missing:
self._handle_missing_resources(policy_data, missing)
if validate is not False or (validate is None and self.default_schema_validate):
errors = self.validator.validate(policy_data, tuple(rtypes))
if errors:
raise PolicyValidationError(
"Failed to validate policy %s\n %s\n" % (errors[1], errors[0])
)
collection = self.collection_class.from_data(policy_data, config, session_factory)
# non schema validation of policies isnt optional its
# become a lazy initialization point for resources.
#
# it would be good to review where we do validation
# as we also have to do after provider policy
# initialization due to the region expansion.
#
# ie we should defer this to callers
# [p.validate() for p in collection]
return collection
|
https://github.com/cloud-custodian/cloud-custodian/issues/5829
|
[ERROR] RuntimeError: missing jsonschema dependency
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 175, in dispatch_event
p.validate()
File "/var/task/c7n/policy.py", line 1047, in validate
f.validate()
File "/var/task/c7n/filters/missing.py", line 49, in validate
collection = PolicyLoader(
File "/var/task/c7n/loader.py", line 136, in load_data
errors = self.validator.validate(policy_data, tuple(rtypes))
File "/var/task/c7n/loader.py", line 47, in validate
self.gen_schema(tuple(sorted(resource_types)))
File "/var/task/c7n/loader.py", line 74, in gen_schema
self.validator = v = self._gen_schema(resource_types)
File "/var/task/c7n/loader.py", line 82, in _gen_schema
raise RuntimeError("missing jsonschema dependency")
|
RuntimeError
|
def process_asg(self, asg):
client = local_session(self.manager.session_factory).client("ec2")
instance_ids = [i["InstanceId"] for i in asg["Instances"]]
tag_map = {
t["Key"]: t["Value"]
for t in asg.get("Tags", [])
if t["PropagateAtLaunch"] and not t["Key"].startswith("aws:")
}
if self.data.get("tags"):
tag_map = {k: v for k, v in tag_map.items() if k in self.data["tags"]}
tag_set = set(tag_map)
if self.data.get("trim", False):
instances = [self.instance_map[i] for i in instance_ids]
self.prune_instance_tags(client, asg, tag_set, instances)
if not self.manager.config.dryrun and instances:
client.create_tags(
Resources=instance_ids,
Tags=[{"Key": k, "Value": v} for k, v in tag_map.items()],
)
return len(instance_ids)
|
def process_asg(self, asg):
client = local_session(self.manager.session_factory).client("ec2")
instance_ids = [i["InstanceId"] for i in asg["Instances"]]
tag_map = {
t["Key"]: t["Value"]
for t in asg.get("Tags", [])
if t["PropagateAtLaunch"] and not t["Key"].startswith("aws:")
}
if self.data.get("tags"):
tag_map = {k: v for k, v in tag_map.items() if k in self.data["tags"]}
tag_set = set(tag_map)
if self.data.get("trim", False):
instances = [self.instance_map[i] for i in instance_ids]
self.prune_instance_tags(client, asg, tag_set, instances)
if not self.manager.config.dryrun:
client.create_tags(
Resources=instance_ids,
Tags=[{"Key": k, "Value": v} for k, v in tag_map.items()],
)
return len(instance_ids)
|
https://github.com/cloud-custodian/cloud-custodian/issues/5086
|
[ERROR] 2019-11-19T02:56:38.336Z b1c8af86-d773-412d-b755-638faf7698e8 Error while executing policy
20:56:38 Traceback (most recent call last):
20:56:38 File "/var/task/c7n/policy.py", line 320, in run
20:56:38 results = a.process(resources)
20:56:38 File "/var/task/c7n/resources/asg.py", line 1200, in process
20:56:38 instance_count = sum(list(w.map(self.process_asg, asgs)))
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/_base.py", line 598, in result_iterator
20:56:38 yield fs.pop().result()
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/_base.py", line 435, in result
20:56:38 return self.__get_result()
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
20:56:38 raise self._exception
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/thread.py", line 57, in run
20:56:38 result = self.fn(*self.args, **self.kwargs)
20:56:38 File "/var/task/c7n/resources/asg.py", line 1221, in process_asg
20:56:38 Tags=[{'Key': k, 'Value': v} for k, v in tag_map.items()])
20:56:38 File "/var/runtime/botocore/client.py", line 357, in _api_call
20:56:38 return self._make_api_call(operation_name, kwargs)
20:56:38 File "/var/runtime/botocore/client.py", line 661, in _make_api_call
20:56:38 raise error_class(parsed_response, operation_name)
20:56:38 botocore.exceptions.ClientError: An error occurred (MissingParameter) when calling the CreateTags operation: The request must contain the parameter resourceIdSet
20:56:38 [ERROR] 2019-11-19T02:56:38.338Z b1c8af86-d773-412d-b755-638faf7698e8 error during policy execution
20:56:38 Traceback (most recent call last):
20:56:38 File "/var/task/c7n/handler.py", line 168, in dispatch_event
20:56:38 p.push(event, context)
20:56:38 File "/var/task/c7n/policy.py", line 991, in push
20:56:38 return mode.run(event, lambda_ctx)
20:56:38 File "/var/task/c7n/policy.py", line 591, in run
20:56:38 return PullMode.run(self)
20:56:38 File "/var/task/c7n/policy.py", line 320, in run
20:56:38 results = a.process(resources)
20:56:38 File "/var/task/c7n/resources/asg.py", line 1200, in process
20:56:38 instance_count = sum(list(w.map(self.process_asg, asgs)))
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/_base.py", line 598, in result_iterator
20:56:38 yield fs.pop().result()
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/_base.py", line 435, in result
20:56:38 return self.__get_result()
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result
20:56:38 raise self._exception
20:56:38 File "/var/lang/lib/python3.7/concurrent/futures/thread.py", line 57, in run
20:56:38 result = self.fn(*self.args, **self.kwargs)
20:56:38 File "/var/task/c7n/resources/asg.py", line 1221, in process_asg
20:56:38 Tags=[{'Key': k, 'Value': v} for k, v in tag_map.items()])
20:56:38 File "/var/runtime/botocore/client.py", line 357, in _api_call
20:56:38 return self._make_api_call(operation_name, kwargs)
20:56:38 File "/var/runtime/botocore/client.py", line 661, in _make_api_call
20:56:38 raise error_class(parsed_response, operation_name)
20:56:38 botocore.exceptions.ClientError: An error occurred (MissingParameter) when calling the CreateTags operation: The request must contain the parameter resourceIdSet
20:56:38 [ERROR] ClientError: An error occurred (MissingParameter) when calling the CreateTags operation: The request must contain the parameter resourceIdSet Traceback (most recent call last): File "/var/task/custodian_policy.py", line 4, in run return handler.dispatch_event(event, context) File "/var/task/c7n/handler.py", line 168, in dispatch_event p.push(event, context) File "/var/task/c7
20:56:38 END RequestId: b1c8af86-d773-412d-b755-638faf7698e8
|
botocore.exceptions.ClientError
|
def factory(self, data, manager=None):
"""Factory func for filters.
data - policy config for filters
manager - resource type manager (ec2, s3, etc)
"""
# Make the syntax a little nicer for common cases.
if isinstance(data, dict) and len(data) == 1 and "type" not in data:
op = list(data.keys())[0]
if op == "or":
return self["or"](data, self, manager)
elif op == "and":
return self["and"](data, self, manager)
elif op == "not":
return self["not"](data, self, manager)
return ValueFilter(data, manager)
if isinstance(data, str):
filter_type = data
data = {"type": data}
else:
filter_type = data.get("type")
if not filter_type:
raise PolicyValidationError("%s Invalid Filter %s" % (self.plugin_type, data))
filter_class = self.get(filter_type)
if filter_class is not None:
return filter_class(data, manager)
else:
raise PolicyValidationError(
"%s Invalid filter type %s" % (self.plugin_type, data)
)
|
def factory(self, data, manager=None):
"""Factory func for filters.
data - policy config for filters
manager - resource type manager (ec2, s3, etc)
"""
# Make the syntax a little nicer for common cases.
if isinstance(data, dict) and len(data) == 1 and "type" not in data:
op = list(data.keys())[0]
if op == "or":
return Or(data, self, manager)
elif op == "and":
return And(data, self, manager)
elif op == "not":
return Not(data, self, manager)
return ValueFilter(data, manager)
if isinstance(data, str):
filter_type = data
data = {"type": data}
else:
filter_type = data.get("type")
if not filter_type:
raise PolicyValidationError("%s Invalid Filter %s" % (self.plugin_type, data))
filter_class = self.get(filter_type)
if filter_class is not None:
return filter_class(data, manager)
else:
raise PolicyValidationError(
"%s Invalid filter type %s" % (self.plugin_type, data)
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/5724
|
$ custodian run -d -v -s out -p run-asg-auto-tag-creator ./auto-tag-creator.yaml
2020-05-06 15:47:07,205: custodian.commands:DEBUG Loaded file ./auto-tag-creator.yaml. Contains 11 policies
2020-05-06 15:47:07,223: custodian.aws:DEBUG using default region:us-west-2 from boto
2020-05-06 15:47:07,778: custodian.commands:ERROR Error while executing policy run-asg-auto-tag-creator, continuing
Traceback (most recent call last):
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 1056, in __call__
resources = PullMode(self).run()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 279, in run
if not self.policy.is_runnable():
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 906, in is_runnable
return self.conditions.evaluate(event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in evaluate
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in <listcomp>
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 299, in process
return self.process_set(resources, event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in process_set
resource_map = {r[rtype_id]: r for r in resources}
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in <dictcomp>
resource_map = {r[rtype_id]: r for r in resources}
KeyError: 'AutoScalingGroupName'
|
KeyError
|
def get_block_operator(self):
"""Determine the immediate parent boolean operator for a filter"""
# Top level operator is `and`
block = self.get_block_parent()
if block.type in ("and", "or", "not"):
return block.type
return "and"
|
def get_block_operator(self):
"""Determine the immediate parent boolean operator for a filter"""
# Top level operator is `and`
block_stack = ["and"]
for f in self.manager.iter_filters(block_end=True):
if f is None:
block_stack.pop()
continue
if f.type in ("and", "or", "not"):
block_stack.append(f.type)
if f == self:
break
return block_stack[-1]
|
https://github.com/cloud-custodian/cloud-custodian/issues/5724
|
$ custodian run -d -v -s out -p run-asg-auto-tag-creator ./auto-tag-creator.yaml
2020-05-06 15:47:07,205: custodian.commands:DEBUG Loaded file ./auto-tag-creator.yaml. Contains 11 policies
2020-05-06 15:47:07,223: custodian.aws:DEBUG using default region:us-west-2 from boto
2020-05-06 15:47:07,778: custodian.commands:ERROR Error while executing policy run-asg-auto-tag-creator, continuing
Traceback (most recent call last):
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 1056, in __call__
resources = PullMode(self).run()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 279, in run
if not self.policy.is_runnable():
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 906, in is_runnable
return self.conditions.evaluate(event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in evaluate
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in <listcomp>
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 299, in process
return self.process_set(resources, event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in process_set
resource_map = {r[rtype_id]: r for r in resources}
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in <dictcomp>
resource_map = {r[rtype_id]: r for r in resources}
KeyError: 'AutoScalingGroupName'
|
KeyError
|
def process_value_type(self, sentinel, value, resource):
if self.vtype == "normalize" and isinstance(value, str):
return sentinel, value.strip().lower()
elif self.vtype == "expr":
sentinel = self.get_resource_value(sentinel, resource)
return sentinel, value
elif self.vtype == "integer":
try:
value = int(str(value).strip())
except ValueError:
value = 0
elif self.vtype == "size":
try:
return sentinel, len(value)
except TypeError:
return sentinel, 0
elif self.vtype == "unique_size":
try:
return sentinel, len(set(value))
except TypeError:
return sentinel, 0
elif self.vtype == "swap":
return value, sentinel
elif self.vtype == "date":
return parse_date(sentinel), parse_date(value)
elif self.vtype == "age":
if not isinstance(sentinel, datetime.datetime):
sentinel = datetime.datetime.now(tz=tzutc()) - timedelta(sentinel)
value = parse_date(value)
if value is None:
# compatiblity
value = 0
# Reverse the age comparison, we want to compare the value being
# greater than the sentinel typically. Else the syntax for age
# comparisons is intuitively wrong.
return value, sentinel
elif self.vtype == "cidr":
s = parse_cidr(sentinel)
v = parse_cidr(value)
if isinstance(s, ipaddress._BaseAddress) and isinstance(
v, ipaddress._BaseNetwork
):
return v, s
return s, v
elif self.vtype == "cidr_size":
cidr = parse_cidr(value)
if cidr:
return sentinel, cidr.prefixlen
return sentinel, 0
# Allows for expiration filtering, for events in the future as opposed
# to events in the past which age filtering allows for.
elif self.vtype == "expiration":
if not isinstance(sentinel, datetime.datetime):
sentinel = datetime.datetime.now(tz=tzutc()) + timedelta(sentinel)
value = parse_date(value)
if value is None:
value = 0
return sentinel, value
# Allows for comparing version numbers, for things that you expect a minimum version number.
elif self.vtype == "version":
s = ComparableVersion(sentinel)
v = ComparableVersion(value)
return s, v
return sentinel, value
|
def process_value_type(self, sentinel, value, resource):
if self.vtype == "normalize":
return sentinel, value.strip().lower()
elif self.vtype == "expr":
sentinel = self.get_resource_value(sentinel, resource)
return sentinel, value
elif self.vtype == "integer":
try:
value = int(str(value).strip())
except ValueError:
value = 0
elif self.vtype == "size":
try:
return sentinel, len(value)
except TypeError:
return sentinel, 0
elif self.vtype == "unique_size":
try:
return sentinel, len(set(value))
except TypeError:
return sentinel, 0
elif self.vtype == "swap":
return value, sentinel
elif self.vtype == "date":
return parse_date(sentinel), parse_date(value)
elif self.vtype == "age":
if not isinstance(sentinel, datetime.datetime):
sentinel = datetime.datetime.now(tz=tzutc()) - timedelta(sentinel)
value = parse_date(value)
if value is None:
# compatiblity
value = 0
# Reverse the age comparison, we want to compare the value being
# greater than the sentinel typically. Else the syntax for age
# comparisons is intuitively wrong.
return value, sentinel
elif self.vtype == "cidr":
s = parse_cidr(sentinel)
v = parse_cidr(value)
if isinstance(s, ipaddress._BaseAddress) and isinstance(
v, ipaddress._BaseNetwork
):
return v, s
return s, v
elif self.vtype == "cidr_size":
cidr = parse_cidr(value)
if cidr:
return sentinel, cidr.prefixlen
return sentinel, 0
# Allows for expiration filtering, for events in the future as opposed
# to events in the past which age filtering allows for.
elif self.vtype == "expiration":
if not isinstance(sentinel, datetime.datetime):
sentinel = datetime.datetime.now(tz=tzutc()) + timedelta(sentinel)
value = parse_date(value)
if value is None:
value = 0
return sentinel, value
# Allows for comparing version numbers, for things that you expect a minimum version number.
elif self.vtype == "version":
s = ComparableVersion(sentinel)
v = ComparableVersion(value)
return s, v
return sentinel, value
|
https://github.com/cloud-custodian/cloud-custodian/issues/5724
|
$ custodian run -d -v -s out -p run-asg-auto-tag-creator ./auto-tag-creator.yaml
2020-05-06 15:47:07,205: custodian.commands:DEBUG Loaded file ./auto-tag-creator.yaml. Contains 11 policies
2020-05-06 15:47:07,223: custodian.aws:DEBUG using default region:us-west-2 from boto
2020-05-06 15:47:07,778: custodian.commands:ERROR Error while executing policy run-asg-auto-tag-creator, continuing
Traceback (most recent call last):
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 1056, in __call__
resources = PullMode(self).run()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 279, in run
if not self.policy.is_runnable():
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 906, in is_runnable
return self.conditions.evaluate(event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in evaluate
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in <listcomp>
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 299, in process
return self.process_set(resources, event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in process_set
resource_map = {r[rtype_id]: r for r in resources}
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in <dictcomp>
resource_map = {r[rtype_id]: r for r in resources}
KeyError: 'AutoScalingGroupName'
|
KeyError
|
def iter_filters(self, block_end=False):
return iter_filters(self.filters, block_end=block_end)
|
def iter_filters(self, block_end=False):
queue = deque(self.filters)
while queue:
f = queue.popleft()
if f and f.type in ("or", "and", "not"):
if block_end:
queue.appendleft(None)
for gf in f.filters:
queue.appendleft(gf)
yield f
|
https://github.com/cloud-custodian/cloud-custodian/issues/5724
|
$ custodian run -d -v -s out -p run-asg-auto-tag-creator ./auto-tag-creator.yaml
2020-05-06 15:47:07,205: custodian.commands:DEBUG Loaded file ./auto-tag-creator.yaml. Contains 11 policies
2020-05-06 15:47:07,223: custodian.aws:DEBUG using default region:us-west-2 from boto
2020-05-06 15:47:07,778: custodian.commands:ERROR Error while executing policy run-asg-auto-tag-creator, continuing
Traceback (most recent call last):
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 1056, in __call__
resources = PullMode(self).run()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 279, in run
if not self.policy.is_runnable():
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 906, in is_runnable
return self.conditions.evaluate(event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in evaluate
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in <listcomp>
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 299, in process
return self.process_set(resources, event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in process_set
resource_map = {r[rtype_id]: r for r in resources}
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in <dictcomp>
resource_map = {r[rtype_id]: r for r in resources}
KeyError: 'AutoScalingGroupName'
|
KeyError
|
def validate(self):
self.filters.extend(self.convert_deprecated())
self.filters = self.filter_registry.parse(self.filters, self)
|
def validate(self):
self.filters.extend(self.convert_deprecated())
self.filters = self.filter_registry.parse(
self.filters, self.policy.resource_manager
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/5724
|
$ custodian run -d -v -s out -p run-asg-auto-tag-creator ./auto-tag-creator.yaml
2020-05-06 15:47:07,205: custodian.commands:DEBUG Loaded file ./auto-tag-creator.yaml. Contains 11 policies
2020-05-06 15:47:07,223: custodian.aws:DEBUG using default region:us-west-2 from boto
2020-05-06 15:47:07,778: custodian.commands:ERROR Error while executing policy run-asg-auto-tag-creator, continuing
Traceback (most recent call last):
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 1056, in __call__
resources = PullMode(self).run()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 279, in run
if not self.policy.is_runnable():
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 906, in is_runnable
return self.conditions.evaluate(event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in evaluate
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in <listcomp>
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 299, in process
return self.process_set(resources, event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in process_set
resource_map = {r[rtype_id]: r for r in resources}
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in <dictcomp>
resource_map = {r[rtype_id]: r for r in resources}
KeyError: 'AutoScalingGroupName'
|
KeyError
|
def __call__(self):
"""Run policy in default mode"""
mode = self.get_execution_mode()
if isinstance(mode, ServerlessExecutionMode) or self.options.dryrun:
self._trim_runtime_filters()
if self.options.dryrun:
resources = PullMode(self).run()
elif not self.is_runnable():
resources = []
elif isinstance(mode, ServerlessExecutionMode):
resources = mode.provision()
else:
resources = mode.run()
# clear out resource manager post run, to clear cache
self.resource_manager = self.load_resource_manager()
return resources
|
def __call__(self):
"""Run policy in default mode"""
mode = self.get_execution_mode()
if self.options.dryrun:
resources = PullMode(self).run()
elif not self.is_runnable():
resources = []
elif isinstance(mode, ServerlessExecutionMode):
resources = mode.provision()
else:
resources = mode.run()
# clear out resource manager post run, to clear cache
self.resource_manager = self.load_resource_manager()
return resources
|
https://github.com/cloud-custodian/cloud-custodian/issues/5724
|
$ custodian run -d -v -s out -p run-asg-auto-tag-creator ./auto-tag-creator.yaml
2020-05-06 15:47:07,205: custodian.commands:DEBUG Loaded file ./auto-tag-creator.yaml. Contains 11 policies
2020-05-06 15:47:07,223: custodian.aws:DEBUG using default region:us-west-2 from boto
2020-05-06 15:47:07,778: custodian.commands:ERROR Error while executing policy run-asg-auto-tag-creator, continuing
Traceback (most recent call last):
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 1056, in __call__
resources = PullMode(self).run()
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 279, in run
if not self.policy.is_runnable():
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 906, in is_runnable
return self.conditions.evaluate(event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in evaluate
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/policy.py", line 843, in <listcomp>
state = all([f.process([policy_vars], event) for f in self.filters])
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 299, in process
return self.process_set(resources, event)
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in process_set
resource_map = {r[rtype_id]: r for r in resources}
File "/Users/tstansell/.local/c7n/lib/python3.7/site-packages/c7n/filters/core.py", line 314, in <dictcomp>
resource_map = {r[rtype_id]: r for r in resources}
KeyError: 'AutoScalingGroupName'
|
KeyError
|
def get_resource_date(self, asg):
cfg = self.launch_info.get(asg)
if cfg is None:
cfg = {}
ami = self.images.get(cfg.get("ImageId"), {})
return parse(ami.get(self.date_attribute, "2000-01-01T01:01:01.000Z"))
|
def get_resource_date(self, asg):
cfg = self.launch_info.get(asg)
ami = self.images.get(cfg.get("ImageId"), {})
return parse(ami.get(self.date_attribute, "2000-01-01T01:01:01.000Z"))
|
https://github.com/cloud-custodian/cloud-custodian/issues/5748
|
custodian run --dryrun --profile abc asg-ami.yml --region us-east-1 -s asg-ami --cache-period 0 -v
2020-05-11 16:26:37,495: custodian.cache:DEBUG Disabling cache
2020-05-11 16:26:37,496: custodian.commands:DEBUG Loaded file asg-ami.yml. Contains 1 policies
2020-05-11 16:26:38,023: custodian.output:DEBUG Storing output with <LogFile asg-ami/custodian-run.log>
2020-05-11 16:26:38,165: custodian.policy:DEBUG Running policy:asg-ami
resource:asg region:us-east-1 c7n:0.9.1
2020-05-11 16:26:51,425: custodian.resources.launchconfig:DEBUG Filtered from 527 to 527 launchconfig
2020-05-11 16:26:51,683: custodian.output:DEBUG metric:PolicyException Count:1 policy:asg-ami
restype:asg
2020-05-11 16:26:51,683: custodian.output:DEBUG metric:ApiCalls Count:28 policy:asg-ami
restype:asg
2020-05-11 16:26:51,683: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/Users/abc/custodian/cloud-custodian/c7n/policy.py", line 291, in run
resources = self.policy.resource_manager.resources()
File "/Users/abc/custodian/cloud-custodian/c7n/query.py", line 465, in resources
resources = self.filter_resources(resources)
File "/Users/abc/custodian/cloud-custodian/c7n/manager.py", line 109, in filter_resources
resources = f.process(resources, event)
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 258, in process
return self.process_set(resources, event)
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 274, in process_set
r[rtype_id] for r in f.process(resources, event)])
File "/Users/abc/custodian/cloud-custodian/c7n/resources/asg.py", line 573, in process
return super(ImageAgeFilter, self).process(asgs, event)
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 197, in process
return list(filter(self, resources))
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 677, in __call__
v = self.get_resource_date(i)
File "/Users/abc/custodian/cloud-custodian/c7n/resources/asg.py", line 577, in get_resource_date
ami = self.images.get(cfg.get('ImageId'), {})
AttributeError: 'NoneType' object has no attribute 'get'
2020-05-11 16:26:51,688: custodian.commands:ERROR Error while executing policy asg-ami, continuing
Traceback (most recent call last):
File "/Users/abc/custodian/cloud-custodian/c7n/commands.py", line 281, in run
policy()
File "/Users/abc/custodian/cloud-custodian/c7n/policy.py", line 1031, in __call__
resources = PullMode(self).run()
File "/Users/abc/custodian/cloud-custodian/c7n/policy.py", line 291, in run
resources = self.policy.resource_manager.resources()
File "/Users/abc/custodian/cloud-custodian/c7n/query.py", line 465, in resources
resources = self.filter_resources(resources)
File "/Users/abc/custodian/cloud-custodian/c7n/manager.py", line 109, in filter_resources
resources = f.process(resources, event)
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 258, in process
return self.process_set(resources, event)
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 274, in process_set
r[rtype_id] for r in f.process(resources, event)])
File "/Users/abc/custodian/cloud-custodian/c7n/resources/asg.py", line 573, in process
return super(ImageAgeFilter, self).process(asgs, event)
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 197, in process
return list(filter(self, resources))
File "/Users/abc/custodian/cloud-custodian/c7n/filters/core.py", line 677, in __call__
v = self.get_resource_date(i)
File "/Users/abc/custodian/cloud-custodian/c7n/resources/asg.py", line 577, in get_resource_date
ami = self.images.get(cfg.get('ImageId'), {})
AttributeError: 'NoneType' object has no attribute 'get'
|
AttributeError
|
def process(self, resources, event=None):
non_account_trails = set()
for r in resources:
region = self.manager.config.region
trail_arn = Arn.parse(r["TrailARN"])
if (
r.get("IsOrganizationTrail")
and self.manager.config.account_id != trail_arn.account_id
):
non_account_trails.add(r["TrailARN"])
continue
if r.get("HomeRegion") and r["HomeRegion"] != region:
region = trail_arn.region
if self.annotation_key in r:
continue
client = local_session(self.manager.session_factory).client(
"cloudtrail", region_name=region
)
status = client.get_trail_status(Name=r["Name"])
status.pop("ResponseMetadata")
r[self.annotation_key] = status
if non_account_trails:
self.log.warning(
"found %d org cloud trail from different account that cant be processed",
len(non_account_trails),
)
return super(Status, self).process(
[r for r in resources if r["TrailARN"] not in non_account_trails]
)
|
def process(self, resources, event=None):
for r in resources:
region = self.manager.config.region
trail_arn = Arn.parse(r["TrailARN"])
if (
r.get("IsOrganizationTrail")
and self.manager.config.account_id != trail_arn.account_id
):
continue
if r.get("HomeRegion") and r["HomeRegion"] != region:
region = trail_arn.region
if self.annotation_key in r:
continue
client = local_session(self.manager.session_factory).client(
"cloudtrail", region_name=region
)
status = client.get_trail_status(Name=r["Name"])
status.pop("ResponseMetadata")
r[self.annotation_key] = status
return super(Status, self).process(resources)
|
https://github.com/cloud-custodian/cloud-custodian/issues/5713
|
2020-05-05 09:03:16,332: custodian.commands:ERROR Error while executing policy awslogs-cloudtrail, continuing
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 1062, in __call__
resources = mode.run()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 291, in run
resources = self.policy.resource_manager.resources()
File "/usr/local/lib/python3.7/site-packages/c7n/query.py", line 466, in resources
resources = self.filter_resources(resources)
File "/usr/local/lib/python3.7/site-packages/c7n/manager.py", line 109, in filter_resources
resources = f.process(resources, event)
File "/usr/local/lib/python3.7/site-packages/c7n/resources/cloudtrail.py", line 122, in process
return super(Status, self).process(resources)
File "/usr/local/lib/python3.7/site-packages/c7n/filters/core.py", line 499, in process
return super(ValueFilter, self).process(resources, event)
File "/usr/local/lib/python3.7/site-packages/c7n/filters/core.py", line 197, in process
return list(filter(self, resources))
File "/usr/local/lib/python3.7/site-packages/c7n/resources/cloudtrail.py", line 125, in __call__
return self.match(r['c7n:TrailStatus'])
KeyError: 'c7n:TrailStatus'
|
KeyError
|
def get_launch_id(self, asg):
lid = asg.get("LaunchConfigurationName")
if lid is not None:
# We've noticed trailing white space allowed in some asgs
return lid.strip()
lid = asg.get("LaunchTemplate")
if lid is not None:
return (lid["LaunchTemplateId"], lid.get("Version", "$Default"))
if "MixedInstancesPolicy" in asg:
mip_spec = asg["MixedInstancesPolicy"]["LaunchTemplate"][
"LaunchTemplateSpecification"
]
return (mip_spec["LaunchTemplateId"], mip_spec.get("Version", "$Default"))
# we've noticed some corner cases where the asg name is the lc name, but not
# explicitly specified as launchconfiguration attribute.
lid = asg["AutoScalingGroupName"]
return lid
|
def get_launch_id(self, asg):
lid = asg.get("LaunchConfigurationName")
if lid is not None:
# We've noticed trailing white space allowed in some asgs
return lid.strip()
lid = asg.get("LaunchTemplate")
if lid is not None:
return (lid["LaunchTemplateId"], lid["Version"])
if "MixedInstancesPolicy" in asg:
mip_spec = asg["MixedInstancesPolicy"]["LaunchTemplate"][
"LaunchTemplateSpecification"
]
return (mip_spec["LaunchTemplateId"], mip_spec["Version"])
# we've noticed some corner cases where the asg name is the lc name, but not
# explicitly specified as launchconfiguration attribute.
lid = asg["AutoScalingGroupName"]
return lid
|
https://github.com/cloud-custodian/cloud-custodian/issues/5501
|
Traceback (most recent call last):
File "/home/cloudcustodian/custodian/bin/c7n-org", line 11, in <module>
sys.exit(cli())
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n_org/cli.py", line 649, in run
debug)] = (a, r)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/executor.py", line 63, in submit
return MainThreadFuture(func(*args, **kw))
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n_org/cli.py", line 560, in run_account
resources = p.run()
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/policy.py", line 1045, in __call__
resources = PullMode(self).run()
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/policy.py", line 288, in run
resources = self.policy.resource_manager.resources()
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/query.py", line 467, in resources
resources = self.filter_resources(resources)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/manager.py", line 108, in filter_resources
resources = f.process(resources, event)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/asg.py", line 832, in process
launch_info = LaunchInfo(self.manager).initialize(asgs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/asg.py", line 85, in initialize
self.templates = self.get_launch_templates(asgs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/asg.py", line 92, in get_launch_templates
template_ids = list(tmpl_mgr.get_asg_templates(asgs))
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/ec2.py", line 1956, in get_asg_templates
t['Version']), []).append(a['AutoScalingGroupName'])
KeyError: 'Version'
|
KeyError
|
def get_asg_templates(self, asgs):
templates = {}
for a in asgs:
t = None
if "LaunchTemplate" in a:
t = a["LaunchTemplate"]
elif "MixedInstancesPolicy" in a:
t = a["MixedInstancesPolicy"]["LaunchTemplate"][
"LaunchTemplateSpecification"
]
if t is None:
continue
templates.setdefault(
(t["LaunchTemplateId"], t.get("Version", "$Default")), []
).append(a["AutoScalingGroupName"])
return templates
|
def get_asg_templates(self, asgs):
templates = {}
for a in asgs:
t = None
if "LaunchTemplate" in a:
t = a["LaunchTemplate"]
elif "MixedInstancesPolicy" in a:
t = a["MixedInstancesPolicy"]["LaunchTemplate"][
"LaunchTemplateSpecification"
]
if t is None:
continue
templates.setdefault(
(t["LaunchTemplateId"], t.get("Version", None)), []
).append(a["AutoScalingGroupName"])
return templates
|
https://github.com/cloud-custodian/cloud-custodian/issues/5501
|
Traceback (most recent call last):
File "/home/cloudcustodian/custodian/bin/c7n-org", line 11, in <module>
sys.exit(cli())
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n_org/cli.py", line 649, in run
debug)] = (a, r)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/executor.py", line 63, in submit
return MainThreadFuture(func(*args, **kw))
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n_org/cli.py", line 560, in run_account
resources = p.run()
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/policy.py", line 1045, in __call__
resources = PullMode(self).run()
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/policy.py", line 288, in run
resources = self.policy.resource_manager.resources()
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/query.py", line 467, in resources
resources = self.filter_resources(resources)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/manager.py", line 108, in filter_resources
resources = f.process(resources, event)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/asg.py", line 832, in process
launch_info = LaunchInfo(self.manager).initialize(asgs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/asg.py", line 85, in initialize
self.templates = self.get_launch_templates(asgs)
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/asg.py", line 92, in get_launch_templates
template_ids = list(tmpl_mgr.get_asg_templates(asgs))
File "/home/cloudcustodian/custodian/lib/python3.6/site-packages/c7n/resources/ec2.py", line 1956, in get_asg_templates
t['Version']), []).append(a['AutoScalingGroupName'])
KeyError: 'Version'
|
KeyError
|
def process_resource(self, client, r, related_tags, tag_keys, tag_action):
tags = {}
resource_tags = {
t["Key"]: t["Value"]
for t in r.get("Tags", [])
if not t["Key"].startswith("aws:")
}
if tag_keys == "*":
tags = {k: v for k, v in related_tags.items() if resource_tags.get(k) != v}
else:
tags = {
k: v
for k, v in related_tags.items()
if k in tag_keys and resource_tags.get(k) != v
}
if not tags:
return
if not isinstance(tag_action, UniversalTag):
tags = [{"Key": k, "Value": v} for k, v in tags.items()]
tag_action.process_resource_set(client, resource_set=[r], tags=tags)
return True
|
def process_resource(self, client, r, related_tags, tag_keys, tag_action):
tags = {}
resource_tags = {
t["Key"]: t["Value"]
for t in r.get("Tags", [])
if not t["Key"].startswith("aws:")
}
if tag_keys == "*":
tags = {k: v for k, v in related_tags.items() if resource_tags.get(k) != v}
else:
tags = {
k: v
for k, v in related_tags.items()
if k in tag_keys and resource_tags.get(k) != v
}
if not tags:
return
tag_action.process_resource_set(
client, resource_set=[r], tags=[{"Key": k, "Value": v} for k, v in tags.items()]
)
return True
|
https://github.com/cloud-custodian/cloud-custodian/issues/5380
|
policies:
- name: copy-tags-from-api-to-stage
resource: rest-stage
actions:
- type: copy-related-tag
resource: rest-api
skip_missing: True
key: restApiId
tags: '*'
(cc) root@ip-10-128-128-174:~# custodian run --assume arn:aws:iam::1234567890:role/Cloud_Custodian_Role -s . kristen-test-api-gateway.yaml -r us-east-1 --cache-period 0 -v --debug
2020-02-21 17:51:47,332: custodian.cache:DEBUG Disabling cache
2020-02-21 17:51:47,333: custodian.commands:DEBUG Loaded file kristen-test-api-gateway.yaml. Contains 1 policies
2020-02-21 17:51:47,458: custodian.output:DEBUG Storing output with <LogFile file://./copy-tags-from-api-to-stage/custodian-run.log>
2020-02-21 17:51:47,545: custodian.policy:DEBUG Running policy:copy-tags-from-api-to-stage resource:rest-stage region:us-east-1 c7n:0.8.46.1
2020-02-21 17:51:47,668: custodian.resources.restapi:DEBUG Filtered from 4 to 4 restapi
2020-02-21 17:51:47,946: custodian.resources.reststage:DEBUG Filtered from 4 to 4 reststage
2020-02-21 17:51:47,947: custodian.policy:INFO policy:copy-tags-from-api-to-stage resource:rest-stage region:us-east-1 count:4 time:0.40
2020-02-21 17:51:48,045: custodian.output:DEBUG metric:ResourceCount Count:4 policy:copy-tags-from-api-to-stage restype:rest-stage scope:policy
2020-02-21 17:51:48,046: custodian.output:DEBUG metric:PolicyException Count:1 policy:copy-tags-from-api-to-stage restype:rest-stage
2020-02-21 17:51:48,046: custodian.output:DEBUG metric:ApiCalls Count:6 policy:copy-tags-from-api-to-stage restype:rest-stage
2020-02-21 17:51:48,046: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/root/cc/lib/python3.6/site-packages/c7n/policy.py", line 320, in run
results = a.process(resources)
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 1050, in process
client, r, related_tag_map[related], self.data['tags'], tag_action):
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 1075, in process_resource
tags=[{'Key': k, 'Value': v} for k, v in tags.items()])
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 860, in process_resource_set
client.tag_resources, ResourceARNList=arns, Tags=tags)
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 1117, in universal_retry
response = method(ResourceARNList=ResourceARNList, **kw)
File "/root/cc/lib/python3.6/site-packages/botocore/client.py", line 276, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/root/cc/lib/python3.6/site-packages/botocore/client.py", line 559, in _make_api_call
api_params, operation_model, context=request_context)
File "/root/cc/lib/python3.6/site-packages/botocore/client.py", line 607, in _convert_to_request_dict
api_params, operation_model)
File "/root/cc/lib/python3.6/site-packages/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Invalid type for parameter Tags, value: [{'Key': 'BillingCostCenter', 'Value': '05a'}, {'Key': 'Division', 'Value': 'test'}, {'Key': 'Environment', 'Value': 'dev'}, {'Key': 'ResourceContact', 'Value': 'kristen.happ@company.com'}, {'Key': 'ResourcePurpose', 'Value': 'Testing Tagging Policy'}], type: <class 'list'>, valid types: <class 'dict'>
Traceback (most recent call last):
File "/root/cc/lib/python3.6/site-packages/c7n/cli.py", line 374, in main
command(config)
File "/root/cc/lib/python3.6/site-packages/c7n/commands.py", line 141, in _load_policies
return f(options, list(policies))
File "/root/cc/lib/python3.6/site-packages/c7n/commands.py", line 283, in run
policy()
File "/root/cc/lib/python3.6/site-packages/c7n/policy.py", line 1049, in __call__
resources = mode.run()
File "/root/cc/lib/python3.6/site-packages/c7n/policy.py", line 320, in run
results = a.process(resources)
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 1050, in process
client, r, related_tag_map[related], self.data['tags'], tag_action):
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 1075, in process_resource
tags=[{'Key': k, 'Value': v} for k, v in tags.items()])
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 860, in process_resource_set
client.tag_resources, ResourceARNList=arns, Tags=tags)
File "/root/cc/lib/python3.6/site-packages/c7n/tags.py", line 1117, in universal_retry
response = method(ResourceARNList=ResourceARNList, **kw)
File "/root/cc/lib/python3.6/site-packages/botocore/client.py", line 276, in _api_call
return self._make_api_call(operation_name, kwargs)
File "/root/cc/lib/python3.6/site-packages/botocore/client.py", line 559, in _make_api_call
api_params, operation_model, context=request_context)
File "/root/cc/lib/python3.6/site-packages/botocore/client.py", line 607, in _convert_to_request_dict
api_params, operation_model)
File "/root/cc/lib/python3.6/site-packages/botocore/validate.py", line 297, in serialize_to_request
raise ParamValidationError(report=report.generate_report())
botocore.exceptions.ParamValidationError: Parameter validation failed:
Invalid type for parameter Tags, value: [{'Key': 'BillingCostCenter', 'Value': '05a'}, {'Key': 'Division', 'Value': 'test'}, {'Key': 'Environment', 'Value': 'dev'}, {'Key': 'ResourceContact', 'Value': 'kristen.happ@company.com'}, {'Key': 'ResourcePurpose', 'Value': 'Testing Tagging Policy'}], type: <class 'list'>, valid types: <class 'dict'>
> /root/cc/lib/python3.6/site-packages/botocore/validate.py(297)serialize_to_request()
-> raise ParamValidationError(report=report.generate_report())
(Pdb)
(cc) root@ip-10-128-128-174:~# custodian version --debug
Please copy/paste the following info along with any bug reports:
Custodian: 0.8.46.1
Python: 3.6.9 (default, Nov 7 2019, 10:44:02)
[GCC 8.3.0]
Platform: posix.uname_result(sysname='Linux', nodename='ip-10-128-128-174', release='4.15.0-1060-aws', version='#62-Ubuntu SMP Tue Feb 11 21:23:22 UTC 2020', machine='x86_64')
Using venv: False
Docker: False
PYTHONPATH:
[ '/root/cc/bin',
'/usr/lib/python36.zip',
'/usr/lib/python3.6',
'/usr/lib/python3.6/lib-dynload',
'/root/cc/lib/python3.6/site-packages']
(cc) root@ip-10-128-128-174:~# pip3 show boto3
Name: boto3
Version: 1.11.7
Summary: The AWS SDK for Python
Home-page: https://github.com/boto/boto3
Author: Amazon Web Services
Author-email: UNKNOWN
License: Apache License 2.0
Location: /root/cc/lib/python3.6/site-packages
Requires: s3transfer, jmespath, botocore
|
botocore.exceptions.ParamValidationError
|
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client(
"support", region_name="us-east-1"
)
checks = self.get_check_result(client, self.check_id)
region = self.manager.config.region
checks["flaggedResources"] = [
r
for r in checks["flaggedResources"]
if r["metadata"][0] == region
or (r["metadata"][0] == "-" and region == "us-east-1")
]
resources[0]["c7n:ServiceLimits"] = checks
delta = timedelta(self.data.get("refresh_period", 1))
check_date = parse_date(checks["timestamp"])
if datetime.now(tz=tzutc()) - delta > check_date:
client.refresh_trusted_advisor_check(checkId=self.check_id)
threshold = self.data.get("threshold")
services = self.data.get("services")
limits = self.data.get("limits")
exceeded = []
for resource in checks["flaggedResources"]:
if threshold is None and resource["status"] == "ok":
continue
limit = dict(zip(self.check_limit, resource["metadata"]))
if services and limit["service"] not in services:
continue
if limits and limit["check"] not in limits:
continue
limit["status"] = resource["status"]
limit["percentage"] = float(limit["extant"] or 0) / float(limit["limit"]) * 100
if threshold and limit["percentage"] < threshold:
continue
exceeded.append(limit)
if exceeded:
resources[0]["c7n:ServiceLimitsExceeded"] = exceeded
return resources
return []
|
def process(self, resources, event=None):
client = local_session(self.manager.session_factory).client(
"support", region_name="us-east-1"
)
checks = client.describe_trusted_advisor_check_result(
checkId=self.check_id, language="en"
)["result"]
region = self.manager.config.region
checks["flaggedResources"] = [
r
for r in checks["flaggedResources"]
if r["metadata"][0] == region
or (r["metadata"][0] == "-" and region == "us-east-1")
]
resources[0]["c7n:ServiceLimits"] = checks
delta = timedelta(self.data.get("refresh_period", 1))
check_date = parse_date(checks["timestamp"])
if datetime.now(tz=tzutc()) - delta > check_date:
client.refresh_trusted_advisor_check(checkId=self.check_id)
threshold = self.data.get("threshold")
services = self.data.get("services")
limits = self.data.get("limits")
exceeded = []
for resource in checks["flaggedResources"]:
if threshold is None and resource["status"] == "ok":
continue
limit = dict(zip(self.check_limit, resource["metadata"]))
if services and limit["service"] not in services:
continue
if limits and limit["check"] not in limits:
continue
limit["status"] = resource["status"]
limit["percentage"] = float(limit["extant"] or 0) / float(limit["limit"]) * 100
if threshold and limit["percentage"] < threshold:
continue
exceeded.append(limit)
if exceeded:
resources[0]["c7n:ServiceLimitsExceeded"] = exceeded
return resources
return []
|
https://github.com/cloud-custodian/cloud-custodian/issues/2402
|
$ export AWS_PROFILE=...
$ custodian run --dryrun --output-dir=. custodian.yml
018-05-15 17:47:25,372: custodian.commands:ERROR Error while executing policy account-service-limits, continuing
Traceback (most recent call last):
File "/Users/9200779/git/bitbucket.org/vmdb/custodian/custodian/lib/python2.7/site-packages/c7n/commands.py", line 237, in run
policy()
File "/Users/9200779/git/bitbucket.org/vmdb/custodian/custodian/lib/python2.7/site-packages/c7n/policy.py", line 703, in __call__
resources = PullMode(self).run()
File "/Users/9200779/git/bitbucket.org/vmdb/custodian/custodian/lib/python2.7/site-packages/c7n/policy.py", line 222, in run
resources = self.policy.resource_manager.resources()
File "/Users/9200779/git/bitbucket.org/vmdb/custodian/custodian/lib/python2.7/site-packages/c7n/resources/account.py", line 66, in resources
return self.filter_resources([get_account(self.session_factory, self.config)])
File "/Users/9200779/git/bitbucket.org/vmdb/custodian/custodian/lib/python2.7/site-packages/c7n/manager.py", line 90, in filter_resources
resources = f.process(resources, event)
File "/Users/9200779/git/bitbucket.org/vmdb/custodian/custodian/lib/python2.7/site-packages/c7n/resources/account.py", line 432, in process
checks['flaggedResources'] = [r for r in checks['flaggedResources']
KeyError: u'flaggedResources'
|
KeyError
|
def get_asg_templates(self, asgs):
templates = {}
for a in asgs:
t = None
if "LaunchTemplate" in a:
t = a["LaunchTemplate"]
elif "MixedInstancesPolicy" in a:
t = a["MixedInstancesPolicy"]["LaunchTemplate"][
"LaunchTemplateSpecification"
]
if t is None:
continue
templates.setdefault(
(t["LaunchTemplateId"], t.get("Version", None)), []
).append(a["AutoScalingGroupName"])
return templates
|
def get_asg_templates(self, asgs):
templates = {}
for a in asgs:
t = None
if "LaunchTemplate" in a:
t = a["LaunchTemplate"]
elif "MixedInstancesPolicy" in a:
t = a["MixedInstancesPolicy"]["LaunchTemplate"][
"LaunchTemplateSpecification"
]
if t is None:
continue
templates.setdefault((t["LaunchTemplateId"], t["Version"]), []).append(
a["AutoScalingGroupName"]
)
return templates
|
https://github.com/cloud-custodian/cloud-custodian/issues/5457
|
2020-03-17 14:34:01,624: custodian.output:ERROR Error while executing policy
Traceback (most recent call last):
File "/Users/user/.local/share/virtualenvs/cloud_custodian-GAUqGNXp/lib/python3.7/site-packages/c7n/policy.py", line 288, in run
resources = self.policy.resource_manager.resources()
File "/Users/user/.local/share/virtualenvs/cloud_custodian-GAUqGNXp/lib/python3.7/site-packages/c7n/resources/ami.py", line 53, in resources
return super(AMI, self).resources(query=query)
File "/Users/user/.local/share/virtualenvs/cloud_custodian-GAUqGNXp/lib/python3.7/site-packages/c7n/query.py", line 460, in resources
resources = self.filter_resources(resources)
File "/Users/user/.local/share/virtualenvs/cloud_custodian-GAUqGNXp/lib/python3.7/site-packages/c7n/manager.py", line 108, in filter_resources
resources = f.process(resources, event)
File "/Users/user/.local/share/virtualenvs/cloud_custodian-GAUqGNXp/lib/python3.7/site-packages/c7n/resources/ami.py", line 305, in process
images = self._pull_ec2_images().union(self._pull_asg_images())
File "/Users/user/.local/share/virtualenvs/cloud_custodian-GAUqGNXp/lib/python3.7/site-packages/c7n/resources/ami.py", line 296, in _pull_asg_images
list(tmpl_mgr.get_asg_templates(asgs).keys())):
File "/Users/user/.local/share/virtualenvs/cloud_custodian-GAUqGNXp/lib/python3.7/site-packages/c7n/resources/ec2.py", line 1902, in get_asg_templates
t['Version']), []).append(a['AutoScalingGroupName'])
KeyError: 'Version'
|
KeyError
|
def get_resource_tag_targets(resource, target_tag_keys):
if "Tags" not in resource:
return []
if isinstance(resource["Tags"], dict):
tags = resource["Tags"]
else:
tags = {tag["Key"]: tag["Value"] for tag in resource["Tags"]}
targets = []
for target_tag_key in target_tag_keys:
if target_tag_key in tags:
targets.append(tags[target_tag_key])
return targets
|
def get_resource_tag_targets(resource, target_tag_keys):
if "Tags" not in resource:
return []
tags = {tag["Key"]: tag["Value"] for tag in resource["Tags"]}
targets = []
for target_tag_key in target_tag_keys:
if target_tag_key in tags:
targets.append(tags[target_tag_key])
return targets
|
https://github.com/cloud-custodian/cloud-custodian/issues/5397
|
policies:
- name: msk-enforce-encryption-status
description: |
Deletes Non TLS only in-transit encrypted Kafka clusters and notifies customer.
resource: kafka
mode:
type: periodic
schedule: "rate(30 minutes)"
packages:
- botocore
- boto3
- urllib3
- certifi
tags:
ResourceContact: "AGTAutomatedCloudConfigurationStandards@transamerica.com"
ResourcePurpose: "Managed by AGT Automated Cloud Configuration Standards - ACCS"
Channel: AGT
BillingCostCenter: "0701-97290010 AGT"
Division: agt
Environment: prd
AGTManaged: "true"
filters:
- type: value
key: "EncryptionInfo.EncryptionInTransit.ClientBroker"
op: ne
value: "TLS"
- type: value
value_type: date
key: CreationTime
op: greater-than
value: "2020/02/20"
actions:
- type: delete
- type: notify
template: default.html
comments: |
The below Kafka cluster is not TLS encrypted and therefore it did not meet
Kafka encrption requirements.
priority_header: 1
subject: "Kafka - Kafka Cluster Deleted - [custodian {{ account }} - {{ region }}]"
violation_desc: |
Kafka clusters are required to only allow TLS encrypted data.
action_desc: "Actions Taken: The below Kafka cluster has been deleted. Please create new Kafka Cluster with Only TLS encrypted traffic allowed option."
owner_absent_contact:
- "{missingcontact}"
to:
- ouremail@company.com
- resource-owner
transport:
type: sqs
queue: https://sqs.us-east-1.amazonaws.com/123456789000/cloud-custodian-mailer
region: us-east-1
- type: put-metric
key: ClusterName
namespace: OurNamespace
metric_name: Kafka-Cluster-Unencrypted-Delete
op: count
units: Count
Mailer Log:
[DEBUG] 2020-02-28T20:15:10.991Z 63d4cdff-ddcf-443c-963f-aebd7cdeb1cf Message id: 62171941-8278-41ed-a75d-01d3bb4e08fd received {'mtype': {'StringValue': 'maidmsg/1.0', 'DataType': 'String'}}
14:15:10
[DEBUG] 2020-02-28T20:15:10.992Z 63d4cdff-ddcf-443c-963f-aebd7cdeb1cf Got account:ouraccount message:62171941-8278-41ed-a75d-01d3bb4e08fd kafka:2 policy:msk-enforce-encryption recipients:Ouremail@company.com, resource-owner, event-owner
14:15:11
[INFO] 2020-02-28T20:15:11.457Z 63d4cdff-ddcf-443c-963f-aebd7cdeb1cf no aws username in event
14:15:11
[ERROR] 2020-02-28T20:15:11.457Z 63d4cdff-ddcf-443c-963f-aebd7cdeb1cf Error starting mailer MailerSqsQueueProcessor().
14:15:11
Error: string indices must be integers
14:15:11
Traceback (most recent call last):
14:15:11
File "/var/task/c7n_mailer/handle.py", line 46, in start_c7n_mailer
14:15:11
mailer_sqs_queue_processor.run(parallel)
14:15:11
File "/var/task/c7n_mailer/sqs_queue_processor.py", line 130, in run
14:15:11
self.process_sqs_message(sqs_message)
14:15:11
File "/var/task/c7n_mailer/sqs_queue_processor.py", line 162, in process_sqs_message
14:15:11
to_addrs_to_email_messages_map = email_delivery.get_to_addrs_email_messages_map(sqs_message)
14:15:11
File "/var/task/c7n_mailer/email_delivery.py", line 194, in get_to_addrs_email_messages_map
14:15:11
to_addrs_to_resources_map = self.get_email_to_addrs_to_resources_map(sqs_message)
14:15:11
File "/var/task/c7n_mailer/email_delivery.py", line 167, in get_email_to_addrs_to_resources_map
14:15:11
resource
14:15:11
File "/var/task/c7n_mailer/email_delivery.py", line 80, in get_ldap_emails_from_resource
14:15:11
ldap_uid_tag_values = get_resource_tag_targets(resource, ldap_uid_tag_keys)
14:15:11
File "/var/task/c7n_mailer/utils.py", line 93, in get_resource_tag_targets
14:15:11
tags = {tag['Key']: tag['Value'] for tag in resource['Tags']}
14:15:11
File "/var/task/c7n_mailer/utils.py", line 93, in <dictcomp>
14:15:11
tags = {tag['Key']: tag['Value'] for tag in resource['Tags']}
14:15:11
TypeError: string indices must be integers
14:15:11
END RequestId: 63d4cdff-ddcf-443c-963f-aebd7cdeb1cf
|
TypeError
|
def watch(limit):
"""watch scan rates across the cluster"""
period = 5.0
prev = db.db()
prev_totals = None
while True:
click.clear()
time.sleep(period)
cur = db.db()
cur.data["gkrate"] = {}
progress = []
prev_buckets = {b.bucket_id: b for b in prev.buckets()}
totals = {"scanned": 0, "krate": 0, "lrate": 0, "bucket_id": "totals"}
for b in cur.buckets():
if not b.scanned:
continue
totals["scanned"] += b.scanned
totals["krate"] += b.krate
totals["lrate"] += b.lrate
if b.bucket_id not in prev_buckets:
b.data["gkrate"][b.bucket_id] = b.scanned / period
elif b.scanned == prev_buckets[b.bucket_id].scanned:
continue
else:
b.data["gkrate"][b.bucket_id] = (
b.scanned - prev_buckets[b.bucket_id].scanned
) / period
progress.append(b)
if prev_totals is None:
totals["gkrate"] = "..."
else:
totals["gkrate"] = (totals["scanned"] - prev_totals["scanned"]) / period
prev = cur
prev_totals = totals
progress = sorted(progress, key=lambda x: x.gkrate, reverse=True)
if limit:
progress = progress[:limit]
progress.insert(0, Bag(totals))
format_plain(
progress,
None,
explicit_only=True,
keys=["bucket_id", "scanned", "gkrate", "lrate", "krate"],
)
|
def watch(limit):
"""watch scan rates across the cluster"""
period = 5.0
prev = db.db()
prev_totals = None
while True:
click.clear()
time.sleep(period)
cur = db.db()
cur.data["gkrate"] = {}
progress = []
prev_buckets = {b.bucket_id: b for b in prev.buckets()}
totals = {"scanned": 0, "krate": 0, "lrate": 0, "bucket_id": "totals"}
for b in cur.buckets():
if not b.scanned:
continue
totals["scanned"] += b.scanned
totals["krate"] += b.krate
totals["lrate"] += b.lrate
if b.bucket_id not in prev_buckets:
b.data["gkrate"][b.bucket_id] = b.scanned / period
elif b.scanned == prev_buckets[b.bucket_id].scanned:
continue
else:
b.data["gkrate"][b.bucket_id] = (
b.scanned - prev_buckets[b.bucket_id].scanned
) / period
progress.append(b)
if prev_totals is None:
totals["gkrate"] = "..."
else:
totals["gkrate"] = (totals["scanned"] - prev_totals["scanned"]) / period
prev = cur
prev_totals = totals
progress = sorted(progress, key=lambda x: x.gkrate, reverse=True)
if limit:
progress = progress[:limit]
progress.insert(0, utils.Bag(totals))
format_plain(
progress,
None,
explicit_only=True,
keys=["bucket_id", "scanned", "gkrate", "lrate", "krate"],
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/2624
|
Traceback (most recent call last):
File "/usr/bin/c7n-salactus", line 11, in <module>
load_entry_point('c7n-salactus==0.3.0', 'console_scripts', 'c7n-salactus')()
File "/usr/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/c7n_salactus/cli.py", line 576, in watch
progress.insert(0, utils.Bag(totals))
AttributeError: 'module' object has no attribute 'Bag'
|
AttributeError
|
def _report_options(p):
"""Add options specific to the report subcommand."""
_default_options(p, blacklist=["cache", "log-group", "quiet"])
p.add_argument(
"--days", type=float, default=1, help="Number of days of history to consider"
)
p.add_argument(
"--raw",
type=argparse.FileType("w"),
help="Store raw json of collected records to given file path",
)
p.add_argument(
"--field",
action="append",
default=[],
type=_key_val_pair,
metavar="HEADER=FIELD",
help="Repeatable. JMESPath of field to include in the output OR "
"for a tag use prefix `tag:`. Special case fields `region` and"
"`policy` are available",
)
p.add_argument(
"--no-default-fields",
action="store_true",
help="Exclude default fields for report.",
)
p.add_argument(
"--format",
default="csv",
choices=["csv", "grid", "simple", "json"],
help="Format to output data in (default: %(default)s). "
"Options include simple, grid, csv, json",
)
|
def _report_options(p):
"""Add options specific to the report subcommand."""
_default_options(p, blacklist=["cache", "log-group", "quiet"])
p.add_argument(
"--days", type=float, default=1, help="Number of days of history to consider"
)
p.add_argument(
"--raw",
type=argparse.FileType("wb"),
help="Store raw json of collected records to given file path",
)
p.add_argument(
"--field",
action="append",
default=[],
type=_key_val_pair,
metavar="HEADER=FIELD",
help="Repeatable. JMESPath of field to include in the output OR "
"for a tag use prefix `tag:`. Special case fields `region` and"
"`policy` are available",
)
p.add_argument(
"--no-default-fields",
action="store_true",
help="Exclude default fields for report.",
)
p.add_argument(
"--format",
default="csv",
choices=["csv", "grid", "simple", "json"],
help="Format to output data in (default: %(default)s). "
"Options include simple, grid, csv, json",
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/5269
|
$ custodian report -s output --raw outfile.json list-ec2-instances.yaml
CustodianDate,InstanceId,tag:Name,InstanceType,LaunchTime,VpcId,PrivateIpAddress
Traceback (most recent call last):
File "/home/ec2-user/custodian/bin/custodian", line 8, in <module>
sys.exit(main())
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/cli.py", line 374, in main
command(config)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/commands.py", line 141, in _load_policies
return f(options, list(policies))
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/commands.py", line 311, in report
policies, begin_date, options, sys.stdout, raw_output_fh=options.raw)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/reports/csvout.py", line 112, in report
dumps(records, raw_output_fh, indent=2)
File "/home/ec2-user/custodian/lib/python3.7/site-packages/c7n/utils.py", line 122, in dumps
return json.dump(data, fh, cls=DateTimeEncoder, indent=indent)
File "/usr/lib64/python3.7/json/__init__.py", line 180, in dump
fp.write(chunk)
TypeError: a bytes-like object is required, not 'str'
|
TypeError
|
def _process_resource(self, resource):
lock_name = self._get_lock_name(resource)
lock_notes = self._get_lock_notes(resource)
if is_resource_group(resource):
self.client.management_locks.create_or_update_at_resource_group_level(
resource["name"],
lock_name,
ManagementLockObject(level=self.lock_type, notes=lock_notes),
)
else:
self.client.management_locks.create_or_update_by_scope(
resource["id"],
lock_name,
ManagementLockObject(level=self.lock_type, notes=lock_notes),
)
|
def _process_resource(self, resource):
lock_name = self._get_lock_name(resource)
lock_notes = self._get_lock_notes(resource)
if is_resource_group(resource):
self.client.management_locks.create_or_update_at_resource_group_level(
resource["name"],
lock_name,
ManagementLockObject(level=self.lock_type, notes=lock_notes),
)
else:
self.client.management_locks.create_or_update_at_resource_level(
resource["resourceGroup"],
ResourceIdParser.get_namespace(resource["id"]),
ResourceIdParser.get_resource_name(resource.get("c7n:parent-id")) or "",
ResourceIdParser.get_resource_type(resource["id"]),
resource["name"],
lock_name,
ManagementLockObject(level=self.lock_type, notes=lock_notes),
)
|
https://github.com/cloud-custodian/cloud-custodian/issues/4937
|
====================================================================== FAILURES =======================================================================
________________________________________________________ ActionsMarkForOpTest.test_mark_for_op ________________________________________________________
[gw4] darwin -- Python 3.7.3 /Users/kapilt/projects/release-custodian/.tox/py37/bin/python
Traceback (most recent call last):
File "/Users/kapilt/projects/release-custodian/tools/c7n_azure/tests_azure/test_actions_mark-for-op.py", line 58, in test_mark_for_op
self.assertEqual(tags, expected_tags)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 839, in assertEqual
assertion_func(first, second, msg=msg)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 1138, in assertDictEqual
self.fail(self._formatMessage(msg, standardMsg))
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 680, in fail
raise self.failureException(msg)
AssertionError: {'pre[66 chars]dian_status': 'Resource does not meet policy: stop@2019/10/21'} != {'pre[66 chars]dian_status': 'Resource does not meet policy: stop@2019/10/20'}
- {'custodian_status': 'Resource does not meet policy: stop@2019/10/21',
? ^
+ {'custodian_status': 'Resource does not meet policy: stop@2019/10/20',
? ^
|
AssertionError
|
def get_namespace(resource_id):
parsed = parse_resource_id(resource_id)
return parsed.get("namespace")
|
def get_namespace(resource_id):
parsed = parse_resource_id(resource_id)
if parsed.get("children"):
return "/".join([parsed.get("namespace"), parsed.get("type")])
return parsed.get("namespace")
|
https://github.com/cloud-custodian/cloud-custodian/issues/4937
|
====================================================================== FAILURES =======================================================================
________________________________________________________ ActionsMarkForOpTest.test_mark_for_op ________________________________________________________
[gw4] darwin -- Python 3.7.3 /Users/kapilt/projects/release-custodian/.tox/py37/bin/python
Traceback (most recent call last):
File "/Users/kapilt/projects/release-custodian/tools/c7n_azure/tests_azure/test_actions_mark-for-op.py", line 58, in test_mark_for_op
self.assertEqual(tags, expected_tags)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 839, in assertEqual
assertion_func(first, second, msg=msg)
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 1138, in assertDictEqual
self.fail(self._formatMessage(msg, standardMsg))
File "/usr/local/Cellar/python/3.7.3/Frameworks/Python.framework/Versions/3.7/lib/python3.7/unittest/case.py", line 680, in fail
raise self.failureException(msg)
AssertionError: {'pre[66 chars]dian_status': 'Resource does not meet policy: stop@2019/10/21'} != {'pre[66 chars]dian_status': 'Resource does not meet policy: stop@2019/10/20'}
- {'custodian_status': 'Resource does not meet policy: stop@2019/10/21',
? ^
+ {'custodian_status': 'Resource does not meet policy: stop@2019/10/20',
? ^
|
AssertionError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.