after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def _atime_expr(self):
"""If max_items is set, return an expression to limit the query."""
max_items = config.get("completion", "web-history-max-items")
# HistoryCategory should not be added to the completion in that case.
assert max_items != 0
if max_items < 0:
return ""
min_atime = (
sql.Query(
" ".join(
[
"SELECT min(last_atime) FROM",
"(SELECT last_atime FROM CompletionHistory",
"ORDER BY last_atime DESC LIMIT :limit)",
]
)
)
.run(limit=max_items)
.value()
)
if not min_atime:
# if there are no history items, min_atime may be '' (issue #2849)
return ""
return "AND last_atime >= {}".format(min_atime)
|
def _atime_expr(self):
"""If max_items is set, return an expression to limit the query."""
max_items = config.get("completion", "web-history-max-items")
# HistoryCategory should not be added to the completion in that case.
assert max_items != 0
if max_items < 0:
return ""
min_atime = (
sql.Query(
" ".join(
[
"SELECT min(last_atime) FROM",
"(SELECT last_atime FROM CompletionHistory",
"ORDER BY last_atime DESC LIMIT :limit)",
]
)
)
.run(limit=max_items)
.value()
)
return "AND last_atime >= {}".format(min_atime)
|
https://github.com/qutebrowser/qutebrowser/issues/2849
|
12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)"
12:21:36 DEBUG sql sql:run:99 Running SQL query: "SELECT min(last_atime) FROM (SELECT last_atime FROM CompletionHistory ORDER BY last_atime DESC LIMIT :limit)"
12:21:36 DEBUG sql sql:run:102 query bindings: {':limit': 1000}
12:21:36 DEBUG sql sql:__init__:80 Preparing SQL query: "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC"
12:21:36 DEBUG completion debug:__exit__:264 Starting url completion took 0.003652 seconds.
12:21:36 ERROR misc crashsignal:exception_hook:205 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completer.py", line 236, in _update_completion
model = func(*args)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py", line 70, in url
hist_cat = histcategory.HistoryCategory(delete_func=_delete_history)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py", line 54, in __init__
]), forward_only=False)
File "/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py", line 83, in __init__
querystr, self.lastError().text()))
qutebrowser.misc.sql.SqlException: Failed to prepare query "SELECT url, title, strftime('%Y-%m-%d', last_atime, 'unixepoch', 'localtime') FROM CompletionHistory WHERE (url LIKE :pat escape '\' or title LIKE :pat escape '\') AND last_atime >= ORDER BY last_atime DESC": "near "ORDER": syntax error Unable to execute statement"
|
qutebrowser.misc.sql.SqlException
|
def set_pattern(self, pattern):
"""Set the pattern on the underlying model."""
if not self.model():
return
self.pattern = pattern
with debug.log_time(log.completion, "Set pattern {}".format(pattern)):
self.model().set_pattern(pattern)
self.selectionModel().clear()
self._maybe_update_geometry()
self._maybe_show()
|
def set_pattern(self, pattern):
"""Set the pattern on the underlying model."""
if not self.model():
return
self.pattern = pattern
with debug.log_time(log.completion, "Set pattern {}".format(pattern)):
self.model().set_pattern(pattern)
self._maybe_update_geometry()
self._maybe_show()
|
https://github.com/qutebrowser/qutebrowser/issues/2843
|
19:03:56 DEBUG commands command:run:524 Calling qutebrowser.completion.completionwidget.CompletionView.completion_item_del(<qutebrowser.completion.completionwidget.CompletionView>)
19:03:56 DEBUG completion urlmodel:_delete_history:34 Deleting history entry None
19:03:56 DEBUG sql sql:__init__:80 Preparing SQL query: "DELETE FROM History where url = :val"
19:03:56 DEBUG sql sql:run:99 Running SQL query: "DELETE FROM History where url = :val"
19:03:56 DEBUG sql sql:run:102 query bindings: {':val': None}
19:03:56 ERROR misc crashsignal:exception_hook:205 Uncaught exception
Traceback (most recent call last):
File "/home/florian/proj/qutebrowser/git/qutebrowser/app.py", line 889, in eventFilter
return handler(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/app.py", line 849, in _handle_key_event
return man.eventFilter(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/modeman.py", line 337, in eventFilter
return self._eventFilter_keypress(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/modeman.py", line 168, in _eventFilter_keypress
handled = parser.handle(event)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/basekeyparser.py", line 307, in handle
handled = self._handle_special_key(e)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/basekeyparser.py", line 136, in _handle_special_key
self.execute(cmdstr, self.Type.special, count)
File "/home/florian/proj/qutebrowser/git/qutebrowser/keyinput/keyparser.py", line 44, in execute
self._commandrunner.run(cmdstr, count)
File "/home/florian/proj/qutebrowser/git/qutebrowser/commands/runners.py", line 275, in run
result.cmd.run(self._win_id, args, count=count)
File "/home/florian/proj/qutebrowser/git/qutebrowser/commands/command.py", line 525, in run
self.handler(*posargs, **kwargs)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/completionwidget.py", line 376, in completion_item_del
self.model().delete_cur_item(index)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/completionmodel.py", line 223, in delete_cur_item
cat.delete_cur_item(cat.index(index.row(), 0))
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/histcategory.py", line 99, in delete_cur_item
self.delete_func(data)
File "/home/florian/proj/qutebrowser/git/qutebrowser/completion/models/urlmodel.py", line 36, in _delete_history
hist.delete_url(urlstr)
File "/home/florian/proj/qutebrowser/git/qutebrowser/browser/history.py", line 126, in delete_url
self.delete('url', url)
File "/home/florian/proj/qutebrowser/git/qutebrowser/misc/sql.py", line 196, in delete
raise KeyError('No row with {} = "{}"'.format(field, value))
KeyError: 'No row with url = "None"'
|
KeyError
|
def read_direct(self, dest, source_sel=None, dest_sel=None):
"""Read data directly from HDF5 into an existing NumPy array.
The destination array must be C-contiguous and writable.
Selections must be the output of numpy.s_[<args>].
Broadcasting is supported for simple indexing.
"""
with phil:
if self._is_empty:
raise TypeError("Empty datasets have no numpy representation")
if source_sel is None:
source_sel = sel.SimpleSelection(self.shape)
else:
source_sel = sel.select(self.shape, source_sel, self) # for numpy.s_
fspace = source_sel.id
if dest_sel is None:
dest_sel = sel.SimpleSelection(dest.shape)
else:
dest_sel = sel.select(dest.shape, dest_sel)
for mspace in dest_sel.broadcast(source_sel.mshape):
self.id.read(mspace, fspace, dest, dxpl=self._dxpl)
|
def read_direct(self, dest, source_sel=None, dest_sel=None):
"""Read data directly from HDF5 into an existing NumPy array.
The destination array must be C-contiguous and writable.
Selections must be the output of numpy.s_[<args>].
Broadcasting is supported for simple indexing.
"""
with phil:
if self._is_empty:
raise TypeError("Empty datasets have no numpy representation")
if source_sel is None:
source_sel = sel.SimpleSelection(self.shape)
else:
source_sel = sel.select(self.shape, source_sel, self) # for numpy.s_
fspace = source_sel.id
if dest_sel is None:
dest_sel = sel.SimpleSelection(dest.shape)
else:
dest_sel = sel.select(dest.shape, dest_sel, self)
for mspace in dest_sel.broadcast(source_sel.mshape):
self.id.read(mspace, fspace, dest, dxpl=self._dxpl)
|
https://github.com/h5py/h5py/issues/1792
|
Traceback (most recent call last):
File "/Users/jasondet/Downloads/h5py_bug.py", line 9, in <module>
dset.read_direct(arr, None, np.s_[2:])
File "/usr/local/lib/python3.9/site-packages/h5py/_hl/dataset.py", line 970, in read_direct
for mspace in dest_sel.broadcast(source_sel.mshape):
File "/usr/local/lib/python3.9/site-packages/h5py/_hl/selections.py", line 291, in broadcast
tshape = self.expand_shape(source_shape)
File "/usr/local/lib/python3.9/site-packages/h5py/_hl/selections.py", line 264, in expand_shape
raise TypeError("Can't broadcast %s -> %s" % (source_shape, self.array_shape)) # array shape
TypeError: Can't broadcast (10,) -> (8,)
|
TypeError
|
def write_direct(self, source, source_sel=None, dest_sel=None):
"""Write data directly to HDF5 from a NumPy array.
The source array must be C-contiguous. Selections must be
the output of numpy.s_[<args>].
Broadcasting is supported for simple indexing.
"""
with phil:
if self._is_empty:
raise TypeError("Empty datasets cannot be written to")
if source_sel is None:
source_sel = sel.SimpleSelection(source.shape)
else:
source_sel = sel.select(source.shape, source_sel) # for numpy.s_
mspace = source_sel.id
if dest_sel is None:
dest_sel = sel.SimpleSelection(self.shape)
else:
dest_sel = sel.select(self.shape, dest_sel, self)
for fspace in dest_sel.broadcast(source_sel.mshape):
self.id.write(mspace, fspace, source, dxpl=self._dxpl)
|
def write_direct(self, source, source_sel=None, dest_sel=None):
"""Write data directly to HDF5 from a NumPy array.
The source array must be C-contiguous. Selections must be
the output of numpy.s_[<args>].
Broadcasting is supported for simple indexing.
"""
with phil:
if self._is_empty:
raise TypeError("Empty datasets cannot be written to")
if source_sel is None:
source_sel = sel.SimpleSelection(source.shape)
else:
source_sel = sel.select(source.shape, source_sel, self) # for numpy.s_
mspace = source_sel.id
if dest_sel is None:
dest_sel = sel.SimpleSelection(self.shape)
else:
dest_sel = sel.select(self.shape, dest_sel, self)
for fspace in dest_sel.broadcast(source_sel.mshape):
self.id.write(mspace, fspace, source, dxpl=self._dxpl)
|
https://github.com/h5py/h5py/issues/1792
|
Traceback (most recent call last):
File "/Users/jasondet/Downloads/h5py_bug.py", line 9, in <module>
dset.read_direct(arr, None, np.s_[2:])
File "/usr/local/lib/python3.9/site-packages/h5py/_hl/dataset.py", line 970, in read_direct
for mspace in dest_sel.broadcast(source_sel.mshape):
File "/usr/local/lib/python3.9/site-packages/h5py/_hl/selections.py", line 291, in broadcast
tshape = self.expand_shape(source_shape)
File "/usr/local/lib/python3.9/site-packages/h5py/_hl/selections.py", line 264, in expand_shape
raise TypeError("Can't broadcast %s -> %s" % (source_shape, self.array_shape)) # array shape
TypeError: Can't broadcast (10,) -> (8,)
|
TypeError
|
def create_dataset(self, name, shape=None, dtype=None, data=None, **kwds):
"""Create a new HDF5 dataset
name
Name of the dataset (absolute or relative). Provide None to make
an anonymous dataset.
shape
Dataset shape. Use "()" for scalar datasets. Required if "data"
isn't provided.
dtype
Numpy dtype or string. If omitted, dtype('f') will be used.
Required if "data" isn't provided; otherwise, overrides data
array's dtype.
data
Provide data to initialize the dataset. If used, you can omit
shape and dtype arguments.
Keyword-only arguments:
chunks
(Tuple or int) Chunk shape, or True to enable auto-chunking. Integers can
be used for 1D shape.
maxshape
(Tuple or int) Make the dataset resizable up to this shape. Use None for
axes you want to be unlimited. Integers can be used for 1D shape.
compression
(String or int) Compression strategy. Legal values are 'gzip',
'szip', 'lzf'. If an integer in range(10), this indicates gzip
compression level. Otherwise, an integer indicates the number of a
dynamically loaded compression filter.
compression_opts
Compression settings. This is an integer for gzip, 2-tuple for
szip, etc. If specifying a dynamically loaded compression filter
number, this must be a tuple of values.
scaleoffset
(Integer) Enable scale/offset filter for (usually) lossy
compression of integer or floating-point data. For integer
data, the value of scaleoffset is the number of bits to
retain (pass 0 to let HDF5 determine the minimum number of
bits necessary for lossless compression). For floating point
data, scaleoffset is the number of digits after the decimal
place to retain; stored values thus have absolute error
less than 0.5*10**(-scaleoffset).
shuffle
(T/F) Enable shuffle filter.
fletcher32
(T/F) Enable fletcher32 error detection. Not permitted in
conjunction with the scale/offset filter.
fillvalue
(Scalar) Use this value for uninitialized parts of the dataset.
track_times
(T/F) Enable dataset creation timestamps.
track_order
(T/F) Track attribute creation order if True. If omitted use
global default h5.get_config().track_order.
external
(Iterable of tuples) Sets the external storage property, thus
designating that the dataset will be stored in one or more
non-HDF5 files external to the HDF5 file. Adds each tuple
of (name, offset, size) to the dataset's list of external files.
Each name must be a str, bytes, or os.PathLike; each offset and
size, an integer. If only a name is given instead of an iterable
of tuples, it is equivalent to [(name, 0, h5py.h5f.UNLIMITED)].
allow_unknown_filter
(T/F) Do not check that the requested filter is available for use.
This should only be used with ``write_direct_chunk``, where the caller
compresses the data before handing it to h5py.
"""
if "track_order" not in kwds:
kwds["track_order"] = h5.get_config().track_order
with phil:
group = self
if name:
name = self._e(name)
if b"/" in name.lstrip(b"/"):
parent_path, name = name.rsplit(b"/", 1)
group = self.require_group(parent_path)
dsid = dataset.make_new_dset(group, shape, dtype, data, name, **kwds)
dset = dataset.Dataset(dsid)
return dset
|
def create_dataset(self, name, shape=None, dtype=None, data=None, **kwds):
"""Create a new HDF5 dataset
name
Name of the dataset (absolute or relative). Provide None to make
an anonymous dataset.
shape
Dataset shape. Use "()" for scalar datasets. Required if "data"
isn't provided.
dtype
Numpy dtype or string. If omitted, dtype('f') will be used.
Required if "data" isn't provided; otherwise, overrides data
array's dtype.
data
Provide data to initialize the dataset. If used, you can omit
shape and dtype arguments.
Keyword-only arguments:
chunks
(Tuple or int) Chunk shape, or True to enable auto-chunking. Integers can
be used for 1D shape.
maxshape
(Tuple or int) Make the dataset resizable up to this shape. Use None for
axes you want to be unlimited. Integers can be used for 1D shape.
compression
(String or int) Compression strategy. Legal values are 'gzip',
'szip', 'lzf'. If an integer in range(10), this indicates gzip
compression level. Otherwise, an integer indicates the number of a
dynamically loaded compression filter.
compression_opts
Compression settings. This is an integer for gzip, 2-tuple for
szip, etc. If specifying a dynamically loaded compression filter
number, this must be a tuple of values.
scaleoffset
(Integer) Enable scale/offset filter for (usually) lossy
compression of integer or floating-point data. For integer
data, the value of scaleoffset is the number of bits to
retain (pass 0 to let HDF5 determine the minimum number of
bits necessary for lossless compression). For floating point
data, scaleoffset is the number of digits after the decimal
place to retain; stored values thus have absolute error
less than 0.5*10**(-scaleoffset).
shuffle
(T/F) Enable shuffle filter.
fletcher32
(T/F) Enable fletcher32 error detection. Not permitted in
conjunction with the scale/offset filter.
fillvalue
(Scalar) Use this value for uninitialized parts of the dataset.
track_times
(T/F) Enable dataset creation timestamps.
track_order
(T/F) Track attribute creation order if True. If omitted use
global default h5.get_config().track_order.
external
(Iterable of tuples) Sets the external storage property, thus
designating that the dataset will be stored in one or more
non-HDF5 files external to the HDF5 file. Adds each tuple
of (name, offset, size) to the dataset's list of external files.
Each name must be a str, bytes, or os.PathLike; each offset and
size, an integer. If only a name is given instead of an iterable
of tuples, it is equivalent to [(name, 0, h5py.h5f.UNLIMITED)].
allow_unknown_filter
(T/F) Do not check that the requested filter is available for use.
This should only be used with ``write_direct_chunk``, where the caller
compresses the data before handing it to h5py.
"""
if "track_order" not in kwds:
kwds["track_order"] = h5.get_config().track_order
with phil:
group = self
if name:
if "/" in name:
h5objects = [obj for obj in name.split("/") if len(obj)]
name = h5objects[-1]
h5objects = h5objects[:-1]
for new_group in h5objects:
group = group.get(new_group) or group.create_group(new_group)
name = self._e(name)
dsid = dataset.make_new_dset(group, shape, dtype, data, name, **kwds)
dset = dataset.Dataset(dsid)
return dset
|
https://github.com/h5py/h5py/issues/1732
|
Traceback (most recent call last):
File "test.py", line 5, in <module>
model.save('test.h5')
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py", line 1171, in save
signatures)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/save.py", line 109, in save_model
model, filepath, overwrite, include_optimizer)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py", line 103, in save_model_to_hdf5
save_weights_to_hdf5_group(model_weights_group, model_layers)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py", line 625, in save_weights_to_hdf5_group
param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/h5py/_hl/group.py", line 143, in create_dataset
if '/' in name:
TypeError: a bytes-like object is required, not 'str'
|
TypeError
|
def create_virtual_dataset(self, name, layout, fillvalue=None):
"""Create a new virtual dataset in this group.
See virtual datasets in the docs for more information.
name
(str) Name of the new dataset
layout
(VirtualLayout) Defines the sources for the virtual dataset
fillvalue
The value to use where there is no data.
"""
from .vds import VDSmap
# Encode filenames and dataset names appropriately.
sources = []
for vspace, file_name, dset_name, src_space in layout.sources:
if file_name == self.file.filename:
# use relative path if the source dataset is in the same
# file, in order to keep the virtual dataset valid in case
# the file is renamed.
file_name = "."
sources.append(
VDSmap(vspace, filename_encode(file_name), self._e(dset_name), src_space)
)
with phil:
group = self
if name:
name = self._e(name)
if b"/" in name.lstrip(b"/"):
parent_path, name = name.rsplit(b"/", 1)
group = self.require_group(parent_path)
dsid = dataset.make_new_virtual_dset(
group,
layout.shape,
sources=sources,
dtype=layout.dtype,
name=name,
maxshape=layout.maxshape,
fillvalue=fillvalue,
)
dset = dataset.Dataset(dsid)
return dset
|
def create_virtual_dataset(self, name, layout, fillvalue=None):
"""Create a new virtual dataset in this group.
See virtual datasets in the docs for more information.
name
(str) Name of the new dataset
layout
(VirtualLayout) Defines the sources for the virtual dataset
fillvalue
The value to use where there is no data.
"""
from .vds import VDSmap
# Encode filenames and dataset names appropriately.
sources = []
for vspace, file_name, dset_name, src_space in layout.sources:
if file_name == self.file.filename:
# use relative path if the source dataset is in the same
# file, in order to keep the virtual dataset valid in case
# the file is renamed.
file_name = "."
sources.append(
VDSmap(vspace, filename_encode(file_name), self._e(dset_name), src_space)
)
with phil:
group = self
if name:
if "/" in name:
h5objects = [obj for obj in name.split("/") if len(obj)]
name = h5objects[-1]
h5objects = h5objects[:-1]
for new_group in h5objects:
group = group.get(new_group) or group.create_group(new_group)
name = self._e(name)
dsid = dataset.make_new_virtual_dset(
group,
layout.shape,
sources=sources,
dtype=layout.dtype,
name=name,
maxshape=layout.maxshape,
fillvalue=fillvalue,
)
dset = dataset.Dataset(dsid)
return dset
|
https://github.com/h5py/h5py/issues/1732
|
Traceback (most recent call last):
File "test.py", line 5, in <module>
model.save('test.h5')
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/network.py", line 1171, in save
signatures)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/save.py", line 109, in save_model
model, filepath, overwrite, include_optimizer)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py", line 103, in save_model_to_hdf5
save_weights_to_hdf5_group(model_weights_group, model_layers)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/tensorflow_core/python/keras/saving/hdf5_format.py", line 625, in save_weights_to_hdf5_group
param_dset = g.create_dataset(name, val.shape, dtype=val.dtype)
File "/Users/tgaddair/.venv/horovod/stable/lib/python3.7/site-packages/h5py/_hl/group.py", line 143, in create_dataset
if '/' in name:
TypeError: a bytes-like object is required, not 'str'
|
TypeError
|
def copy(
self,
source,
dest,
name=None,
shallow=False,
expand_soft=False,
expand_external=False,
expand_refs=False,
without_attrs=False,
):
"""Copy an object or group.
The source can be a path, Group, Dataset, or Datatype object. The
destination can be either a path or a Group object. The source and
destinations need not be in the same file.
If the source is a Group object, all objects contained in that group
will be copied recursively.
When the destination is a Group object, by default the target will
be created in that group with its current name (basename of obj.name).
You can override that by setting "name" to a string.
There are various options which all default to "False":
- shallow: copy only immediate members of a group.
- expand_soft: expand soft links into new objects.
- expand_external: expand external links into new objects.
- expand_refs: copy objects that are pointed to by references.
- without_attrs: copy object without copying attributes.
Example:
>>> f = File('myfile.hdf5')
>>> f.listnames()
['MyGroup']
>>> f.copy('MyGroup', 'MyCopy')
>>> f.listnames()
['MyGroup', 'MyCopy']
"""
with phil:
if isinstance(source, HLObject):
source_path = "."
else:
# Interpret source as a path relative to this group
source_path = source
source = self
if isinstance(dest, Group):
if name is not None:
dest_path = name
elif source_path == ".":
dest_path = pp.basename(h5i.get_name(source.id))
else:
# copy source into dest group: dest_name/source_name
dest_path = pp.basename(h5i.get_name(source[source_path].id))
elif isinstance(dest, HLObject):
raise TypeError("Destination must be path or Group object")
else:
# Interpret destination as a path relative to this group
dest_path = dest
dest = self
flags = 0
if shallow:
flags |= h5o.COPY_SHALLOW_HIERARCHY_FLAG
if expand_soft:
flags |= h5o.COPY_EXPAND_SOFT_LINK_FLAG
if expand_external:
flags |= h5o.COPY_EXPAND_EXT_LINK_FLAG
if expand_refs:
flags |= h5o.COPY_EXPAND_REFERENCE_FLAG
if without_attrs:
flags |= h5o.COPY_WITHOUT_ATTR_FLAG
if flags:
copypl = h5p.create(h5p.OBJECT_COPY)
copypl.set_copy_object(flags)
else:
copypl = None
h5o.copy(
source.id,
self._e(source_path),
dest.id,
self._e(dest_path),
copypl,
base.dlcpl,
)
|
def copy(
self,
source,
dest,
name=None,
shallow=False,
expand_soft=False,
expand_external=False,
expand_refs=False,
without_attrs=False,
):
"""Copy an object or group.
The source can be a path, Group, Dataset, or Datatype object. The
destination can be either a path or a Group object. The source and
destinations need not be in the same file.
If the source is a Group object, all objects contained in that group
will be copied recursively.
When the destination is a Group object, by default the target will
be created in that group with its current name (basename of obj.name).
You can override that by setting "name" to a string.
There are various options which all default to "False":
- shallow: copy only immediate members of a group.
- expand_soft: expand soft links into new objects.
- expand_external: expand external links into new objects.
- expand_refs: copy objects that are pointed to by references.
- without_attrs: copy object without copying attributes.
Example:
>>> f = File('myfile.hdf5')
>>> f.listnames()
['MyGroup']
>>> f.copy('MyGroup', 'MyCopy')
>>> f.listnames()
['MyGroup', 'MyCopy']
"""
with phil:
if isinstance(source, HLObject):
source_path = "."
else:
# Interpret source as a path relative to this group
source_path = source
source = self
if isinstance(dest, Group):
if name is not None:
dest_path = name
else:
# copy source into dest group: dest_name/source_name
dest_path = pp.basename(h5i.get_name(source[source_path].id))
elif isinstance(dest, HLObject):
raise TypeError("Destination must be path or Group object")
else:
# Interpret destination as a path relative to this group
dest_path = dest
dest = self
flags = 0
if shallow:
flags |= h5o.COPY_SHALLOW_HIERARCHY_FLAG
if expand_soft:
flags |= h5o.COPY_EXPAND_SOFT_LINK_FLAG
if expand_external:
flags |= h5o.COPY_EXPAND_EXT_LINK_FLAG
if expand_refs:
flags |= h5o.COPY_EXPAND_REFERENCE_FLAG
if without_attrs:
flags |= h5o.COPY_WITHOUT_ATTR_FLAG
if flags:
copypl = h5p.create(h5p.OBJECT_COPY)
copypl.set_copy_object(flags)
else:
copypl = None
h5o.copy(
source.id,
self._e(source_path),
dest.id,
self._e(dest_path),
copypl,
base.dlcpl,
)
|
https://github.com/h5py/h5py/issues/1005
|
Traceback (most recent call last):
File "/home/challtdow/workspace/pytest/bin/h5_bugtest.py", line 14, in <module>
h5_file.copy(h5_source, h5_group)
File "/usr/local/lib/python3.5/dist-packages/h5py/_hl/group.py", line 372, in copy
dest_path = pp.basename(h5i.get_name(source[source_path].id))
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "/usr/local/lib/python3.5/dist-packages/h5py/_hl/dataset.py", line 429, in __getitem__
new_dtype = readtime_dtype(self.id.dtype, names)
File "/usr/local/lib/python3.5/dist-packages/h5py/_hl/dataset.py", line 44, in readtime_dtype
raise ValueError("Field names only allowed for compound types")
ValueError: Field names only allowed for compound types
|
ValueError
|
def __init__(self, dtype):
self.dtype = np.dtype(dtype)
|
def __init__(self, dtype):
self.dtype = dtype
|
https://github.com/h5py/h5py/issues/1540
|
Traceback (most recent call last):
File "error.py", line 4, in <module>
f.attrs['empty'] = h5py.Empty('f')
File "h5py/_objects.pyx", line 54, in h5py._objects.with_phil.wrapper
File "h5py/_objects.pyx", line 55, in h5py._objects.with_phil.wrapper
File "/usr/local/lib/python3.7/site-packages/h5py/_hl/attrs.py", line 100, in __setitem__
self.create(name, data=value)
File "/usr/local/lib/python3.7/site-packages/h5py/_hl/attrs.py", line 156, in create
if dtype.subdtype is not None:
AttributeError: 'str' object has no attribute 'subdtype'
|
AttributeError
|
def size(self):
"""Numpy-style attribute giving the total dataset size"""
if is_empty_dataspace(self.id):
return None
return numpy.prod(self.shape, dtype=numpy.intp)
|
def size(self):
"""Numpy-style attribute giving the total dataset size"""
return numpy.prod(self.shape, dtype=numpy.intp)
|
https://github.com/h5py/h5py/issues/1044
|
TypeError Traceback (most recent call last)
<ipython-input-10-f4a7fbce39b5> in <module>()
----> 1 hasattr(empty, "size")
h5py/_objects.pyx in h5py._objects.with_phil.wrapper()
h5py/_objects.pyx in h5py._objects.with_phil.wrapper()
~/.local/lib/python3.5/site-packages/h5py/_hl/dataset.py in size(self)
234 def size(self):
235 """Numpy-style attribute giving the total dataset size"""
--> 236 return numpy.prod(self.shape, dtype=numpy.intp)
237
238 @property
~/.local/lib/python3.5/site-packages/numpy/core/fromnumeric.py in prod(a, axis, dtype, out, keepdims)
2564
2565 return _methods._prod(a, axis=axis, dtype=dtype,
-> 2566 out=out, **kwargs)
2567
2568
~/.local/lib/python3.5/site-packages/numpy/core/_methods.py in _prod(a, axis, dtype, out, keepdims)
33
34 def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
---> 35 return umr_prod(a, axis, dtype, out, keepdims)
36
37 def _any(a, axis=None, dtype=None, out=None, keepdims=False):
TypeError: int() argument must be a string, a bytes-like object or a number, not 'NoneType'
|
TypeError
|
def h5py_completer(self, event):
"""Completer function to be loaded into IPython"""
base = re_object_match.split(event.line)[1]
if not isinstance(self._ofind(base).get("obj"), (AttributeManager, HLObject)):
raise TryNext
try:
return h5py_attr_completer(self, event.line)
except ValueError:
pass
try:
return h5py_item_completer(self, event.line)
except ValueError:
pass
return []
|
def h5py_completer(self, event):
"""Completer function to be loaded into IPython"""
base = re_object_match.split(event.line)[1]
if not isinstance(self._ofind(base)["obj"], (AttributeManager, HLObject)):
raise TryNext
try:
return h5py_attr_completer(self, event.line)
except ValueError:
pass
try:
return h5py_item_completer(self, event.line)
except ValueError:
pass
return []
|
https://github.com/h5py/h5py/issues/885
|
$ ipython
Python 3.6.1 | packaged by conda-forge | (default, May 11 2017, 18:00:28)
Type 'copyright', 'credits' or 'license' for more information
IPython 6.0.0 -- An enhanced Interactive Python. Type '?' for help.
In [1]: [a[Exception in thread Thread-40:
Traceback (most recent call last):
File "/zopt/conda2/envs/test/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/zopt/conda2/envs/test/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/prompt_toolkit/interface.py", line 860, in run
completions = list(buffer.completer.get_completions(document, complete_event))
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/IPython/terminal/ptutils.py", line 75, in get_completions
yield from self._get_completions(body, offset, cursor_position, self.ipy_completer)
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/IPython/terminal/ptutils.py", line 85, in _get_completions
for c in completions:
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/IPython/core/completer.py", line 429, in _deduplicate_completions
completions = list(completions)
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/IPython/core/completer.py", line 1647, in completions
for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/IPython/core/completer.py", line 1687, in _completions
full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/IPython/core/completer.py", line 1829, in _complete
custom_res = self.dispatch_custom_completer(text)
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/IPython/core/completer.py", line 1582, in dispatch_custom_completer
res = c(event)
File "/zopt/conda2/envs/test/lib/python3.6/site-packages/h5py/ipy_completer.py", line 175, in h5py_completer
if not isinstance(self._ofind(base)['obj'], (AttributeManager, HLObject)):
KeyError: 'obj'
|
KeyError
|
def parse(self, handler, name=None):
args, varargs, kws, defaults, kwo, kwo_defaults, annotations = self._get_arg_spec(
handler
)
spec = ArgumentSpec(
name,
self._type,
positional=args,
varargs=varargs,
kwargs=kws,
kwonlyargs=kwo,
defaults=self._get_defaults(args, defaults, kwo_defaults),
)
spec.types = self._get_types(handler, annotations, spec)
return spec
|
def parse(self, handler, name=None):
args, varargs, kwargs, defaults, kwonly, kwonlydefaults, annotations = (
getfullargspec(unwrap(handler))
)
if ismethod(handler) or handler.__name__ == "__init__":
args = args[1:] # drop 'self'
spec = ArgumentSpec(
name,
self._type,
positional=args,
varargs=varargs,
kwargs=kwargs,
kwonlyargs=kwonly,
defaults=self._get_defaults(args, defaults, kwonlydefaults),
)
spec.types = self._get_types(handler, annotations, spec)
return spec
|
https://github.com/robotframework/robotframework/issues/3453
|
from inspect import getfullargspec
from operator import eq
getfullargspec(eq)
Traceback (most recent call last):
[snip]
TypeError: unsupported callable
getfullargspec(len)
[snip]
TypeError: unsupported callable
|
TypeError
|
def _get_defaults(self, args, default_values, kwo_defaults):
if default_values:
defaults = dict(zip(args[-len(default_values) :], default_values))
else:
defaults = {}
if kwo_defaults:
defaults.update(kwo_defaults)
return defaults
|
def _get_defaults(self, args, default_values, kwonlydefaults):
if default_values:
defaults = dict(zip(args[-len(default_values) :], default_values))
else:
defaults = {}
if kwonlydefaults:
defaults.update(kwonlydefaults)
return defaults
|
https://github.com/robotframework/robotframework/issues/3453
|
from inspect import getfullargspec
from operator import eq
getfullargspec(eq)
Traceback (most recent call last):
[snip]
TypeError: unsupported callable
getfullargspec(len)
[snip]
TypeError: unsupported callable
|
TypeError
|
def getfullargspec(func):
return getargspec(func) + ([], None, {})
|
def getfullargspec(func):
return getargspec(unwrap(func)) + ([], None, {})
|
https://github.com/robotframework/robotframework/issues/3453
|
from inspect import getfullargspec
from operator import eq
getfullargspec(eq)
Traceback (most recent call last):
[snip]
TypeError: unsupported callable
getfullargspec(len)
[snip]
TypeError: unsupported callable
|
TypeError
|
def _get_writer(self, outpath):
return file_writer(
outpath, newline=self._options["line_separator"], usage="Tidy output"
)
|
def _get_writer(self, outpath):
return file_writer(outpath, newline=self._options["line_separator"])
|
https://github.com/robotframework/robotframework/issues/3339
|
$ python -m robot.libdoc BuiltIn out.html
Unexpected error: IOError: [Errno 13] Permission denied: 'out.html'
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/robot/utils/application.py", line 83, in _execute
rc = self.main(arguments, **options)
File "/usr/local/lib/python2.7/dist-packages/robot/libdoc.py", line 158, in main
libdoc.save(output, self._get_output_format(format, output))
File "/usr/local/lib/python2.7/dist-packages/robot/libdocpkg/model.py", line 52, in save
with LibdocOutput(output, format) as outfile:
File "/usr/local/lib/python2.7/dist-packages/robot/libdocpkg/output.py", line 30, in __enter__
self._output_file = file_writer(self._output_path)
File "/usr/local/lib/python2.7/dist-packages/robot/utils/robotio.py", line 23, in file_writer
f = io.open(path, 'w', encoding=encoding, newline=newline)
|
IOError
|
def parse_xml(self, source, keep_clark_notation=False):
"""Parses the given XML file or string into an element structure.
The `source` can either be a path to an XML file or a string containing
XML. In both cases the XML is parsed into ElementTree
[http://docs.python.org/library/xml.etree.elementtree.html#xml.etree.ElementTree.Element|element structure]
and the root element is returned.
As discussed in `Handling XML namespaces` section, this keyword, by
default, strips possible namespaces added by ElementTree into tag names.
This typically eases handling XML documents with namespaces
considerably. If you do not want that to happen, or want to avoid
the small overhead of going through the element structure when your
XML does not have namespaces, you can disable this feature by giving
`keep_clark_notation` argument a true value (e.g. any non-empty string).
Examples:
| ${root} = | Parse XML | <root><child/></root> |
| ${xml} = | Parse XML | ${CURDIR}/test.xml | no namespace cleanup |
Use `Get Element` keyword if you want to get a certain element and not
the whole structure. See `Parsing XML` section for more details and
examples
Stripping namespaces is a new feature in Robot Framework 2.7.5.
"""
with ETSource(source) as source:
root = self.etree.parse(source).getroot()
if self.lxml_etree:
self._remove_comments(root)
if not keep_clark_notation:
NameSpaceStripper().strip(root)
return root
|
def parse_xml(self, source, keep_clark_notation=False):
"""Parses the given XML file or string into an element structure.
The `source` can either be a path to an XML file or a string containing
XML. In both cases the XML is parsed into ElementTree
[http://docs.python.org/library/xml.etree.elementtree.html#xml.etree.ElementTree.Element|element structure]
and the root element is returned.
As discussed in `Handling XML namespaces` section, this keyword, by
default, strips possible namespaces added by ElementTree into tag names.
This typically eases handling XML documents with namespaces
considerably. If you do not want that to happen, or want to avoid
the small overhead of going through the element structure when your
XML does not have namespaces, you can disable this feature by giving
`keep_clark_notation` argument a true value (e.g. any non-empty string).
Examples:
| ${root} = | Parse XML | <root><child/></root> |
| ${xml} = | Parse XML | ${CURDIR}/test.xml | no namespace cleanup |
Use `Get Element` keyword if you want to get a certain element and not
the whole structure. See `Parsing XML` section for more details and
examples
Stripping namespaces is a new feature in Robot Framework 2.7.5.
"""
with ETSource(source) as source:
root = self.etree.parse(source).getroot()
if not keep_clark_notation:
NameSpaceStripper().strip(root)
return root
|
https://github.com/robotframework/robotframework/issues/1748
|
AttributeError: 'builtin_function_or_method' object has no attribute 'startswith'
Traceback (most recent call last):
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 447, in parse_xml
NameSpaceStripper().strip(root)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1241, in strip
if elem.tag.startswith('{') and '}' in elem.tag:
|
AttributeError
|
def _remove_element(self, root, element, remove_tail=False):
parent = self._find_parent(root, element)
if not remove_tail:
self._preserve_tail(element, parent)
parent.remove(element)
|
def _remove_element(self, root, element, remove_tail=False):
parent = self._find_parent(root, element)
if element.tail and not remove_tail:
self._preserve_tail(element, parent)
parent.remove(element)
|
https://github.com/robotframework/robotframework/issues/1748
|
AttributeError: 'builtin_function_or_method' object has no attribute 'startswith'
Traceback (most recent call last):
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 447, in parse_xml
NameSpaceStripper().strip(root)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1241, in strip
if elem.tag.startswith('{') and '}' in elem.tag:
|
AttributeError
|
def _preserve_tail(self, element, parent):
if not element.tail:
return
index = list(parent).index(element)
if index == 0:
parent.text = (parent.text or "") + element.tail
else:
sibling = parent[index - 1]
sibling.tail = (sibling.tail or "") + element.tail
|
def _preserve_tail(self, element, parent):
index = list(parent).index(element)
if index == 0:
parent.text = (parent.text or "") + element.tail
else:
sibling = parent[index - 1]
sibling.tail = (sibling.tail or "") + element.tail
|
https://github.com/robotframework/robotframework/issues/1748
|
AttributeError: 'builtin_function_or_method' object has no attribute 'startswith'
Traceback (most recent call last):
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 447, in parse_xml
NameSpaceStripper().strip(root)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1247, in strip
self.strip(child, current_ns)
File "/opt/ute/python/lib/python2.7/site-packages/robot/libraries/XML.py", line 1241, in strip
if elem.tag.startswith('{') and '}' in elem.tag:
|
AttributeError
|
def exc_info(self):
"""
Holds the exc_info three-tuple raised by the function if the
greenlet finished with an error. Otherwise a false value.
.. note:: This is a provisional API and may change.
.. versionadded:: 1.1
"""
ei = self._exc_info
if ei is not None and ei[0] is not None:
return (
ei[0],
ei[1],
# The pickled traceback may be None if we couldn't pickle it.
load_traceback(ei[2]) if ei[2] else None,
)
|
def exc_info(self):
"""
Holds the exc_info three-tuple raised by the function if the
greenlet finished with an error. Otherwise a false value.
.. note:: This is a provisional API and may change.
.. versionadded:: 1.1
"""
ei = self._exc_info
if ei is not None and ei[0] is not None:
return (ei[0], ei[1], load_traceback(ei[2]))
|
https://github.com/gevent/gevent/issues/1704
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 856, in gevent._gevent_cgreenlet.Greenlet.run
File "src/gevent/greenlet.py", line 837, in gevent._gevent_cgreenlet.Greenlet._Greenlet__report_error
File "/home/danmilon/tmp/gevent/src/gevent/_tblib.py", line 415, in g
return f(a)
File "/home/danmilon/tmp/gevent/src/gevent/_tblib.py", line 471, in dump_traceback
return dumps(tb)
File "/home/danmilon/.pyenv/versions/2.7.18/lib/python2.7/copy_reg.py", line 74, in _reduce_ex
getstate = self.__getstate__
RuntimeError: maximum recursion depth exceeded while calling a Python object
2020-11-23T13:06:10Z <callback at 0x7fe4304846e0 stopped> failed with RuntimeError
|
RuntimeError
|
def __report_error(self, exc_info):
if isinstance(exc_info[1], GreenletExit):
self.__report_result(exc_info[1])
return
# Depending on the error, we may not be able to pickle it.
# In particular, RecursionError can be a problem.
try:
tb = dump_traceback(exc_info[2])
except: # pylint:disable=bare-except
tb = None
self._exc_info = exc_info[0], exc_info[1], tb
hub = get_my_hub(self) # pylint:disable=undefined-variable
if self._links and not self._notifier:
self._notifier = hub.loop.run_callback(self._notify_links)
try:
hub.handle_error(self, *exc_info)
finally:
del exc_info
|
def __report_error(self, exc_info):
if isinstance(exc_info[1], GreenletExit):
self.__report_result(exc_info[1])
return
self._exc_info = exc_info[0], exc_info[1], dump_traceback(exc_info[2])
hub = get_my_hub(self) # pylint:disable=undefined-variable
if self._links and not self._notifier:
self._notifier = hub.loop.run_callback(self._notify_links)
try:
hub.handle_error(self, *exc_info)
finally:
del exc_info
|
https://github.com/gevent/gevent/issues/1704
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 856, in gevent._gevent_cgreenlet.Greenlet.run
File "src/gevent/greenlet.py", line 837, in gevent._gevent_cgreenlet.Greenlet._Greenlet__report_error
File "/home/danmilon/tmp/gevent/src/gevent/_tblib.py", line 415, in g
return f(a)
File "/home/danmilon/tmp/gevent/src/gevent/_tblib.py", line 471, in dump_traceback
return dumps(tb)
File "/home/danmilon/.pyenv/versions/2.7.18/lib/python2.7/copy_reg.py", line 74, in _reduce_ex
getstate = self.__getstate__
RuntimeError: maximum recursion depth exceeded while calling a Python object
2020-11-23T13:06:10Z <callback at 0x7fe4304846e0 stopped> failed with RuntimeError
|
RuntimeError
|
def wrapped(self, raw):
"""
Wraps the raw IO object (`RawIOBase` or `io.TextIOBase`) in
buffers, text decoding, and newline handling.
"""
# pylint:disable=too-many-branches
result = raw
buffering = self.buffering
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = self.default_buffer_size
try:
bs = os.fstat(raw.fileno()).st_blksize
except (OSError, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0: # pragma: no cover
raise ValueError("invalid buffering size")
if not isinstance(raw, io.BufferedIOBase) and (
not hasattr(raw, "buffer") or raw.buffer is None
):
# Need to wrap our own buffering around it. If it
# is already buffered, don't do so.
if buffering != 0:
if self.updating:
Buffer = io.BufferedRandom
elif self.creating or self.writing or self.appending:
Buffer = io.BufferedWriter
elif self.reading:
Buffer = io.BufferedReader
else: # prgama: no cover
raise ValueError("unknown mode: %r" % self.mode)
try:
result = Buffer(raw, buffering)
except AttributeError:
# Python 2 file() objects don't have the readable/writable
# attributes. But they handle their own buffering.
result = raw
if self.binary:
if isinstance(raw, io.TextIOBase):
# Can't do it. The TextIO object will have its own buffer, and
# trying to read from the raw stream or the buffer without going through
# the TextIO object is likely to lead to problems with the codec.
raise ValueError("Unable to perform binary IO on top of text IO stream")
return result
# Either native or text at this point.
if PY2 and self.native:
# Neither text mode nor binary mode specified.
if self.universal:
# universal was requested, e.g., 'rU'
result = UniversalNewlineBytesWrapper(result, line_buffering)
else:
# Python 2 and text mode, or Python 3 and either text or native (both are the same)
if not isinstance(raw, io.TextIOBase):
# Avoid double-wrapping a TextIOBase in another TextIOWrapper.
# That tends not to work. See https://github.com/gevent/gevent/issues/1542
result = io.TextIOWrapper(
result, self.encoding, self.errors, self.newline, line_buffering
)
if result is not raw:
# Set the mode, if possible, but only if we created a new
# object.
try:
result.mode = self.mode
except (AttributeError, TypeError):
# AttributeError: No such attribute
# TypeError: Readonly attribute (py2)
pass
return result
|
def wrapped(self, raw):
"""
Wraps the raw IO object (`RawIOBase`) in buffers, text decoding,
and newline handling.
"""
# pylint:disable=too-many-branches
result = raw
buffering = self.buffering
line_buffering = False
if buffering == 1 or buffering < 0 and raw.isatty():
buffering = -1
line_buffering = True
if buffering < 0:
buffering = self.default_buffer_size
try:
bs = os.fstat(raw.fileno()).st_blksize
except (OSError, AttributeError):
pass
else:
if bs > 1:
buffering = bs
if buffering < 0: # pragma: no cover
raise ValueError("invalid buffering size")
if buffering != 0:
if self.updating:
Buffer = io.BufferedRandom
elif self.creating or self.writing or self.appending:
Buffer = io.BufferedWriter
elif self.reading:
Buffer = io.BufferedReader
else: # prgama: no cover
raise ValueError("unknown mode: %r" % self.mode)
try:
result = Buffer(raw, buffering)
except AttributeError:
# Python 2 file() objects don't have the readable/writable
# attributes. But they handle their own buffering.
result = raw
if self.binary:
return result
if PY2 and self.native:
# Neither text mode nor binary mode specified.
if self.universal:
# universal was requested, e.g., 'rU'
result = UniversalNewlineBytesWrapper(result, line_buffering)
else:
result = io.TextIOWrapper(
result, self.encoding, self.errors, self.newline, line_buffering
)
try:
result.mode = self.mode
except (AttributeError, TypeError):
# AttributeError: No such attribute
# TypeError: Readonly attribute (py2)
pass
return result
|
https://github.com/gevent/gevent/issues/1542
|
$ python
Python 3.7.5 (default, Nov 20 2019, 09:21:52)
[GCC 9.2.1 20191008] on linux
Type "help", "copyright", "credits" or "license" for more information.
import sys
import gevent.fileobject
gevent.fileobject.FileObjectThread(sys.stdin).readline()
Traceback (most recent call last):
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/threadpool.py", line 120, in __run_task
thread_result.set(func(*args, **kwargs))
AttributeError: '_io.TextIOWrapper' object has no attribute 'readinto'
2020-03-10T13:35:10Z (<ThreadPoolWorker at 0x7ffba1fb1ec0 thread_ident=0x7ffba1f87700 hub=<Hub at 0x7ffba21997d0 thread_ident=0x7ffba2de9740>>, <built-in method readline of _io.TextIOWrapper object at 0x7ffba24ce910>) failed with AttributeError
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/_fileobjectcommon.py", line 525, in thread_method
return threadpool.apply(method, args, kwargs)
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/pool.py", line 161, in apply
return self.spawn(func, *args, **kwds).get()
File "src/gevent/event.py", line 279, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 307, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 297, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 277, in gevent._event.AsyncResult._raise_exception
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/_compat.py", line 62, in reraise
raise value.with_traceback(tb)
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/threadpool.py", line 120, in __run_task
thread_result.set(func(*args, **kwargs))
AttributeError: '_io.TextIOWrapper' object has no attribute 'readinto'
|
AttributeError
|
def wait(self, timeout=None): # pylint:disable=unused-argument
"""Waiting for a DummySemaphore returns immediately."""
return 1
|
def wait(self, timeout=None):
"""Waiting for a DummySemaphore returns immediately."""
|
https://github.com/gevent/gevent/issues/1542
|
$ python
Python 3.7.5 (default, Nov 20 2019, 09:21:52)
[GCC 9.2.1 20191008] on linux
Type "help", "copyright", "credits" or "license" for more information.
import sys
import gevent.fileobject
gevent.fileobject.FileObjectThread(sys.stdin).readline()
Traceback (most recent call last):
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/threadpool.py", line 120, in __run_task
thread_result.set(func(*args, **kwargs))
AttributeError: '_io.TextIOWrapper' object has no attribute 'readinto'
2020-03-10T13:35:10Z (<ThreadPoolWorker at 0x7ffba1fb1ec0 thread_ident=0x7ffba1f87700 hub=<Hub at 0x7ffba21997d0 thread_ident=0x7ffba2de9740>>, <built-in method readline of _io.TextIOWrapper object at 0x7ffba24ce910>) failed with AttributeError
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/_fileobjectcommon.py", line 525, in thread_method
return threadpool.apply(method, args, kwargs)
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/pool.py", line 161, in apply
return self.spawn(func, *args, **kwds).get()
File "src/gevent/event.py", line 279, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 307, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 297, in gevent._event.AsyncResult.get
File "src/gevent/event.py", line 277, in gevent._event.AsyncResult._raise_exception
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/_compat.py", line 62, in reraise
raise value.with_traceback(tb)
File "/home/davidlawson/virtualenvs/scrapers_37/lib/python3.7/site-packages/gevent/threadpool.py", line 120, in __run_task
thread_result.set(func(*args, **kwargs))
AttributeError: '_io.TextIOWrapper' object has no attribute 'readinto'
|
AttributeError
|
def __repr__(self):
return "<%s at 0x%x %s_fobj=%r%s>" % (
self.__class__.__name__,
id(self),
"closed" if self.closed else "",
self.io,
self._extra_repr(),
)
|
def __repr__(self):
return "<%s _fobj=%r%s>" % (self.__class__.__name__, self.io, self._extra_repr())
|
https://github.com/gevent/gevent/issues/1510
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "/usr/local/lib/python3.7/site-packages/gevent/subprocess.py", line 725, in _read
data = pipe.read()
File "/usr/local/lib/python3.7/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode bytes in position 0-1: invalid continuation byte
2020-01-14T10:08:13Z <Greenlet at 0x7ff576becdd0: _read> failed with UnicodeDecodeError
|
UnicodeDecodeError
|
def communicate(self, input=None, timeout=None):
"""
Interact with process and return its output and error.
- Send *input* data to stdin.
- Read data from stdout and stderr, until end-of-file is reached.
- Wait for process to terminate.
The optional *input* argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr).
:keyword timeout: Under Python 2, this is a gevent extension; if
given and it expires, we will raise :exc:`TimeoutExpired`, which
extends :exc:`gevent.timeout.Timeout` (note that this only extends :exc:`BaseException`,
*not* :exc:`Exception`)
Under Python 3, this raises the standard :exc:`TimeoutExpired` exception.
.. versionchanged:: 1.1a2
Under Python 2, if the *timeout* elapses, raise the :exc:`gevent.timeout.Timeout`
exception. Previously, we silently returned.
.. versionchanged:: 1.1b5
Honor a *timeout* even if there's no way to communicate with the child
(stdin, stdout, and stderr are not pipes).
"""
if self._communicating_greenlets is None:
self._communicating_greenlets = _CommunicatingGreenlets(self, input)
greenlets = self._communicating_greenlets
# If we were given stdin=stdout=stderr=None, we have no way to
# communicate with the child, and thus no greenlets to wait
# on. This is a nonsense case, but it comes up in the test
# case for Python 3.5 (test_subprocess.py
# RunFuncTestCase.test_timeout). Instead, we go directly to
# self.wait
if not greenlets and timeout is not None:
self.wait(timeout=timeout, _raise_exc=True)
done = joinall(greenlets, timeout=timeout)
# Allow finished greenlets, if any, to raise. This takes priority over
# the timeout exception.
for greenlet in done:
greenlet.get()
if timeout is not None and len(done) != len(self._communicating_greenlets):
raise TimeoutExpired(self.args, timeout)
# Close only after we're sure that everything is done
# (there was no timeout, or there was, but everything finished).
# There should be no greenlets still running, even from a prior
# attempt. If there are, then this can raise RuntimeError: 'reentrant call'.
# So we ensure that previous greenlets are dead.
for pipe in (self.stdout, self.stderr):
if pipe:
try:
pipe.close()
except RuntimeError:
pass
self.wait()
return (
None if greenlets.stdout is None else greenlets.stdout.get(),
None if greenlets.stderr is None else greenlets.stderr.get(),
)
|
def communicate(self, input=None, timeout=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr).
:keyword timeout: Under Python 2, this is a gevent extension; if
given and it expires, we will raise :exc:`TimeoutExpired`, which
extends :exc:`gevent.timeout.Timeout` (note that this only extends :exc:`BaseException`,
*not* :exc:`Exception`)
Under Python 3, this raises the standard :exc:`TimeoutExpired` exception.
.. versionchanged:: 1.1a2
Under Python 2, if the *timeout* elapses, raise the :exc:`gevent.timeout.Timeout`
exception. Previously, we silently returned.
.. versionchanged:: 1.1b5
Honor a *timeout* even if there's no way to communicate with the child
(stdin, stdout, and stderr are not pipes).
"""
greenlets = []
if self.stdin:
greenlets.append(spawn(write_and_close, self.stdin, input))
# If the timeout parameter is used, and the caller calls back after
# getting a TimeoutExpired exception, we can wind up with multiple
# greenlets trying to run and read from and close stdout/stderr.
# That's bad because it can lead to 'RuntimeError: reentrant call in io.BufferedReader'.
# We can't just kill the previous greenlets when a timeout happens,
# though, because we risk losing the output collected by that greenlet
# (and Python 3, where timeout is an official parameter, explicitly says
# that no output should be lost in the event of a timeout.) Instead, we're
# watching for the exception and ignoring it. It's not elegant,
# but it works
def _make_pipe_reader(pipe_name):
pipe = getattr(self, pipe_name)
buf_name = "_" + pipe_name + "_buffer"
def _read():
try:
data = pipe.read()
except (
# io.Buffered* can raise RuntimeError: 'reentrant call'
RuntimeError,
# unbuffered Posix IO that we're already waiting on
# can raise this. Closing the pipe will free those greenlets up.
ConcurrentObjectUseError,
):
return
if not data:
return
the_buffer = getattr(self, buf_name)
if the_buffer:
the_buffer.append(data)
else:
setattr(self, buf_name, [data])
return _read
if self.stdout:
_read_out = _make_pipe_reader("stdout")
stdout = spawn(_read_out)
greenlets.append(stdout)
else:
stdout = None
if self.stderr:
_read_err = _make_pipe_reader("stderr")
stderr = spawn(_read_err)
greenlets.append(stderr)
else:
stderr = None
# If we were given stdin=stdout=stderr=None, we have no way to
# communicate with the child, and thus no greenlets to wait
# on. This is a nonsense case, but it comes up in the test
# case for Python 3.5 (test_subprocess.py
# RunFuncTestCase.test_timeout). Instead, we go directly to
# self.wait
if not greenlets and timeout is not None:
self.wait(timeout=timeout, _raise_exc=True)
done = joinall(greenlets, timeout=timeout)
if timeout is not None and len(done) != len(greenlets):
raise TimeoutExpired(self.args, timeout)
for pipe in (self.stdout, self.stderr):
if pipe:
try:
pipe.close()
except RuntimeError:
pass
self.wait()
def _get_output_value(pipe_name):
buf_name = "_" + pipe_name + "_buffer"
buf_value = getattr(self, buf_name)
setattr(self, buf_name, None)
if buf_value:
buf_value = self._communicate_empty_value.join(buf_value)
else:
buf_value = self._communicate_empty_value
return buf_value
stdout_value = _get_output_value("stdout")
stderr_value = _get_output_value("stderr")
return (
None if stdout is None else stdout_value,
None if stderr is None else stderr_value,
)
|
https://github.com/gevent/gevent/issues/1510
|
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 766, in gevent._greenlet.Greenlet.run
File "/usr/local/lib/python3.7/site-packages/gevent/subprocess.py", line 725, in _read
data = pipe.read()
File "/usr/local/lib/python3.7/codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode bytes in position 0-1: invalid continuation byte
2020-01-14T10:08:13Z <Greenlet at 0x7ff576becdd0: _read> failed with UnicodeDecodeError
|
UnicodeDecodeError
|
def _local_find_descriptors(self):
type_self = type(self)
gets = set()
dels = set()
set_or_del = set()
sets = set()
mro = list(type_self.mro())
for attr_name in dir(type_self):
# Conventionally, descriptors when called on a class
# return themself, but not all do. Notable exceptions are
# in the zope.interface package, where things like __provides__
# return other class attributes. So we can't use getattr, and instead
# walk up the dicts
for base in mro:
if attr_name in base.__dict__:
attr = base.__dict__[attr_name]
break
else:
raise AttributeError(attr_name)
type_attr = type(attr)
if hasattr(type_attr, "__get__"):
gets.add(attr_name)
if hasattr(type_attr, "__delete__"):
dels.add(attr_name)
set_or_del.add(attr_name)
if hasattr(type_attr, "__set__"):
sets.add(attr_name)
return (gets, dels, set_or_del, sets)
|
def _local_find_descriptors(self):
type_self = type(self)
gets = set()
dels = set()
set_or_del = set()
sets = set()
for attr_name in dir(type_self):
attr = getattr(type_self, attr_name)
type_attr = type(attr)
if hasattr(type_attr, "__get__"):
gets.add(attr_name)
if hasattr(type_attr, "__delete__"):
dels.add(attr_name)
set_or_del.add(attr_name)
if hasattr(type_attr, "__set__"):
sets.add(attr_name)
return (gets, dels, set_or_del, sets)
|
https://github.com/gevent/gevent/issues/1122
|
Traceback (most recent call last):
...
File "//transaction-2.1.2-py2.py3-none-any.ovo/transaction/__init__.py", line 28, in <module>
manager = ThreadTransactionManager()
File "//gevent/src/gevent/local.py", line 517, in __new__
self.__cinit__(*args[1:], **kw)
File "//gevent/src/gevent/local.py", line 336, in __cinit__
get, dels, sets_or_dels, sets = _local_find_descriptors(self)
File "//gevent/src/gevent/local.py", line 496, in _local_find_descriptors
attr = getattr(type_self, attr_name)
File "//eggs/zope.interface-4.4.3-py2.7.egg/zope/interface/declarations.py", line 634, in __get__
raise AttributeError('__provides__')
AttributeError: __provides__
|
AttributeError
|
def __setattr__(cls, name, value):
# For symmetry with getattr and dir, pass all
# attribute setting on to the module. (This makes
# reloading work, see issue #805)
setattr(_signal_module, name, value)
|
def __setattr__(cls, name, value):
# Because we can't know whether to try to go to the module
# or the class, we don't allow setting an attribute after the fact
raise TypeError("Cannot set attribute")
|
https://github.com/gevent/gevent/issues/805
|
[DEBUG ] Local cache dir: '/var/cache/salt/minion/files/base/_proxy'
[DEBUG ] Refreshing modules...
......
Passed invalid arguments: Cannot set attribute.
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/salt/cli/caller.py", line 196, in call
ret['return'] = func(*args, **kwargs)
File "/usr/lib/python2.7/dist-packages/salt/modules/state.py", line 594, in highstate
whitelist=kwargs.get('whitelist')
File "/usr/lib/python2.7/dist-packages/salt/state.py", line 3173, in call_highstate
self.load_dynamic(matches)
File "/usr/lib/python2.7/dist-packages/salt/state.py", line 2706, in load_dynamic
self.state.module_refresh()
File "/usr/lib/python2.7/dist-packages/salt/state.py", line 857, in module_refresh
reload(site)
File "/usr/lib/python2.7/site.py", line 563, in <module>
main()
File "/usr/lib/python2.7/site.py", line 541, in main
abs__file__()
File "/usr/lib/python2.7/site.py", line 100, in abs__file__
m.__file__ = os.path.abspath(m.__file__)
File "/usr/local/lib/python2.7/dist-packages/gevent/__init__.py", line 87, in __setattr__
raise TypeError("Cannot set attribute")
TypeError: Cannot set attribute
|
TypeError
|
def sendall(self, data, flags=0):
if isinstance(data, unicode):
data = data.encode()
# this sendall is also reused by gevent.ssl.SSLSocket subclass,
# so it should not call self._sock methods directly
data_memory = _get_memory(data)
len_data_memory = len(data_memory)
if not len_data_memory:
# Don't send empty data, can cause SSL EOFError.
# See issue 719
return 0
# On PyPy up through 2.6.0, subviews of a memoryview() object
# copy the underlying bytes the first time the builtin
# socket.send() method is called. On a non-blocking socket
# (that thus calls socket.send() many times) with a large
# input, this results in many repeated copies of an ever
# smaller string, depending on the networking buffering. For
# example, if each send() can process 1MB of a 50MB input, and
# we naively pass the entire remaining subview each time, we'd
# copy 49MB, 48MB, 47MB, etc, thus completely killing
# performance. To workaround this problem, we work in
# reasonable, fixed-size chunks. This results in a 10x
# improvement to bench_sendall.py, while having no measurable impact on
# CPython (since it doesn't copy at all the only extra overhead is
# a few python function calls, which is negligible for large inputs).
# See https://bitbucket.org/pypy/pypy/issues/2091/non-blocking-socketsend-slow-gevent
# Too small of a chunk (the socket's buf size is usually too
# small) results in reduced perf due to *too many* calls to send and too many
# small copies. With a buffer of 143K (the default on my system), for
# example, bench_sendall.py yields ~264MB/s, while using 1MB yields
# ~653MB/s (matching CPython). 1MB is arbitrary and might be better
# chosen, say, to match a page size?
chunk_size = max(self.getsockopt(SOL_SOCKET, SO_SNDBUF), 1024 * 1024)
data_sent = 0
end = None
timeleft = None
if self.timeout is not None:
timeleft = self.timeout
end = time.time() + timeleft
while data_sent < len_data_memory:
chunk_end = min(data_sent + chunk_size, len_data_memory)
chunk = data_memory[data_sent:chunk_end]
timeleft = self.__send_chunk(chunk, flags, timeleft, end)
data_sent += len(chunk) # Guaranteed it sent the whole thing
|
def sendall(self, data, flags=0):
if isinstance(data, unicode):
data = data.encode()
# this sendall is also reused by gevent.ssl.SSLSocket subclass,
# so it should not call self._sock methods directly
data_memory = _get_memory(data)
len_data_memory = len(data_memory)
# On PyPy up through 2.6.0, subviews of a memoryview() object
# copy the underlying bytes the first time the builtin
# socket.send() method is called. On a non-blocking socket
# (that thus calls socket.send() many times) with a large
# input, this results in many repeated copies of an ever
# smaller string, depending on the networking buffering. For
# example, if each send() can process 1MB of a 50MB input, and
# we naively pass the entire remaining subview each time, we'd
# copy 49MB, 48MB, 47MB, etc, thus completely killing
# performance. To workaround this problem, we work in
# reasonable, fixed-size chunks. This results in a 10x
# improvement to bench_sendall.py, while having no measurable impact on
# CPython (since it doesn't copy at all the only extra overhead is
# a few python function calls, which is negligible for large inputs).
# See https://bitbucket.org/pypy/pypy/issues/2091/non-blocking-socketsend-slow-gevent
# Too small of a chunk (the socket's buf size is usually too
# small) results in reduced perf due to *too many* calls to send and too many
# small copies. With a buffer of 143K (the default on my system), for
# example, bench_sendall.py yields ~264MB/s, while using 1MB yields
# ~653MB/s (matching CPython). 1MB is arbitrary and might be better
# chosen, say, to match a page size?
chunk_size = max(self.getsockopt(SOL_SOCKET, SO_SNDBUF), 1024 * 1024)
data_sent = 0
end = None
timeleft = None
if self.timeout is not None:
timeleft = self.timeout
end = time.time() + timeleft
while data_sent < len_data_memory:
chunk_end = min(data_sent + chunk_size, len_data_memory)
chunk = data_memory[data_sent:chunk_end]
timeleft = self.__send_chunk(chunk, flags, timeleft, end)
data_sent += len(chunk) # Guaranteed it sent the whole thing
|
https://github.com/gevent/gevent/issues/719
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/usr/local/lib/python2.7/dist-packages/gevent/server.py", line 102, in wrap_socket_and_handle
ssl_socket = self.wrap_socket(client_socket, **self.ssl_args)
File "/usr/local/lib/python2.7/dist-packages/gevent/_ssl2.py", line 410, in wrap_socket
ciphers=ciphers)
File "/usr/local/lib/python2.7/dist-packages/gevent/_ssl2.py", line 93, in __init__
self.do_handshake()
File "/usr/local/lib/python2.7/dist-packages/gevent/_ssl2.py", line 310, in do_handshake
return self._sslobj.do_handshake()
SSLError: [Errno 8] _ssl.c:510: EOF occurred in violation of protocol
<Greenlet at 0x7fbc02c4a9b0: <bound method WSGIServer.wrap_socket_and_handle of <WSGIServer at 0x7fbc03b9b110 fileno=9 address=0.0.0.0:5000>>(<socket at 0x7fbc02bf7590 fileno=76 sock=10.122.97, ('41.234.232.59', 40471))> failed with SSLError
|
SSLError
|
def sendall(self, data, flags=0):
# XXX When we run on PyPy3, see the notes in _socket2.py's sendall()
data_memory = _get_memory(data)
len_data_memory = len(data_memory)
if not len_data_memory:
# Don't try to send empty data at all, no point, and breaks ssl
# See issue 719
return 0
if self.timeout is None:
data_sent = 0
while data_sent < len_data_memory:
data_sent += self.send(data_memory[data_sent:], flags)
else:
timeleft = self.timeout
end = time.time() + timeleft
data_sent = 0
while True:
data_sent += self.send(data_memory[data_sent:], flags, timeout=timeleft)
if data_sent >= len_data_memory:
break
timeleft = end - time.time()
if timeleft <= 0:
raise timeout("timed out")
|
def sendall(self, data, flags=0):
# XXX When we run on PyPy3, see the notes in _socket2.py's sendall()
data_memory = _get_memory(data)
if self.timeout is None:
data_sent = 0
while data_sent < len(data_memory):
data_sent += self.send(data_memory[data_sent:], flags)
else:
timeleft = self.timeout
end = time.time() + timeleft
data_sent = 0
while True:
data_sent += self.send(data_memory[data_sent:], flags, timeout=timeleft)
if data_sent >= len(data_memory):
break
timeleft = end - time.time()
if timeleft <= 0:
raise timeout("timed out")
|
https://github.com/gevent/gevent/issues/719
|
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/gevent/greenlet.py", line 327, in run
result = self._run(*self.args, **self.kwargs)
File "/usr/local/lib/python2.7/dist-packages/gevent/server.py", line 102, in wrap_socket_and_handle
ssl_socket = self.wrap_socket(client_socket, **self.ssl_args)
File "/usr/local/lib/python2.7/dist-packages/gevent/_ssl2.py", line 410, in wrap_socket
ciphers=ciphers)
File "/usr/local/lib/python2.7/dist-packages/gevent/_ssl2.py", line 93, in __init__
self.do_handshake()
File "/usr/local/lib/python2.7/dist-packages/gevent/_ssl2.py", line 310, in do_handshake
return self._sslobj.do_handshake()
SSLError: [Errno 8] _ssl.c:510: EOF occurred in violation of protocol
<Greenlet at 0x7fbc02c4a9b0: <bound method WSGIServer.wrap_socket_and_handle of <WSGIServer at 0x7fbc03b9b110 fileno=9 address=0.0.0.0:5000>>(<socket at 0x7fbc02bf7590 fileno=76 sock=10.122.97, ('41.234.232.59', 40471))> failed with SSLError
|
SSLError
|
def sendall(self, data):
fileno = self.fileno()
data_memory = _get_memory(data)
bytes_total = len(data_memory)
bytes_written = 0
while True:
try:
bytes_written += _write(fileno, data_memory[bytes_written:])
except (IOError, OSError) as ex:
code = ex.args[0]
if code not in ignored_errors:
raise
sys.exc_clear()
if bytes_written >= bytes_total:
return
self.hub.wait(self._write_event)
|
def sendall(self, data):
fileno = self.fileno()
bytes_total = len(data)
bytes_written = 0
while True:
try:
bytes_written += _write(fileno, _get_memory(data, bytes_written))
except (IOError, OSError) as ex:
code = ex.args[0]
if code not in ignored_errors:
raise
sys.exc_clear()
if bytes_written >= bytes_total:
return
self.hub.wait(self._write_event)
|
https://github.com/gevent/gevent/issues/466
|
python test.py
Traceback (most recent call last):
File "test.py", line 22, in <module>
TestFails()
File "test.py", line 19, in TestFails
sock.sendall(anStructure)
File "/usr/local/lib/python2.7/site-packages/gevent/socket.py", line 457, in sendall
while data_sent < len(data):
TypeError: object of type 'AnStructure' has no len()
|
TypeError
|
def sendall(self, data, flags=0):
if isinstance(data, unicode):
data = data.encode()
# this sendall is also reused by gevent.ssl.SSLSocket subclass,
# so it should not call self._sock methods directly
data_memory = _get_memory(data)
if self.timeout is None:
data_sent = 0
while data_sent < len(data_memory):
data_sent += self.send(data_memory[data_sent:], flags)
else:
timeleft = self.timeout
end = time.time() + timeleft
data_sent = 0
while True:
data_sent += self.send(data_memory[data_sent:], flags, timeout=timeleft)
if data_sent >= len(data_memory):
return
timeleft = end - time.time()
if timeleft <= 0:
raise timeout("timed out")
|
def sendall(self, data, flags=0):
if isinstance(data, unicode):
data = data.encode()
# this sendall is also reused by gevent.ssl.SSLSocket subclass,
# so it should not call self._sock methods directly
if self.timeout is None:
data_sent = 0
while data_sent < len(data):
data_sent += self.send(_get_memory(data, data_sent), flags)
else:
timeleft = self.timeout
end = time.time() + timeleft
data_sent = 0
while True:
data_sent += self.send(
_get_memory(data, data_sent), flags, timeout=timeleft
)
if data_sent >= len(data):
return
timeleft = end - time.time()
if timeleft <= 0:
raise timeout("timed out")
|
https://github.com/gevent/gevent/issues/466
|
python test.py
Traceback (most recent call last):
File "test.py", line 22, in <module>
TestFails()
File "test.py", line 19, in TestFails
sock.sendall(anStructure)
File "/usr/local/lib/python2.7/site-packages/gevent/socket.py", line 457, in sendall
while data_sent < len(data):
TypeError: object of type 'AnStructure' has no len()
|
TypeError
|
def _get_memory(data):
try:
mv = memoryview(data)
if mv.shape:
return mv
# No shape, probably working with a ctypes object,
# or something else exotic that supports the buffer interface
return mv.tobytes()
except TypeError:
# fixes "python2.7 array.array doesn't support memoryview used in
# gevent.socket.send" issue
# (http://code.google.com/p/gevent/issues/detail?id=94)
return buffer(data)
|
def _get_memory(string, offset):
try:
return memoryview(string)[offset:]
except TypeError:
# fixes "python2.7 array.array doesn't support memoryview used in
# gevent.socket.send" issue
# (http://code.google.com/p/gevent/issues/detail?id=94)
return buffer(string, offset)
|
https://github.com/gevent/gevent/issues/466
|
python test.py
Traceback (most recent call last):
File "test.py", line 22, in <module>
TestFails()
File "test.py", line 19, in TestFails
sock.sendall(anStructure)
File "/usr/local/lib/python2.7/site-packages/gevent/socket.py", line 457, in sendall
while data_sent < len(data):
TypeError: object of type 'AnStructure' has no len()
|
TypeError
|
def _get_memory(data):
mv = memoryview(data)
if mv.shape:
return mv
# No shape, probably working with a ctypes object,
# or something else exotic that supports the buffer interface
return mv.tobytes()
|
def _get_memory(string, offset):
return memoryview(string)[offset:]
|
https://github.com/gevent/gevent/issues/466
|
python test.py
Traceback (most recent call last):
File "test.py", line 22, in <module>
TestFails()
File "test.py", line 19, in TestFails
sock.sendall(anStructure)
File "/usr/local/lib/python2.7/site-packages/gevent/socket.py", line 457, in sendall
while data_sent < len(data):
TypeError: object of type 'AnStructure' has no len()
|
TypeError
|
def sendall(self, data, flags=0):
data_memory = _get_memory(data)
if self.timeout is None:
data_sent = 0
while data_sent < len(data_memory):
data_sent += self.send(data_memory[data_sent:], flags)
else:
timeleft = self.timeout
end = time.time() + timeleft
data_sent = 0
while True:
data_sent += self.send(data_memory[data_sent:], flags, timeout=timeleft)
if data_sent >= len(data_memory):
break
timeleft = end - time.time()
if timeleft <= 0:
raise timeout("timed out")
|
def sendall(self, data, flags=0):
if self.timeout is None:
data_sent = 0
while data_sent < len(data):
data_sent += self.send(_get_memory(data, data_sent), flags)
else:
timeleft = self.timeout
end = time.time() + timeleft
data_sent = 0
while True:
data_sent += self.send(
_get_memory(data, data_sent), flags, timeout=timeleft
)
if data_sent >= len(data):
break
timeleft = end - time.time()
if timeleft <= 0:
raise timeout("timed out")
|
https://github.com/gevent/gevent/issues/466
|
python test.py
Traceback (most recent call last):
File "test.py", line 22, in <module>
TestFails()
File "test.py", line 19, in TestFails
sock.sendall(anStructure)
File "/usr/local/lib/python2.7/site-packages/gevent/socket.py", line 457, in sendall
while data_sent < len(data):
TypeError: object of type 'AnStructure' has no len()
|
TypeError
|
def patch_all(
socket=True,
dns=True,
time=True,
select=True,
thread=True,
os=True,
ssl=True,
httplib=False,
subprocess=True,
sys=False,
aggressive=True,
Event=False,
):
"""Do all of the default monkey patching (calls every other function in this module."""
# order is important
if os:
patch_os()
if time:
patch_time()
if thread:
patch_thread(Event=Event)
# sys must be patched after thread. in other cases threading._shutdown will be
# initiated to _MainThread with real thread ident
if sys:
patch_sys()
if socket:
patch_socket(dns=dns, aggressive=aggressive)
if select:
patch_select(aggressive=aggressive)
if ssl:
patch_ssl()
if httplib:
raise ValueError("gevent.httplib is no longer provided, httplib must be False")
if subprocess:
patch_subprocess()
|
def patch_all(
socket=True,
dns=True,
time=True,
select=True,
thread=True,
os=True,
ssl=True,
httplib=False,
subprocess=False,
sys=False,
aggressive=True,
Event=False,
):
"""Do all of the default monkey patching (calls every other function in this module."""
# order is important
if os:
patch_os()
if time:
patch_time()
if thread:
patch_thread(Event=Event)
# sys must be patched after thread. in other cases threading._shutdown will be
# initiated to _MainThread with real thread ident
if sys:
patch_sys()
if socket:
patch_socket(dns=dns, aggressive=aggressive)
if select:
patch_select(aggressive=aggressive)
if ssl:
patch_ssl()
if httplib:
raise ValueError("gevent.httplib is no longer provided, httplib must be False")
if subprocess:
patch_subprocess()
|
https://github.com/gevent/gevent/issues/466
|
python test.py
Traceback (most recent call last):
File "test.py", line 22, in <module>
TestFails()
File "test.py", line 19, in TestFails
sock.sendall(anStructure)
File "/usr/local/lib/python2.7/site-packages/gevent/socket.py", line 457, in sendall
while data_sent < len(data):
TypeError: object of type 'AnStructure' has no len()
|
TypeError
|
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
greenlets = []
if self.stdin:
greenlets.append(spawn(write_and_close, self.stdin, input))
if self.stdout:
stdout = spawn(self.stdout.read)
greenlets.append(stdout)
else:
stdout = None
if self.stderr:
stderr = spawn(self.stderr.read)
greenlets.append(stderr)
else:
stderr = None
joinall(greenlets)
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
self.wait()
return (
None if stdout is None else stdout.value or b"",
None if stderr is None else stderr.value or b"",
)
|
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
greenlets = []
if self.stdin:
greenlets.append(spawn(write_and_close, self.stdin, input))
if self.stdout:
stdout = spawn(self.stdout.read)
greenlets.append(stdout)
else:
stdout = None
if self.stderr:
stderr = spawn(self.stderr.read)
greenlets.append(stderr)
else:
stderr = None
joinall(greenlets)
if self.stdout:
self.stdout.close()
if self.stderr:
self.stderr.close()
self.wait()
return (
None if stdout is None else stdout.value or "",
None if stderr is None else stderr.value or "",
)
|
https://github.com/gevent/gevent/issues/466
|
python test.py
Traceback (most recent call last):
File "test.py", line 22, in <module>
TestFails()
File "test.py", line 19, in TestFails
sock.sendall(anStructure)
File "/usr/local/lib/python2.7/site-packages/gevent/socket.py", line 457, in sendall
while data_sent < len(data):
TypeError: object of type 'AnStructure' has no len()
|
TypeError
|
def issues(self):
email = self.username
# TODO -- doing something with blockedby would be nice.
if self.query_url:
query = self.bz.url_to_query(self.query_url)
query["column_list"] = self.COLUMN_LIST
else:
query = dict(
column_list=self.COLUMN_LIST,
bug_status=self.open_statuses,
email1=email,
emailreporter1=1,
emailassigned_to1=1,
emailqa_contact1=1,
emailtype1="substring",
)
if not self.ignore_cc:
query["emailcc1"] = 1
if self.advanced:
# Required for new bugzilla
# https://bugzilla.redhat.com/show_bug.cgi?id=825370
query["query_format"] = "advanced"
bugs = self.bz.query(query)
if self.include_needinfos:
needinfos = self.bz.query(
dict(
column_list=self.COLUMN_LIST,
quicksearch="flag:needinfo?%s" % email,
)
)
exists = [b.id for b in bugs]
for bug in needinfos:
# don't double-add bugs that have already been found
if bug.id in exists:
continue
bugs.append(bug)
# Convert to dicts
bugs = [
dict(((col, _get_bug_attr(bug, col)) for col in self.COLUMN_LIST))
for bug in bugs
]
issues = [(self.target, bug) for bug in bugs]
log.debug(" Found %i total.", len(issues))
# Build a url for each issue
base_url = "https://%s/show_bug.cgi?id=" % (self.base_uri)
for tag, issue in issues:
issue_obj = self.get_issue_for_record(issue)
extra = {
"url": base_url + six.text_type(issue["id"]),
"annotations": self.annotations(tag, issue, issue_obj),
}
needinfos = [
f
for f in issue["flags"]
if (
f["name"] == "needinfo"
and f["status"] == "?"
and f.get("requestee", self.username) == self.username
)
]
if needinfos:
last_mod = needinfos[0]["modification_date"]
# convert from RPC DateTime string to datetime.datetime object
mod_date = datetime.datetime.fromtimestamp(
time.mktime(last_mod.timetuple())
)
extra["needinfo_since"] = pytz.UTC.localize(mod_date).isoformat()
if issue["status"] == "ASSIGNED":
extra["assigned_on"] = self._get_assigned_date(issue)
else:
extra["assigned_on"] = None
issue_obj.update_extra(extra)
yield issue_obj
|
def issues(self):
email = self.username
# TODO -- doing something with blockedby would be nice.
if self.query_url:
query = self.bz.url_to_query(self.query_url)
query["column_list"] = self.COLUMN_LIST
else:
query = dict(
column_list=self.COLUMN_LIST,
bug_status=self.open_statuses,
email1=email,
emailreporter1=1,
emailassigned_to1=1,
emailqa_contact1=1,
emailtype1="substring",
)
if not self.ignore_cc:
query["emailcc1"] = 1
if self.advanced:
# Required for new bugzilla
# https://bugzilla.redhat.com/show_bug.cgi?id=825370
query["query_format"] = "advanced"
bugs = self.bz.query(query)
if self.include_needinfos:
needinfos = self.bz.query(
dict(
column_list=self.COLUMN_LIST,
quicksearch="flag:needinfo?%s" % email,
)
)
exists = [b.id for b in bugs]
for bug in needinfos:
# don't double-add bugs that have already been found
if bug.id in exists:
continue
bugs.append(bug)
# Convert to dicts
bugs = [
dict(((col, _get_bug_attr(bug, col)) for col in self.COLUMN_LIST))
for bug in bugs
]
issues = [(self.target, bug) for bug in bugs]
log.debug(" Found %i total.", len(issues))
# Build a url for each issue
base_url = "https://%s/show_bug.cgi?id=" % (self.base_uri)
for tag, issue in issues:
issue_obj = self.get_issue_for_record(issue)
extra = {
"url": base_url + six.text_type(issue["id"]),
"annotations": self.annotations(tag, issue, issue_obj),
}
needinfos = [
f
for f in issue["flags"]
if (
f["name"] == "needinfo"
and f["status"] == "?"
and f.get("requestee", self.username) == self.username
)
]
if needinfos:
last_mod = needinfos[0]["modification_date"]
# convert from RPC DateTime string to datetime.datetime object
mod_date = datetime.datetime.fromtimestamp(
time.mktime(last_mod.timetuple())
)
extra["needinfo_since"] = pytz.UTC.localize(mod_date)
if issue["status"] == "ASSIGNED":
extra["assigned_on"] = self._get_assigned_date(issue)
else:
extra["assigned_on"] = None
issue_obj.update_extra(extra)
yield issue_obj
|
https://github.com/ralphbean/bugwarrior/issues/740
|
Traceback (most recent call last):
File "/home/dave/dotfiles/external/pyenv/versions/bugwarrior/bin/bugwarrior-pull", line 8, in <module>
sys.exit(pull())
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/bugwarrior/db.py", line 317, in synchronize
issue_dict = dict(issue)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 414, in keys
return list(self.__iter__())
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 409, in __iter__
record = self.get_taskwarrior_record()
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 305, in get_taskwarrior_record
self._taskwarrior_record = self.to_taskwarrior()
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/bugwarrior/services/bz.py", line 83, in to_taskwarrior
task[self.NEEDINFO] = self.parse_date(self.extra.get('needinfo_since'))
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 345, in parse_date
date = parse_date(date)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/dateutil/parser/_parser.py", line 1374, in parse
return DEFAULTPARSER.parse(timestr, **kwargs)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/dateutil/parser/_parser.py", line 646, in parse
res, skipped_tokens = self._parse(timestr, **kwargs)
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/dateutil/parser/_parser.py", line 725, in _parse
l = _timelex.split(timestr) # Splits the timestr into tokens
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/dateutil/parser/_parser.py", line 207, in split
return list(cls(s))
File "/home/dave/.pyenv/versions/2.7.18/envs/bugwarrior/lib/python2.7/site-packages/dateutil/parser/_parser.py", line 76, in __init__
'{itype}'.format(itype=instream.__class__.__name__))
TypeError: Parser must be a string or character stream, not datetime
|
TypeError
|
def annotations(self, issue, issue_obj):
comments = self.jira.comments(issue.key) or []
return self.build_annotations(
((comment.author.displayName, comment.body) for comment in comments),
issue_obj.get_processed_url(issue_obj.get_url()),
)
|
def annotations(self, issue, issue_obj):
comments = self.jira.comments(issue.key) or []
return self.build_annotations(
((comment.author.name, comment.body) for comment in comments),
issue_obj.get_processed_url(issue_obj.get_url()),
)
|
https://github.com/ralphbean/bugwarrior/issues/718
|
INFO:bugwarrior.db:Service-defined UDAs exist: you can optionally use the `bugwarrior-uda` command to export a list of UDAs you can add to your taskrc file.
INFO:bugwarrior.services:Starting to aggregate remote issues.
INFO:bugwarrior.services:Spawning 1 workers.
INFO:bugwarrior.services:Working on [JIRA]
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
INFO:bugwarrior.services.jira:Found 1 distinct sprint fields.
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
/usr/local/lib/python3.7/site-packages/urllib3/connectionpool.py:857: InsecureRequestWarning: Unverified HTTPS request is being made. Adding certificate verification is strongly advised. See: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
InsecureRequestWarning)
ERROR:bugwarrior.services:Worker for [JIRA] failed: <class 'jira.resources.UnknownResource'> object has no attribute 'name' ('UnknownResource' object is not subscriptable)
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/jira/resources.py", line 161, in __getattr__
return self[item]
TypeError: 'UnknownResource' object is not subscriptable
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/bugwarrior/services/__init__.py", line 499, in _aggregate_issues
for issue in service.issues():
File "/usr/local/lib/python3.7/site-packages/bugwarrior/services/jira.py", line 352, in issues
'annotations': self.annotations(case, issue)
File "/usr/local/lib/python3.7/site-packages/bugwarrior/services/jira.py", line 335, in annotations
issue_obj.get_processed_url(issue_obj.get_url())
File "/usr/local/lib/python3.7/site-packages/bugwarrior/services/__init__.py", line 149, in build_annotations
for author, message in annotations:
File "/usr/local/lib/python3.7/site-packages/bugwarrior/services/jira.py", line 334, in <genexpr>
) for comment in comments),
File "/usr/local/lib/python3.7/site-packages/jira/resources.py", line 177, in __getattr__
raise AttributeError("%r object has no attribute %r (%s)" % (self.__class__, item, e))
AttributeError: <class 'jira.resources.UnknownResource'> object has no attribute 'name' ('UnknownResource' object is not subscriptable)
INFO:bugwarrior.services:Done with [JIRA] in 3.948460s
INFO:bugwarrior.services:Terminating workers
ERROR:bugwarrior.command:Aborted (critical error in target 'JIRA')
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python3.7/site-packages/bugwarrior/db.py", line 348, in synchronize
for issue in issue_generator:
File "/usr/local/lib/python3.7/site-packages/bugwarrior/services/__init__.py", line 568, in aggregate_issues
"critical error in target '{}'".format(target))
RuntimeError: critical error in target 'JIRA'
|
TypeError
|
def aslist(value):
"""Cast config values to lists of strings"""
return [item.strip() for item in re.split(",(?![^{]*})", value.strip())]
|
def aslist(value):
"""Cast config values to lists of strings"""
return [item.strip() for item in value.strip().split(",")]
|
https://github.com/ralphbean/bugwarrior/issues/568
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
sys.exit(pull())
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.6/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python3.6/site-packages/bugwarrior/db.py", line 330, in synchronize
issue_dict = dict(issue)
File "/usr/local/lib/python3.6/site-packages/bugwarrior/services/__init__.py", line 416, in keys
return list(self.__iter__())
File "/usr/local/lib/python3.6/site-packages/bugwarrior/services/__init__.py", line 411, in __iter__
record = self.get_taskwarrior_record()
File "/usr/local/lib/python3.6/site-packages/bugwarrior/services/__init__.py", line 319, in get_taskwarrior_record
record['tags'].extend(self.get_added_tags())
File "/usr/local/lib/python3.6/site-packages/bugwarrior/services/__init__.py", line 304, in get_added_tags
tag = Template(tag).render(self.get_template_context())
File "/usr/local/lib/python3.6/site-packages/jinja2/environment.py", line 945, in __new__
return env.from_string(source, template_class=cls)
File "/usr/local/lib/python3.6/site-packages/jinja2/environment.py", line 880, in from_string
return cls.from_code(self, self.compile(source), globals, None)
File "/usr/local/lib/python3.6/site-packages/jinja2/environment.py", line 591, in compile
self.handle_exception(exc_info, source_hint=source_hint)
File "/usr/local/lib/python3.6/site-packages/jinja2/environment.py", line 780, in handle_exception
reraise(exc_type, exc_value, tb)
File "/usr/local/lib/python3.6/site-packages/jinja2/_compat.py", line 37, in reraise
raise value.with_traceback(tb)
File "<unknown>", line 1, in template
File "/usr/local/lib/python3.6/site-packages/jinja2/environment.py", line 497, in _parse
return Parser(self, source, name, encode_filename(filename)).parse()
File "/usr/local/lib/python3.6/site-packages/jinja2/parser.py", line 901, in parse
result = nodes.Template(self.subparse(), lineno=1)
File "/usr/local/lib/python3.6/site-packages/jinja2/parser.py", line 874, in subparse
next(self.stream)
File "/usr/local/lib/python3.6/site-packages/jinja2/lexer.py", line 359, in __next__
self.current = next(self._iter)
File "/usr/local/lib/python3.6/site-packages/jinja2/lexer.py", line 562, in wrap
for lineno, token, value in stream:
File "/usr/local/lib/python3.6/site-packages/jinja2/lexer.py", line 739, in tokeniter
name, filename)
jinja2.exceptions.TemplateSyntaxError: unexpected char "'" at 5
|
jinja2.exceptions.TemplateSyntaxError
|
def get_annotations(self, tag, issue, issue_obj, url):
response = self.get_collection(
"/repositories/%s/pullrequests/%i/comments" % (tag, issue["id"])
)
return self.build_annotations(
(
(
comment["user"]["username"],
comment["content"]["raw"],
)
for comment in response
),
issue_obj.get_processed_url(url),
)
|
def get_annotations(self, tag, issue, issue_obj, url):
response = self.get_data(
self.BASE_API + "/repositories/%s/issues/%i/comments" % (tag, issue["id"])
)
return self.build_annotations(
(
(
comment["author_info"]["username"],
comment["content"],
)
for comment in response
),
issue_obj.get_processed_url(url),
)
|
https://github.com/ralphbean/bugwarrior/issues/694
|
ERROR:bugwarrior.services:Worker for [ELIDED] failed: Non-200 status code 410; u'https://api.bitbucket.org/1.0/repositories/[ELIDED]'; u'{"type": "error", "error": {"message": "Resource removed", "detail": "This API is no longer supported.\\n\\nFor information about its removal, please refer to the deprecation notice at: https://developer.atlassian.com/cloud/bitbucket/deprecation-notice-v1-apis/"}}'
Traceback (most recent call last):
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 499, in _aggregate_issues
for issue in service.issues():
File "/home/ELIDEDi/.local/lib/python2.7/site-packages/bugwarrior/services/bitbucket.py", line 233, in issues
'annotations': self.get_annotations(tag, issue, issue_obj, url)
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/bitbucket.py", line 177, in get_annotations
'/repositories/%s/issues/%i/comments' % (tag, issue['id']))
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/bitbucket.py", line 144, in get_data
return self.json_response(requests.get(url, **self.requests_kwargs))
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 478, in json_response
response.status_code, response.url, response.text,
IOError: Non-200 status code 410; u'https://api.bitbucket.org/1.0/repositories/ELIDED'; u'{"type": "error", "error": {"message": "Resource removed", "detail": "This API is no longer supported.\\n\\nFor information about its removal, please refer to the deprecation notice at: https://developer.atlassian.com/cloud/bitbucket/deprecation-notice-v1-apis/"}}'
|
IOError
|
def issues(self):
user = self.config.get("username")
response = self.get_collection("/repositories/" + user + "/")
repo_tags = list(
filter(
self.filter_repos,
[repo["full_name"] for repo in response if repo.get("has_issues")],
)
)
issues = sum([self.fetch_issues(repo) for repo in repo_tags], [])
log.debug(" Found %i total.", len(issues))
closed = ["resolved", "duplicate", "wontfix", "invalid", "closed"]
try:
issues = [tup for tup in issues if tup[1]["status"] not in closed]
except KeyError: # Undocumented API change.
issues = [tup for tup in issues if tup[1]["state"] not in closed]
issues = list(filter(self.include, issues))
log.debug(" Pruned down to %i", len(issues))
for tag, issue in issues:
issue_obj = self.get_issue_for_record(issue)
tagParts = tag.split("/")
projectName = tagParts[1]
if self.project_owner_prefix:
projectName = tagParts[0] + "." + projectName
url = issue["links"]["html"]["href"]
extras = {
"project": projectName,
"url": url,
"annotations": self.get_annotations(tag, issue, issue_obj, url),
}
issue_obj.update_extra(extras)
yield issue_obj
if not self.filter_merge_requests:
pull_requests = sum([self.fetch_pull_requests(repo) for repo in repo_tags], [])
log.debug(" Found %i total.", len(pull_requests))
closed = ["rejected", "fulfilled"]
not_resolved = lambda tup: tup[1]["state"] not in closed
pull_requests = list(filter(not_resolved, pull_requests))
pull_requests = list(filter(self.include, pull_requests))
log.debug(" Pruned down to %i", len(pull_requests))
for tag, issue in pull_requests:
issue_obj = self.get_issue_for_record(issue)
tagParts = tag.split("/")
projectName = tagParts[1]
if self.project_owner_prefix:
projectName = tagParts[0] + "." + projectName
url = self.BASE_URL + "/".join(
issue["links"]["html"]["href"].split("/")[3:]
).replace("pullrequests", "pullrequest")
extras = {
"project": projectName,
"url": url,
"annotations": self.get_annotations(tag, issue, issue_obj, url),
}
issue_obj.update_extra(extras)
yield issue_obj
|
def issues(self):
user = self.config.get("username")
response = self.get_collection("/repositories/" + user + "/")
repo_tags = list(
filter(
self.filter_repos,
[repo["full_name"] for repo in response if repo.get("has_issues")],
)
)
issues = sum([self.fetch_issues(repo) for repo in repo_tags], [])
log.debug(" Found %i total.", len(issues))
closed = ["resolved", "duplicate", "wontfix", "invalid", "closed"]
try:
issues = [tup for tup in issues if tup[1]["status"] not in closed]
except KeyError: # Undocumented API change.
issues = [tup for tup in issues if tup[1]["state"] not in closed]
issues = list(filter(self.include, issues))
log.debug(" Pruned down to %i", len(issues))
for tag, issue in issues:
issue_obj = self.get_issue_for_record(issue)
tagParts = tag.split("/")
projectName = tagParts[1]
if self.project_owner_prefix:
projectName = tagParts[0] + "." + projectName
url = issue["links"]["html"]["href"]
extras = {
"project": projectName,
"url": url,
"annotations": self.get_annotations(tag, issue, issue_obj, url),
}
issue_obj.update_extra(extras)
yield issue_obj
if not self.filter_merge_requests:
pull_requests = sum([self.fetch_pull_requests(repo) for repo in repo_tags], [])
log.debug(" Found %i total.", len(pull_requests))
closed = ["rejected", "fulfilled"]
not_resolved = lambda tup: tup[1]["state"] not in closed
pull_requests = list(filter(not_resolved, pull_requests))
pull_requests = list(filter(self.include, pull_requests))
log.debug(" Pruned down to %i", len(pull_requests))
for tag, issue in pull_requests:
issue_obj = self.get_issue_for_record(issue)
tagParts = tag.split("/")
projectName = tagParts[1]
if self.project_owner_prefix:
projectName = tagParts[0] + "." + projectName
url = self.BASE_URL + "/".join(
issue["links"]["html"]["href"].split("/")[3:]
).replace("pullrequests", "pullrequest")
extras = {
"project": projectName,
"url": url,
"annotations": self.get_annotations2(tag, issue, issue_obj, url),
}
issue_obj.update_extra(extras)
yield issue_obj
|
https://github.com/ralphbean/bugwarrior/issues/694
|
ERROR:bugwarrior.services:Worker for [ELIDED] failed: Non-200 status code 410; u'https://api.bitbucket.org/1.0/repositories/[ELIDED]'; u'{"type": "error", "error": {"message": "Resource removed", "detail": "This API is no longer supported.\\n\\nFor information about its removal, please refer to the deprecation notice at: https://developer.atlassian.com/cloud/bitbucket/deprecation-notice-v1-apis/"}}'
Traceback (most recent call last):
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 499, in _aggregate_issues
for issue in service.issues():
File "/home/ELIDEDi/.local/lib/python2.7/site-packages/bugwarrior/services/bitbucket.py", line 233, in issues
'annotations': self.get_annotations(tag, issue, issue_obj, url)
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/bitbucket.py", line 177, in get_annotations
'/repositories/%s/issues/%i/comments' % (tag, issue['id']))
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/bitbucket.py", line 144, in get_data
return self.json_response(requests.get(url, **self.requests_kwargs))
File "/home/ELIDED/.local/lib/python2.7/site-packages/bugwarrior/services/__init__.py", line 478, in json_response
response.status_code, response.url, response.text,
IOError: Non-200 status code 410; u'https://api.bitbucket.org/1.0/repositories/ELIDED'; u'{"type": "error", "error": {"message": "Resource removed", "detail": "This API is no longer supported.\\n\\nFor information about its removal, please refer to the deprecation notice at: https://developer.atlassian.com/cloud/bitbucket/deprecation-notice-v1-apis/"}}'
|
IOError
|
def to_taskwarrior(self):
return {
"project": self.extra["project"],
"annotations": self.extra["annotations"],
self.URL: self.extra["url"],
"priority": self.origin["default_priority"],
"tags": self.get_tags(),
self.FOREIGN_ID: self.record["ref"],
self.SUMMARY: self.record["subject"],
}
|
def to_taskwarrior(self):
return {
"project": self.extra["project"],
"annotations": self.extra["annotations"],
self.URL: self.extra["url"],
"priority": self.origin["default_priority"],
"tags": self.record["tags"],
self.FOREIGN_ID: self.record["ref"],
self.SUMMARY: self.record["subject"],
}
|
https://github.com/ralphbean/bugwarrior/issues/591
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
sys.exit(pull())
File "/home/frank/.local/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/home/frank/.local/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/home/frank/.local/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/frank/.local/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/home/frank/.local/lib/python2.7/site-packages/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/home/frank/.local/lib/python2.7/site-packages/bugwarrior/db.py", line 386, in synchronize
send_notification(issue, 'Created', conf)
File "/home/frank/.local/lib/python2.7/site-packages/bugwarrior/notifications.py", line 111, in send_notification
metadata = _get_metadata(issue)
File "/home/frank/.local/lib/python2.7/site-packages/bugwarrior/notifications.py", line 39, in _get_metadata
tags = "Tags: " + ', '.join(issue['tags'])
TypeError: sequence item 0: expected string, list found
|
TypeError
|
def _get_bug_attr(bug, attr):
"""Default longdescs/flags case to [] since they may not be present."""
if attr in ("longdescs", "flags"):
return getattr(bug, attr, [])
return getattr(bug, attr)
|
def _get_bug_attr(bug, attr):
"""Default only the longdescs case to [] since it may not be present."""
if attr == "longdescs":
return getattr(bug, attr, [])
return getattr(bug, attr)
|
https://github.com/ralphbean/bugwarrior/issues/535
|
ERROR:bugwarrior.services:Worker for [bz.redhat] failed: Bug object has no attribute 'flags'.
If 'flags' is a bugzilla attribute, it may not have been cached when the bug was fetched. You may want to adjust your include_fields for getbug/query.
Traceback (most recent call last):
File "/home/mike/Devel/bugwarrior/build/lib/bugwarrior/services/__init__.py", line 506, in _aggregate_issues
for issue in service.issues():
File "/home/mike/Devel/bugwarrior/build/lib/bugwarrior/services/bz.py", line 257, in issues
) for bug in bugs
File "/home/mike/Devel/bugwarrior/build/lib/bugwarrior/services/bz.py", line 256, in <genexpr>
((col, _get_bug_attr(bug, col)) for col in self.COLUMN_LIST)
File "/home/mike/Devel/bugwarrior/build/lib/bugwarrior/services/bz.py", line 291, in _get_bug_attr
return getattr(bug, attr)
File "/usr/lib/python2.7/site-packages/bugzilla/bug.py", line 110, in __getattr__
raise AttributeError(msg)
AttributeError: Bug object has no attribute 'flags'.
If 'flags' is a bugzilla attribute, it may not have been cached when the bug was fetched. You may want to adjust your include_fields for getbug/query.
INFO:bugwarrior.services:Done with [bz.redhat] in 7.013321s
|
AttributeError
|
def validate_config(config, main_section):
if not config.has_section(main_section):
die("No [%s] section found." % main_section)
logging.basicConfig(
level=getattr(logging, config.get(main_section, "log.level")),
filename=config.get(main_section, "log.file"),
)
# In general, its nice to log "everything", but some of the loggers from
# our dependencies are very very spammy. Here, we silence most of their
# noise:
spammers = [
"bugzilla.base",
"bugzilla.bug",
"requests.packages.urllib3.connectionpool",
]
for spammer in spammers:
logging.getLogger(spammer).setLevel(logging.WARN)
if not config.has_option(main_section, "targets"):
die("No targets= item in [%s] found." % main_section)
targets = aslist(config.get(main_section, "targets"))
targets = [t for t in targets if len(t)]
if not targets:
die("Empty targets= item in [%s]." % main_section)
for target in targets:
if target not in config.sections():
die("No [%s] section found." % target)
# Validate each target one by one.
for target in targets:
service = config.get(target, "service")
if not service:
die("No 'service' in [%s]" % target)
if not get_service(service):
die("'%s' in [%s] is not a valid service." % (service, target))
# Call the service-specific validator
service = get_service(service)
service_config = ServiceConfig(service.CONFIG_PREFIX, config, target)
service.validate_config(service_config, target)
|
def validate_config(config, main_section):
if not config.has_section(main_section):
die("No [%s] section found." % main_section)
logging.basicConfig(
level=getattr(logging, config.get(main_section, "log.level")),
filename=config.get(main_section, "log.file"),
)
# In general, its nice to log "everything", but some of the loggers from
# our dependencies are very very spammy. Here, we silence most of their
# noise:
spammers = [
"bugzilla.base",
"bugzilla.bug",
"requests.packages.urllib3.connectionpool",
]
for spammer in spammers:
logging.getLogger(spammer).setLevel(logging.WARN)
if not config.has_option(main_section, "targets"):
die("No targets= item in [%s] found." % main_section)
targets = config.get(main_section, "targets")
targets = [t for t in [t.strip() for t in targets.split(",")] if len(t)]
if not targets:
die("Empty targets= item in [%s]." % main_section)
for target in targets:
if target not in config.sections():
die("No [%s] section found." % target)
# Validate each target one by one.
for target in targets:
service = config.get(target, "service")
if not service:
die("No 'service' in [%s]" % target)
if not get_service(service):
die("'%s' in [%s] is not a valid service." % (service, target))
# Call the service-specific validator
service = get_service(service)
service_config = ServiceConfig(service.CONFIG_PREFIX, config, target)
service.validate_config(service_config, target)
|
https://github.com/ralphbean/bugwarrior/issues/490
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
load_entry_point('bugwarrior==1.5.1', 'console_scripts', 'bugwarrior-pull')()
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/db.py", line 351, in synchronize
del issue_dict[field]
KeyError: u' description'
|
KeyError
|
def run_hooks(conf, name):
if conf.has_option("hooks", name):
pre_import = aslist(conf.get("hooks", name))
if pre_import is not None:
for hook in pre_import:
exit_code = subprocess.call(hook, shell=True)
if exit_code is not 0:
msg = "Non-zero exit code %d on hook %s" % (exit_code, hook)
log.error(msg)
raise RuntimeError(msg)
|
def run_hooks(conf, name):
if conf.has_option("hooks", name):
pre_import = [t.strip() for t in conf.get("hooks", name).split(",")]
if pre_import is not None:
for hook in pre_import:
exit_code = subprocess.call(hook, shell=True)
if exit_code is not 0:
msg = "Non-zero exit code %d on hook %s" % (exit_code, hook)
log.error(msg)
raise RuntimeError(msg)
|
https://github.com/ralphbean/bugwarrior/issues/490
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
load_entry_point('bugwarrior==1.5.1', 'console_scripts', 'bugwarrior-pull')()
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/db.py", line 351, in synchronize
del issue_dict[field]
KeyError: u' description'
|
KeyError
|
def synchronize(issue_generator, conf, main_section, dry_run=False):
def _bool_option(section, option, default):
try:
return asbool(conf.get(section, option))
except (NoSectionError, NoOptionError):
return default
targets = aslist(conf.get(main_section, "targets"))
services = set([conf.get(target, "service") for target in targets])
key_list = build_key_list(services)
uda_list = build_uda_config_overrides(services)
if uda_list:
log.info(
"Service-defined UDAs exist: you can optionally use the "
"`bugwarrior-uda` command to export a list of UDAs you can "
"add to your taskrc file."
)
static_fields = ["priority"]
if conf.has_option(main_section, "static_fields"):
static_fields = aslist(conf.get(main_section, "static_fields"))
# Before running CRUD operations, call the pre_import hook(s).
run_hooks(conf, "pre_import")
notify = _bool_option("notifications", "notifications", False) and not dry_run
tw = TaskWarriorShellout(
config_filename=get_taskrc_path(conf, main_section),
config_overrides=uda_list,
marshal=True,
)
legacy_matching = _bool_option(main_section, "legacy_matching", False)
merge_annotations = _bool_option(main_section, "merge_annotations", True)
merge_tags = _bool_option(main_section, "merge_tags", True)
issue_updates = {
"new": [],
"existing": [],
"changed": [],
"closed": get_managed_task_uuids(tw, key_list, legacy_matching),
}
for issue in issue_generator:
try:
issue_dict = dict(issue)
# We received this issue from The Internet, but we're not sure what
# kind of encoding the service providers may have handed us. Let's try
# and decode all byte strings from UTF8 off the bat. If we encounter
# other encodings in the wild in the future, we can revise the handling
# here. https://github.com/ralphbean/bugwarrior/issues/350
for key in issue_dict.keys():
if isinstance(issue_dict[key], six.binary_type):
try:
issue_dict[key] = issue_dict[key].decode("utf-8")
except UnicodeDecodeError:
log.warn("Failed to interpret %r as utf-8" % key)
existing_uuid = find_local_uuid(
tw, key_list, issue, legacy_matching=legacy_matching
)
_, task = tw.get_task(uuid=existing_uuid)
# Drop static fields from the upstream issue. We don't want to
# overwrite local changes to fields we declare static.
for field in static_fields:
del issue_dict[field]
# Merge annotations & tags from online into our task object
if merge_annotations:
merge_left("annotations", task, issue_dict, hamming=True)
if merge_tags:
merge_left("tags", task, issue_dict)
issue_dict.pop("annotations", None)
issue_dict.pop("tags", None)
task.update(issue_dict)
if task.get_changes(keep=True):
issue_updates["changed"].append(task)
else:
issue_updates["existing"].append(task)
if existing_uuid in issue_updates["closed"]:
issue_updates["closed"].remove(existing_uuid)
except MultipleMatches as e:
log.exception("Multiple matches: %s", six.text_type(e))
except NotFound:
issue_updates["new"].append(issue_dict)
notreally = " (not really)" if dry_run else ""
# Add new issues
log.info("Adding %i tasks", len(issue_updates["new"]))
for issue in issue_updates["new"]:
log.info("Adding task %s%s", issue["description"], notreally)
if dry_run:
continue
if notify:
send_notification(issue, "Created", conf)
try:
tw.task_add(**issue)
except TaskwarriorError as e:
log.exception("Unable to add task: %s" % e.stderr)
log.info("Updating %i tasks", len(issue_updates["changed"]))
for issue in issue_updates["changed"]:
changes = "; ".join(
[
"{field}: {f} -> {t}".format(field=field, f=repr(ch[0]), t=repr(ch[1]))
for field, ch in six.iteritems(issue.get_changes(keep=True))
]
)
log.info(
"Updating task %s, %s; %s%s",
six.text_type(issue["uuid"]),
issue["description"],
changes,
notreally,
)
if dry_run:
continue
try:
tw.task_update(issue)
except TaskwarriorError as e:
log.exception("Unable to modify task: %s" % e.stderr)
log.info("Closing %i tasks", len(issue_updates["closed"]))
for issue in issue_updates["closed"]:
_, task_info = tw.get_task(uuid=issue)
log.info(
"Completing task %s %s%s",
issue,
task_info.get("description", ""),
notreally,
)
if dry_run:
continue
if notify:
send_notification(task_info, "Completed", conf)
try:
tw.task_done(uuid=issue)
except TaskwarriorError as e:
log.exception("Unable to close task: %s" % e.stderr)
# Send notifications
if notify:
only_on_new_tasks = _bool_option("notifications", "only_on_new_tasks", False)
if (
not only_on_new_tasks
or len(issue_updates["new"])
+ len(issue_updates["changed"])
+ len(issue_updates["closed"])
> 0
):
send_notification(
dict(
description="New: %d, Changed: %d, Completed: %d"
% (
len(issue_updates["new"]),
len(issue_updates["changed"]),
len(issue_updates["closed"]),
)
),
"bw_finished",
conf,
)
|
def synchronize(issue_generator, conf, main_section, dry_run=False):
def _bool_option(section, option, default):
try:
return asbool(conf.get(section, option))
except (NoSectionError, NoOptionError):
return default
targets = [t.strip() for t in conf.get(main_section, "targets").split(",")]
services = set([conf.get(target, "service") for target in targets])
key_list = build_key_list(services)
uda_list = build_uda_config_overrides(services)
if uda_list:
log.info(
"Service-defined UDAs exist: you can optionally use the "
"`bugwarrior-uda` command to export a list of UDAs you can "
"add to your taskrc file."
)
static_fields = ["priority"]
if conf.has_option(main_section, "static_fields"):
static_fields = conf.get(main_section, "static_fields").split(",")
# Before running CRUD operations, call the pre_import hook(s).
run_hooks(conf, "pre_import")
notify = _bool_option("notifications", "notifications", False) and not dry_run
tw = TaskWarriorShellout(
config_filename=get_taskrc_path(conf, main_section),
config_overrides=uda_list,
marshal=True,
)
legacy_matching = _bool_option(main_section, "legacy_matching", False)
merge_annotations = _bool_option(main_section, "merge_annotations", True)
merge_tags = _bool_option(main_section, "merge_tags", True)
issue_updates = {
"new": [],
"existing": [],
"changed": [],
"closed": get_managed_task_uuids(tw, key_list, legacy_matching),
}
for issue in issue_generator:
try:
issue_dict = dict(issue)
# We received this issue from The Internet, but we're not sure what
# kind of encoding the service providers may have handed us. Let's try
# and decode all byte strings from UTF8 off the bat. If we encounter
# other encodings in the wild in the future, we can revise the handling
# here. https://github.com/ralphbean/bugwarrior/issues/350
for key in issue_dict.keys():
if isinstance(issue_dict[key], six.binary_type):
try:
issue_dict[key] = issue_dict[key].decode("utf-8")
except UnicodeDecodeError:
log.warn("Failed to interpret %r as utf-8" % key)
existing_uuid = find_local_uuid(
tw, key_list, issue, legacy_matching=legacy_matching
)
_, task = tw.get_task(uuid=existing_uuid)
# Drop static fields from the upstream issue. We don't want to
# overwrite local changes to fields we declare static.
for field in static_fields:
del issue_dict[field]
# Merge annotations & tags from online into our task object
if merge_annotations:
merge_left("annotations", task, issue_dict, hamming=True)
if merge_tags:
merge_left("tags", task, issue_dict)
issue_dict.pop("annotations", None)
issue_dict.pop("tags", None)
task.update(issue_dict)
if task.get_changes(keep=True):
issue_updates["changed"].append(task)
else:
issue_updates["existing"].append(task)
if existing_uuid in issue_updates["closed"]:
issue_updates["closed"].remove(existing_uuid)
except MultipleMatches as e:
log.exception("Multiple matches: %s", six.text_type(e))
except NotFound:
issue_updates["new"].append(issue_dict)
notreally = " (not really)" if dry_run else ""
# Add new issues
log.info("Adding %i tasks", len(issue_updates["new"]))
for issue in issue_updates["new"]:
log.info("Adding task %s%s", issue["description"], notreally)
if dry_run:
continue
if notify:
send_notification(issue, "Created", conf)
try:
tw.task_add(**issue)
except TaskwarriorError as e:
log.exception("Unable to add task: %s" % e.stderr)
log.info("Updating %i tasks", len(issue_updates["changed"]))
for issue in issue_updates["changed"]:
changes = "; ".join(
[
"{field}: {f} -> {t}".format(field=field, f=repr(ch[0]), t=repr(ch[1]))
for field, ch in six.iteritems(issue.get_changes(keep=True))
]
)
log.info(
"Updating task %s, %s; %s%s",
six.text_type(issue["uuid"]),
issue["description"],
changes,
notreally,
)
if dry_run:
continue
try:
tw.task_update(issue)
except TaskwarriorError as e:
log.exception("Unable to modify task: %s" % e.stderr)
log.info("Closing %i tasks", len(issue_updates["closed"]))
for issue in issue_updates["closed"]:
_, task_info = tw.get_task(uuid=issue)
log.info(
"Completing task %s %s%s",
issue,
task_info.get("description", ""),
notreally,
)
if dry_run:
continue
if notify:
send_notification(task_info, "Completed", conf)
try:
tw.task_done(uuid=issue)
except TaskwarriorError as e:
log.exception("Unable to close task: %s" % e.stderr)
# Send notifications
if notify:
only_on_new_tasks = _bool_option("notifications", "only_on_new_tasks", False)
if (
not only_on_new_tasks
or len(issue_updates["new"])
+ len(issue_updates["changed"])
+ len(issue_updates["closed"])
> 0
):
send_notification(
dict(
description="New: %d, Changed: %d, Completed: %d"
% (
len(issue_updates["new"]),
len(issue_updates["changed"]),
len(issue_updates["closed"]),
)
),
"bw_finished",
conf,
)
|
https://github.com/ralphbean/bugwarrior/issues/490
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
load_entry_point('bugwarrior==1.5.1', 'console_scripts', 'bugwarrior-pull')()
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/db.py", line 351, in synchronize
del issue_dict[field]
KeyError: u' description'
|
KeyError
|
def get_defined_udas_as_strings(conf, main_section):
targets = aslist(conf.get(main_section, "targets"))
services = set([conf.get(target, "service") for target in targets])
uda_list = build_uda_config_overrides(services)
for uda in convert_override_args_to_taskrc_settings(uda_list):
yield uda
|
def get_defined_udas_as_strings(conf, main_section):
targets = [t.strip() for t in conf.get(main_section, "targets").split(",")]
services = set([conf.get(target, "service") for target in targets])
uda_list = build_uda_config_overrides(services)
for uda in convert_override_args_to_taskrc_settings(uda_list):
yield uda
|
https://github.com/ralphbean/bugwarrior/issues/490
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
load_entry_point('bugwarrior==1.5.1', 'console_scripts', 'bugwarrior-pull')()
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/db.py", line 351, in synchronize
del issue_dict[field]
KeyError: u' description'
|
KeyError
|
def __init__(self, main_config, main_section, target):
self.config = ServiceConfig(self.CONFIG_PREFIX, main_config, target)
self.main_section = main_section
self.main_config = main_config
self.target = target
self.desc_len = self._get_config_or_default("description_length", 35, asint)
self.anno_len = self._get_config_or_default("annotation_length", 45, asint)
self.inline_links = self._get_config_or_default("inline_links", True, asbool)
self.annotation_links = self._get_config_or_default(
"annotation_links", not self.inline_links, asbool
)
self.annotation_comments = self._get_config_or_default(
"annotation_comments", True, asbool
)
self.shorten = self._get_config_or_default("shorten", False, asbool)
self.default_priority = self._get_config_or_default("default_priority", "M")
self.add_tags = []
if "add_tags" in self.config:
for raw_option in aslist(self.config.get("add_tags")):
option = raw_option.strip(" +;")
if option:
self.add_tags.append(option)
log.info("Working on [%s]", self.target)
|
def __init__(self, main_config, main_section, target):
self.config = ServiceConfig(self.CONFIG_PREFIX, main_config, target)
self.main_section = main_section
self.main_config = main_config
self.target = target
self.desc_len = self._get_config_or_default("description_length", 35, asint)
self.anno_len = self._get_config_or_default("annotation_length", 45, asint)
self.inline_links = self._get_config_or_default("inline_links", True, asbool)
self.annotation_links = self._get_config_or_default(
"annotation_links", not self.inline_links, asbool
)
self.annotation_comments = self._get_config_or_default(
"annotation_comments", True, asbool
)
self.shorten = self._get_config_or_default("shorten", False, asbool)
self.default_priority = self._get_config_or_default("default_priority", "M")
self.add_tags = []
if "add_tags" in self.config:
for raw_option in self.config.get("add_tags").split(","):
option = raw_option.strip(" +;")
if option:
self.add_tags.append(option)
log.info("Working on [%s]", self.target)
|
https://github.com/ralphbean/bugwarrior/issues/490
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
load_entry_point('bugwarrior==1.5.1', 'console_scripts', 'bugwarrior-pull')()
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/db.py", line 351, in synchronize
del issue_dict[field]
KeyError: u' description'
|
KeyError
|
def aggregate_issues(conf, main_section, debug):
"""Return all issues from every target."""
log.info("Starting to aggregate remote issues.")
# Create and call service objects for every target in the config
targets = aslist(conf.get(main_section, "targets"))
queue = multiprocessing.Queue()
log.info("Spawning %i workers." % len(targets))
processes = []
if debug:
for target in targets:
_aggregate_issues(
conf, main_section, target, queue, conf.get(target, "service")
)
else:
for target in targets:
proc = multiprocessing.Process(
target=_aggregate_issues,
args=(conf, main_section, target, queue, conf.get(target, "service")),
)
proc.start()
processes.append(proc)
# Sleep for 1 second here to try and avoid a race condition where
# all N workers start up and ask the gpg-agent process for
# information at the same time. This causes gpg-agent to fumble
# and tell some of our workers some incomplete things.
time.sleep(1)
currently_running = len(targets)
while currently_running > 0:
issue = queue.get(True)
if isinstance(issue, tuple):
completion_type, args = issue
if completion_type == SERVICE_FINISHED_ERROR:
target, e = args
log.info("Terminating workers")
for process in processes:
process.terminate()
raise RuntimeError("critical error in target '{}'".format(target))
currently_running -= 1
continue
yield issue
log.info("Done aggregating remote issues.")
|
def aggregate_issues(conf, main_section, debug):
"""Return all issues from every target."""
log.info("Starting to aggregate remote issues.")
# Create and call service objects for every target in the config
targets = [t.strip() for t in conf.get(main_section, "targets").split(",")]
queue = multiprocessing.Queue()
log.info("Spawning %i workers." % len(targets))
processes = []
if debug:
for target in targets:
_aggregate_issues(
conf, main_section, target, queue, conf.get(target, "service")
)
else:
for target in targets:
proc = multiprocessing.Process(
target=_aggregate_issues,
args=(conf, main_section, target, queue, conf.get(target, "service")),
)
proc.start()
processes.append(proc)
# Sleep for 1 second here to try and avoid a race condition where
# all N workers start up and ask the gpg-agent process for
# information at the same time. This causes gpg-agent to fumble
# and tell some of our workers some incomplete things.
time.sleep(1)
currently_running = len(targets)
while currently_running > 0:
issue = queue.get(True)
if isinstance(issue, tuple):
completion_type, args = issue
if completion_type == SERVICE_FINISHED_ERROR:
target, e = args
log.info("Terminating workers")
for process in processes:
process.terminate()
raise RuntimeError("critical error in target '{}'".format(target))
currently_running -= 1
continue
yield issue
log.info("Done aggregating remote issues.")
|
https://github.com/ralphbean/bugwarrior/issues/490
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
load_entry_point('bugwarrior==1.5.1', 'console_scripts', 'bugwarrior-pull')()
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/db.py", line 351, in synchronize
del issue_dict[field]
KeyError: u' description'
|
KeyError
|
def __init__(self, *args, **kw):
super(BugzillaService, self).__init__(*args, **kw)
self.base_uri = self.config.get("base_uri")
self.username = self.config.get("username")
self.ignore_cc = self.config.get(
"ignore_cc", default=False, to_type=lambda x: x == "True"
)
self.query_url = self.config.get("query_url", default=None)
self.include_needinfos = self.config.get(
"include_needinfos", False, to_type=lambda x: x == "True"
)
self.open_statuses = self.config.get(
"open_statuses", _open_statuses, to_type=aslist
)
log.debug(" filtering on statuses: %r", self.open_statuses)
# So more modern bugzilla's require that we specify
# query_format=advanced along with the xmlrpc request.
# https://bugzilla.redhat.com/show_bug.cgi?id=825370
# ...but older bugzilla's don't know anything about that argument.
# Here we make it possible for the user to specify whether they want
# to pass that argument or not.
self.advanced = asbool(self.config.get("advanced", "no"))
url = "https://%s/xmlrpc.cgi" % self.base_uri
api_key = self.config.get("api_key", default=None)
if api_key:
try:
self.bz = bugzilla.Bugzilla(url=url, api_key=api_key)
except TypeError:
raise Exception("Bugzilla API keys require python-bugzilla>=2.1.0")
else:
password = self.get_password("password", self.username)
self.bz = bugzilla.Bugzilla(url=url)
self.bz.login(self.username, password)
|
def __init__(self, *args, **kw):
super(BugzillaService, self).__init__(*args, **kw)
self.base_uri = self.config.get("base_uri")
self.username = self.config.get("username")
self.ignore_cc = self.config.get(
"ignore_cc", default=False, to_type=lambda x: x == "True"
)
self.query_url = self.config.get("query_url", default=None)
self.include_needinfos = self.config.get(
"include_needinfos", False, to_type=lambda x: x == "True"
)
self.open_statuses = self.config.get(
"open_statuses", _open_statuses, to_type=lambda x: x.split(",")
)
log.debug(" filtering on statuses: %r", self.open_statuses)
# So more modern bugzilla's require that we specify
# query_format=advanced along with the xmlrpc request.
# https://bugzilla.redhat.com/show_bug.cgi?id=825370
# ...but older bugzilla's don't know anything about that argument.
# Here we make it possible for the user to specify whether they want
# to pass that argument or not.
self.advanced = asbool(self.config.get("advanced", "no"))
url = "https://%s/xmlrpc.cgi" % self.base_uri
api_key = self.config.get("api_key", default=None)
if api_key:
try:
self.bz = bugzilla.Bugzilla(url=url, api_key=api_key)
except TypeError:
raise Exception("Bugzilla API keys require python-bugzilla>=2.1.0")
else:
password = self.get_password("password", self.username)
self.bz = bugzilla.Bugzilla(url=url)
self.bz.login(self.username, password)
|
https://github.com/ralphbean/bugwarrior/issues/490
|
Traceback (most recent call last):
File "/usr/local/bin/bugwarrior-pull", line 11, in <module>
load_entry_point('bugwarrior==1.5.1', 'console_scripts', 'bugwarrior-pull')()
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 700, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 680, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 873, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python2.7/dist-packages/click/core.py", line 508, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/command.py", line 73, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/local/lib/python2.7/dist-packages/bugwarrior-1.5.1-py2.7.egg/bugwarrior/db.py", line 351, in synchronize
del issue_dict[field]
KeyError: u' description'
|
KeyError
|
def synchronize(issue_generator, conf, main_section, dry_run=False):
def _bool_option(section, option, default):
try:
return asbool(conf.get(section, option))
except (NoSectionError, NoOptionError):
return default
targets = [t.strip() for t in conf.get(main_section, "targets").split(",")]
services = set([conf.get(target, "service") for target in targets])
key_list = build_key_list(services)
uda_list = build_uda_config_overrides(services)
if uda_list:
log.info(
"Service-defined UDAs exist: you can optionally use the "
"`bugwarrior-uda` command to export a list of UDAs you can "
"add to your taskrc file."
)
static_fields = ["priority"]
if conf.has_option(main_section, "static_fields"):
static_fields = conf.get(main_section, "static_fields").split(",")
# Before running CRUD operations, call the pre_import hook(s).
run_hooks(conf, "pre_import")
notify = _bool_option("notifications", "notifications", False) and not dry_run
tw = TaskWarriorShellout(
config_filename=get_taskrc_path(conf, main_section),
config_overrides=uda_list,
marshal=True,
)
legacy_matching = _bool_option(main_section, "legacy_matching", False)
merge_annotations = _bool_option(main_section, "merge_annotations", True)
merge_tags = _bool_option(main_section, "merge_tags", True)
issue_updates = {
"new": [],
"existing": [],
"changed": [],
"closed": get_managed_task_uuids(tw, key_list, legacy_matching),
}
for issue in issue_generator:
# We received this issue from The Internet, but we're not sure what
# kind of encoding the service providers may have handed us. Let's try
# and decode all byte strings from UTF8 off the bat. If we encounter
# other encodings in the wild in the future, we can revise the handling
# here. https://github.com/ralphbean/bugwarrior/issues/350
for key in issue.keys():
if isinstance(issue[key], six.binary_type):
try:
issue[key] = issue[key].decode("utf-8")
except UnicodeDecodeError:
log.warn("Failed to interpret %r as utf-8" % key)
try:
existing_uuid = find_local_uuid(
tw, key_list, issue, legacy_matching=legacy_matching
)
issue_dict = dict(issue)
_, task = tw.get_task(uuid=existing_uuid)
# Drop static fields from the upstream issue. We don't want to
# overwrite local changes to fields we declare static.
for field in static_fields:
del issue_dict[field]
# Merge annotations & tags from online into our task object
if merge_annotations:
merge_left("annotations", task, issue_dict, hamming=True)
if merge_tags:
merge_left("tags", task, issue_dict)
issue_dict.pop("annotations", None)
issue_dict.pop("tags", None)
task.update(issue_dict)
if task.get_changes(keep=True):
issue_updates["changed"].append(task)
else:
issue_updates["existing"].append(task)
if existing_uuid in issue_updates["closed"]:
issue_updates["closed"].remove(existing_uuid)
except MultipleMatches as e:
log.exception("Multiple matches: %s", six.text_type(e))
except NotFound:
issue_updates["new"].append(dict(issue))
notreally = " (not really)" if dry_run else ""
# Add new issues
log.info("Adding %i tasks", len(issue_updates["new"]))
for issue in issue_updates["new"]:
log.info("Adding task %s%s", issue["description"], notreally)
if dry_run:
continue
if notify:
send_notification(issue, "Created", conf)
try:
tw.task_add(**issue)
except TaskwarriorError as e:
log.exception("Unable to add task: %s" % e.stderr)
log.info("Updating %i tasks", len(issue_updates["changed"]))
for issue in issue_updates["changed"]:
changes = "; ".join(
[
"{field}: {f} -> {t}".format(field=field, f=repr(ch[0]), t=repr(ch[1]))
for field, ch in six.iteritems(issue.get_changes(keep=True))
]
)
log.info(
"Updating task %s, %s; %s%s",
six.text_type(issue["uuid"]),
issue["description"],
changes,
notreally,
)
if dry_run:
continue
try:
tw.task_update(issue)
except TaskwarriorError as e:
log.exception("Unable to modify task: %s" % e.stderr)
log.info("Closing %i tasks", len(issue_updates["closed"]))
for issue in issue_updates["closed"]:
_, task_info = tw.get_task(uuid=issue)
log.info(
"Completing task %s %s%s",
issue,
task_info.get("description", ""),
notreally,
)
if dry_run:
continue
if notify:
send_notification(task_info, "Completed", conf)
try:
tw.task_done(uuid=issue)
except TaskwarriorError as e:
log.exception("Unable to close task: %s" % e.stderr)
# Send notifications
if notify:
only_on_new_tasks = _bool_option("notifications", "only_on_new_tasks", False)
if (
not only_on_new_tasks
or len(issue_updates["new"])
+ len(issue_updates["changed"])
+ len(issue_updates["closed"])
> 0
):
send_notification(
dict(
description="New: %d, Changed: %d, Completed: %d"
% (
len(issue_updates["new"]),
len(issue_updates["changed"]),
len(issue_updates["closed"]),
)
),
"bw_finished",
conf,
)
|
def synchronize(issue_generator, conf, main_section, dry_run=False):
def _bool_option(section, option, default):
try:
return asbool(conf.get(section, option))
except (NoSectionError, NoOptionError):
return default
targets = [t.strip() for t in conf.get(main_section, "targets").split(",")]
services = set([conf.get(target, "service") for target in targets])
key_list = build_key_list(services)
uda_list = build_uda_config_overrides(services)
if uda_list:
log.info(
"Service-defined UDAs exist: you can optionally use the "
"`bugwarrior-uda` command to export a list of UDAs you can "
"add to your taskrc file."
)
static_fields = ["priority"]
if conf.has_option(main_section, "static_fields"):
static_fields = conf.get(main_section, "static_fields").split(",")
# Before running CRUD operations, call the pre_import hook(s).
run_hooks(conf, "pre_import")
notify = _bool_option("notifications", "notifications", False) and not dry_run
tw = TaskWarriorShellout(
config_filename=get_taskrc_path(conf, main_section),
config_overrides=uda_list,
marshal=True,
)
legacy_matching = _bool_option(main_section, "legacy_matching", False)
merge_annotations = _bool_option(main_section, "merge_annotations", True)
merge_tags = _bool_option(main_section, "merge_tags", True)
issue_updates = {
"new": [],
"existing": [],
"changed": [],
"closed": get_managed_task_uuids(tw, key_list, legacy_matching),
}
for issue in issue_generator:
try:
existing_uuid = find_local_uuid(
tw, key_list, issue, legacy_matching=legacy_matching
)
issue_dict = dict(issue)
_, task = tw.get_task(uuid=existing_uuid)
# Drop static fields from the upstream issue. We don't want to
# overwrite local changes to fields we declare static.
for field in static_fields:
del issue_dict[field]
# Merge annotations & tags from online into our task object
if merge_annotations:
merge_left("annotations", task, issue_dict, hamming=True)
if merge_tags:
merge_left("tags", task, issue_dict)
issue_dict.pop("annotations", None)
issue_dict.pop("tags", None)
task.update(issue_dict)
if task.get_changes(keep=True):
issue_updates["changed"].append(task)
else:
issue_updates["existing"].append(task)
if existing_uuid in issue_updates["closed"]:
issue_updates["closed"].remove(existing_uuid)
except MultipleMatches as e:
log.exception("Multiple matches: %s", six.text_type(e))
except NotFound:
issue_updates["new"].append(dict(issue))
notreally = " (not really)" if dry_run else ""
# Add new issues
log.info("Adding %i tasks", len(issue_updates["new"]))
for issue in issue_updates["new"]:
log.info("Adding task %s%s", issue["description"], notreally)
if dry_run:
continue
if notify:
send_notification(issue, "Created", conf)
try:
tw.task_add(**issue)
except TaskwarriorError as e:
log.exception("Unable to add task: %s" % e.stderr)
log.info("Updating %i tasks", len(issue_updates["changed"]))
for issue in issue_updates["changed"]:
changes = "; ".join(
[
"{field}: {f} -> {t}".format(field=field, f=repr(ch[0]), t=repr(ch[1]))
for field, ch in six.iteritems(issue.get_changes(keep=True))
]
)
log.info(
"Updating task %s, %s; %s%s",
six.text_type(issue["uuid"]),
issue["description"],
changes,
notreally,
)
if dry_run:
continue
try:
tw.task_update(issue)
except TaskwarriorError as e:
log.exception("Unable to modify task: %s" % e.stderr)
log.info("Closing %i tasks", len(issue_updates["closed"]))
for issue in issue_updates["closed"]:
_, task_info = tw.get_task(uuid=issue)
log.info(
"Completing task %s %s%s",
issue,
task_info.get("description", ""),
notreally,
)
if dry_run:
continue
if notify:
send_notification(task_info, "Completed", conf)
try:
tw.task_done(uuid=issue)
except TaskwarriorError as e:
log.exception("Unable to close task: %s" % e.stderr)
# Send notifications
if notify:
only_on_new_tasks = _bool_option("notifications", "only_on_new_tasks", False)
if (
not only_on_new_tasks
or len(issue_updates["new"])
+ len(issue_updates["changed"])
+ len(issue_updates["closed"])
> 0
):
send_notification(
dict(
description="New: %d, Changed: %d, Completed: %d"
% (
len(issue_updates["new"]),
len(issue_updates["changed"]),
len(issue_updates["closed"]),
)
),
"bw_finished",
conf,
)
|
https://github.com/ralphbean/bugwarrior/issues/350
|
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 861, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 734, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 465, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 329, in getMessage
msg = msg % self.args
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc2 in position 37: ordinal not in range(128)
|
UnicodeDecodeError
|
def send_notification(issue, op, conf):
notify_backend = conf.get("notifications", "backend")
if notify_backend == "pynotify":
warnings.warn(
"pynotify is deprecated. Use backend=gobject. "
"See https://github.com/ralphbean/bugwarrior/issues/336"
)
notify_backend = "gobject"
# Notifications for growlnotify on Mac OS X
if notify_backend == "growlnotify":
import gntp.notifier
growl = gntp.notifier.GrowlNotifier(
applicationName="Bugwarrior",
notifications=["New Updates", "New Messages"],
defaultNotifications=["New Messages"],
)
growl.register()
if op == "bw_finished":
growl.notify(
noteType="New Messages",
title="Bugwarrior",
description="Finished querying for new issues.\n%s"
% issue["description"],
sticky=asbool(
conf.get("notifications", "finished_querying_sticky", "True")
),
icon="https://upload.wikimedia.org/wikipedia/"
"en/5/59/Taskwarrior_logo.png",
priority=1,
)
return
message = "%s task: %s" % (op, issue["description"])
metadata = _get_metadata(issue)
if metadata is not None:
message += metadata
growl.notify(
noteType="New Messages",
title="Bugwarrior",
description=message,
sticky=asbool(conf.get("notifications", "task_crud_sticky", "True")),
icon="https://upload.wikimedia.org/wikipedia/en/5/59/Taskwarrior_logo.png",
priority=1,
)
return
elif notify_backend == "gobject":
_cache_logo()
import gi
gi.require_version("Notify", "0.7")
from gi.repository import Notify
Notify.init("bugwarrior")
if op == "bw finished":
message = "Finished querying for new issues.\n%s" % issue["description"]
else:
message = "%s task: %s" % (op, issue["description"])
metadata = _get_metadata(issue)
if metadata is not None:
message += metadata
Notify.Notification.new("Bugwarrior", message, logo_path).show()
|
def send_notification(issue, op, conf):
notify_backend = conf.get("notifications", "backend")
# Notifications for growlnotify on Mac OS X
if notify_backend == "growlnotify":
import gntp.notifier
growl = gntp.notifier.GrowlNotifier(
applicationName="Bugwarrior",
notifications=["New Updates", "New Messages"],
defaultNotifications=["New Messages"],
)
growl.register()
if op == "bw_finished":
growl.notify(
noteType="New Messages",
title="Bugwarrior",
description="Finished querying for new issues.\n%s"
% issue["description"],
sticky=asbool(
conf.get("notifications", "finished_querying_sticky", "True")
),
icon="https://upload.wikimedia.org/wikipedia/"
"en/5/59/Taskwarrior_logo.png",
priority=1,
)
return
message = "%s task: %s" % (op, issue["description"])
metadata = _get_metadata(issue)
if metadata is not None:
message += metadata
growl.notify(
noteType="New Messages",
title="Bugwarrior",
description=message,
sticky=asbool(conf.get("notifications", "task_crud_sticky", "True")),
icon="https://upload.wikimedia.org/wikipedia/en/5/59/Taskwarrior_logo.png",
priority=1,
)
return
elif notify_backend == "pynotify":
_cache_logo()
import pynotify
pynotify.init("bugwarrior")
if op == "bw finished":
message = "Finished querying for new issues.\n%s" % issue["description"]
else:
message = "%s task: %s" % (op, issue["description"])
metadata = _get_metadata(issue)
if metadata is not None:
message += metadata
pynotify.Notification("Bugwarrior", message, logo_path).show()
elif notify_backend == "gobject":
_cache_logo()
import gi
gi.require_version("Notify", "0.7")
from gi.repository import Notify
Notify.init("bugwarrior")
if op == "bw finished":
message = "Finished querying for new issues.\n%s" % issue["description"]
else:
message = "%s task: %s" % (op, issue["description"])
metadata = _get_metadata(issue)
if metadata is not None:
message += metadata
Notify.Notification.new("Bugwarrior", message, logo_path).show()
|
https://github.com/ralphbean/bugwarrior/issues/336
|
Traceback (most recent call last):
File "/usr/lib/python2.7/dist-packages/bugwarrior/command.py", line 56, in pull
synchronize(issue_generator, config, main_section, dry_run)
File "/usr/lib/python2.7/dist-packages/bugwarrior/db.py", line 366, in synchronize
send_notification(issue, 'Created', conf)
File "/usr/lib/python2.7/dist-packages/bugwarrior/notifications.py", line 93, in send_notification
import pynotify
File "/usr/lib/python2.7/dist-packages/gtk-2.0/pynotify/__init__.py", line 1, in <module>
from _pynotify import *
ImportError: could not import gobject (could not find _PyGObject_API object)
|
ImportError
|
def is_local(self):
return self.chain_id not in PUBLIC_CHAINS
|
def is_local(self):
# TODO: #1505 -- rethink this metaphor
return int(self.w3.net.version) not in PUBLIC_CHAINS
|
https://github.com/nucypher/nucypher/issues/2484
|
nucypher | Authenticating Ursula
geth | WARN [12-19|12:41:45.063] Served net_version conn=172.18.0.4:40246 reqid=3 t="12.63µs" err="the method net_version does not exist/is not available"
nucypher | Traceback (most recent call last):
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/signers/base.py", line 65, in from_signer_uri
nucypher | signer_class = cls._SIGNERS[scheme]
nucypher | KeyError: 'http'
nucypher |
nucypher | During handling of the above exception, another exception occurred:
nucypher |
nucypher | Traceback (most recent call last):
nucypher | File "/usr/local/bin/nucypher", line 11, in <module>
nucypher | sys.exit(nucypher_cli())
nucypher | File "/usr/local/lib/python3.7/site-packages/click/core.py", line 829, in __call__
nucypher | return self.main(*args, **kwargs)
nucypher | File "/usr/local/lib/python3.7/site-packages/click/core.py", line 782, in main
nucypher | rv = self.invoke(ctx)
nucypher | File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
nucypher | return _process_result(sub_ctx.command.invoke(sub_ctx))
nucypher | File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1259, in invoke
nucypher | return _process_result(sub_ctx.command.invoke(sub_ctx))
nucypher | File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1066, in invoke
nucypher | return ctx.invoke(self.callback, **ctx.params)
nucypher | File "/usr/local/lib/python3.7/site-packages/click/core.py", line 610, in invoke
nucypher | return callback(*args, **kwargs)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/cli/options.py", line 167, in wrapper
nucypher | return func(**kwargs)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/cli/options.py", line 167, in wrapper
nucypher | return func(**kwargs)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/cli/options.py", line 167, in wrapper
nucypher | return func(**kwargs)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/cli/commands/ursula.py", line 384, in run
nucypher | json_ipc=general_config.json_ipc)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/cli/commands/ursula.py", line 287, in create_character
nucypher | start_learning_now=load_seednodes)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/cli/utils.py", line 91, in make_cli_character
nucypher | **config_args)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/config/node.py", line 248, in __call__
nucypher | return self.produce(**character_kwargs)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/config/characters.py", line 109, in produce
nucypher | merged_parameters = self.generate_parameters(**overrides)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/config/node.py", line 336, in generate_parameters
nucypher | merged_parameters = {**self.static_payload(), **self.dynamic_payload, **overrides}
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/config/characters.py", line 104, in dynamic_payload
nucypher | return {**super().dynamic_payload, **payload}
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/config/node.py", line 446, in dynamic_payload
nucypher | signer = Signer.from_signer_uri(self.signer_uri, testnet=testnet)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/signers/base.py", line 71, in from_signer_uri
nucypher | signer = Web3Signer.from_signer_uri(uri=uri)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/signers/software.py", line 55, in from_signer_uri
nucypher | blockchain = BlockchainInterfaceFactory.get_or_create_interface(provider_uri=uri)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/interfaces.py", line 1127, in get_or_create_interface
nucypher | interface = cls.get_interface(provider_uri=provider_uri)
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/interfaces.py", line 1115, in get_interface
nucypher | interface.connect()
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/interfaces.py", line 335, in connect
nucypher | self.attach_middleware()
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/interfaces.py", line 282, in attach_middleware
nucypher | self.log.debug(f'Ethereum chain: {self.client.chain_name} (chain_id={chain_id}, poa={self.poa})')
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/clients.py", line 227, in chain_name
nucypher | chain_inventory = LOCAL_CHAINS if self.is_local else PUBLIC_CHAINS
nucypher | File "/usr/local/lib/python3.7/site-packages/nucypher/blockchain/eth/clients.py", line 477, in is_local
nucypher | return int(self.w3.net.version) not in PUBLIC_CHAINS
nucypher | File "/usr/local/lib/python3.7/site-packages/web3/net.py", line 44, in version
nucypher | return self._version()
nucypher | File "/usr/local/lib/python3.7/site-packages/web3/module.py", line 44, in caller
nucypher | result = w3.manager.request_blocking(method_str, params, error_formatters)
nucypher | File "/usr/local/lib/python3.7/site-packages/web3/manager.py", line 153, in request_blocking
nucypher | raise ValueError(response["error"])
nucypher | ValueError: {'code': -32601, 'message': 'the method net_version does not exist/is not available'}
|
KeyError
|
def get_external_ip_from_default_teacher(
network: str,
federated_only: bool = False,
registry: Optional[BaseContractRegistry] = None,
log: Logger = IP_DETECTION_LOGGER,
) -> Union[str, None]:
# Prevents circular import
from nucypher.characters.lawful import Ursula
if federated_only and registry:
raise ValueError("Federated mode must not be true if registry is provided.")
base_error = "Cannot determine IP using default teacher"
try:
top_teacher_url = RestMiddleware.TEACHER_NODES[network][0]
except IndexError:
log.debug(f'{base_error}: No teacher available for network "{network}".')
return
except KeyError:
log.debug(f'{base_error}: Unknown network "{network}".')
return
####
# TODO: Clean this mess #1481 (Federated Mode)
node_storage = LocalFileBasedNodeStorage(federated_only=federated_only)
Ursula.set_cert_storage_function(node_storage.store_node_certificate)
Ursula.set_federated_mode(federated_only)
#####
try:
teacher = Ursula.from_teacher_uri(
teacher_uri=top_teacher_url, federated_only=federated_only, min_stake=0
) # TODO: Handle customized min stake here.
except NodeSeemsToBeDown:
# Teacher is unreachable. Move on.
return
# TODO: Pass registry here to verify stake (not essential here since it's a hardcoded node)
result = _request_from_node(teacher=teacher)
return result
|
def get_external_ip_from_default_teacher(
network: str,
federated_only: bool = False,
log: Logger = IP_DETECTION_LOGGER,
registry: BaseContractRegistry = None,
) -> Union[str, None]:
from nucypher.characters.lawful import Ursula
if federated_only and registry:
raise ValueError("Federated mode must not be true if registry is provided.")
base_error = "Cannot determine IP using default teacher"
try:
top_teacher_url = RestMiddleware.TEACHER_NODES[network][0]
except IndexError:
log.debug(f'{base_error}: No teacher available for network "{network}".')
return
except KeyError:
log.debug(f'{base_error}: Unknown network "{network}".')
return
####
# TODO: Clean this mess #1481
node_storage = LocalFileBasedNodeStorage(federated_only=federated_only)
Ursula.set_cert_storage_function(node_storage.store_node_certificate)
Ursula.set_federated_mode(federated_only)
#####
teacher = Ursula.from_teacher_uri(
teacher_uri=top_teacher_url, federated_only=federated_only, min_stake=0
) # TODO: Handle customized min stake here.
# TODO: Pass registry here to verify stake (not essential here since it's a hardcoded node)
client = NucypherMiddlewareClient()
try:
response = client.get(
node_or_sprout=teacher, path=f"ping", timeout=2
) # TLS certificate logic within
except RestMiddleware.UnexpectedResponse:
# 404, 405, 500, All server response codes handled by will be caught here.
return # Default teacher does not support this request - just move on.
if response.status_code == 200:
try:
ip = str(ip_address(response.text))
except ValueError:
error = f"Default teacher at {top_teacher_url} returned an invalid IP response; Got {response.text}"
raise UnknownIPAddress(error)
log.info(
f"Fetched external IP address ({ip}) from default teacher ({top_teacher_url})."
)
return ip
else:
log.debug(
f"Failed to get external IP from teacher node ({response.status_code})"
)
|
https://github.com/nucypher/nucypher/issues/2529
|
Verbose mode is enabled
Authenticating Ursula
Qualifying worker
✓ Worker is bonded to 0x559A0408aAf3beAb55E80Bd5E2ea83D233f43F7d
✓ Worker is funded with 0.378943746597798709 ETH
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1466, in __attempt
teacher = cls.from_seed_and_stake_info(seed_uri=teacher_uri,
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1507, in from_seed_and_stake_info
certificate = network_middleware.get_certificate(host=host, port=port)
File "/usr/lib/python3.8/site-packages/nucypher/network/middleware.py", line 176, in get_certificate
seednode_certificate = ssl.get_server_certificate(addr=(host, port))
File "/usr/lib/python3.8/ssl.py", line 1483, in get_server_certificate
with create_connection(addr) as sock:
File "/usr/lib/python3.8/socket.py", line 808, in create_connection
raise err
File "/usr/lib/python3.8/socket.py", line 796, in create_connection
sock.connect(sa)
ConnectionRefusedError: [Errno 111] Connection refused
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/nucypher", line 8, in <module>
sys.exit(nucypher_cli())
File "/usr/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/usr/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/commands/ursula.py", line 390, in run
perform_startup_ip_check(emitter=emitter, ursula=URSULA, force=force)
File "/usr/lib/python3.8/site-packages/nucypher/cli/actions/configure.py", line 157, in perform_startup_ip_check
external_ip = determine_external_ip_address(network=ursula.domain, known_nodes=ursula.known_nodes)
File "/usr/lib/python3.8/site-packages/nucypher/utilities/networking.py", line 172, in determine_external_ip_address
rest_host = get_external_ip_from_default_teacher(network=network)
File "/usr/lib/python3.8/site-packages/nucypher/utilities/networking.py", line 105, in get_external_ip_from_default_teacher
teacher = Ursula.from_teacher_uri(teacher_uri=top_teacher_url,
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1481, in from_teacher_uri
return __attempt()
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1477, in __attempt
return __attempt(attempt=attempt + 1)
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1463, in __attempt
raise ConnectionRefusedError("Host {} Refused Connection".format(teacher_uri))
ConnectionRefusedError: Host https://mainnet.nucypher.network:9151 Refused Connection
|
ConnectionRefusedError
|
def get_external_ip_from_known_nodes(
known_nodes: FleetSensor, sample_size: int = 3, log: Logger = IP_DETECTION_LOGGER
) -> Union[str, None]:
"""
Randomly select a sample of peers to determine the external IP address
of this host. The first node to reply successfully will be used.
# TODO: Parallelize the requests and compare results.
"""
if len(known_nodes) < sample_size:
return # There are too few known nodes
sample = random.sample(list(known_nodes), sample_size)
client = NucypherMiddlewareClient()
for node in sample:
ip = _request_from_node(teacher=node, client=client)
if ip:
log.info(
f"Fetched external IP address ({ip}) from randomly selected known nodes."
)
return ip
|
def get_external_ip_from_known_nodes(
known_nodes: FleetSensor, sample_size: int = 3, log: Logger = IP_DETECTION_LOGGER
) -> Union[str, None]:
"""
Randomly select a sample of peers to determine the external IP address
of this host. The first node to reply successfully will be used.
# TODO: Parallelize the requests and compare results.
"""
ip = None
sample = random.sample(known_nodes, sample_size)
for node in sample:
ip = __request(url=node.rest_url())
if ip:
log.info(
f"Fetched external IP address ({ip}) from randomly selected known node(s)."
)
return ip
|
https://github.com/nucypher/nucypher/issues/2529
|
Verbose mode is enabled
Authenticating Ursula
Qualifying worker
✓ Worker is bonded to 0x559A0408aAf3beAb55E80Bd5E2ea83D233f43F7d
✓ Worker is funded with 0.378943746597798709 ETH
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1466, in __attempt
teacher = cls.from_seed_and_stake_info(seed_uri=teacher_uri,
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1507, in from_seed_and_stake_info
certificate = network_middleware.get_certificate(host=host, port=port)
File "/usr/lib/python3.8/site-packages/nucypher/network/middleware.py", line 176, in get_certificate
seednode_certificate = ssl.get_server_certificate(addr=(host, port))
File "/usr/lib/python3.8/ssl.py", line 1483, in get_server_certificate
with create_connection(addr) as sock:
File "/usr/lib/python3.8/socket.py", line 808, in create_connection
raise err
File "/usr/lib/python3.8/socket.py", line 796, in create_connection
sock.connect(sa)
ConnectionRefusedError: [Errno 111] Connection refused
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/nucypher", line 8, in <module>
sys.exit(nucypher_cli())
File "/usr/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/usr/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/commands/ursula.py", line 390, in run
perform_startup_ip_check(emitter=emitter, ursula=URSULA, force=force)
File "/usr/lib/python3.8/site-packages/nucypher/cli/actions/configure.py", line 157, in perform_startup_ip_check
external_ip = determine_external_ip_address(network=ursula.domain, known_nodes=ursula.known_nodes)
File "/usr/lib/python3.8/site-packages/nucypher/utilities/networking.py", line 172, in determine_external_ip_address
rest_host = get_external_ip_from_default_teacher(network=network)
File "/usr/lib/python3.8/site-packages/nucypher/utilities/networking.py", line 105, in get_external_ip_from_default_teacher
teacher = Ursula.from_teacher_uri(teacher_uri=top_teacher_url,
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1481, in from_teacher_uri
return __attempt()
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1477, in __attempt
return __attempt(attempt=attempt + 1)
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1463, in __attempt
raise ConnectionRefusedError("Host {} Refused Connection".format(teacher_uri))
ConnectionRefusedError: Host https://mainnet.nucypher.network:9151 Refused Connection
|
ConnectionRefusedError
|
def get_external_ip_from_centralized_source(
log: Logger = IP_DETECTION_LOGGER,
) -> Union[str, None]:
"""Use hardcoded URL to determine the external IP address of this host."""
ip = _request(url=CENTRALIZED_IP_ORACLE_URL)
if ip:
log.info(
f"Fetched external IP address ({ip}) from centralized source ({CENTRALIZED_IP_ORACLE_URL})."
)
return ip
|
def get_external_ip_from_centralized_source(
log: Logger = IP_DETECTION_LOGGER,
) -> Union[str, None]:
"""Use hardcoded URL to determine the external IP address of this host."""
endpoint = "https://ifconfig.me/"
ip = __request(url=endpoint)
if ip:
log.info(
f"Fetched external IP address ({ip}) from centralized source ({endpoint})."
)
return ip
|
https://github.com/nucypher/nucypher/issues/2529
|
Verbose mode is enabled
Authenticating Ursula
Qualifying worker
✓ Worker is bonded to 0x559A0408aAf3beAb55E80Bd5E2ea83D233f43F7d
✓ Worker is funded with 0.378943746597798709 ETH
Traceback (most recent call last):
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1466, in __attempt
teacher = cls.from_seed_and_stake_info(seed_uri=teacher_uri,
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1507, in from_seed_and_stake_info
certificate = network_middleware.get_certificate(host=host, port=port)
File "/usr/lib/python3.8/site-packages/nucypher/network/middleware.py", line 176, in get_certificate
seednode_certificate = ssl.get_server_certificate(addr=(host, port))
File "/usr/lib/python3.8/ssl.py", line 1483, in get_server_certificate
with create_connection(addr) as sock:
File "/usr/lib/python3.8/socket.py", line 808, in create_connection
raise err
File "/usr/lib/python3.8/socket.py", line 796, in create_connection
sock.connect(sa)
ConnectionRefusedError: [Errno 111] Connection refused
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/bin/nucypher", line 8, in <module>
sys.exit(nucypher_cli())
File "/usr/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/usr/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/options.py", line 169, in wrapper
return func(**kwargs)
File "/usr/lib/python3.8/site-packages/nucypher/cli/commands/ursula.py", line 390, in run
perform_startup_ip_check(emitter=emitter, ursula=URSULA, force=force)
File "/usr/lib/python3.8/site-packages/nucypher/cli/actions/configure.py", line 157, in perform_startup_ip_check
external_ip = determine_external_ip_address(network=ursula.domain, known_nodes=ursula.known_nodes)
File "/usr/lib/python3.8/site-packages/nucypher/utilities/networking.py", line 172, in determine_external_ip_address
rest_host = get_external_ip_from_default_teacher(network=network)
File "/usr/lib/python3.8/site-packages/nucypher/utilities/networking.py", line 105, in get_external_ip_from_default_teacher
teacher = Ursula.from_teacher_uri(teacher_uri=top_teacher_url,
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1481, in from_teacher_uri
return __attempt()
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1477, in __attempt
return __attempt(attempt=attempt + 1)
File "/usr/lib/python3.8/site-packages/nucypher/characters/lawful.py", line 1463, in __attempt
raise ConnectionRefusedError("Host {} Refused Connection".format(teacher_uri))
ConnectionRefusedError: Host https://mainnet.nucypher.network:9151 Refused Connection
|
ConnectionRefusedError
|
def up(
general_config,
staker_options,
config_file,
cloudprovider,
aws_profile,
remote_provider,
nucypher_image,
seed_network,
stakes,
wipe,
prometheus,
namespace,
envvars,
):
"""Creates workers for all stakes owned by the user for the given network."""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use this command. (Please run 'pip install ansible'.)",
color="red",
)
return
STAKEHOLDER = staker_options.create_character(emitter, config_file)
stakers = STAKEHOLDER.get_stakers()
if not stakers:
emitter.echo("No staking accounts found.")
return
staker_addresses = filter_staker_addresses(stakers, stakes)
config_file = config_file or StakeHolderConfiguration.default_filepath()
deployer = CloudDeployers.get_deployer(cloudprovider)(
emitter,
STAKEHOLDER,
config_file,
remote_provider,
nucypher_image,
seed_network,
aws_profile,
prometheus,
namespace=namespace,
network=STAKEHOLDER.network,
envvars=envvars,
)
if staker_addresses:
config = deployer.create_nodes(staker_addresses)
if config.get("instances") and len(config.get("instances")) >= len(
staker_addresses
):
emitter.echo("Nodes exist for all requested stakes", color="yellow")
deployer.deploy_nucypher_on_existing_nodes(staker_addresses, wipe_nucypher=wipe)
|
def up(
general_config,
staker_options,
config_file,
cloudprovider,
aws_profile,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
stakes,
wipe,
prometheus,
namespace,
):
"""Creates workers for all stakes owned by the user for the given network."""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use this command. (Please run 'pip install ansible'.)",
color="red",
)
return
STAKEHOLDER = staker_options.create_character(emitter, config_file)
stakers = STAKEHOLDER.get_stakers()
if not stakers:
emitter.echo("No staking accounts found.")
return
staker_addresses = filter_staker_addresses(stakers, stakes)
config_file = config_file or StakeHolderConfiguration.default_filepath()
deployer = CloudDeployers.get_deployer(cloudprovider)(
emitter,
STAKEHOLDER,
config_file,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
aws_profile,
prometheus,
namespace=namespace,
network=STAKEHOLDER.network,
)
if staker_addresses:
config = deployer.create_nodes(staker_addresses)
if config.get("instances") and len(config.get("instances")) >= len(
staker_addresses
):
emitter.echo("Nodes exist for all requested stakes", color="yellow")
deployer.deploy_nucypher_on_existing_nodes(staker_addresses, wipe_nucypher=wipe)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def create(
general_config,
cloudprovider,
aws_profile,
remote_provider,
nucypher_image,
seed_network,
prometheus,
count,
namespace,
network,
envvars,
):
"""Creates the required number of workers to be staked later under a namespace"""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use this command. (Please run 'pip install ansible'.)",
color="red",
)
return
deployer = CloudDeployers.get_deployer(cloudprovider)(
emitter,
None,
None,
remote_provider,
nucypher_image,
seed_network,
aws_profile,
prometheus,
namespace=namespace,
network=network,
envvars=envvars,
)
names = []
i = 1
while len(names) < count:
name = f"{namespace}-{network}-{i}"
if name not in deployer.config.get("instances", {}):
names.append(name)
i += 1
config = deployer.create_nodes(names, unstaked=True)
if config.get("instances") and len(config.get("instances")) >= count:
emitter.echo("The requested number of nodes now exist", color="green")
deployer.deploy_nucypher_on_existing_nodes(names)
|
def create(
general_config,
cloudprovider,
aws_profile,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
prometheus,
count,
namespace,
network,
):
"""Creates the required number of workers to be staked later under a namespace"""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use this command. (Please run 'pip install ansible'.)",
color="red",
)
return
deployer = CloudDeployers.get_deployer(cloudprovider)(
emitter,
None,
None,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
aws_profile,
prometheus,
namespace=namespace,
network=network,
)
if not namespace:
emitter.echo(
"A namespace is required. Choose something to help differentiate between hosts, such as their specific purpose, or even just today's date.",
color="red",
)
return
names = []
i = 1
while len(names) < count:
name = f"{namespace}-{network}-{i}"
if name not in deployer.config.get("instances", {}):
names.append(name)
i += 1
config = deployer.create_nodes(names, unstaked=True)
if config.get("instances") and len(config.get("instances")) >= count:
emitter.echo("The requested number of nodes now exist", color="green")
deployer.deploy_nucypher_on_existing_nodes(names)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def add(
general_config,
host_address,
login_name,
key_path,
ssh_port,
host_nickname,
namespace,
network,
):
"""Adds an existing node to the local config for future management."""
emitter = setup_emitter(general_config)
name = host_nickname
deployer = CloudDeployers.get_deployer("generic")(
emitter, None, None, namespace=namespace, network=network, action="add"
)
config = deployer.create_nodes([name], host_address, login_name, key_path, ssh_port)
emitter.echo(
f"Success. Now run `nucypher cloudworkers deploy --namespace {namespace} --remote-provider <an eth provider>` to deploy Nucypher on this node.",
color="green",
)
|
def add(
general_config,
host_address,
login_name,
key_path,
ssh_port,
host_nickname,
namespace,
network,
):
"""Adds an existing node to the local config for future management."""
emitter = setup_emitter(general_config)
name = f"{namespace}-{network}-{host_nickname}"
deployer = CloudDeployers.get_deployer("generic")(
emitter, None, None, namespace=namespace, network=network, action="add"
)
config = deployer.create_nodes([name], host_address, login_name, key_path, ssh_port)
emitter.echo(
f"Success. Now run `nucypher cloudworkers deploy --namespace {namespace} --remote-provider <an eth provider>` to deploy Nucypher on this node.",
color="green",
)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def add_for_stake(
general_config,
staker_options,
config_file,
staker_address,
host_address,
login_name,
key_path,
ssh_port,
namespace,
):
"""Sets an existing node as the host for the given staker address."""
emitter = setup_emitter(general_config)
STAKEHOLDER = staker_options.create_character(emitter, config_file)
stakers = STAKEHOLDER.get_stakers()
if not stakers:
emitter.echo("No staking accounts found.")
return
staker_addresses = filter_staker_addresses(stakers, [staker_address])
if not staker_addresses:
emitter.echo(
f"Could not find staker address: {staker_address} among your stakes. (try `nucypher stake --list`)",
color="red",
)
return
config_file = config_file or StakeHolderConfiguration.default_filepath()
deployer = CloudDeployers.get_deployer("generic")(
emitter,
STAKEHOLDER,
config_file,
namespace=namespace,
network=STAKEHOLDER.network,
action="add_for_stake",
)
config = deployer.create_nodes(
staker_addresses, host_address, login_name, key_path, ssh_port
)
|
def add_for_stake(
general_config,
staker_address,
host_address,
login_name,
key_path,
ssh_port,
namespace,
):
"""Sets an existing node as the host for the given staker address."""
emitter = setup_emitter(general_config)
STAKEHOLDER = staker_options.create_character(emitter, config_file)
stakers = STAKEHOLDER.get_stakers()
if not stakers:
emitter.echo("No staking accounts found.")
return
staker_addresses = filter_staker_addresses(stakers, [staker_address])
if not staker_addresses:
emitter.echo(
f"Could not find staker address: {staker_address} among your stakes. (try `nucypher stake --list`)",
color="red",
)
return
config_file = config_file or StakeHolderConfiguration.default_filepath()
deployer = CloudDeployers.get_deployer("generic")(
emitter,
STAKEHOLDER,
config_file,
namespace=namespace,
network=STAKEHOLDER.network,
action="add_for_stake",
)
config = deployer.create_nodes(
staker_addresses, host_address, login_name, key_path, ssh_port
)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def deploy(
general_config,
remote_provider,
nucypher_image,
seed_network,
wipe,
prometheus,
namespace,
network,
gas_strategy,
include_hosts,
envvars,
):
"""Deploys NuCypher on managed hosts."""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)",
color="red",
)
return
deployer = CloudDeployers.get_deployer("generic")(
emitter,
None,
None,
remote_provider,
nucypher_image,
seed_network,
prometheus=prometheus,
namespace=namespace,
network=network,
gas_strategy=gas_strategy,
envvars=envvars,
)
hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
for name, hostdata in [
(n, d) for n, d in deployer.config["instances"].items() if n in hostnames
]:
emitter.echo(f"\t{name}: {hostdata['publicaddress']}", color="yellow")
deployer.deploy_nucypher_on_existing_nodes(hostnames, wipe_nucypher=wipe)
|
def deploy(
general_config,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
wipe,
prometheus,
namespace,
network,
gas_strategy,
include_hosts,
):
"""Deploys NuCypher on managed hosts."""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)",
color="red",
)
return
deployer = CloudDeployers.get_deployer("generic")(
emitter,
None,
None,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
prometheus=prometheus,
namespace=namespace,
network=network,
gas_strategy=gas_strategy,
)
hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
for name, hostdata in [
(n, d) for n, d in deployer.config["instances"].items() if n in hostnames
]:
emitter.echo(f"\t{name}: {hostdata['publicaddress']}", color="yellow")
deployer.deploy_nucypher_on_existing_nodes(hostnames, wipe_nucypher=wipe)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def update(
general_config,
remote_provider,
nucypher_image,
seed_network,
wipe,
prometheus,
namespace,
network,
gas_strategy,
include_hosts,
envvars,
):
"""Updates existing installations of Nucypher on existing managed remote hosts."""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)",
color="red",
)
return
deployer = CloudDeployers.get_deployer("generic")(
emitter,
None,
None,
remote_provider,
nucypher_image,
seed_network,
prometheus=prometheus,
namespace=namespace,
network=network,
gas_strategy=gas_strategy,
envvars=envvars,
)
emitter.echo(f"updating the following existing hosts:")
hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
for name, hostdata in [
(n, d) for n, d in deployer.config["instances"].items() if n in hostnames
]:
emitter.echo(f"\t{name}: {hostdata['publicaddress']}", color="yellow")
deployer.update_nucypher_on_existing_nodes(hostnames)
|
def update(
general_config,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
wipe,
prometheus,
namespace,
network,
gas_strategy,
include_hosts,
):
"""Updates existing installations of Nucypher on existing managed remote hosts."""
emitter = setup_emitter(general_config)
if not CloudDeployers:
emitter.echo(
"Ansible is required to use `nucypher cloudworkers *` commands. (Please run 'pip install ansible'.)",
color="red",
)
return
deployer = CloudDeployers.get_deployer("generic")(
emitter,
None,
None,
remote_provider,
nucypher_image,
seed_network,
sentry_dsn,
prometheus=prometheus,
namespace=namespace,
network=network,
gas_strategy=gas_strategy,
)
emitter.echo(f"found deploying {nucypher_image} on the following existing hosts:")
hostnames = deployer.config["instances"].keys()
if include_hosts:
hostnames = include_hosts
for name, hostdata in [
(n, d) for n, d in deployer.config["instances"].items() if n in hostnames
]:
emitter.echo(f"\t{name}: {hostdata['publicaddress']}", color="yellow")
deployer.update_nucypher_on_existing_nodes(hostnames)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def __init__(
self,
emitter,
stakeholder,
stakeholder_config_path,
blockchain_provider=None,
nucypher_image=None,
seed_network=False,
profile=None,
prometheus=False,
pre_config=False,
network=None,
namespace=None,
gas_strategy=None,
action=None,
envvars=None,
):
self.emitter = emitter
self.stakeholder = stakeholder
self.network = network
self.namespace = namespace or "local-stakeholders"
self.action = action
self.envvars = envvars or []
if self.envvars:
if not all([(len(v.split("=")) == 2) for v in self.envvars]):
raise ValueError(
"Improperly specified environment variables: --env variables must be specified in pairs as `<name>=<value>`"
)
self.envvars = [v.split("=") for v in (self.envvars)]
self.config_filename = f"{self.network}-{self.namespace}.json"
self.created_new_nodes = False
# the keys in this dict are used as search patterns by the anisble result collector and it will return
# these values for each node if it happens upon them in some output
self.output_capture = {
"worker address": [],
"rest url": [],
"nucypher version": [],
"nickname": [],
}
if pre_config:
self.config = pre_config
self.namespace_network = self.config.get("namespace")
return
# where we save our state data so we can remember the resources we created for future use
self.config_path = os.path.join(
self.network_config_path, self.namespace, self.config_filename
)
self.config_dir = os.path.dirname(self.config_path)
if os.path.exists(self.config_path):
self.config = json.load(open(self.config_path))
self.namespace_network = self.config["namespace"]
else:
self.namespace_network = (
f"{self.network}-{self.namespace}-{maya.now().date.isoformat()}"
)
self.emitter.echo(
f"Configuring Cloud Deployer with namespace: '{self.namespace_network}'"
)
time.sleep(3)
self.config = {
"namespace": self.namespace_network,
"keyringpassword": b64encode(os.urandom(64)).decode("utf-8"),
"ethpassword": b64encode(os.urandom(64)).decode("utf-8"),
}
# configure provider specific attributes
self._configure_provider_params(profile)
# if certain config options have been specified with this invocation,
# save these to update host specific variables before deployment
# to allow for individual host config differentiation
self.host_level_overrides = {
"blockchain_provider": blockchain_provider,
"nucypher_image": nucypher_image,
"gas_strategy": f"--gas-strategy {gas_strategy}" if gas_strategy else "",
}
self.config["blockchain_provider"] = (
blockchain_provider
or self.config.get("blockchain_provider")
or f"/root/.local/share/geth/.ethereum/{self.chain_name}/geth.ipc"
) # the default for nodes that run their own geth container
self.config["nucypher_image"] = (
nucypher_image
or self.config.get("nucypher_image")
or "nucypher/nucypher:latest"
)
self.config["gas_strategy"] = (
f"--gas-strategy {gas_strategy}"
if gas_strategy
else self.config.get("gas-strategy", "")
)
self.config["seed_network"] = (
seed_network if seed_network is not None else self.config.get("seed_network")
)
if not self.config["seed_network"]:
self.config.pop("seed_node", None)
self.nodes_are_decentralized = "geth.ipc" in self.config["blockchain_provider"]
self.config["stakeholder_config_file"] = stakeholder_config_path
self.config["use-prometheus"] = prometheus
# add instance key as host_nickname for use in inventory
if self.config.get("instances"):
for k, v in self.config["instances"].items():
self.config["instances"][k]["host_nickname"] = k
self._write_config()
|
def __init__(
self,
emitter,
stakeholder,
stakeholder_config_path,
blockchain_provider=None,
nucypher_image=None,
seed_network=False,
sentry_dsn=None,
profile=None,
prometheus=False,
pre_config=False,
network=None,
namespace=None,
gas_strategy=None,
action=None,
):
self.emitter = emitter
self.stakeholder = stakeholder
self.network = network
self.namespace = namespace or "local-stakeholders"
self.action = action
self.config_filename = f"{self.network}-{self.namespace}.json"
self.created_new_nodes = False
# the keys in this dict are used as search patterns by the anisble result collector and it will return
# these values for each node if it happens upon them in some output
self.output_capture = {
"worker address": [],
"rest url": [],
"nucypher version": [],
"nickname": [],
}
if pre_config:
self.config = pre_config
self.namespace_network = self.config.get("namespace")
return
# where we save our state data so we can remember the resources we created for future use
self.config_path = os.path.join(
self.network_config_path, self.namespace, self.config_filename
)
self.config_dir = os.path.dirname(self.config_path)
if os.path.exists(self.config_path):
self.config = json.load(open(self.config_path))
self.namespace_network = self.config["namespace"]
else:
self.namespace_network = (
f"{self.network}-{self.namespace}-{maya.now().date.isoformat()}"
)
self.emitter.echo(
f"Configuring Cloud Deployer with namespace: '{self.namespace_network}'"
)
time.sleep(3)
self.config = {
"namespace": self.namespace_network,
"keyringpassword": b64encode(os.urandom(64)).decode("utf-8"),
"ethpassword": b64encode(os.urandom(64)).decode("utf-8"),
}
# configure provider specific attributes
self._configure_provider_params(profile)
# if certain config options have been specified with this invocation,
# save these to update host specific variables before deployment
# to allow for individual host config differentiation
self.host_level_overrides = {
"blockchain_provider": blockchain_provider,
"nucypher_image": nucypher_image,
"sentry_dsn": sentry_dsn,
"gas_strategy": f"--gas-strategy {gas_strategy}" if gas_strategy else "",
}
self.config["blockchain_provider"] = (
blockchain_provider
or self.config.get("blockchain_provider")
or f"/root/.local/share/geth/.ethereum/{self.chain_name}/geth.ipc"
) # the default for nodes that run their own geth container
self.config["nucypher_image"] = (
nucypher_image
or self.config.get("nucypher_image")
or "nucypher/nucypher:latest"
)
self.config["sentry_dsn"] = sentry_dsn or self.config.get("sentry_dsn")
self.config["gas_strategy"] = (
f"--gas-strategy {gas_strategy}"
if gas_strategy
else self.config.get("gas-strategy", "")
)
self.config["seed_network"] = (
seed_network if seed_network is not None else self.config.get("seed_network")
)
if not self.config["seed_network"]:
self.config.pop("seed_node", None)
self.nodes_are_decentralized = "geth.ipc" in self.config["blockchain_provider"]
self.config["stakeholder_config_file"] = stakeholder_config_path
self.config["use-prometheus"] = prometheus
# add instance key as host_nickname for use in inventory
if self.config.get("instances"):
for k, v in self.config["instances"].items():
self.config["instances"][k]["host_nickname"] = k
self._write_config()
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def network_config_path(self):
return Path(DEFAULT_CONFIG_ROOT).joinpath(NODE_CONFIG_STORAGE_KEY, self.network)
|
def network_config_path(self):
return os.path.join(DEFAULT_CONFIG_ROOT, NODE_CONFIG_STORAGE_KEY, self.network)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def inventory_path(self):
return str(
Path(DEFAULT_CONFIG_ROOT).joinpath(
NODE_CONFIG_STORAGE_KEY, f"{self.namespace_network}.ansible_inventory.yml"
)
)
|
def inventory_path(self):
return os.path.join(
DEFAULT_CONFIG_ROOT,
NODE_CONFIG_STORAGE_KEY,
f"{self.namespace_network}.ansible_inventory.yml",
)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def deploy_nucypher_on_existing_nodes(self, node_names, wipe_nucypher=False):
playbook = Path(DEPLOY_DIR).joinpath("ansible/worker/setup_remote_workers.yml")
# first update any specified input in our node config
for k, input_specified_value in self.host_level_overrides.items():
for node_name in node_names:
if self.config["instances"].get(node_name):
# if an instance already has a specified value, we only override
# it if that value was input for this command invocation
if input_specified_value:
self.config["instances"][node_name][k] = input_specified_value
elif not self.config["instances"][node_name].get(k):
self.config["instances"][node_name][k] = self.config[k]
self._write_config()
if self.created_new_nodes:
self.emitter.echo("--- Giving newly created nodes some time to get ready ----")
time.sleep(30)
self.emitter.echo(
"Running ansible deployment for all running nodes.", color="green"
)
if self.config.get("seed_network") is True and not self.config.get("seed_node"):
self.config["seed_node"] = list(self.config["instances"].values())[0][
"publicaddress"
]
self._write_config()
self.update_generate_inventory(node_names, wipe_nucypher=wipe_nucypher)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
|
def deploy_nucypher_on_existing_nodes(self, node_names, wipe_nucypher=False):
playbook = "deploy/ansible/worker/setup_remote_workers.yml"
# first update any specified input in our node config
for k, input_specified_value in self.host_level_overrides.items():
for node_name in node_names:
if self.config["instances"].get(node_name):
# if an instance already has a specified value, we only override
# it if that value was input for this command invocation
if input_specified_value:
self.config["instances"][node_name][k] = input_specified_value
elif not self.config["instances"][node_name].get(k):
self.config["instances"][node_name][k] = self.config[k]
self._write_config()
if self.created_new_nodes:
self.emitter.echo("--- Giving newly created nodes some time to get ready ----")
time.sleep(30)
self.emitter.echo(
"Running ansible deployment for all running nodes.", color="green"
)
if self.config.get("seed_network") is True and not self.config.get("seed_node"):
self.config["seed_node"] = list(self.config["instances"].values())[0][
"publicaddress"
]
self._write_config()
self.generate_ansible_inventory(node_names, wipe_nucypher=wipe_nucypher)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def update_nucypher_on_existing_nodes(self, node_names):
playbook = Path(DEPLOY_DIR).joinpath("ansible/worker/update_remote_workers.yml")
# first update any specified input in our node config
for k, input_specified_value in self.host_level_overrides.items():
for node_name in node_names:
if self.config["instances"].get(node_name):
# if an instance already has a specified value, we only override
# it if that value was input for this command invocation
if input_specified_value:
self.config["instances"][node_name][k] = input_specified_value
elif not self.config["instances"][node_name].get(k):
self.config["instances"][node_name][k] = self.config[k]
self._write_config()
if self.config.get("seed_network") is True and not self.config.get("seed_node"):
self.config["seed_node"] = list(self.config["instances"].values())[0][
"publicaddress"
]
self._write_config()
self.update_generate_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
|
def update_nucypher_on_existing_nodes(self, node_names):
playbook = "deploy/ansible/worker/update_remote_workers.yml"
# first update any specified input in our node config
for k, input_specified_value in self.host_level_overrides.items():
for node_name in node_names:
if self.config["instances"].get(node_name):
# if an instance already has a specified value, we only override
# it if that value was input for this command invocation
if input_specified_value:
self.config["instances"][node_name][k] = input_specified_value
elif not self.config["instances"][node_name].get(k):
self.config["instances"][node_name][k] = self.config[k]
self._write_config()
if self.config.get("seed_network") is True and not self.config.get("seed_node"):
self.config["seed_node"] = list(self.config["instances"].values())[0][
"publicaddress"
]
self._write_config()
self.generate_ansible_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def get_worker_status(self, node_names):
playbook = Path(DEPLOY_DIR).joinpath("ansible/worker/get_workers_status.yml")
self.update_generate_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter,
return_results=self.output_capture,
filter_output=["Print Ursula Status Data", "Print Last Log Line"],
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, playbook=playbook)
|
def get_worker_status(self, node_names):
playbook = "deploy/ansible/worker/get_workers_status.yml"
self.generate_ansible_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter,
return_results=self.output_capture,
filter_output=["Print Ursula Status Data", "Print Last Log Line"],
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, playbook=playbook)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def print_worker_logs(self, node_names):
playbook = Path(DEPLOY_DIR).joinpath("ansible/worker/get_worker_logs.yml")
self.update_generate_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, playbook=playbook)
|
def print_worker_logs(self, node_names):
playbook = "deploy/ansible/worker/get_worker_logs.yml"
self.generate_ansible_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.update_captured_instance_data(self.output_capture)
self.give_helpful_hints(node_names, playbook=playbook)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def backup_remote_data(self, node_names):
playbook = Path(DEPLOY_DIR).joinpath("ansible/worker/backup_remote_workers.yml")
self.update_generate_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
|
def backup_remote_data(self, node_names):
playbook = "deploy/ansible/worker/backup_remote_workers.yml"
self.generate_ansible_inventory(node_names)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.give_helpful_hints(node_names, backup=True, playbook=playbook)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def restore_from_backup(self, target_host, source_path):
playbook = Path(DEPLOY_DIR).joinpath(
"ansible/worker/restore_ursula_from_backup.yml"
)
self.update_generate_inventory([target_host], restore_path=source_path)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.give_helpful_hints([target_host], backup=True, playbook=playbook)
|
def restore_from_backup(self, target_host, source_path):
playbook = "deploy/ansible/worker/restore_ursula_from_backup.yml"
self.generate_ansible_inventory([target_host], restore_path=source_path)
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources=self.inventory_path)
callback = AnsiblePlayBookResultsCollector(
sock=self.emitter, return_results=self.output_capture
)
variable_manager = VariableManager(loader=loader, inventory=inventory)
executor = PlaybookExecutor(
playbooks=[playbook],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
passwords=dict(),
)
executor._tqm._stdout_callback = callback
executor.run()
self.give_helpful_hints([target_host], backup=True, playbook=playbook)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def give_helpful_hints(self, node_names, backup=False, playbook=None):
self.emitter.echo("some relevant info:")
self.emitter.echo(f' config file: "{self.config_path}"')
self.emitter.echo(f" inventory file: {self.inventory_path}", color="yellow")
if self.config.get("keypair_path"):
self.emitter.echo(
f" keypair file: {self.config['keypair_path']}", color="yellow"
)
if playbook:
self.emitter.echo(
" If you like, you can run the same playbook directly in ansible with the following:"
)
self.emitter.echo(f'\tansible-playbook -i "{self.inventory_path}" "{playbook}"')
self.emitter.echo(" You may wish to ssh into your running hosts:")
for node_name, host_data in [h for h in self.get_all_hosts() if h[0] in node_names]:
dep = CloudDeployers.get_deployer(host_data["provider"])(
self.emitter,
self.stakeholder,
self.config["stakeholder_config_file"],
pre_config=self.config,
namespace=self.namespace,
network=self.network,
)
self.emitter.echo(f"\t{dep.format_ssh_cmd(host_data)}", color="yellow")
if backup:
self.emitter.echo(
" *** Local backups containing sensitive data may have been created. ***",
color="red",
)
self.emitter.echo(
f" Backup data can be found here: {self.config_dir}/remote_worker_backups/"
)
|
def give_helpful_hints(self, node_names, backup=False, playbook=None):
self.emitter.echo("some relevant info:")
self.emitter.echo(f' config file: "{self.config_path}"')
self.emitter.echo(f" inventory file: {self.inventory_path}", color="yellow")
if self.config.get("keypair_path"):
self.emitter.echo(
f" keypair file: {self.config['keypair_path']}", color="yellow"
)
if playbook:
self.emitter.echo(
" If you like, you can run the same playbook directly in ansible with the following:"
)
self.emitter.echo(f'\tansible-playbook -i "{self.inventory_path}" "{playbook}"')
self.emitter.echo(" You may wish to ssh into your running hosts:")
for node_name, host_data in [h for h in self.get_all_hosts() if h[0] in node_names]:
dep = CloudDeployers.get_deployer(host_data["provider"])(
self.emitter,
self.stakeholder,
self.config["stakeholder_config_file"],
pre_config=self.config,
namespace=self.namespace,
network=self.network,
)
self.emitter.echo(f"\t{dep.format_ssh_cmd(host_data)}", color="yellow")
if backup:
self.emitter.echo(
" *** Local backups containing sensitive data have been created. ***",
color="red",
)
self.emitter.echo(
f" Backup data can be found here: {self.config_dir}/remote_worker_backups/"
)
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def _create_keypair(self):
new_keypair_data = self.ec2Client.create_key_pair(
KeyName=f"{self.namespace_network}"
)
outpath = Path(DEFAULT_CONFIG_ROOT).joinpath(
NODE_CONFIG_STORAGE_KEY, f"{self.namespace_network}.awskeypair"
)
os.makedirs(os.path.dirname(outpath), exist_ok=True)
with open(outpath, "w") as outfile:
outfile.write(new_keypair_data["KeyMaterial"])
# set local keypair permissions https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
os.chmod(outpath, 0o400)
self.emitter.echo(
f"a new aws keypair was saved to {outpath}, keep it safe.", color="yellow"
)
return new_keypair_data["KeyName"], outpath
|
def _create_keypair(self):
new_keypair_data = self.ec2Client.create_key_pair(
KeyName=f"{self.namespace_network}"
)
outpath = os.path.join(
DEFAULT_CONFIG_ROOT,
NODE_CONFIG_STORAGE_KEY,
f"{self.namespace_network}.awskeypair",
)
os.makedirs(os.path.dirname(outpath), exist_ok=True)
with open(outpath, "w") as outfile:
outfile.write(new_keypair_data["KeyMaterial"])
# set local keypair permissions https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
os.chmod(outpath, 0o400)
self.emitter.echo(
f"a new aws keypair was saved to {outpath}, keep it safe.", color="yellow"
)
return new_keypair_data["KeyName"], outpath
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def _delete_keypair(self):
# only use self.namespace here to avoid accidental deletions of pre-existing keypairs
deleted_keypair_data = self.ec2Client.delete_key_pair(
KeyName=f"{self.namespace_network}"
)
if deleted_keypair_data["HTTPStatusCode"] == 200:
outpath = Path(DEFAULT_CONFIG_ROOT).joinpath(
NODE_CONFIG_STORAGE_KEY, f"{self.namespace_network}.awskeypair"
)
os.remove(outpath)
self.emitter.echo(f"keypair at {outpath}, was deleted", color="yellow")
|
def _delete_keypair(self):
# only use self.namespace here to avoid accidental deletions of pre-existing keypairs
deleted_keypair_data = self.ec2Client.delete_key_pair(
KeyName=f"{self.namespace_network}"
)
if deleted_keypair_data["HTTPStatusCode"] == 200:
outpath = os.path.join(
DEFAULT_CONFIG_ROOT,
NODE_CONFIG_STORAGE_KEY,
f"{self.namespace_network}.awskeypair",
)
os.remove(outpath)
self.emitter.echo(f"keypair at {outpath}, was deleted", color="yellow")
|
https://github.com/nucypher/nucypher/issues/2473
|
Traceback (most recent call last):
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/k/.local/share/virtualenvs/nucypher-iNjwDRsw/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
File "/home/k/Git/nucypher/nucypher/cli/options.py", line 167, in wrapper
return func(**kwargs)
TypeError: add_for_stake() got an unexpected keyword argument 'config_file'
|
TypeError
|
def escape_format_string(cls, string):
"""
Escapes all curly braces from a PEP-3101's format string.
"""
escaped_string = string.replace("{", "{{").replace("}", "}}")
return escaped_string
|
def escape_format_string(cls, string):
"""
Escapes curly braces from a PEP-3101's format string when there's a sequence of odd length
"""
def escape_group_of_curly_braces(match):
curlies = match.group()
if len(curlies) % 2 == 1:
curlies += curlies
return curlies
escaped_string = cls.CURLY_BRACES_REGEX.sub(escape_group_of_curly_braces, string)
return escaped_string
|
https://github.com/nucypher/nucypher/issues/2404
|
crawler_1 | Temporarily disabling observer <twisted.logger._file.FileLogObserver object at 0x7f2956104ef0> due to exception: [Failure instance: Traceback: <class 'ValueError'>: Single '}' encountered in format string
crawler_1 | /usr/local/lib/python3.7/site-packages/monitor/crawler.py:323:learn_from_teacher_node
crawler_1 | /usr/local/lib/python3.7/site-packages/nucypher/network/nodes.py:794:learn_from_teacher_node
crawler_1 | /usr/local/lib/python3.7/site-packages/twisted/logger/_logger.py:238:warn
crawler_1 | /usr/local/lib/python3.7/site-packages/twisted/logger/_logger.py:144:emit
crawler_1 | --- <exception caught here> ---
crawler_1 | /usr/local/lib/python3.7/site-packages/twisted/logger/_observer.py:131:__call__
crawler_1 | /usr/local/lib/python3.7/site-packages/twisted/logger/_file.py:50:__call__
crawler_1 | /usr/local/lib/python3.7/site-packages/twisted/logger/_json.py:252:<lambda>
crawler_1 | /usr/local/lib/python3.7/site-packages/twisted/logger/_json.py:203:eventAsJSON
crawler_1 | /usr/local/lib/python3.7/site-packages/twisted/logger/_flatten.py:87:flattenEvent
crawler_1 | ]
crawler_1 | Traceback (most recent call last):
crawler_1 | File "/usr/local/lib/python3.7/site-packages/monitor/crawler.py", line 323, in learn_from_teacher_node
crawler_1 | new_nodes = super().learn_from_teacher_node(*args, **kwargs)
crawler_1 | File "/usr/local/lib/python3.7/site-packages/nucypher/network/nodes.py", line 794, in learn_from_teacher_node
crawler_1 | self.log.warn(f"Teacher {str(current_teacher)} is invalid: {bytes(current_teacher)}:{e}.")
crawler_1 | File "/usr/local/lib/python3.7/site-packages/twisted/logger/_logger.py", line 238, in warn
crawler_1 | self.emit(LogLevel.warn, format, **kwargs)
crawler_1 | File "/usr/local/lib/python3.7/site-packages/twisted/logger/_logger.py", line 144, in emit
crawler_1 | self.observer(event)
crawler_1 | --- <exception caught here> ---
crawler_1 | File "/usr/local/lib/python3.7/site-packages/twisted/logger/_observer.py", line 131, in __call__
crawler_1 | observer(event)
crawler_1 | File "/usr/local/lib/python3.7/site-packages/twisted/logger/_file.py", line 50, in __call__
crawler_1 | text = self.formatEvent(event)
crawler_1 | File "/usr/local/lib/python3.7/site-packages/twisted/logger/_json.py", line 252, in <lambda>
crawler_1 | lambda event: u"{0}{1}\n".format(recordSeparator, eventAsJSON(event))
crawler_1 | File "/usr/local/lib/python3.7/site-packages/twisted/logger/_json.py", line 203, in eventAsJSON
crawler_1 | flattenEvent(event)
crawler_1 | File "/usr/local/lib/python3.7/site-packages/twisted/logger/_flatten.py", line 87, in flattenEvent
crawler_1 | aFormatter.parse(event["log_format"])
crawler_1 | builtins.ValueError: Single '}' encountered in format string
|
ValueError
|
def set_min_rate(
general_config: GroupGeneralConfig,
transacting_staker_options: TransactingStakerOptions,
config_file,
force,
min_rate,
):
"""Staker sets the minimum acceptable fee rate for their associated worker."""
# Setup
emitter = setup_emitter(general_config)
STAKEHOLDER = transacting_staker_options.create_character(emitter, config_file)
blockchain = transacting_staker_options.get_blockchain()
client_account, staking_address = select_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=transacting_staker_options.staker_options.staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
if not min_rate:
paint_min_rate(emitter, STAKEHOLDER)
minimum, _default, maximum = STAKEHOLDER.policy_agent.get_fee_rate_range()
lower_bound_in_gwei = Web3.fromWei(minimum, "gwei")
upper_bound_in_gwei = Web3.fromWei(maximum, "gwei")
min_rate = click.prompt(
PROMPT_STAKER_MIN_POLICY_RATE,
type=DecimalRange(min=lower_bound_in_gwei, max=upper_bound_in_gwei),
)
min_rate = int(Web3.toWei(Decimal(min_rate), "gwei"))
if not force:
min_rate_in_gwei = Web3.fromWei(min_rate, "gwei")
click.confirm(
CONFIRM_NEW_MIN_POLICY_RATE.format(min_rate=min_rate_in_gwei), abort=True
)
password = get_password(
stakeholder=STAKEHOLDER,
blockchain=blockchain,
client_account=client_account,
hw_wallet=transacting_staker_options.hw_wallet,
)
STAKEHOLDER.assimilate(password=password)
receipt = STAKEHOLDER.set_min_fee_rate(min_rate=min_rate)
# Report Success
message = SUCCESSFUL_SET_MIN_POLICY_RATE.format(
min_rate=min_rate, staking_address=staking_address
)
emitter.echo(message, color="green")
paint_receipt_summary(
emitter=emitter,
receipt=receipt,
chain_name=blockchain.client.chain_name,
transaction_type="set_min_rate",
)
|
def set_min_rate(
general_config: GroupGeneralConfig,
transacting_staker_options: TransactingStakerOptions,
config_file,
force,
min_rate,
):
"""Staker sets the minimum acceptable fee rate for their associated worker."""
# Setup
emitter = setup_emitter(general_config)
STAKEHOLDER = transacting_staker_options.create_character(emitter, config_file)
blockchain = transacting_staker_options.get_blockchain()
client_account, staking_address = select_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=transacting_staker_options.staker_options.staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=force,
)
if not min_rate:
paint_min_rate(
emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address
)
# TODO check range
min_rate = click.prompt(PROMPT_STAKER_MIN_POLICY_RATE, type=WEI)
if not force:
click.confirm(CONFIRM_NEW_MIN_POLICY_RATE.format(min_rate=min_rate), abort=True)
password = get_password(
stakeholder=STAKEHOLDER,
blockchain=blockchain,
client_account=client_account,
hw_wallet=transacting_staker_options.hw_wallet,
)
STAKEHOLDER.assimilate(password=password)
receipt = STAKEHOLDER.set_min_fee_rate(min_rate=min_rate)
# Report Success
message = SUCCESSFUL_SET_MIN_POLICY_RATE.format(
min_rate=min_rate, staking_address=staking_address
)
emitter.echo(message, color="green")
paint_receipt_summary(
emitter=emitter,
receipt=receipt,
chain_name=blockchain.client.chain_name,
transaction_type="set_min_rate",
)
|
https://github.com/nucypher/nucypher/issues/2390
|
Traceback (most recent call last):
File "/home/tux/code/nucypher-prod/venv/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
[Previous line repeated 1 more time]
File "/home/tux/code/nucypher-prod/nucypher/cli/commands/stake.py", line 1263, in set_min_rate
paint_min_rate(emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address)
File "/home/tux/code/nucypher-prod/nucypher/cli/painting/staking.py", line 229, in paint_min_rate
minimum = policy_agent.min_fee_rate(staker_address)
AttributeError: 'PolicyManagerAgent' object has no attribute 'min_fee_rate'
|
AttributeError
|
def config(general_config, config_options, config_file):
"""View and optionally update the Ursula node's configuration."""
emitter = setup_emitter(general_config, config_options.worker_address)
if not config_file:
config_file = select_config_file(
emitter=emitter,
checksum_address=config_options.worker_address,
config_class=UrsulaConfiguration,
)
updates = config_options.get_updates()
get_or_update_configuration(
emitter=emitter,
config_class=UrsulaConfiguration,
filepath=config_file,
updates=updates,
)
|
def config(general_config, config_options, config_file):
"""View and optionally update the Ursula node's configuration."""
emitter = setup_emitter(general_config, config_options.worker_address)
if not config_file:
config_file = select_config_file(
emitter=emitter,
checksum_address=config_options.worker_address,
config_class=UrsulaConfiguration,
)
emitter.echo(f"Ursula Configuration {config_file} \n {'=' * 55}")
updates = config_options.get_updates()
get_or_update_configuration(
emitter=emitter,
config_class=UrsulaConfiguration,
filepath=config_file,
updates=updates,
)
|
https://github.com/nucypher/nucypher/issues/2390
|
Traceback (most recent call last):
File "/home/tux/code/nucypher-prod/venv/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
[Previous line repeated 1 more time]
File "/home/tux/code/nucypher-prod/nucypher/cli/commands/stake.py", line 1263, in set_min_rate
paint_min_rate(emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address)
File "/home/tux/code/nucypher-prod/nucypher/cli/painting/staking.py", line 229, in paint_min_rate
minimum = policy_agent.min_fee_rate(staker_address)
AttributeError: 'PolicyManagerAgent' object has no attribute 'min_fee_rate'
|
AttributeError
|
def paint_min_rate(emitter, staker):
paint_fee_rate_range(emitter, staker.policy_agent)
minimum = staker.min_fee_rate
raw_minimum = staker.raw_min_fee_rate
rate_payload = f"""
Minimum acceptable fee rate (set by staker for their associated worker):
~ Previously set ....... {prettify_eth_amount(raw_minimum)}
~ Effective ............ {prettify_eth_amount(minimum)}"""
emitter.echo(rate_payload)
|
def paint_min_rate(emitter, registry, policy_agent, staker_address):
paint_fee_rate_range(emitter, policy_agent)
minimum = policy_agent.min_fee_rate(staker_address)
raw_minimum = policy_agent.raw_min_fee_rate(staker_address)
rate_payload = f"""
Minimum acceptable fee rate (set by staker for their associated worker):
~ Previously set ....... {prettify_eth_amount(raw_minimum)}
~ Effective ............ {prettify_eth_amount(minimum)}"""
emitter.echo(rate_payload)
|
https://github.com/nucypher/nucypher/issues/2390
|
Traceback (most recent call last):
File "/home/tux/code/nucypher-prod/venv/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
[Previous line repeated 1 more time]
File "/home/tux/code/nucypher-prod/nucypher/cli/commands/stake.py", line 1263, in set_min_rate
paint_min_rate(emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address)
File "/home/tux/code/nucypher-prod/nucypher/cli/painting/staking.py", line 229, in paint_min_rate
minimum = policy_agent.min_fee_rate(staker_address)
AttributeError: 'PolicyManagerAgent' object has no attribute 'min_fee_rate'
|
AttributeError
|
def keep_learning_about_nodes(self):
"""
Continually learn about new nodes.
"""
# TODO: Allow the user to set eagerness? 1712
# TODO: Also, if we do allow eager, don't even defer; block right here.
self._learning_deferred = Deferred(
canceller=self._discovery_canceller
) # TODO: No longer relevant.
def _discover_or_abort(_first_result):
# self.log.debug(f"{self} learning at {datetime.datetime.now()}") # 1712
result = self.learn_from_teacher_node(
eager=False, canceller=self._discovery_canceller
)
# self.log.debug(f"{self} finished learning at {datetime.datetime.now()}") # 1712
return result
self._learning_deferred.addCallback(_discover_or_abort)
self._learning_deferred.addErrback(self.handle_learning_errors)
# Instead of None, we might want to pass something useful about the context.
# Alternately, it might be nice for learn_from_teacher_node to (some or all of the time) return a Deferred.
reactor.callInThread(self._learning_deferred.callback, None)
return self._learning_deferred
|
def keep_learning_about_nodes(self):
"""
Continually learn about new nodes.
"""
# TODO: Allow the user to set eagerness? 1712
# TODO: Also, if we do allow eager, don't even defer; block right here.
self._learning_deferred = Deferred(
canceller=self._discovery_canceller
) # TODO: No longer relevant.
def _discover_or_abort(_first_result):
self.log.debug(f"{self} learning at {datetime.datetime.now()}")
result = self.learn_from_teacher_node(
eager=False, canceller=self._discovery_canceller
)
self.log.debug(f"{self} finished learning at {datetime.datetime.now()}")
return result
self._learning_deferred.addCallback(_discover_or_abort)
self._learning_deferred.addErrback(self.handle_learning_errors)
# Instead of None, we might want to pass something useful about the context.
# Alternately, it might be nice for learn_from_teacher_node to (some or all of the time) return a Deferred.
reactor.callInThread(self._learning_deferred.callback, None)
return self._learning_deferred
|
https://github.com/nucypher/nucypher/issues/2390
|
Traceback (most recent call last):
File "/home/tux/code/nucypher-prod/venv/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
[Previous line repeated 1 more time]
File "/home/tux/code/nucypher-prod/nucypher/cli/commands/stake.py", line 1263, in set_min_rate
paint_min_rate(emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address)
File "/home/tux/code/nucypher-prod/nucypher/cli/painting/staking.py", line 229, in paint_min_rate
minimum = policy_agent.min_fee_rate(staker_address)
AttributeError: 'PolicyManagerAgent' object has no attribute 'min_fee_rate'
|
AttributeError
|
def _discover_or_abort(_first_result):
# self.log.debug(f"{self} learning at {datetime.datetime.now()}") # 1712
result = self.learn_from_teacher_node(
eager=False, canceller=self._discovery_canceller
)
# self.log.debug(f"{self} finished learning at {datetime.datetime.now()}") # 1712
return result
|
def _discover_or_abort(_first_result):
self.log.debug(f"{self} learning at {datetime.datetime.now()}")
result = self.learn_from_teacher_node(
eager=False, canceller=self._discovery_canceller
)
self.log.debug(f"{self} finished learning at {datetime.datetime.now()}")
return result
|
https://github.com/nucypher/nucypher/issues/2390
|
Traceback (most recent call last):
File "/home/tux/code/nucypher-prod/venv/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
[Previous line repeated 1 more time]
File "/home/tux/code/nucypher-prod/nucypher/cli/commands/stake.py", line 1263, in set_min_rate
paint_min_rate(emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address)
File "/home/tux/code/nucypher-prod/nucypher/cli/painting/staking.py", line 229, in paint_min_rate
minimum = policy_agent.min_fee_rate(staker_address)
AttributeError: 'PolicyManagerAgent' object has no attribute 'min_fee_rate'
|
AttributeError
|
def _make_rest_app(
datastore: Datastore, this_node, serving_domain: str, log: Logger
) -> Flask:
from nucypher.characters.lawful import Alice, Ursula
_alice_class = Alice
_node_class = Ursula
rest_app = Flask("ursula-service")
rest_app.config["MAX_CONTENT_LENGTH"] = MAX_UPLOAD_CONTENT_LENGTH
@rest_app.route("/public_information")
def public_information():
"""
REST endpoint for public keys and address.
"""
response = Response(
response=bytes(this_node), mimetype="application/octet-stream"
)
return response
@rest_app.route("/ping", methods=["POST"])
def ping():
"""
Asks this node: "Can you access my public information endpoint"?
"""
try:
requesting_ursula = Ursula.from_bytes(request.data)
requesting_ursula.mature()
except ValueError: # (ValueError)
return Response({"error": "Invalid Ursula"}, status=400)
else:
initiator_address, initiator_port = tuple(requesting_ursula.rest_interface)
# Compare requester and posted Ursula information
request_address = request.environ["REMOTE_ADDR"]
if request_address != initiator_address:
return Response({"error": "Suspicious origin address"}, status=400)
#
# Make a Sandwich
#
try:
# Fetch and store initiator's teacher certificate.
certificate = this_node.network_middleware.get_certificate(
host=initiator_address, port=initiator_port
)
certificate_filepath = this_node.node_storage.store_node_certificate(
certificate=certificate
)
requesting_ursula_bytes = (
this_node.network_middleware.client.node_information(
host=initiator_address,
port=initiator_port,
certificate_filepath=certificate_filepath,
)
)
except NodeSeemsToBeDown:
return Response({"error": "Unreachable node"}, status=400) # ... toasted
except InvalidNodeCertificate:
return Response(
{"error": "Invalid TLS certificate - missing checksum address"},
status=400,
) # ... invalid
# Compare the results of the outer POST with the inner GET... yum
if requesting_ursula_bytes == request.data:
return Response(status=200)
else:
return Response({"error": "Suspicious node"}, status=400)
@rest_app.route("/node_metadata", methods=["GET"])
def all_known_nodes():
headers = {"Content-Type": "application/octet-stream"}
if (
this_node._learning_deferred is not RELAX
and not this_node._learning_task.running
):
# TODO: Is this every something we don't want to do?
this_node.start_learning_loop()
if this_node.known_nodes.checksum is NO_KNOWN_NODES:
return Response(b"", headers=headers, status=204)
known_nodes_bytestring = this_node.bytestring_of_known_nodes()
signature = this_node.stamp(known_nodes_bytestring)
return Response(bytes(signature) + known_nodes_bytestring, headers=headers)
@rest_app.route("/node_metadata", methods=["POST"])
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
# log.debug("Learner already knew fleet state {}; doing nothing.".format(learner_fleet_state)) # 1712
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
sprouts = _node_class.batch_from_bytes(request.data)
for node in sprouts:
this_node.remember_node(node)
# TODO: What's the right status code here? 202? Different if we already knew about the node(s)?
return all_known_nodes()
@rest_app.route("/consider_arrangement", methods=["POST"])
def consider_arrangement():
from nucypher.policy.policies import Arrangement
arrangement = Arrangement.from_bytes(request.data)
# TODO: Look at the expiration and figure out if we're even staking that long. 1701
with datastore.describe(
PolicyArrangement, arrangement.id.hex(), writeable=True
) as new_policy_arrangement:
new_policy_arrangement.arrangement_id = arrangement.id.hex().encode()
new_policy_arrangement.expiration = arrangement.expiration
new_policy_arrangement.alice_verifying_key = (
arrangement.alice.stamp.as_umbral_pubkey()
)
# TODO: Fine, we'll add the arrangement here, but if we never hear from Alice again to enact it,
# we need to prune it at some point. #1700
headers = {"Content-Type": "application/octet-stream"}
# TODO: Make this a legit response #234.
return Response(
b"This will eventually be an actual acceptance of the arrangement.",
headers=headers,
)
@rest_app.route("/kFrag/<id_as_hex>", methods=["POST"])
def set_policy(id_as_hex):
"""
REST endpoint for setting a kFrag.
"""
policy_message_kit = UmbralMessageKit.from_bytes(request.data)
alices_verifying_key = policy_message_kit.sender_verifying_key
alice = _alice_class.from_public_keys(verifying_key=alices_verifying_key)
try:
cleartext = this_node.verify_from(alice, policy_message_kit, decrypt=True)
except InvalidSignature:
# TODO: Perhaps we log this? Essentially 355.
return Response(status_code=400)
if not this_node.federated_only:
# This splitter probably belongs somewhere canonical.
transaction_splitter = BytestringSplitter(32)
tx, kfrag_bytes = transaction_splitter(cleartext, return_remainder=True)
try:
# Get all of the arrangements and verify that we'll be paid.
# TODO: We'd love for this part to be impossible to reduce the risk of collusion. #1274
arranged_addresses = (
this_node.policy_agent.fetch_arrangement_addresses_from_policy_txid(
tx, timeout=this_node.synchronous_query_timeout
)
)
except TimeExhausted:
# Alice didn't pay. Return response with that weird status code.
this_node.suspicious_activities_witnessed["freeriders"].append(
(alice, f"No transaction matching {tx}.")
)
return Response(status=402)
this_node_has_been_arranged = (
this_node.checksum_address in arranged_addresses
)
if not this_node_has_been_arranged:
this_node.suspicious_activities_witnessed["freeriders"].append(
(
alice,
f"The transaction {tx} does not list me as a Worker - it lists {arranged_addresses}.",
)
)
return Response(status=402)
else:
_tx = NO_BLOCKCHAIN_CONNECTION
kfrag_bytes = cleartext
kfrag = KFrag.from_bytes(kfrag_bytes)
if not kfrag.verify(signing_pubkey=alices_verifying_key):
raise InvalidSignature("{} is invalid".format(kfrag))
with datastore.describe(
PolicyArrangement, id_as_hex, writeable=True
) as policy_arrangement:
if (
not policy_arrangement.alice_verifying_key
== alice.stamp.as_umbral_pubkey()
):
raise alice.SuspiciousActivity
policy_arrangement.kfrag = kfrag
# TODO: Sign the arrangement here. #495
return "" # TODO: Return A 200, with whatever policy metadata.
@rest_app.route("/kFrag/<id_as_hex>", methods=["DELETE"])
def revoke_arrangement(id_as_hex):
"""
REST endpoint for revoking/deleting a KFrag from a node.
"""
from nucypher.policy.collections import Revocation
revocation = Revocation.from_bytes(request.data)
log.info(
"Received revocation: {} -- for arrangement {}".format(
bytes(revocation).hex(), id_as_hex
)
)
# Check that the request is the same for the provided revocation
if not id_as_hex == revocation.arrangement_id.hex():
log.debug("Couldn't identify an arrangement with id {}".format(id_as_hex))
return Response(status_code=400)
try:
with datastore.describe(
PolicyArrangement, id_as_hex, writeable=True
) as policy_arrangement:
if revocation.verify_signature(policy_arrangement.alice_verifying_key):
policy_arrangement.delete()
except (DatastoreTransactionError, InvalidSignature) as e:
log.debug("Exception attempting to revoke: {}".format(e))
return Response(
response="KFrag not found or revocation signature is invalid.",
status=404,
)
else:
log.info("KFrag successfully removed.")
return Response(response="KFrag deleted!", status=200)
@rest_app.route("/kFrag/<id_as_hex>/reencrypt", methods=["POST"])
def reencrypt_via_rest(id_as_hex):
# Get Policy Arrangement
try:
arrangement_id = binascii.unhexlify(id_as_hex)
except (binascii.Error, TypeError):
return Response(response=b"Invalid arrangement ID", status=405)
try:
# Get KFrag
# TODO: Yeah, well, what if this arrangement hasn't been enacted? 1702
with datastore.describe(PolicyArrangement, id_as_hex) as policy_arrangement:
kfrag = policy_arrangement.kfrag
alice_verifying_key = policy_arrangement.alice_verifying_key
except RecordNotFound:
return Response(response=arrangement_id, status=404)
# Get Work Order
from nucypher.policy.collections import WorkOrder # Avoid circular import
alice_address = canonical_address_from_umbral_key(alice_verifying_key)
work_order_payload = request.data
work_order = WorkOrder.from_rest_payload(
arrangement_id=arrangement_id,
rest_payload=work_order_payload,
ursula=this_node,
alice_address=alice_address,
)
log.info(
f"Work Order from {work_order.bob}, signed {work_order.receipt_signature}"
)
# Re-encrypt
response = this_node._reencrypt(
kfrag=kfrag, work_order=work_order, alice_verifying_key=alice_verifying_key
)
# Now, Ursula saves this workorder to her database...
# Note: we give the work order a random ID to store it under.
with datastore.describe(
Workorder, str(uuid.uuid4()), writeable=True
) as new_workorder:
new_workorder.arrangement_id = work_order.arrangement_id
new_workorder.bob_verifying_key = work_order.bob.stamp.as_umbral_pubkey()
new_workorder.bob_signature = work_order.receipt_signature
headers = {"Content-Type": "application/octet-stream"}
return Response(headers=headers, response=response)
@rest_app.route("/treasure_map/<identifier>")
def provide_treasure_map(identifier):
headers = {"Content-Type": "application/octet-stream"}
try:
with datastore.describe(TreasureMap, identifier) as stored_treasure_map:
response = Response(stored_treasure_map.treasure_map, headers=headers)
log.info(f"{this_node} providing TreasureMap {identifier}")
except RecordNotFound:
log.info(
f"{this_node} doesn't have requested TreasureMap under {identifier}"
)
response = Response(
f"No Treasure Map with identifier {identifier}",
status=404,
headers=headers,
)
return response
@rest_app.route("/treasure_map/", methods=["POST"])
def receive_treasure_map():
"""
Okay, so we've received a TreasureMap to store. We begin verifying
the treasure map by first validating the request and the received
treasure map itself.
We set the datastore identifier as the HRAC iff the node is running
as a decentralized node. Otherwise, we use the map_id in
federated mode.
"""
if not this_node.federated_only:
from nucypher.policy.collections import SignedTreasureMap as _MapClass
else:
from nucypher.policy.collections import TreasureMap as _MapClass
# Step 1: First, we verify the signature of the received treasure map.
# This step also deserializes the treasure map iff it's signed correctly.
try:
received_treasure_map = _MapClass.from_bytes(
bytes_representation=request.data, verify=True
)
except _MapClass.InvalidSignature:
log.info(
f"Bad TreasureMap HRAC Signature; not storing for HRAC {received_treasure_map._hrac.hex()}"
)
return Response(
"This TreasureMap's HRAC is not properly signed.", status=401
)
# Additionally, we determine the map identifier from the type of node.
# If the node is federated, we also set the expiration for a week.
if not this_node.federated_only:
map_identifier = received_treasure_map._hrac.hex()
else:
map_identifier = received_treasure_map.public_id()
expiration_date = MayaDT.from_datetime(
datetime.utcnow() + timedelta(days=7)
)
# Step 2: Check if we already have the treasure map.
try:
with datastore.describe(TreasureMap, map_identifier) as stored_treasure_map:
if (
_MapClass.from_bytes(stored_treasure_map.treasure_map)
== received_treasure_map
):
return Response("Already have this map.", status=303)
except RecordNotFound:
# This appears to be a new treasure map that we don't have!
pass
# Step 3: If the node is decentralized, we check that the received
# treasure map is valid pursuant to an active policy.
# We also set the expiration from the data on the blockchain here.
if not this_node.federated_only:
policy_data, alice_checksum_address = this_node.policy_agent.fetch_policy(
received_treasure_map._hrac, with_owner=True
)
# If the Policy doesn't exist, the policy_data is all zeros.
if not policy_data[5]:
log.info(
f"TreasureMap is for non-existent Policy; not storing {map_identifier}"
)
return Response(
"The Policy for this TreasureMap doesn't exist.", status=409
)
# Check that this treasure map is from Alice per the Policy.
if not received_treasure_map.verify_blockchain_signature(
checksum_address=alice_checksum_address
):
log.info(f"Bad TreasureMap ID; not storing {map_identifier}")
return Response(
"This TreasureMap doesn't match a paid Policy.", status=402
)
# Check that this treasure map is valid for the Policy datetime and that it's not disabled.
if policy_data[0] or datetime.utcnow() >= datetime.utcfromtimestamp(
policy_data[5]
):
log.info(
f"Received TreasureMap for an expired/disabled policy; not storing {map_identifier}"
)
return Response(
"This TreasureMap is for an expired/disabled policy.", status=402
)
expiration_date = MayaDT.from_datetime(
datetime.utcfromtimestamp(policy_data[5])
)
# Step 4: Finally, we store our treasure map under its identifier!
log.info(f"{this_node} storing TreasureMap {map_identifier}")
with datastore.describe(
TreasureMap, map_identifier, writeable=True
) as new_treasure_map:
new_treasure_map.treasure_map = bytes(received_treasure_map)
new_treasure_map.expiration = expiration_date
return Response("Treasure map stored!", status=201)
@rest_app.route("/status/", methods=["GET"])
def status():
if request.args.get("json"):
payload = this_node.abridged_node_details()
response = jsonify(payload)
return response
else:
headers = {"Content-Type": "text/html", "charset": "utf-8"}
previous_states = list(reversed(this_node.known_nodes.states.values()))[:5]
# Mature every known node before rendering.
for node in this_node.known_nodes:
node.mature()
try:
content = status_template.render(
this_node=this_node,
known_nodes=this_node.known_nodes,
previous_states=previous_states,
domain=serving_domain,
version=nucypher.__version__,
checksum_address=this_node.checksum_address,
)
except Exception as e:
text_error = mako_exceptions.text_error_template().render()
html_error = mako_exceptions.html_error_template().render()
log.debug("Template Rendering Exception:\n" + text_error)
return Response(response=html_error, headers=headers, status=500)
return Response(response=content, headers=headers)
return rest_app
|
def _make_rest_app(
datastore: Datastore, this_node, serving_domain: str, log: Logger
) -> Flask:
from nucypher.characters.lawful import Alice, Ursula
_alice_class = Alice
_node_class = Ursula
rest_app = Flask("ursula-service")
rest_app.config["MAX_CONTENT_LENGTH"] = MAX_UPLOAD_CONTENT_LENGTH
@rest_app.route("/public_information")
def public_information():
"""
REST endpoint for public keys and address.
"""
response = Response(
response=bytes(this_node), mimetype="application/octet-stream"
)
return response
@rest_app.route("/ping", methods=["POST"])
def ping():
"""
Asks this node: "Can you access my public information endpoint"?
"""
try:
requesting_ursula = Ursula.from_bytes(request.data)
requesting_ursula.mature()
except ValueError: # (ValueError)
return Response({"error": "Invalid Ursula"}, status=400)
else:
initiator_address, initiator_port = tuple(requesting_ursula.rest_interface)
# Compare requester and posted Ursula information
request_address = request.environ["REMOTE_ADDR"]
if request_address != initiator_address:
return Response({"error": "Suspicious origin address"}, status=400)
#
# Make a Sandwich
#
try:
# Fetch and store initiator's teacher certificate.
certificate = this_node.network_middleware.get_certificate(
host=initiator_address, port=initiator_port
)
certificate_filepath = this_node.node_storage.store_node_certificate(
certificate=certificate
)
requesting_ursula_bytes = (
this_node.network_middleware.client.node_information(
host=initiator_address,
port=initiator_port,
certificate_filepath=certificate_filepath,
)
)
except NodeSeemsToBeDown:
return Response({"error": "Unreachable node"}, status=400) # ... toasted
except InvalidNodeCertificate:
return Response(
{"error": "Invalid TLS certificate - missing checksum address"},
status=400,
) # ... invalid
# Compare the results of the outer POST with the inner GET... yum
if requesting_ursula_bytes == request.data:
return Response(status=200)
else:
return Response({"error": "Suspicious node"}, status=400)
@rest_app.route("/node_metadata", methods=["GET"])
def all_known_nodes():
headers = {"Content-Type": "application/octet-stream"}
if (
this_node._learning_deferred is not RELAX
and not this_node._learning_task.running
):
# TODO: Is this every something we don't want to do?
this_node.start_learning_loop()
if this_node.known_nodes.checksum is NO_KNOWN_NODES:
return Response(b"", headers=headers, status=204)
known_nodes_bytestring = this_node.bytestring_of_known_nodes()
signature = this_node.stamp(known_nodes_bytestring)
return Response(bytes(signature) + known_nodes_bytestring, headers=headers)
@rest_app.route("/node_metadata", methods=["POST"])
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
log.debug(
"Learner already knew fleet state {}; doing nothing.".format(
learner_fleet_state
)
)
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
sprouts = _node_class.batch_from_bytes(request.data)
for node in sprouts:
this_node.remember_node(node)
# TODO: What's the right status code here? 202? Different if we already knew about the node(s)?
return all_known_nodes()
@rest_app.route("/consider_arrangement", methods=["POST"])
def consider_arrangement():
from nucypher.policy.policies import Arrangement
arrangement = Arrangement.from_bytes(request.data)
# TODO: Look at the expiration and figure out if we're even staking that long. 1701
with datastore.describe(
PolicyArrangement, arrangement.id.hex(), writeable=True
) as new_policy_arrangement:
new_policy_arrangement.arrangement_id = arrangement.id.hex().encode()
new_policy_arrangement.expiration = arrangement.expiration
new_policy_arrangement.alice_verifying_key = (
arrangement.alice.stamp.as_umbral_pubkey()
)
# TODO: Fine, we'll add the arrangement here, but if we never hear from Alice again to enact it,
# we need to prune it at some point. #1700
headers = {"Content-Type": "application/octet-stream"}
# TODO: Make this a legit response #234.
return Response(
b"This will eventually be an actual acceptance of the arrangement.",
headers=headers,
)
@rest_app.route("/kFrag/<id_as_hex>", methods=["POST"])
def set_policy(id_as_hex):
"""
REST endpoint for setting a kFrag.
"""
policy_message_kit = UmbralMessageKit.from_bytes(request.data)
alices_verifying_key = policy_message_kit.sender_verifying_key
alice = _alice_class.from_public_keys(verifying_key=alices_verifying_key)
try:
cleartext = this_node.verify_from(alice, policy_message_kit, decrypt=True)
except InvalidSignature:
# TODO: Perhaps we log this? Essentially 355.
return Response(status_code=400)
if not this_node.federated_only:
# This splitter probably belongs somewhere canonical.
transaction_splitter = BytestringSplitter(32)
tx, kfrag_bytes = transaction_splitter(cleartext, return_remainder=True)
try:
# Get all of the arrangements and verify that we'll be paid.
# TODO: We'd love for this part to be impossible to reduce the risk of collusion. #1274
arranged_addresses = (
this_node.policy_agent.fetch_arrangement_addresses_from_policy_txid(
tx, timeout=this_node.synchronous_query_timeout
)
)
except TimeExhausted:
# Alice didn't pay. Return response with that weird status code.
this_node.suspicious_activities_witnessed["freeriders"].append(
(alice, f"No transaction matching {tx}.")
)
return Response(status=402)
this_node_has_been_arranged = (
this_node.checksum_address in arranged_addresses
)
if not this_node_has_been_arranged:
this_node.suspicious_activities_witnessed["freeriders"].append(
(
alice,
f"The transaction {tx} does not list me as a Worker - it lists {arranged_addresses}.",
)
)
return Response(status=402)
else:
_tx = NO_BLOCKCHAIN_CONNECTION
kfrag_bytes = cleartext
kfrag = KFrag.from_bytes(kfrag_bytes)
if not kfrag.verify(signing_pubkey=alices_verifying_key):
raise InvalidSignature("{} is invalid".format(kfrag))
with datastore.describe(
PolicyArrangement, id_as_hex, writeable=True
) as policy_arrangement:
if (
not policy_arrangement.alice_verifying_key
== alice.stamp.as_umbral_pubkey()
):
raise alice.SuspiciousActivity
policy_arrangement.kfrag = kfrag
# TODO: Sign the arrangement here. #495
return "" # TODO: Return A 200, with whatever policy metadata.
@rest_app.route("/kFrag/<id_as_hex>", methods=["DELETE"])
def revoke_arrangement(id_as_hex):
"""
REST endpoint for revoking/deleting a KFrag from a node.
"""
from nucypher.policy.collections import Revocation
revocation = Revocation.from_bytes(request.data)
log.info(
"Received revocation: {} -- for arrangement {}".format(
bytes(revocation).hex(), id_as_hex
)
)
# Check that the request is the same for the provided revocation
if not id_as_hex == revocation.arrangement_id.hex():
log.debug("Couldn't identify an arrangement with id {}".format(id_as_hex))
return Response(status_code=400)
try:
with datastore.describe(
PolicyArrangement, id_as_hex, writeable=True
) as policy_arrangement:
if revocation.verify_signature(policy_arrangement.alice_verifying_key):
policy_arrangement.delete()
except (DatastoreTransactionError, InvalidSignature) as e:
log.debug("Exception attempting to revoke: {}".format(e))
return Response(
response="KFrag not found or revocation signature is invalid.",
status=404,
)
else:
log.info("KFrag successfully removed.")
return Response(response="KFrag deleted!", status=200)
@rest_app.route("/kFrag/<id_as_hex>/reencrypt", methods=["POST"])
def reencrypt_via_rest(id_as_hex):
# Get Policy Arrangement
try:
arrangement_id = binascii.unhexlify(id_as_hex)
except (binascii.Error, TypeError):
return Response(response=b"Invalid arrangement ID", status=405)
try:
# Get KFrag
# TODO: Yeah, well, what if this arrangement hasn't been enacted? 1702
with datastore.describe(PolicyArrangement, id_as_hex) as policy_arrangement:
kfrag = policy_arrangement.kfrag
alice_verifying_key = policy_arrangement.alice_verifying_key
except RecordNotFound:
return Response(response=arrangement_id, status=404)
# Get Work Order
from nucypher.policy.collections import WorkOrder # Avoid circular import
alice_address = canonical_address_from_umbral_key(alice_verifying_key)
work_order_payload = request.data
work_order = WorkOrder.from_rest_payload(
arrangement_id=arrangement_id,
rest_payload=work_order_payload,
ursula=this_node,
alice_address=alice_address,
)
log.info(
f"Work Order from {work_order.bob}, signed {work_order.receipt_signature}"
)
# Re-encrypt
response = this_node._reencrypt(
kfrag=kfrag, work_order=work_order, alice_verifying_key=alice_verifying_key
)
# Now, Ursula saves this workorder to her database...
# Note: we give the work order a random ID to store it under.
with datastore.describe(
Workorder, str(uuid.uuid4()), writeable=True
) as new_workorder:
new_workorder.arrangement_id = work_order.arrangement_id
new_workorder.bob_verifying_key = work_order.bob.stamp.as_umbral_pubkey()
new_workorder.bob_signature = work_order.receipt_signature
headers = {"Content-Type": "application/octet-stream"}
return Response(headers=headers, response=response)
@rest_app.route("/treasure_map/<identifier>")
def provide_treasure_map(identifier):
headers = {"Content-Type": "application/octet-stream"}
try:
with datastore.describe(TreasureMap, identifier) as stored_treasure_map:
response = Response(stored_treasure_map.treasure_map, headers=headers)
log.info(f"{this_node} providing TreasureMap {identifier}")
except RecordNotFound:
log.info(
f"{this_node} doesn't have requested TreasureMap under {identifier}"
)
response = Response(
f"No Treasure Map with identifier {identifier}",
status=404,
headers=headers,
)
return response
@rest_app.route("/treasure_map/", methods=["POST"])
def receive_treasure_map():
"""
Okay, so we've received a TreasureMap to store. We begin verifying
the treasure map by first validating the request and the received
treasure map itself.
We set the datastore identifier as the HRAC iff the node is running
as a decentralized node. Otherwise, we use the map_id in
federated mode.
"""
if not this_node.federated_only:
from nucypher.policy.collections import SignedTreasureMap as _MapClass
else:
from nucypher.policy.collections import TreasureMap as _MapClass
# Step 1: First, we verify the signature of the received treasure map.
# This step also deserializes the treasure map iff it's signed correctly.
try:
received_treasure_map = _MapClass.from_bytes(
bytes_representation=request.data, verify=True
)
except _MapClass.InvalidSignature:
log.info(
f"Bad TreasureMap HRAC Signature; not storing for HRAC {received_treasure_map._hrac.hex()}"
)
return Response(
"This TreasureMap's HRAC is not properly signed.", status=401
)
# Additionally, we determine the map identifier from the type of node.
# If the node is federated, we also set the expiration for a week.
if not this_node.federated_only:
map_identifier = received_treasure_map._hrac.hex()
else:
map_identifier = received_treasure_map.public_id()
expiration_date = MayaDT.from_datetime(
datetime.utcnow() + timedelta(days=7)
)
# Step 2: Check if we already have the treasure map.
try:
with datastore.describe(TreasureMap, map_identifier) as stored_treasure_map:
if (
_MapClass.from_bytes(stored_treasure_map.treasure_map)
== received_treasure_map
):
return Response("Already have this map.", status=303)
except RecordNotFound:
# This appears to be a new treasure map that we don't have!
pass
# Step 3: If the node is decentralized, we check that the received
# treasure map is valid pursuant to an active policy.
# We also set the expiration from the data on the blockchain here.
if not this_node.federated_only:
policy_data, alice_checksum_address = this_node.policy_agent.fetch_policy(
received_treasure_map._hrac, with_owner=True
)
# If the Policy doesn't exist, the policy_data is all zeros.
if not policy_data[5]:
log.info(
f"TreasureMap is for non-existent Policy; not storing {map_identifier}"
)
return Response(
"The Policy for this TreasureMap doesn't exist.", status=409
)
# Check that this treasure map is from Alice per the Policy.
if not received_treasure_map.verify_blockchain_signature(
checksum_address=alice_checksum_address
):
log.info(f"Bad TreasureMap ID; not storing {map_identifier}")
return Response(
"This TreasureMap doesn't match a paid Policy.", status=402
)
# Check that this treasure map is valid for the Policy datetime and that it's not disabled.
if policy_data[0] or datetime.utcnow() >= datetime.utcfromtimestamp(
policy_data[5]
):
log.info(
f"Received TreasureMap for an expired/disabled policy; not storing {map_identifier}"
)
return Response(
"This TreasureMap is for an expired/disabled policy.", status=402
)
expiration_date = MayaDT.from_datetime(
datetime.utcfromtimestamp(policy_data[5])
)
# Step 4: Finally, we store our treasure map under its identifier!
log.info(f"{this_node} storing TreasureMap {map_identifier}")
with datastore.describe(
TreasureMap, map_identifier, writeable=True
) as new_treasure_map:
new_treasure_map.treasure_map = bytes(received_treasure_map)
new_treasure_map.expiration = expiration_date
return Response("Treasure map stored!", status=201)
@rest_app.route("/status/", methods=["GET"])
def status():
if request.args.get("json"):
payload = this_node.abridged_node_details()
response = jsonify(payload)
return response
else:
headers = {"Content-Type": "text/html", "charset": "utf-8"}
previous_states = list(reversed(this_node.known_nodes.states.values()))[:5]
# Mature every known node before rendering.
for node in this_node.known_nodes:
node.mature()
try:
content = status_template.render(
this_node=this_node,
known_nodes=this_node.known_nodes,
previous_states=previous_states,
domain=serving_domain,
version=nucypher.__version__,
checksum_address=this_node.checksum_address,
)
except Exception as e:
text_error = mako_exceptions.text_error_template().render()
html_error = mako_exceptions.html_error_template().render()
log.debug("Template Rendering Exception:\n" + text_error)
return Response(response=html_error, headers=headers, status=500)
return Response(response=content, headers=headers)
return rest_app
|
https://github.com/nucypher/nucypher/issues/2390
|
Traceback (most recent call last):
File "/home/tux/code/nucypher-prod/venv/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
[Previous line repeated 1 more time]
File "/home/tux/code/nucypher-prod/nucypher/cli/commands/stake.py", line 1263, in set_min_rate
paint_min_rate(emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address)
File "/home/tux/code/nucypher-prod/nucypher/cli/painting/staking.py", line 229, in paint_min_rate
minimum = policy_agent.min_fee_rate(staker_address)
AttributeError: 'PolicyManagerAgent' object has no attribute 'min_fee_rate'
|
AttributeError
|
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
# log.debug("Learner already knew fleet state {}; doing nothing.".format(learner_fleet_state)) # 1712
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
sprouts = _node_class.batch_from_bytes(request.data)
for node in sprouts:
this_node.remember_node(node)
# TODO: What's the right status code here? 202? Different if we already knew about the node(s)?
return all_known_nodes()
|
def node_metadata_exchange():
# If these nodes already have the same fleet state, no exchange is necessary.
learner_fleet_state = request.args.get("fleet")
if learner_fleet_state == this_node.known_nodes.checksum:
log.debug(
"Learner already knew fleet state {}; doing nothing.".format(
learner_fleet_state
)
)
headers = {"Content-Type": "application/octet-stream"}
payload = this_node.known_nodes.snapshot() + bytes(FLEET_STATES_MATCH)
signature = this_node.stamp(payload)
return Response(bytes(signature) + payload, headers=headers)
sprouts = _node_class.batch_from_bytes(request.data)
for node in sprouts:
this_node.remember_node(node)
# TODO: What's the right status code here? 202? Different if we already knew about the node(s)?
return all_known_nodes()
|
https://github.com/nucypher/nucypher/issues/2390
|
Traceback (most recent call last):
File "/home/tux/code/nucypher-prod/venv/bin/nucypher", line 33, in <module>
sys.exit(load_entry_point('nucypher', 'console_scripts', 'nucypher')())
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 829, in __call__
return self.main(*args, **kwargs)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 782, in main
rv = self.invoke(ctx)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1259, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 1066, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/tux/code/nucypher-prod/venv/lib/python3.8/site-packages/click/core.py", line 610, in invoke
return callback(*args, **kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
File "/home/tux/code/nucypher-prod/nucypher/cli/options.py", line 163, in wrapper
return func(**kwargs)
[Previous line repeated 1 more time]
File "/home/tux/code/nucypher-prod/nucypher/cli/commands/stake.py", line 1263, in set_min_rate
paint_min_rate(emitter, STAKEHOLDER.registry, STAKEHOLDER.policy_agent, staking_address)
File "/home/tux/code/nucypher-prod/nucypher/cli/painting/staking.py", line 229, in paint_min_rate
minimum = policy_agent.min_fee_rate(staker_address)
AttributeError: 'PolicyManagerAgent' object has no attribute 'min_fee_rate'
|
AttributeError
|
def __getitem__(self, event_name: str):
event_method = self.__get_web3_event_by_name(event_name)
def wrapper(from_block=None, to_block=None, **argument_filters):
if from_block is None:
from_block = 0 # TODO: we can do better. Get contract creation block.
if to_block is None:
to_block = "latest"
entries = event_method.getLogs(
fromBlock=from_block, toBlock=to_block, argument_filters=argument_filters
)
for entry in entries:
yield EventRecord(entry)
return wrapper
|
def __getitem__(self, event_name: str):
event_method = self.__get_web3_event_by_name(event_name)
def wrapper(from_block=None, to_block=None, **argument_filters):
if from_block is None:
from_block = 0 # TODO: we can do better. Get contract creation block.
if to_block is None:
to_block = "latest"
event_filter = event_method.createFilter(
fromBlock=from_block, toBlock=to_block, argument_filters=argument_filters
)
entries = event_filter.get_all_entries()
for entry in entries:
yield EventRecord(entry)
return wrapper
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def wrapper(from_block=None, to_block=None, **argument_filters):
if from_block is None:
from_block = 0 # TODO: we can do better. Get contract creation block.
if to_block is None:
to_block = "latest"
entries = event_method.getLogs(
fromBlock=from_block, toBlock=to_block, argument_filters=argument_filters
)
for entry in entries:
yield EventRecord(entry)
|
def wrapper(from_block=None, to_block=None, **argument_filters):
if from_block is None:
from_block = 0 # TODO: we can do better. Get contract creation block.
if to_block is None:
to_block = "latest"
event_filter = event_method.createFilter(
fromBlock=from_block, toBlock=to_block, argument_filters=argument_filters
)
entries = event_filter.get_all_entries()
for entry in entries:
yield EventRecord(entry)
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def __init__(
self,
agent: "EthereumContractAgent",
event_name: str,
from_block: int,
to_block: int = None, # defaults to latest block
max_blocks_per_call: int = DEFAULT_MAX_BLOCKS_PER_CALL,
**argument_filters,
):
self.event_filter = agent.events[event_name]
self.from_block = from_block
self.to_block = (
to_block if to_block is not None else agent.blockchain.client.block_number
)
# validity check of block range
if self.to_block < self.from_block:
raise ValueError(
f"Invalid events block range: to_block {self.to_block} must be greater than or equal "
f"to from_block {self.from_block}"
)
self.max_blocks_per_call = max_blocks_per_call
self.argument_filters = argument_filters
|
def __init__(
self,
agent: "EthereumContractAgent",
event_name: str,
from_block: int,
to_block: int = None, # defaults to latest block
max_blocks_per_call: int = DEFAULT_MAX_BLOCKS_PER_CALL,
**argument_filters,
):
if not agent:
raise ValueError(f"Contract agent must be provided")
if not event_name:
raise ValueError(f"Event name must be provided")
self.event_filter = agent.events[event_name]
self.from_block = from_block
self.to_block = to_block if to_block else agent.blockchain.client.block_number
# validity check of block range
if to_block <= from_block:
raise ValueError(f"Invalid block range provided ({from_block} - {to_block})")
self.max_blocks_per_call = max_blocks_per_call
self.argument_filters = argument_filters
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def __iter__(self):
current_from_block = self.from_block
current_to_block = min(self.from_block + self.max_blocks_per_call, self.to_block)
while current_from_block <= current_to_block:
for event_record in self.event_filter(
from_block=current_from_block,
to_block=current_to_block,
**self.argument_filters,
):
yield event_record
# previous block range is inclusive hence the increment
current_from_block = current_to_block + 1
# update the 'to block' to the lesser of either the next `max_blocks_per_call` blocks,
# or the remainder of blocks
current_to_block = min(
current_from_block + self.max_blocks_per_call, self.to_block
)
|
def __iter__(self):
current_from_block = self.from_block
current_to_block = min(self.from_block + self.max_blocks_per_call, self.to_block)
while current_from_block < current_to_block:
for event_record in self.event_filter(
from_block=current_from_block,
to_block=current_to_block,
**self.argument_filters,
):
yield event_record
current_from_block = current_to_block
# update the 'to block' to the lesser of either the next `max_blocks_per_call` blocks,
# or the remainder of blocks
current_to_block = min(
current_from_block + self.max_blocks_per_call, self.to_block
)
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def events(general_config, staker_options, config_file, event_name):
"""See blockchain events associated to a staker"""
# Setup
emitter = setup_emitter(general_config)
STAKEHOLDER = staker_options.create_character(emitter, config_file)
_client_account, staking_address = select_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staker_options.staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=True,
)
title = f" {STAKEHOLDER.staking_agent.contract_name} Events ".center(40, "-")
emitter.echo(f"\n{title}\n", bold=True, color="green")
if event_name:
events = [STAKEHOLDER.staking_agent.contract.events[event_name]]
else:
raise click.BadOptionUsage(
message="You must specify an event name with --event-name"
)
# TODO: Doesn't work for the moment
# event_names = STAKEHOLDER.staking_agent.events.names
# events = [STAKEHOLDER.staking_agent.contract.events[e] for e in event_names]
# events = [e for e in events if 'staker' in e.argument_names]
for event in events:
emitter.echo(f"{event.event_name}:", bold=True, color="yellow")
entries = event.getLogs(
fromBlock=0, toBlock="latest", argument_filters={"staker": staking_address}
)
for event_record in entries:
emitter.echo(f" - {EventRecord(event_record)}")
|
def events(general_config, staker_options, config_file, event_name):
"""See blockchain events associated to a staker"""
# Setup
emitter = setup_emitter(general_config)
STAKEHOLDER = staker_options.create_character(emitter, config_file)
_client_account, staking_address = select_client_account_for_staking(
emitter=emitter,
stakeholder=STAKEHOLDER,
staking_address=staker_options.staking_address,
individual_allocation=STAKEHOLDER.individual_allocation,
force=True,
)
title = f" {STAKEHOLDER.staking_agent.contract_name} Events ".center(40, "-")
emitter.echo(f"\n{title}\n", bold=True, color="green")
if event_name:
events = [STAKEHOLDER.staking_agent.contract.events[event_name]]
else:
raise click.BadOptionUsage(
message="You must specify an event name with --event-name"
)
# TODO: Doesn't work for the moment
# event_names = STAKEHOLDER.staking_agent.events.names
# events = [STAKEHOLDER.staking_agent.contract.events[e] for e in event_names]
# events = [e for e in events if 'staker' in e.argument_names]
for event in events:
emitter.echo(f"{event.event_name}:", bold=True, color="yellow")
event_filter = event.createFilter(
fromBlock=0, toBlock="latest", argument_filters={"staker": staking_address}
)
entries = event_filter.get_all_entries()
for event_record in entries:
emitter.echo(f" - {EventRecord(event_record)}")
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def __init__(
self,
event_name: str,
event_args_config: Dict[str, tuple],
argument_filters: Dict[str, str],
contract_agent: ContractAgents,
):
super().__init__()
self.event_name = event_name
self.contract_agent = contract_agent
# this way we don't have to deal with 'latest' at all
self.filter_current_from_block = self.contract_agent.blockchain.client.block_number
self.filter_arguments = argument_filters
self.event_args_config = event_args_config
|
def __init__(
self,
event_name: str,
event_args_config: Dict[str, tuple],
argument_filters: Dict[str, str],
contract_agent: ContractAgents,
):
super().__init__()
self.event_name = event_name
self.contract_agent = contract_agent
self.event_filter = contract_agent.contract.events[event_name].createFilter(
fromBlock="latest", argument_filters=argument_filters
)
self.event_args_config = event_args_config
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def _collect_internal(self) -> None:
from_block = self.filter_current_from_block
to_block = self.contract_agent.blockchain.client.block_number
if from_block >= to_block:
# we've already checked the latest block and waiting for a new block
# nothing to see here
return
events_throttler = ContractEventsThrottler(
agent=self.contract_agent,
event_name=self.event_name,
from_block=from_block,
to_block=to_block,
**self.filter_arguments,
)
for event_record in events_throttler:
self._event_occurred(event_record.raw_event)
# update last block checked for the next round - from/to block range is inclusive
self.filter_current_from_block = to_block + 1
|
def _collect_internal(self) -> None:
events = self.event_filter.get_new_entries()
for event in events:
self._event_occurred(event)
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def initialize(self, metrics_prefix: str, registry: CollectorRegistry) -> None:
super().initialize(metrics_prefix=metrics_prefix, registry=registry)
missing_commitments = self.contract_agent.get_missing_commitments(
checksum_address=self.staker_address
)
if missing_commitments == 0:
# has either already committed to this period or the next period
# use local event filter for initial data
last_committed_period = self.contract_agent.get_last_committed_period(
staker_address=self.staker_address
)
arg_filters = {"staker": self.staker_address, "period": last_committed_period}
latest_block = self.contract_agent.blockchain.client.block_number
previous_period = self.contract_agent.get_current_period() - 1 # just in case
# we estimate the block number for the previous period to start search from since either
# - commitment made during previous period for current period, OR
# - commitment made during current period for next period
block_number_for_previous_period = estimate_block_number_for_period(
period=previous_period,
seconds_per_period=self.contract_agent.staking_parameters()[0],
latest_block=latest_block,
)
events_throttler = ContractEventsThrottler(
agent=self.contract_agent,
event_name=self.event_name,
from_block=block_number_for_previous_period,
to_block=latest_block,
**arg_filters,
)
for event_record in events_throttler:
self._event_occurred(event_record.raw_event)
# update last block checked since we just looked for this event up to and including latest block
# block range is inclusive, hence the increment
self.filter_current_from_block = latest_block + 1
|
def initialize(self, metrics_prefix: str, registry: CollectorRegistry) -> None:
super().initialize(metrics_prefix=metrics_prefix, registry=registry)
missing_commitments = self.contract_agent.get_missing_commitments(
checksum_address=self.staker_address
)
if missing_commitments == 0:
# has either already committed to this period or the next period
# use local event filter for initial data
last_committed_period = self.contract_agent.get_last_committed_period(
staker_address=self.staker_address
)
arg_filters = {"staker": self.staker_address, "period": last_committed_period}
latest_block = self.contract_agent.blockchain.client.block_number
previous_period = self.contract_agent.get_current_period() - 1 # just in case
# we estimate the block number for the previous period to start search from since either
# - commitment made during previous period for current period, OR
# - commitment made during current period for next period
block_number_for_previous_period = estimate_block_number_for_period(
period=previous_period,
seconds_per_period=self.contract_agent.staking_parameters()[0],
latest_block=latest_block,
)
events_throttler = ContractEventsThrottler(
agent=self.contract_agent,
event_name=self.event_name,
from_block=block_number_for_previous_period,
to_block=latest_block,
**arg_filters,
)
for event_record in events_throttler:
self._event_occurred(event_record.raw_event)
|
https://github.com/nucypher/nucypher/issues/2291
|
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical] Unhandled error in Deferred:
2020-09-23T14:29:04+0000 [twisted.internet.defer#critical]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1283, in run
self.mainLoop()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 1292, in mainLoop
self.runUntilCurrent()
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/base.py", line 913, in runUntilCurrent
call.func(*call.args, call.kw)
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/task.py", line 239, in call
d = defer.maybeDeferred(self.f, *self.a, self.kw)
--- <exception caught here> ---
File "/usr/local/lib/python3.6/dist-packages/twisted/internet/defer.py", line 151, in maybeDeferred
result = f(*args, **kw)
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/metrics.py", line 147, in collect_prometheus_metrics
collector.collect()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 77, in collect
self._collect_internal()
File "/usr/local/lib/python3.6/dist-packages/nucypher/utilities/prometheus/collector.py", line 303, in _collect_internal
events = self.event_filter.get_new_entries()
File "/usr/local/lib/python3.6/dist-packages/web3/_utils/filters.py", line 159, in get_new_entries
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
File "/usr/local/lib/python3.6/dist-packages/web3/eth.py", line 493, in getFilterChanges
RPC.eth_getFilterChanges, [filter_id],
File "/usr/local/lib/python3.6/dist-packages/web3/manager.py", line 153, in request_blocking
raise ValueError(response["error"])
builtins.ValueError: {'code': -32000, 'message': 'filter not found'}
|
builtins.ValueError
|
def block_until_ready(self, poll_rate: int = None, timeout: int = None):
"""
Polls the staking_agent and blocks until the staking address is not
a null address for the given worker_address. Once the worker is bonded, it returns the staker address.
"""
if not self.__worker_address:
raise RuntimeError("No worker address available")
timeout = timeout or self.BONDING_TIMEOUT
poll_rate = poll_rate or self.BONDING_POLL_RATE
staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=self.registry)
client = staking_agent.blockchain.client
start = maya.now()
emitter = StdoutEmitter() # TODO: Make injectable, or embed this logic into Ursula
emitter.message("Waiting for bonding and funding...", color="yellow")
funded, bonded = False, False
while True:
# Read
staking_address = staking_agent.get_staker_from_worker(self.__worker_address)
ether_balance = client.get_balance(self.__worker_address)
# Bonding
if (not bonded) and (staking_address != BlockchainInterface.NULL_ADDRESS):
bonded = True
emitter.message(
f"Worker is bonded to ({staking_address})!", color="green", bold=True
)
# Balance
if ether_balance and (not funded):
funded, balance = True, Web3.fromWei(ether_balance, "ether")
emitter.message(
f"Worker is funded with {balance} ETH!", color="green", bold=True
)
# Success and Escape
if staking_address != BlockchainInterface.NULL_ADDRESS and ether_balance:
self._checksum_address = staking_address
# TODO: #1823 - Workaround for new nickname every restart
self.nickname, self.nickname_metadata = nickname_from_seed(
self.checksum_address
)
break
# Crash on Timeout
if timeout:
now = maya.now()
delta = now - start
if delta.total_seconds() >= timeout:
if staking_address == BlockchainInterface.NULL_ADDRESS:
raise self.DetachedWorker(
f"Worker {self.__worker_address} not bonded after waiting {timeout} seconds."
)
elif not ether_balance:
raise RuntimeError(
f"Worker {self.__worker_address} has no ether after waiting {timeout} seconds."
)
# Increment
time.sleep(poll_rate)
|
def block_until_ready(self, poll_rate: int = None, timeout: int = None):
"""
Polls the staking_agent and blocks until the staking address is not
a null address for the given worker_address. Once the worker is bonded, it returns the staker address.
"""
if not self.__worker_address:
raise RuntimeError("No worker address available")
timeout = timeout or self.BONDING_TIMEOUT
poll_rate = poll_rate or self.BONDING_POLL_RATE
staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=self.registry)
client = staking_agent.blockchain.client
start = maya.now()
emitter = StdoutEmitter() # TODO: Make injectable, or embed this logic into Ursula
emitter.message("Waiting for bonding and funding...")
funded, bonded = False, False
while True:
# Read
staking_address = staking_agent.get_staker_from_worker(self.__worker_address)
ether_balance = client.get_balance(self.__worker_address)
# Bonding
if (not bonded) and (staking_address != BlockchainInterface.NULL_ADDRESS):
bonded = True
emitter.message(
f"Worker is bonded to ({staking_address})!", color="green", bold=True
)
# Balance
if ether_balance and (not funded):
funded, balance = True, Web3.fromWei(ether_balance, "ether")
emitter.message(
f"Worker is funded with {balance} ETH!", color="green", bold=True
)
# Success and Escape
if staking_address != BlockchainInterface.NULL_ADDRESS and ether_balance:
self._checksum_address = staking_address
# TODO: #1823 - Workaround for new nickname every restart
self.nickname, self.nickname_metadata = nickname_from_seed(
self.checksum_address
)
emitter.message(f"Starting services...", color="yellow", bold=True)
break
# Crash on Timeout
if timeout:
now = maya.now()
delta = now - start
if delta.total_seconds() >= timeout:
if staking_address == BlockchainInterface.NULL_ADDRESS:
raise self.DetachedWorker(
f"Worker {self.__worker_address} not bonded after waiting {timeout} seconds."
)
elif not ether_balance:
raise RuntimeError(
f"Worker {self.__worker_address} has no ether after waiting {timeout} seconds."
)
# Increment
time.sleep(poll_rate)
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def __init__(
self,
# Ursula
rest_host: str,
rest_port: int,
domains: Set = None, # For now, serving and learning domains will be the same.
certificate: Certificate = None,
certificate_filepath: str = None,
db_filepath: str = None,
is_me: bool = True,
interface_signature=None,
timestamp=None,
availability_check: bool = True,
prune_datastore: bool = True,
metrics_port: int = None,
# Blockchain
decentralized_identity_evidence: bytes = constants.NOT_SIGNED,
checksum_address: str = None,
worker_address: str = None, # TODO: deprecate, and rename to "checksum_address"
work_tracker: WorkTracker = None,
start_working_now: bool = True,
client_password: str = None,
# Character
abort_on_learning_error: bool = False,
federated_only: bool = False,
start_learning_now: bool = None,
crypto_power=None,
tls_curve: EllipticCurve = None,
known_nodes: Iterable = None,
**character_kwargs,
) -> None:
#
# Character
#
if domains is None:
# TODO: Move defaults to configuration, Off character.
from nucypher.config.node import CharacterConfiguration
domains = {CharacterConfiguration.DEFAULT_DOMAIN}
if is_me:
# If we're federated only, we assume that all other nodes in our domain are as well.
self.set_federated_mode(federated_only)
Character.__init__(
self,
is_me=is_me,
checksum_address=checksum_address,
start_learning_now=False, # Handled later in this function to avoid race condition
federated_only=self._federated_only_instances, # TODO: 'Ursula' object has no attribute '_federated_only_instances' if an is_me Ursula is not inited prior to this moment NRN
crypto_power=crypto_power,
abort_on_learning_error=abort_on_learning_error,
known_nodes=known_nodes,
domains=domains,
known_node_class=Ursula,
**character_kwargs,
)
if is_me:
# In-Memory TreasureMap tracking
self._stored_treasure_maps = dict()
# Learner
self._start_learning_now = start_learning_now
# Self-Health Checks
self._availability_check = availability_check
self._availability_tracker = AvailabilityTracker(ursula=self)
# Arrangement Pruning
self.__pruning_task = None
self._prune_datastore = prune_datastore
self._arrangement_pruning_task = LoopingCall(f=self.__prune_arrangements)
# Prometheus / Metrics
self._metrics_port = metrics_port
#
# Ursula the Decentralized Worker (Self)
#
if is_me and not federated_only: # TODO: #429
# Prepare a TransactingPower from worker node's transacting keys
self.transacting_power = TransactingPower(
account=worker_address,
password=client_password,
signer=self.signer,
cache=True,
)
self._crypto_power.consume_power_up(self.transacting_power)
# Use this power to substantiate the stamp
self.substantiate_stamp()
self.log.debug(
f"Created decentralized identity evidence: {self.decentralized_identity_evidence[:10].hex()}"
)
decentralized_identity_evidence = self.decentralized_identity_evidence
Worker.__init__(
self,
is_me=is_me,
registry=self.registry,
checksum_address=checksum_address,
worker_address=worker_address,
work_tracker=work_tracker,
start_working_now=start_working_now,
)
if not crypto_power or (TLSHostingPower not in crypto_power):
#
# Development Ursula
#
if is_me:
self.suspicious_activities_witnessed = {
"vladimirs": [],
"bad_treasure_maps": [],
"freeriders": [],
}
# REST Server (Ephemeral Self-Ursula)
rest_app, datastore = make_rest_app(
this_node=self,
db_filepath=db_filepath,
serving_domains=domains,
)
# TLSHostingPower (Ephemeral Powers and Private Keys)
tls_hosting_keypair = HostingKeypair(
curve=tls_curve, host=rest_host, checksum_address=self.checksum_address
)
tls_hosting_power = TLSHostingPower(
keypair=tls_hosting_keypair, host=rest_host
)
self.rest_server = ProxyRESTServer(
rest_host=rest_host,
rest_port=rest_port,
rest_app=rest_app,
datastore=datastore,
hosting_power=tls_hosting_power,
)
#
# Stranger-Ursula
#
else:
# TLSHostingPower
if certificate or certificate_filepath:
tls_hosting_power = TLSHostingPower(
host=rest_host,
public_certificate_filepath=certificate_filepath,
public_certificate=certificate,
)
else:
tls_hosting_keypair = HostingKeypair(
curve=tls_curve, host=rest_host, generate_certificate=False
)
tls_hosting_power = TLSHostingPower(
host=rest_host, keypair=tls_hosting_keypair
)
# REST Server
# Unless the caller passed a crypto power we'll make our own TLSHostingPower for this stranger.
self.rest_server = ProxyRESTServer(
rest_host=rest_host,
rest_port=rest_port,
hosting_power=tls_hosting_power,
)
# OK - Now we have a ProxyRestServer and a TLSHostingPower for some Ursula
self._crypto_power.consume_power_up(tls_hosting_power) # Consume!
#
# Teacher (Verifiable Node)
#
certificate_filepath = self._crypto_power.power_ups(
TLSHostingPower
).keypair.certificate_filepath
certificate = self._crypto_power.power_ups(TLSHostingPower).keypair.certificate
Teacher.__init__(
self,
domains=domains,
certificate=certificate,
certificate_filepath=certificate_filepath,
interface_signature=interface_signature,
timestamp=timestamp,
decentralized_identity_evidence=decentralized_identity_evidence,
)
if is_me:
self.known_nodes.record_fleet_state(
additional_nodes_to_track=[self]
) # Initial Impression
message = "THIS IS YOU: {}: {}".format(self.__class__.__name__, self)
self.log.info(message)
self.log.info(self.banner.format(self.nickname))
else:
message = "Initialized Stranger {} | {}".format(self.__class__.__name__, self)
self.log.debug(message)
|
def __init__(
self,
# Ursula
rest_host: str,
rest_port: int,
domains: Set = None, # For now, serving and learning domains will be the same.
certificate: Certificate = None,
certificate_filepath: str = None,
db_filepath: str = None,
is_me: bool = True,
interface_signature=None,
timestamp=None,
availability_check: bool = True,
prune_datastore: bool = True,
metrics_port: int = None,
# Blockchain
decentralized_identity_evidence: bytes = constants.NOT_SIGNED,
checksum_address: str = None,
worker_address: str = None, # TODO: deprecate, and rename to "checksum_address"
work_tracker: WorkTracker = None,
start_working_now: bool = True,
client_password: str = None,
# Character
abort_on_learning_error: bool = False,
federated_only: bool = False,
start_learning_now: bool = None,
crypto_power=None,
tls_curve: EllipticCurve = None,
known_nodes: Iterable = None,
**character_kwargs,
) -> None:
#
# Character
#
if domains is None:
# TODO: Move defaults to configuration, Off character.
from nucypher.config.node import CharacterConfiguration
domains = {CharacterConfiguration.DEFAULT_DOMAIN}
if is_me:
# If we're federated only, we assume that all other nodes in our domain are as well.
self.set_federated_mode(federated_only)
Character.__init__(
self,
is_me=is_me,
checksum_address=checksum_address,
start_learning_now=False, # Handled later in this function to avoid race condition
federated_only=self._federated_only_instances, # TODO: 'Ursula' object has no attribute '_federated_only_instances' if an is_me Ursula is not inited prior to this moment NRN
crypto_power=crypto_power,
abort_on_learning_error=abort_on_learning_error,
known_nodes=known_nodes,
domains=domains,
known_node_class=Ursula,
**character_kwargs,
)
if is_me:
# In-Memory TreasureMap tracking
self._stored_treasure_maps = dict()
# Learner
self._start_learning_now = start_learning_now
# Self-Health Checks
self._availability_check = availability_check
self._availability_sensor = AvailabilitySensor(ursula=self)
# Arrangement Pruning
self.__pruning_task = None
self._prune_datastore = prune_datastore
self._arrangement_pruning_task = LoopingCall(f=self.__prune_arrangements)
# Prometheus / Metrics
self._metrics_port = metrics_port
#
# Ursula the Decentralized Worker (Self)
#
if is_me and not federated_only: # TODO: #429
# Prepare a TransactingPower from worker node's transacting keys
self.transacting_power = TransactingPower(
account=worker_address,
password=client_password,
signer=self.signer,
cache=True,
)
self._crypto_power.consume_power_up(self.transacting_power)
# Use this power to substantiate the stamp
self.substantiate_stamp()
self.log.debug(
f"Created decentralized identity evidence: {self.decentralized_identity_evidence[:10].hex()}"
)
decentralized_identity_evidence = self.decentralized_identity_evidence
Worker.__init__(
self,
is_me=is_me,
registry=self.registry,
checksum_address=checksum_address,
worker_address=worker_address,
work_tracker=work_tracker,
start_working_now=start_working_now,
)
if not crypto_power or (TLSHostingPower not in crypto_power):
#
# Development Ursula
#
if is_me:
self.suspicious_activities_witnessed = {
"vladimirs": [],
"bad_treasure_maps": [],
"freeriders": [],
}
# REST Server (Ephemeral Self-Ursula)
rest_app, datastore = make_rest_app(
this_node=self,
db_filepath=db_filepath,
serving_domains=domains,
)
# TLSHostingPower (Ephemeral Powers and Private Keys)
tls_hosting_keypair = HostingKeypair(
curve=tls_curve, host=rest_host, checksum_address=self.checksum_address
)
tls_hosting_power = TLSHostingPower(
keypair=tls_hosting_keypair, host=rest_host
)
self.rest_server = ProxyRESTServer(
rest_host=rest_host,
rest_port=rest_port,
rest_app=rest_app,
datastore=datastore,
hosting_power=tls_hosting_power,
)
#
# Stranger-Ursula
#
else:
# TLSHostingPower
if certificate or certificate_filepath:
tls_hosting_power = TLSHostingPower(
host=rest_host,
public_certificate_filepath=certificate_filepath,
public_certificate=certificate,
)
else:
tls_hosting_keypair = HostingKeypair(
curve=tls_curve, host=rest_host, generate_certificate=False
)
tls_hosting_power = TLSHostingPower(
host=rest_host, keypair=tls_hosting_keypair
)
# REST Server
# Unless the caller passed a crypto power we'll make our own TLSHostingPower for this stranger.
self.rest_server = ProxyRESTServer(
rest_host=rest_host,
rest_port=rest_port,
hosting_power=tls_hosting_power,
)
# OK - Now we have a ProxyRestServer and a TLSHostingPower for some Ursula
self._crypto_power.consume_power_up(tls_hosting_power) # Consume!
#
# Teacher (Verifiable Node)
#
certificate_filepath = self._crypto_power.power_ups(
TLSHostingPower
).keypair.certificate_filepath
certificate = self._crypto_power.power_ups(TLSHostingPower).keypair.certificate
Teacher.__init__(
self,
domains=domains,
certificate=certificate,
certificate_filepath=certificate_filepath,
interface_signature=interface_signature,
timestamp=timestamp,
decentralized_identity_evidence=decentralized_identity_evidence,
)
if is_me:
self.known_nodes.record_fleet_state(
additional_nodes_to_track=[self]
) # Initial Impression
message = "THIS IS YOU: {}: {}".format(self.__class__.__name__, self)
self.log.info(message)
self.log.info(self.banner.format(self.nickname))
else:
message = "Initialized Stranger {} | {}".format(self.__class__.__name__, self)
self.log.debug(message)
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def run(
self,
emitter: StdoutEmitter = None,
hendrix: bool = True,
learning: bool = True,
availability: bool = True,
worker: bool = True,
pruning: bool = True,
interactive: bool = False,
prometheus: bool = False,
start_reactor: bool = True,
) -> None:
"""Schedule and start select ursula services, then optionally start the reactor."""
#
# Async loops ordered by schedule priority
#
if emitter:
emitter.message(f"Starting services...", color="yellow")
if pruning:
self.__pruning_task = self._arrangement_pruning_task.start(
interval=self._pruning_interval, now=True
)
if emitter:
emitter.message(f"✓ Database pruning", color="green")
if learning:
self.start_learning_loop(now=self._start_learning_now)
if emitter:
emitter.message(
f"✓ Node Discovery ({','.join(self.learning_domains)})", color="green"
)
if self._availability_check and availability:
self._availability_tracker.start(now=False) # wait...
if emitter:
emitter.message(f"✓ Availability Checks", color="green")
if worker and not self.federated_only:
self.work_tracker.start(
act_now=True, requirement_func=self._availability_tracker.status
)
if emitter:
emitter.message(f"✓ Work Tracking", color="green")
#
# Non-order dependant services
#
if prometheus:
# TODO: Integrate with Hendrix TLS Deploy?
# Local scoped to help prevent import without prometheus installed
from nucypher.utilities.metrics import initialize_prometheus_exporter
initialize_prometheus_exporter(ursula=self, port=self._metrics_port)
if emitter:
emitter.message(f"✓ Prometheus Exporter", color="green")
if interactive and emitter:
stdio.StandardIO(UrsulaCommandProtocol(ursula=self, emitter=emitter))
if hendrix:
if emitter:
emitter.message(
f"Starting Ursula on {self.rest_interface}", color="green", bold=True
)
deployer = self.get_deployer()
deployer.addServices()
deployer.catalogServers(deployer.hendrix)
if not start_reactor:
return
if emitter:
emitter.message("Working ~ Keep Ursula Online!", color="blue", bold=True)
try:
deployer.run() # <--- Blocking Call (Reactor)
except Exception as e:
self.log.critical(str(e))
if emitter:
emitter.message(f"{e.__class__.__name__} {e}", color="red", bold=True)
raise # Crash :-(
elif start_reactor: # ... without hendrix
reactor.run() # <--- Blocking Call (Reactor)
|
def run(
self,
emitter: StdoutEmitter = None,
hendrix: bool = True,
learning: bool = True,
availability: bool = True,
worker: bool = True,
pruning: bool = True,
interactive: bool = False,
prometheus: bool = False,
start_reactor: bool = True,
) -> None:
"""Schedule and start select ursula services, then optionally start the reactor."""
#
# Async loops ordered by schedule priority
#
if pruning:
self.__pruning_task = self._arrangement_pruning_task.start(
interval=self._pruning_interval, now=True
)
if learning:
if emitter:
emitter.message(
f"Connecting to {','.join(self.learning_domains)}",
color="green",
bold=True,
)
self.start_learning_loop(now=self._start_learning_now)
if self._availability_check and availability:
self._availability_sensor.start(now=False) # wait...
if worker and not self.federated_only:
self.work_tracker.start(
act_now=True, requirement_func=self._availability_sensor.status
)
#
# Non-order dependant services
#
if prometheus:
# TODO: Integrate with Hendrix TLS Deploy?
# Local scoped to help prevent import without prometheus installed
from nucypher.utilities.metrics import initialize_prometheus_exporter
initialize_prometheus_exporter(ursula=self, port=self._metrics_port)
if interactive and emitter:
stdio.StandardIO(UrsulaCommandProtocol(ursula=self, emitter=emitter))
if hendrix:
if emitter:
emitter.message(
f"Starting Ursula on {self.rest_interface}", color="green", bold=True
)
deployer = self.get_deployer()
deployer.addServices()
deployer.catalogServers(deployer.hendrix)
if not start_reactor:
return
if emitter:
emitter.message("Working ~ Keep Ursula Online!", color="blue", bold=True)
try:
deployer.run() # <--- Blocking Call (Reactor)
except Exception as e:
self.log.critical(str(e))
if emitter:
emitter.message(f"{e.__class__.__name__} {e}", color="red", bold=True)
raise # Crash :-(
elif start_reactor: # ... without hendrix
reactor.run() # <--- Blocking Call (Reactor)
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def stop(self, halt_reactor: bool = False) -> None:
"""Stop services"""
self._availability_tracker.stop()
if self._learning_task.running:
self.stop_learning_loop()
if not self.federated_only:
self.work_tracker.stop()
if self._arrangement_pruning_task.running:
self._arrangement_pruning_task.stop()
if halt_reactor:
reactor.stop()
|
def stop(self, halt_reactor: bool = False) -> None:
"""Stop services"""
self._availability_sensor.stop()
if self._learning_task.running:
self.stop_learning_loop()
if not self.federated_only:
self.work_tracker.stop()
if self._arrangement_pruning_task.running:
self._arrangement_pruning_task.stop()
if halt_reactor:
reactor.stop()
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def unlock_nucypher_keyring(
emitter, password: str, character_configuration: CharacterConfiguration
):
emitter.message(
f"Decrypting {character_configuration._NAME} keyring...", color="yellow"
)
if character_configuration.dev_mode:
return True # Dev accounts are always unlocked
# NuCypher
try:
character_configuration.attach_keyring()
character_configuration.keyring.unlock(
password=password
) # Takes ~3 seconds, ~1GB Ram
except CryptoError:
raise character_configuration.keyring.AuthenticationFailed
|
def unlock_nucypher_keyring(
emitter, password: str, character_configuration: CharacterConfiguration
):
emitter.message("Decrypting NuCypher keyring...", color="yellow")
if character_configuration.dev_mode:
return True # Dev accounts are always unlocked
# NuCypher
try:
character_configuration.attach_keyring()
character_configuration.keyring.unlock(
password=password
) # Takes ~3 seconds, ~1GB Ram
except CryptoError:
raise character_configuration.keyring.AuthenticationFailed
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def make_cli_character(
character_config,
emitter,
unlock_keyring: bool = True,
teacher_uri: str = None,
min_stake: int = 0,
load_preferred_teachers: bool = True,
**config_args,
):
#
# Pre-Init
#
# Handle Keyring
if unlock_keyring:
unlock_nucypher_keyring(
emitter,
character_configuration=character_config,
password=get_nucypher_password(confirm=False),
)
# Handle Teachers
teacher_nodes = list()
if load_preferred_teachers:
teacher_nodes = load_seednodes(
emitter,
teacher_uris=[teacher_uri] if teacher_uri else None,
min_stake=min_stake,
federated_only=character_config.federated_only,
network_domains=character_config.domains,
network_middleware=character_config.network_middleware,
registry=character_config.registry,
)
#
# Character Init
#
# Produce Character
try:
CHARACTER = character_config(
known_nodes=teacher_nodes,
network_middleware=character_config.network_middleware,
**config_args,
)
except (CryptoError, ValueError):
raise character_config.keyring.AuthenticationFailed(
f"Failed to unlock nucypher keyring. "
"Are you sure you provided the correct password?"
)
#
# Post-Init
#
if CHARACTER.controller is not NO_CONTROL_PROTOCOL:
CHARACTER.controller.emitter = (
emitter # TODO: set it on object creation? Or not set at all?
)
# Federated
if character_config.federated_only:
emitter.message("WARNING: Running in Federated mode", color="yellow")
return CHARACTER
|
def make_cli_character(
character_config,
emitter,
unlock_keyring: bool = True,
teacher_uri: str = None,
min_stake: int = 0,
load_preferred_teachers: bool = True,
**config_args,
):
#
# Pre-Init
#
# Handle Keyring
if unlock_keyring:
unlock_nucypher_keyring(
emitter,
character_configuration=character_config,
password=get_nucypher_password(confirm=False),
)
# Handle Teachers
teacher_nodes = list()
if load_preferred_teachers:
teacher_nodes = load_seednodes(
emitter,
teacher_uris=[teacher_uri] if teacher_uri else None,
min_stake=min_stake,
federated_only=character_config.federated_only,
network_domains=character_config.domains,
network_middleware=character_config.network_middleware,
registry=character_config.registry,
)
#
# Character Init
#
# Produce Character
try:
CHARACTER = character_config(
known_nodes=teacher_nodes,
network_middleware=character_config.network_middleware,
**config_args,
)
except CryptoError:
raise character_config.keyring.AuthenticationFailed(
"Failed to unlock keyring. Are you sure you provided the correct password?"
)
#
# Post-Init
#
if CHARACTER.controller is not NO_CONTROL_PROTOCOL:
CHARACTER.controller.emitter = (
emitter # TODO: set it on object creation? Or not set at all?
)
# Federated
if character_config.federated_only:
emitter.message("WARNING: Running in Federated mode", color="yellow")
return CHARACTER
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def paint_node_status(emitter, ursula, start_time):
ursula.mature() # Just to be sure
# Build Learning status line
learning_status = "Unknown"
if ursula._learning_task.running:
learning_status = "Learning at {}s Intervals".format(
ursula._learning_task.interval
)
elif not ursula._learning_task.running:
learning_status = "Not Learning"
teacher = "Current Teacher ..... No Teacher Connection"
if ursula._current_teacher_node:
teacher = "Current Teacher ..... {}".format(ursula._current_teacher_node)
# Build FleetState status line
fleet_state = build_fleet_state_status(ursula=ursula)
stats = [
"⇀URSULA {}↽".format(ursula.nickname_icon),
"{}".format(ursula),
"Uptime .............. {}".format(maya.now() - start_time),
"Start Time .......... {}".format(start_time.slang_time()),
"Fleet State.......... {}".format(fleet_state),
"Learning Status ..... {}".format(learning_status),
"Learning Round ...... Round #{}".format(ursula._learning_round),
"Operating Mode ...... {}".format(
"Federated" if ursula.federated_only else "Decentralized"
),
"Rest Interface ...... {}".format(ursula.rest_url()),
"Node Storage Type ... {}".format(ursula.node_storage._name.capitalize()),
"Known Nodes ......... {}".format(len(ursula.known_nodes)),
"Work Orders ......... {}".format(len(ursula.work_orders())),
teacher,
]
if not ursula.federated_only:
worker_address = "Worker Address ...... {}".format(ursula.worker_address)
current_period = (
f"Current Period ...... {ursula.staking_agent.get_current_period()}"
)
stats.extend([current_period, worker_address])
if ursula._availability_tracker:
if ursula._availability_tracker.running:
score = "Availability Score .. {} ({} responders)".format(
ursula._availability_tracker.score,
len(ursula._availability_tracker.responders),
)
else:
score = "Availability Score .. Disabled"
stats.append(score)
emitter.echo("\n" + "\n".join(stats) + "\n")
|
def paint_node_status(emitter, ursula, start_time):
ursula.mature() # Just to be sure
# Build Learning status line
learning_status = "Unknown"
if ursula._learning_task.running:
learning_status = "Learning at {}s Intervals".format(
ursula._learning_task.interval
)
elif not ursula._learning_task.running:
learning_status = "Not Learning"
teacher = "Current Teacher ..... No Teacher Connection"
if ursula._current_teacher_node:
teacher = "Current Teacher ..... {}".format(ursula._current_teacher_node)
# Build FleetState status line
fleet_state = build_fleet_state_status(ursula=ursula)
stats = [
"⇀URSULA {}↽".format(ursula.nickname_icon),
"{}".format(ursula),
"Uptime .............. {}".format(maya.now() - start_time),
"Start Time .......... {}".format(start_time.slang_time()),
"Fleet State.......... {}".format(fleet_state),
"Learning Status ..... {}".format(learning_status),
"Learning Round ...... Round #{}".format(ursula._learning_round),
"Operating Mode ...... {}".format(
"Federated" if ursula.federated_only else "Decentralized"
),
"Rest Interface ...... {}".format(ursula.rest_url()),
"Node Storage Type ... {}".format(ursula.node_storage._name.capitalize()),
"Known Nodes ......... {}".format(len(ursula.known_nodes)),
"Work Orders ......... {}".format(len(ursula.work_orders())),
teacher,
]
if not ursula.federated_only:
worker_address = "Worker Address ...... {}".format(ursula.worker_address)
current_period = (
f"Current Period ...... {ursula.staking_agent.get_current_period()}"
)
stats.extend([current_period, worker_address])
if ursula._availability_sensor:
if ursula._availability_sensor.running:
score = "Availability Score .. {} ({} responders)".format(
ursula._availability_sensor.score,
len(ursula._availability_sensor.responders),
)
else:
score = "Availability Score .. Disabled"
stats.append(score)
emitter.echo("\n" + "\n".join(stats) + "\n")
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def connectionMade(self):
self.emitter.echo("\nType 'help' or '?' for help")
self.transport.write(self.prompt)
|
def connectionMade(self):
message = "Attached {}@{}".format(
self.ursula.checksum_address, self.ursula.rest_url()
)
self.emitter.echo(message, color="green")
self.emitter.echo(
"{} | {}".format(self.ursula.nickname_icon, self.ursula.nickname),
color="blue",
bold=True,
)
self.emitter.echo("\nType 'help' or '?' for help")
self.transport.write(self.prompt)
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def get_certificate(
self,
host,
port,
timeout=3,
retry_attempts: int = 3,
retry_rate: int = 2,
current_attempt: int = 0,
):
socket.setdefaulttimeout(timeout) # Set Socket Timeout
try:
self.log.info(f"Fetching seednode {host}:{port} TLS certificate")
seednode_certificate = ssl.get_server_certificate(addr=(host, port))
except socket.timeout:
if current_attempt == retry_attempts:
message = f"No Response from seednode {host}:{port} after {retry_attempts} attempts"
self.log.info(message)
raise ConnectionRefusedError("No response from {}:{}".format(host, port))
self.log.info(
f"No Response from seednode {host}:{port}. Retrying in {retry_rate} seconds..."
)
time.sleep(retry_rate)
return self.get_certificate(
host, port, timeout, retry_attempts, retry_rate, current_attempt + 1
)
except OSError:
raise # TODO: #1835
else:
certificate = x509.load_pem_x509_certificate(
seednode_certificate.encode(), backend=default_backend()
)
return certificate
|
def get_certificate(
self,
host,
port,
timeout=3,
retry_attempts: int = 3,
retry_rate: int = 2,
current_attempt: int = 0,
):
socket.setdefaulttimeout(timeout) # Set Socket Timeout
try:
self.log.info(f"Fetching seednode {host}:{port} TLS certificate")
seednode_certificate = ssl.get_server_certificate(addr=(host, port))
except socket.timeout:
if current_attempt == retry_attempts:
message = f"No Response from seednode {host}:{port} after {retry_attempts} attempts"
self.log.info(message)
raise ConnectionRefusedError("No response from {}:{}".format(host, port))
self.log.info(
f"No Response from seednode {host}:{port}. Retrying in {retry_rate} seconds..."
)
time.sleep(retry_rate)
return self.get_certificate(
host, port, timeout, retry_attempts, retry_rate, current_attempt + 1
)
else:
certificate = x509.load_pem_x509_certificate(
seednode_certificate.encode(), backend=default_backend()
)
return certificate
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def check_rest_availability(self, initiator, responder):
response = self.client.post(
node_or_sprout=responder,
data=bytes(initiator),
path="ping",
timeout=6, # Two round trips are expected
)
return response
|
def check_rest_availability(
self, requesting_ursula, responding_ursula, certificate_filepath=None
):
response = self.client.post(
node_or_sprout=responding_ursula,
data=bytes(requesting_ursula),
path="ping",
timeout=4, # Two round trips are expected
certificate_filepath=certificate_filepath,
)
return response
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def stop_learning_loop(self, reason=None):
"""
Only for tests at this point. Maybe some day for graceful shutdowns.
"""
if self._learning_task.running:
self._learning_task.stop()
|
def stop_learning_loop(self, reason=None):
"""
Only for tests at this point. Maybe some day for graceful shutdowns.
"""
self._learning_task.stop()
|
https://github.com/nucypher/nucypher/issues/1833
|
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Unhandled error in Deferred:
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: Traceback (most recent call last):
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1283, in run
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.mainLoop()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 1292, in mainLoop
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.runUntilCurrent()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/base.py", line 913, in runUntilCurrent
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: call.func(*call.args, **call.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/task.py", line 239, in __call__
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: d = defer.maybeDeferred(self.f, *self.a, **self.kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: --- <exception caught here> ---
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 654, in _runCallbacks
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: current.result = callback(current.result, *args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 89, in handle_measurement_errors
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: failure.raiseException()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/python/failure.py", line 488, in raiseException
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise self.value.with_traceback(self.tb)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher-venv/lib/python3.6/site-packages/twisted/internet/defer.py", line 151, in maybeDeferred
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: result = f(*args, **kw)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 138, in maintain
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: self.measure()
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/sensors.py", line 191, in measure
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 248, in check_rest_availability
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: certificate_filepath=certificate_filepath)
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: File "/home/ubuntu/nucypher/nucypher/network/middleware.py", line 114, in method_wrapper
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: raise ValueError("Don't try to pass a node with a certificate_filepath while also passing a"
Apr 02 17:02:28 ip-172-31-19-8 nucypher[29582]: builtins.ValueError: Don't try to pass a node with a certificate_filepath while also passing a different certificate_filepath. What do you even expect?
Apr 03 00:02:29 ip-172-31-19-8 nucypher[29582]: Broadcasting CONFIRMACTIVITY Transaction (264369 gwei @ 3000000000)...
|
ValueError
|
def __init__(
self,
checksum_address: str,
is_transacting: bool = True,
client_password: str = None,
*args,
**kwargs,
):
super().__init__(checksum_address=checksum_address, *args, **kwargs)
self.worklock_agent = ContractAgency.get_agent(
WorkLockAgent, registry=self.registry
)
self.staking_agent = ContractAgency.get_agent(
StakingEscrowAgent, registry=self.registry
)
self.economics = EconomicsFactory.get_economics(registry=self.registry)
if is_transacting:
self.transacting_power = TransactingPower(
password=client_password, account=checksum_address
)
self.transacting_power.activate()
|
def __init__(self, checksum_address: str, client_password: str = None, *args, **kwargs):
super().__init__(checksum_address=checksum_address, *args, **kwargs)
self.worklock_agent = ContractAgency.get_agent(
WorkLockAgent, registry=self.registry
)
self.staking_agent = ContractAgency.get_agent(
StakingEscrowAgent, registry=self.registry
)
self.economics = EconomicsFactory.get_economics(registry=self.registry)
if client_password:
self.transacting_power = TransactingPower(
password=client_password, account=checksum_address
)
self.transacting_power.activate()
|
https://github.com/nucypher/nucypher/issues/1721
|
Traceback (most recent call last):
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/bin/nucypher", line 11, in <module>
load_entry_point('nucypher', 'console_scripts', 'nucypher')()
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/david/.local/share/virtualenvs/nucypher-cjt3uwkK/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/options.py", line 139, in wrapper
return func(**kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/cli/commands/worklock.py", line 139, in bid
receipt = bidder.place_bid(value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/actors.py", line 1557, in place_bid
receipt = self.worklock_agent.bid(checksum_address=self.checksum_address, value=value)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/agents.py", line 993, in bid
payload={'value': value})
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/decorators.py", line 71, in wrapped
return func(*args, **kwargs)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 489, in send_transaction
confirmations=confirmations)
File "/Users/david/NuCypher/dev/nucypher/nucypher/blockchain/eth/interfaces.py", line 408, in sign_and_broadcast_transaction
raise self.InterfaceError(str(READ_ONLY_INTERFACE))
nucypher.blockchain.eth.interfaces.InterfaceError: READ_ONLY_INTERFACE
|
nucypher.blockchain.eth.interfaces.InterfaceError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.